/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

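/*
 * Retained from the Linux version of this file, where it bounds the
 * number of pages prefaulted around the faulting address; the FreeBSD
 * fault handler below maps a single page per fault and does not
 * currently use this constant.
 */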
#define TTM_BO_VM_NUM_PREFAULT 16

RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

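/*
 * Order buffer objects in the per-device RB tree by the starting page
 * offset of their VM nodes.
 */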
int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{

	if (a->vm_node->start < b->vm_node->start) {
		return (-1);
	} else if (a->vm_node->start > b->vm_node->start) {
		return (1);
	} else {
		return (0);
	}
}

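/*
 * Return the buffer object whose VM node covers the whole page range
 * [page_start, page_start + num_pages), or NULL.  The in-order walk
 * remembers the last object starting at or below page_start, then
 * checks that this candidate is large enough to span the range.
 */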
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	RB_FOREACH(bo, ttm_bo_device_buffer_objects, &bdev->addr_space_rb) {
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		}
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

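/*
 * Page fault handler for the OBJT_MGTDEVICE pager.  Entered and exited
 * with the VM object write-locked.  On success the page backing
 * "offset" is returned in *mres, exclusive-busied and fully valid.
 */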
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

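	/*
	 * Reservation can fail with -EBUSY while another thread holds
	 * the buffer; yield and retry until the reservation succeeds.
	 */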
reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			kern_yield(0);
			goto reserve;
		}
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTART:
		case -EINTR:
			kern_yield(0);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	mtx_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process,
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, the most common usage. */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
		    bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

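	/*
	 * Reacquire the object lock and insert the page.  If the page
	 * is busy, or if the insertion fails for lack of memory, drop
	 * the reservation and restart the whole fault.
	 */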
	VM_OBJECT_WLOCK(vm_obj);
	if (vm_page_busied(m)) {
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(vm_obj);
		vm_page_busy_sleep(m, "ttmpbs");
		VM_OBJECT_WLOCK(vm_obj);
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_WAIT;
			VM_OBJECT_WLOCK(vm_obj);
			ttm_mem_io_unlock(man);
			ttm_bo_unreserve(bo);
			goto retry;
		}
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	vm_page_xbusy(m);

	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks, for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */

	*color = 0;
	return (0);
}

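/*
 * Pager destructor: releases the buffer object reference that
 * ttm_bo_mmap_single() transferred to the VM object's handle.
 */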
static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

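/*
 * mmap() entry point: look up the buffer object backing the requested
 * device offset, check access with the driver, and wrap the object in
 * an OBJT_MGTDEVICE VM object served by ttm_pager_ops.
 */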
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	rw_wlock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		refcount_acquire(&bo->kref);
	rw_wunlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printf("[TTM] Could not find buffer object to map\n");
		return (EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = EPERM;
		goto out_unref;
	}
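	/*
	 * verify_access() returns a Linux-style negative errno;
	 * negate it into the positive errno convention used here.
	 */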
	ret = -driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);
	if (vm_obj == NULL) {
		ret = EINVAL;
		goto out_unref;
	}
	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

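/*
 * Tear down the cdev pager mappings of a buffer object: remove every
 * resident page from the VM object, then drop the pager reference.
 */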
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_WLOCK(vm_obj);
retry:
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup(vm_obj, i);
		if (m == NULL)
			continue;
		if (vm_page_sleep_if_busy(m, "ttm_unm"))
			goto retry;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}

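/*
 * The Linux read/write (ttm_bo_io) and fbdev mmap helpers below are
 * retained for reference but have not been ported; they are compiled
 * out.
 */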
#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
#endif