/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#define TTM_BO_VM_NUM_PREFAULT 16

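/*
 * The device keeps its buffer objects in a red-black tree ordered by
 * the starting page of each object's VM node, so that a mmap offset
 * can be translated back to its ttm_buffer_object.
 */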
RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{

	if (a->vm_node->start < b->vm_node->start) {
		return (-1);
	} else if (a->vm_node->start > b->vm_node->start) {
		return (1);
	} else {
		return (0);
	}
}

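/*
 * Return the buffer object whose VM node fully covers the page range
 * [page_start, page_start + num_pages), or NULL if there is none.
 */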
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	RB_FOREACH(bo, ttm_bo_device_buffer_objects, &bdev->addr_space_rb) {
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		}
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

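/*
 * cdev pager fault handler.  Called with the VM object locked; it
 * reserves the buffer object, waits out any pipelined move, and
 * returns the vm_page backing the faulting offset.
 */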
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			kern_yield(0);
			goto reserve;
		}
	}

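	/*
	 * Let the driver fix up the buffer's state (e.g. migrate it
	 * into a mappable region) before we look up pages.  Transient
	 * errors restart the reservation; anything else fails the
	 * fault.
	 */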
	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTART:
		case -EINTR:
			kern_yield(0);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	mtx_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * In the Linux original, the caching bits of vma->vm_page_prot
	 * were adjusted here under the bo reservation.  In this port
	 * the equivalent is done per page with pmap_page_set_memattr()
	 * below, again relying on the reservation to exclude other
	 * writers of the caching attributes.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, the most common usage. */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

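	/*
	 * Resolve the faulting offset to a vm_page: for iomem
	 * placements, look up the fictitious page backing the bus
	 * address; otherwise take the ttm page populated above and
	 * set its caching attribute to match the placement.
	 */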
	if (bo->mem.bus.is_iomem) {
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
		    bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

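	/*
	 * Re-take the object lock and install the page.  If someone
	 * else holds the page busy, back out completely and restart
	 * the fault, since the object lock was dropped above.
	 */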
	VM_OBJECT_WLOCK(vm_obj);
	if ((m->flags & VPO_BUSY) != 0) {
		vm_page_sleep(m, "ttmpbs");
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	vm_page_busy(m);

	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}

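	/*
	 * The caller expects the object lock held on return; the error
	 * paths below re-take it before unwinding through the common
	 * exit labels.
	 */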
out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * We don't acquire a reference on bo->kref here, because it was
	 * already done in ttm_bo_mmap_single().
	 */

	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

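/*
 * Pager operations for the OBJT_MGTDEVICE object created in
 * ttm_bo_mmap_single(); the destructor drops the bo reference that was
 * transferred to the object's handle there.
 */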
static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

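/*
 * Back an mmap(2) request with a device pager object wrapping the
 * buffer object that owns the requested offset/size range.
 */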
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	rw_wlock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		refcount_acquire(&bo->kref);
	rw_wunlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printf("[TTM] Could not find buffer object to map\n");
		return (EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = EPERM;
		goto out_unref;
	}
	ret = -driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);
	if (vm_obj == NULL) {
		ret = EINVAL;
		goto out_unref;
	}
	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return (0);
out_unref:
	ttm_bo_unref(&bo);
	return (ret);
}
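
/*
 * Typical use (illustrative sketch, not part of this file): a driver's
 * d_mmap_single callback forwards to ttm_bo_mmap_single() with its TTM
 * bo device.  The helper and field names below are assumptions modeled
 * on the in-tree drm2 drivers:
 *
 *	static int
 *	mydrv_mmap_single(struct cdev *kdev, vm_ooffset_t *offset,
 *	    vm_size_t size, struct vm_object **obj, int nprot)
 *	{
 *		struct drm_device *dev = drm_get_device_from_kdev(kdev);
 *		struct mydrv_device *mdev = dev->dev_private;
 *
 *		return (ttm_bo_mmap_single(&mdev->bdev, offset, size,
 *		    obj, nprot));
 *	}
 */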
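/*
 * Unported Linux fbdev mmap and read/write helpers, retained under
 * #if 0 for reference.
 */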
#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
#endif
499