/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

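/*
 * Number of PTEs to set up speculatively around the faulting address,
 * to amortize the per-fault overhead over neighbouring pages.
 */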
#define TTM_BO_VM_NUM_PREFAULT 16

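/*
 * ttm_bo_vm_lookup_rb - look up the buffer object backing a device
 * address-space range.
 *
 * Descends bdev->addr_space_rb to the bo with the highest start offset
 * not exceeding @page_start, then verifies that the bo covers the whole
 * @num_pages range. Returns the bo on success, NULL otherwise. Called
 * with bdev->vm_lock held; callers take their own reference before
 * dropping the lock.
 */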
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

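/*
 * ttm_bo_vm_fault - .fault handler for TTM-backed vmas.
 *
 * Translates the faulting address to a pfn, either in the linear
 * aperture (iomem) or in the ttm page array, and inserts up to
 * TTM_BO_VM_NUM_PREFAULT PTEs with vm_insert_mixed(). Returns
 * VM_FAULT_NOPAGE on success, since the PTEs are set up here rather
 * than by the core vm.
 */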
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	bool is_iomem;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bo->lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bo->lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTART) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bo->lock);

	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	is_iomem = (bus_size != 0);

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}
	/*
	 * Strictly speaking, we're not allowed to modify vma->vm_page_prot
	 * here, since mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and consider
	 * those bits protected by the bo reservation, as we should be the
	 * only writers. The only likely readers of these bits are
	 * vm_insert_mixed() and fork().
	 *
	 * TODO: Add a list of vmas to the bo, and change vma->vm_page_prot
	 * when the object changes caching policy, with the correct locks
	 * held.
	 */

	if (is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {

		if (is_iomem)
			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
			    page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Either somebody beat us to this PTE, we prefaulted
		 * into an already populated PTE, or a prefaulting
		 * error occurred.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}

out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}

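/*
 * The open / close callbacks keep the bo refcount balanced across vma
 * duplication and teardown: every vma that comes to reference the bo
 * (mmap, fork, split) takes a reference, and every vma being unmapped
 * drops one.
 */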
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

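/**
 * ttm_bo_mmap - mmap entry point for TTM-managed device files.
 *
 * @filp: file pointer of the mapping caller, passed on to the driver's
 * verify_access callback.
 * @vma: vma describing the mapping to set up.
 * @bdev: the ttm_bo_device whose address space backs the mapping.
 *
 * Looks up the buffer object backing vma->vm_pgoff, asks the driver to
 * verify that @filp may access it, and wires up @vma to fault pages in
 * through ttm_bo_vm_ops. The reference taken on the bo is handed over
 * to vma->vm_private_data and dropped by ttm_bo_vm_close().
 *
 * A driver would typically call this from its file_operations::mmap
 * handler; a minimal sketch (the foo_* names are hypothetical):
 *
 *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct foo_device *fdev = filp->private_data;
 *
 *		return ttm_bo_mmap(filp, vma, &fdev->bdev);
 *	}
 */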
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

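/**
 * ttm_fbdev_mmap - mmap a buffer object directly, bypassing the
 * device address-space lookup.
 *
 * @vma: vma describing the mapping; vm_pgoff must be zero.
 * @bo: the buffer object to map.
 *
 * Intended for fbdev-like mappings of a single object. Takes its own
 * reference on @bo, dropped by ttm_bo_vm_close() when the vma goes
 * away.
 */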
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

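/**
 * ttm_bo_io - read / write helper for TTM device files.
 *
 * Translates *@f_pos into a page range of the backing buffer object,
 * kmaps that range and copies between it and user space (@wbuf for
 * writes, @rbuf for reads). Returns the number of bytes copied, which
 * may be less than @count when the range extends past the end of the
 * object, or a negative error code.
 */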
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	/*
	 * Restrict the kmap to the clamped io range, and make kmap_end
	 * bo-relative like kmap_offset (*f_pos includes the
	 * vm_node->start offset, so subtract it back out).
	 */
	kmap_end = ((*f_pos + io_size - 1) >> PAGE_SHIFT) - bo->vm_node->start;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -ERESTART:
		ret = -EINTR;
		goto out_unref;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	/* copy_{to,from}_user() return the number of bytes not copied. */
	if (unlikely(ret != 0))
		return -EFAULT;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

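/**
 * ttm_bo_fbdev_io - read / write helper operating directly on a
 * buffer object.
 *
 * Like ttm_bo_io(), but takes the bo itself and treats *@f_pos as an
 * offset into the object, so no device address-space lookup is needed.
 * The caller retains its reference to @bo throughout.
 */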
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	/* Restrict the kmap to the clamped io range. */
	kmap_end = (*f_pos + io_size - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -ERESTART:
		return -EINTR;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);

	/* copy_{to,from}_user() return the number of bytes not copied. */
	if (unlikely(ret != 0))
		return -EFAULT;

	*f_pos += io_size;

	return io_size;
}
454