// SPDX-License-Identifier: MIT
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Based on amdgpu_mn, which bears the following notice:
 *
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

#ifdef CONFIG_MMU_NOTIFIER

/**
 * i915_gem_userptr_invalidate - callback to notify about mm change
 *
 * @mni: the interval notifier for the range (mm) about to be updated
 * @range: details on the invalidation
 * @cur_seq: value to pass to mmu_interval_set_seq()
 *
 * Bump the notifier sequence so that any cached pages for this range are
 * considered stale and are re-pinned before the next GPU submission.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
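	/*
	 * No need to block here: updating the sequence number is enough to
	 * make in-flight submissions notice the invalidation through
	 * mmu_interval_read_retry() and restart with freshly pinned pages.
	 */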
	mmu_interval_set_seq(mni, cur_seq);
	return true;
}

static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
	return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
					    obj->userptr.ptr, obj->base.size,
					    &i915_gem_userptr_notifier_ops);
}

static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
	struct page **pvec = NULL;

	assert_object_held_shared(obj);

	if (!--obj->userptr.page_ref) {
		pvec = obj->userptr.pvec;
		obj->userptr.pvec = NULL;
	}
	GEM_BUG_ON(obj->userptr.page_ref < 0);

	if (pvec) {
		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(pvec, num_pages);
		kvfree(pvec);
	}
}

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
	struct sg_table *st;
	struct page **pvec;
	unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */
	int ret;

	if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))
		return -E2BIG;

	num_pages = obj->base.size >> PAGE_SHIFT;
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

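	/*
	 * The pages were pinned ahead of time by
	 * i915_gem_object_userptr_submit_init(); if the reference is gone,
	 * an mmu notifier invalidation raced us and the caller must retry.
	 */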
	if (!obj->userptr.page_ref) {
		ret = -EAGAIN;
		goto err_free;
	}

	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

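		/*
		 * DMA mapping can fail when the IOMMU or swiotlb cannot
		 * cope with large segments, so fall back to PAGE_SIZE
		 * segments and try once more.
		 */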
		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu notifier can be invoked for a
			 * migrate_folio that is already holding the lock
			 * on the folio. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively
			 * trying to lock the page. We avoid that deadlock
			 * with a trylock_page() and in exchange we risk
			 * missing some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}

static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		return -EBUSY;

	assert_object_held(obj);

	pages = __i915_gem_object_unset_pages(obj);
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_userptr_put_pages(obj, pages);

	return err;
}

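/*
 * Pin the backing pages for the userptr range ahead of a submission.
 * Must be paired with i915_gem_object_userptr_submit_done() once the
 * request using the pages has been published, so that a concurrent
 * mmu notifier invalidation is caught and turned into -EAGAIN.
 */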
int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

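	/*
	 * Sample the notifier sequence before pinning anything: should an
	 * invalidation land between here and the mmu_interval_read_retry()
	 * below, the freshly pinned pages are thrown away and we restart.
	 */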
	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	pinned = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

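	/*
	 * If no pages are installed yet, validate against the sequence
	 * sampled above; otherwise validate against the sequence under
	 * which the current pvec was installed.
	 */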
	if (mmu_interval_read_retry(&obj->userptr.notifier,
		!obj->userptr.page_ref ? notifier_seq :
		obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

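	/*
	 * Temporarily elevate page_ref so that get_pages sees a live pvec;
	 * the reference is dropped again below, leaving ownership with the
	 * object's page state until put_pages.
	 */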
	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}

int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    obj->userptr.notifier_seq)) {
		/* We collided with the mmu notifier, need to retry */
		return -EAGAIN;
	}

	return 0;
}

int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_userptr_submit_init(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (!err) {
		/*
		 * Since we only check validity, not use the pages,
		 * it doesn't matter if we collide with the mmu notifier,
		 * and -EAGAIN handling is not required.
		 */
		err = i915_gem_object_pin_pages(obj);
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_unlock(obj);
	}

	return err;
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	if (!obj->userptr.notifier.mm)
		return;

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};

#endif

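/*
 * Walk the VMAs covering [addr, addr + len) and check that the entire
 * range is backed by ordinary struct-page memory: no holes and no
 * PFN/mixed mappings.
 */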
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	VMA_ITERATOR(vmi, mm, addr);
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, end) {
		/* Check for holes, note that we also update the addr below */
		if (vma->vm_start > addr)
			break;

		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;

		addr = vma->vm_end;
	}
	mmap_read_unlock(mm);

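	/*
	 * A leftover vma means the walk stopped early on an unsuitable
	 * mapping or a hole; addr short of end means the range ended in
	 * a hole.
	 */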
	if (vma || addr < end)
		return -EFAULT;
	return 0;
}

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
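/*
 * A minimal userspace sketch (assuming a libdrm file descriptor and an
 * already page-aligned allocation; everything except the uapi struct and
 * ioctl number is illustrative):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,
 *		.user_size = size,
 *		.flags = 0,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		handle = arg.handle;
 */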
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) {
		/*
		 * We cannot support coherent userptr objects on hardware
		 * that has neither LLC nor working snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

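	/*
	 * The unsynchronized (no mmu notifier) mode is no longer supported:
	 * the flag is still recognised above so that userspace sees -ENODEV
	 * here rather than -EINVAL for an unknown flag.
	 */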
	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!to_gt(i915)->vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
		/*
		 * Check that the range pointed to represents real struct
		 * pages and not iomappings (at this moment in time!)
		 */
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (!obj)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/*
	 * And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}