/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 *
 * Based on amdgpu_mn, which bears the following notice:
 *
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

#ifdef CONFIG_MMU_NOTIFIER

/**
 * i915_gem_userptr_invalidate - callback to notify about mm change
 *
 * @mni: the range (mm) is about to update
 * @range: details on the invalidation
 * @cur_seq: Value to pass to mmu_interval_set_seq()
 *
 * Bump the notifier sequence to mark the currently cached pages for this
 * range as stale; users of the range observe the change through
 * mmu_interval_read_retry() and re-acquire the pages.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
	mmu_interval_set_seq(mni, cur_seq);
	return true;
}

static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};

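/*
 * Register an interval notifier covering the userptr range, so that any
 * change to the backing mm (unmap, migration, ...) bumps the notifier
 * sequence and forces the pinned pages to be re-acquired.
 */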
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
	return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
					    obj->userptr.ptr, obj->base.size,
					    &i915_gem_userptr_notifier_ops);
}

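/*
 * Drop one reference on the pinned page array; the final reference unpins
 * and frees the pages. Must be called with the object lock held (shared).
 */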
static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
	struct page **pvec = NULL;

	assert_object_held_shared(obj);

	if (!--obj->userptr.page_ref) {
		pvec = obj->userptr.pvec;
		obj->userptr.pvec = NULL;
	}
	GEM_BUG_ON(obj->userptr.page_ref < 0);

	if (pvec) {
		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(pvec, num_pages);
		kvfree(pvec);
	}
}

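/*
 * Build a scatterlist for the page array pinned by submit_init(). If
 * preparing the pages for the GTT fails with large segments, retry with
 * PAGE_SIZE segments before giving up. Returns -EAGAIN if no page array
 * is currently pinned.
 */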
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
	struct sg_table *st;
	struct page **pvec;
	unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */
	int ret;

	if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))
		return -E2BIG;

	num_pages = obj->base.size >> PAGE_SHIFT;
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (!obj->userptr.page_ref) {
		ret = -EAGAIN;
		goto err_free;
	}

	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}

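/*
 * Tear down the scatterlist built by i915_gem_userptr_get_pages(): mark
 * the pages dirty and accessed as appropriate, then drop the page-array
 * reference taken in get_pages().
 */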
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_folio that is already holding the lock
			 * on the folio. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively try
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}

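/*
 * Evict the object from the GTT and release its current pages, so that a
 * fresh page array can be pinned after the notifier has invalidated the
 * old one.
 */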
static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		return -EBUSY;

	assert_object_held(obj);

	pages = __i915_gem_object_unset_pages(obj);
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_userptr_put_pages(obj, pages);

	return err;
}

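/*
 * Pin the userptr pages ahead of a submission: sample the notifier
 * sequence, pin the pages with pin_user_pages_fast(), then re-check the
 * sequence under the object lock. Returns -EAGAIN if the mm changed in
 * between, in which case the caller must retry.
 */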
int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	pinned = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    !obj->userptr.page_ref ? notifier_seq :
				    obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}

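/*
 * Final check after submission setup: if the notifier raced with us since
 * submit_init(), report -EAGAIN so the caller restarts the sequence.
 */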
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    obj->userptr.notifier_seq)) {
		/* We collided with the mmu notifier, need to retry */

		return -EAGAIN;
	}

	return 0;
}

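/*
 * Check that the userptr range can actually be pinned, by performing a
 * full pin/unpin cycle; the pages themselves are not used.
 */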
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_userptr_submit_init(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (!err) {
		/*
		 * Since we only check validity, not use the pages,
		 * it doesn't matter if we collide with the mmu notifier,
		 * and -EAGAIN handling is not required.
		 */
		err = i915_gem_object_pin_pages(obj);
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_unlock(obj);
	}

	return err;
}

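/*
 * Object release hook: detach the interval notifier from the mm, if one
 * was ever registered.
 */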
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	if (!obj->userptr.notifier.mm)
		return;

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};

#endif

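/*
 * Walk the VMAs covering [addr, addr + len) and fail with -EFAULT if the
 * range contains a hole or maps IO space (VM_PFNMAP / VM_MIXEDMAP).
 */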
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	VMA_ITERATOR(vmi, mm, addr);
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, end) {
		/* Check for holes, note that we also update the addr below */
		if (vma->vm_start > addr)
			break;

		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;

		addr = vma->vm_end;
	}
	mmap_read_unlock(mm);

	if (vma || addr < end)
		return -EFAULT;
	return 0;
}

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!to_gt(i915)->vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
		/*
		 * Check that the range pointed to represents real struct
		 * pages and not iomappings (at this moment in time!)
		 */
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}