/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	long err = 0;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
		return 0;

	/*
	 * If possible, avoid waiting for GPU with mmap_lock
	 * held. We only do this if the fault allows retry and this
	 * is the first attempt.
	 */
	if (fault_flag_allow_retry_first(vmf->flags)) {
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;

		ttm_bo_get(bo);
		mmap_read_unlock(vmf->vma->vm_mm);
		(void)dma_resv_wait_timeout(bo->base.resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		return VM_FAULT_RETRY;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
				    MAX_SCHEDULE_TIMEOUT);
	if (unlikely(err < 0)) {
		return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
	}

	return 0;
}

static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_device *bdev = bo->bdev;

	if (bdev->funcs->io_mem_pfn)
		return bdev->funcs->io_mem_pfn(bo, page_offset);

	return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
}

/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
 * during long waits, and after the wait the callback will be restarted. This
 * is to allow other threads using the same virtual memory space concurrent
 * access to map() and unmap() completely unrelated buffer objects. TTM buffer
 * object reservations sometimes wait for GPU and should therefore be
 * considered long waits. This function reserves the buffer object interruptibly,
 * taking this into account. Starvation is avoided by the vm system not
 * allowing too many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers.
 *
 * Return:
 * 0 on success and the bo was reserved.
 * VM_FAULT_RETRY if blocking wait.
 * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf)
{
	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_lock and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
		/*
		 * If the fault allows retry and this is the first
		 * fault attempt, we try to release the mmap_lock
		 * before waiting
		 */
		if (fault_flag_allow_retry_first(vmf->flags)) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_get(bo);
				mmap_read_unlock(vmf->vma->vm_mm);
				if (!dma_resv_lock_interruptible(bo->base.resv,
								 NULL))
					dma_resv_unlock(bo->base.resv);
				ttm_bo_put(bo);
			}

			return VM_FAULT_RETRY;
		}

		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
			return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
			dma_resv_unlock(bo->base.resv);
			return VM_FAULT_SIGBUS;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
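
/*
 * Usage sketch (the driver "foo" and its foo_* names below are hypothetical,
 * not TTM API): a customized fault() handler typically starts with
 * ttm_bo_vm_reserve(). On a zero return the bo is reserved and the handler
 * must drop the reservation itself, except when ttm_bo_vm_fault_reserved()
 * requests a retry without FAULT_FLAG_RETRY_NOWAIT, in which case the
 * reservation has already been dropped on its behalf:
 *
 *	static vm_fault_t foo_bo_vm_fault(struct vm_fault *vmf)
 *	{
 *		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 *		vm_fault_t ret;
 *
 *		ret = ttm_bo_vm_reserve(bo, vmf);
 *		if (ret)
 *			return ret;
 *
 *		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 *					       TTM_BO_VM_NUM_PREFAULT);
 *		if (ret == VM_FAULT_RETRY &&
 *		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 *			return ret;
 *
 *		dma_resv_unlock(bo->base.resv);
 *		return ret;
 *	}
 */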

/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvise settings and the size of the GPU object
 * backed by the memory.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a return code
 * instructing the caller to retry the page access.
 *
 * Return:
 * VM_FAULT_NOPAGE on success or pending signal
 * VM_FAULT_SIGBUS on unspecified error
 * VM_FAULT_OOM on out-of-memory
 * VM_FAULT_RETRY if retryable wait
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct ttm_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	pgoff_t i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0))
		return ret;

	err = ttm_mem_io_reserve(bdev, bo->resource);
	if (unlikely(err != 0))
		return VM_FAULT_SIGBUS;

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->base.vma_node);

	if (unlikely(page_offset >= PFN_UP(bo->base.size)))
		return VM_FAULT_SIGBUS;

	prot = ttm_io_prot(bo, bo->resource, prot);
	if (!bo->resource->bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = false,
			.force_alloc = true
		};

		ttm = bo->ttm;
		err = ttm_tt_populate(bdev, bo->ttm, &ctx);
		if (err) {
			if (err == -EINTR || err == -ERESTARTSYS ||
			    err == -EAGAIN)
				return VM_FAULT_NOPAGE;

			pr_debug("TTM fault hit %pe.\n", ERR_PTR(err));
			return VM_FAULT_SIGBUS;
		}
	} else {
		/* Iomem should not be marked encrypted */
		prot = pgprot_decrypted(prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < num_prefault; ++i) {
		if (bo->resource->bus.is_iomem) {
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				return VM_FAULT_OOM;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		/*
		 * Note that the value of @prot at this point may differ from
		 * the value of @vma->vm_page_prot in the caching- and
		 * encryption bits. This is because the exact location of the
		 * data may not be known at mmap() time and may also change
		 * at arbitrary times while the data is mmap'ed.
		 * See vmf_insert_pfn_prot() for a discussion.
		 */
		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

		/* Never error on prefaulted PTEs */
		if (unlikely((ret & VM_FAULT_ERROR))) {
			if (i == 0)
				return VM_FAULT_NOPAGE;
			else
				break;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
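
/*
 * As the kernel-doc above notes, @num_prefault is a caller policy. A
 * hypothetical driver that tracks dirty pages at PTE granularity (the
 * foo_* name below is illustrative, not TTM API) would keep its fault
 * handler identical to ttm_bo_vm_fault() below, but insert only the
 * faulting page while dirty tracking is active:
 *
 *	num_prefault = foo_bo_is_dirty_tracking(bo) ?
 *		1 : TTM_BO_VM_NUM_PREFAULT;
 *	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 *				       num_prefault);
 */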

static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
{
	struct page *dummy_page = (struct page *)res;

	__free_page(dummy_page);
}

vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address;
	unsigned long pfn;
	struct page *page;

	/* Allocate a new dummy page to map all of the VA range in this VMA to it */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;

	/* Set the page to be freed using drmm release action */
	if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
		return VM_FAULT_OOM;

	pfn = page_to_pfn(page);

	/* Prefault the entire VMA range right away to avoid further faults */
	for (address = vma->vm_start; address < vma->vm_end;
	     address += PAGE_SIZE)
		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_dummy_page);

vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vma->vm_page_prot;
	if (drm_dev_enter(ddev, &idx)) {
		ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault);

void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);

static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;
	unsigned long offset = (addr) - vma->vm_start +
		((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
		 << PAGE_SHIFT);
	int ret;

	if (len < 1 || (offset + len) > bo->base.size)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->resource->mem_type) {
	case TTM_PL_SYSTEM:
		fallthrough;
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->funcs->access_memory)
			ret = bo->bdev->funcs->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_access);
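
/*
 * The vm callbacks above are exported so that a driver which only needs to
 * customize fault() (for example to take additional driver locks) can reuse
 * the rest unchanged. A minimal sketch, with "foo" as a hypothetical driver
 * and foo_bo_vm_fault() as its own handler:
 *
 *	static const struct vm_operations_struct foo_ttm_vm_ops = {
 *		.fault = foo_bo_vm_fault,
 *		.open = ttm_bo_vm_open,
 *		.close = ttm_bo_vm_close,
 *		.access = ttm_bo_vm_access,
 *	};
 */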

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};

/**
 * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
 *
 * @vma: vma as input from the fbdev mmap method.
 * @bo: The bo backing the address space.
 *
 * Maps a buffer object.
 */
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	/* Enforce no COW, since COW mappings would have really strange behavior here. */
	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	ttm_bo_get(bo);

	/*
	 * Drivers may want to override the vm_ops field. Otherwise we
	 * use TTM's default callbacks.
	 */
	if (!vma->vm_ops)
		vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
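
/*
 * A typical caller is the GEM mmap glue of a driver whose GEM objects embed
 * a ttm_buffer_object. A minimal sketch, assuming a hypothetical driver
 * "foo" with a foo_gem_to_bo() upcast helper (reference handling against
 * the GEM layer is driver specific and omitted here):
 *
 *	static int foo_gem_object_mmap(struct drm_gem_object *obj,
 *				       struct vm_area_struct *vma)
 *	{
 *		struct ttm_buffer_object *bo = foo_gem_to_bo(obj);
 *
 *		return ttm_bo_mmap_obj(vma, bo);
 *	}
 */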