/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

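/*
 * "Phys" GEM objects are backed by one physically contiguous DMA
 * allocation rather than by individual shmem pages. This file provides
 * the backing-store hooks for such objects and the one-way conversion
 * from shmem backing (see i915_gem_object_attach_phys() below).
 */
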
#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_object_frontbuffer.h"
#include "i915_gem_region.h"
#include "i915_gem_tiling.h"
#include "i915_scatterlist.h"

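/*
 * Replace the object's shmem backing store with the contiguous DMA
 * allocation: allocate one coherent chunk, copy and flush every shmem
 * page into it, then publish the chunk as a single-entry sg_table.
 */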
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	/* Contiguous chunk, with a single scatterlist element */
	if (overflows_type(obj->base.size, sg->length))
		return -E2BIG;

	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(obj->base.dev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	/* Stash the kernel vaddr in the page link; sg_page() recovers it */
	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

	/* Copy and flush each shmem page into the contiguous chunk */
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_sg;

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		dst += PAGE_SIZE;
	}

	intel_gt_chipset_flush(to_gt(i915));

	/* We're no longer struct page backed */
	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
	__i915_gem_object_set_pages(obj, st);

	return 0;

err_sg:
	/* Don't leak the scatterlist allocated by sg_alloc_table() */
	sg_free_table(st);
err_st:
	kfree(st);
err_pci:
	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}

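/*
 * Tear down the contiguous allocation. If the object is dirty, first
 * write the contents back, page by page, to the shmem filp so the data
 * survives the release.
 */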
void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}

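/*
 * pwrite fast path for phys objects: copy from userspace straight into
 * the coherent kernel mapping, then clflush and flush the chipset so
 * the GPU observes the new contents.
 */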
int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args)
{
	/* Byte-addressed: cast before adding the user's byte offset */
	void *vaddr = (void *)sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(to_gt(i915));

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	return 0;
}

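/*
 * pread fast path for phys objects: clflush first so the CPU reads what
 * the GPU last wrote, then copy out to userspace.
 */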
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args)
{
	/* Byte-addressed: cast before adding the user's byte offset */
	void *vaddr = (void *)sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	drm_clflush_virt_range(vaddr, args->size);
	if (copy_to_user(user_data, vaddr, args->size))
		return -EFAULT;

	return 0;
}

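/*
 * One-way transition from shmem to phys backing: steal the shmem pages,
 * install the contiguous allocation in their place and perma-pin it. On
 * failure the original shmem pages are restored untouched.
 */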
static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	pages = __i915_gem_object_unset_pages(obj);

	err = i915_gem_object_get_pages_phys(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_put_pages_shmem(obj, pages);

	i915_gem_object_release_memory_region(obj);
	return 0;

err_xfer:
	if (!IS_ERR_OR_NULL(pages))
		__i915_gem_object_set_pages(obj, pages);
	return err;
}

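/*
 * Convert a shmem object to a single contiguous physical allocation.
 * Refused for non-shmem, tiled, pinned, mapped or purgeable objects; the
 * caller must hold the object lock. A minimal usage sketch (the caller
 * and alignment are illustrative, not taken from this file):
 *
 *	i915_gem_object_lock(obj, NULL);
 *	err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
 *	i915_gem_object_unlock(obj);
 */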
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	int err;

	assert_object_held(obj);

	if (align > obj->base.size)
		return -EINVAL;

	if (!i915_gem_object_is_shmem(obj))
		return -EINVAL;

	if (!i915_gem_object_has_struct_page(obj))
		return 0;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (i915_gem_object_has_tiling_quirk(obj))
		return -EFAULT;

	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(obj->base.dev,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	return i915_gem_object_shmem_to_phys(obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif