/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

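/*
 * "Phys" objects keep their contents in a single physically contiguous
 * DMA allocation (a drm_dma_handle) instead of the usual shmemfs pages.
 * get_pages copies the shmemfs contents into that allocation, and
 * put_pages writes any dirty contents back, so the shmemfs file remains
 * the canonical backing store across the conversion.
 */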
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_dma_handle *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
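	/* e.g. a 12KiB object is backed by a 16KiB block aligned to 16KiB,
	 * since roundup_pow_of_two() is applied to both size and alignment.
	 */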
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

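	/* The clflushes above pushed the copied data out of the CPU caches;
	 * flush the chipset write buffers too so it is visible in memory
	 * before the GPU reads from the physical pages.
	 */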
	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

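/*
 * Write any dirty contents back to the shmemfs backing store before
 * dropping the contiguous allocation, so nothing is lost if the object
 * is later re-populated via the ordinary shmem path.
 */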
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
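			/* Invalidate any stale cachelines so the copy below
			 * reads exactly what was last written to memory.
			 */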
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

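/* Release the reference on the shmemfs file taken at object creation. */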
static void phys_release(struct drm_i915_gem_object *obj)
{
	fput(obj->base.filp);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,

	.release = phys_release,
};

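/*
 * Convert a shmem-backed object into a phys object: unbind it from the
 * GPU, detach the shmemfs pages, swap in i915_gem_phys_ops and pull the
 * contents into a contiguous allocation via get_pages. The new pages
 * stay pinned until the object is released. Note the caller-supplied
 * @align must not exceed the object size, as the allocation is already
 * rounded up to a power of two.
 */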
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	mutex_lock(&obj->mm.lock);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_shmem_ops.put_pages(obj, pages);
	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}
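
/*
 * Usage sketch (hedged): the expected caller is display code for old
 * chipsets whose cursor plane scans out of physically contiguous
 * memory, roughly:
 *
 *	// cursor_needs_physical_memory() is a hypothetical predicate
 *	if (cursor_needs_physical_memory(i915)) {
 *		err = i915_gem_object_attach_phys(obj, align);
 *		if (err)
 *			return err;
 *	}
 */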

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif