xref: /linux/drivers/gpu/drm/qxl/qxl_object.c (revision d91517839e5d95adc0cf4b28caa7af62a71de526)
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>
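
/*
 * TTM destroy callback, run when the last reference to the underlying
 * ttm_buffer_object is dropped: evict any backing hw surface, tear down
 * the fence, unlink the bo from the device's GEM list and free it.
 */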
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = container_of(tbo, struct qxl_bo, tbo);
	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	qxl_fence_fini(&bo->fence);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

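/* A TTM bo is a qxl bo exactly when it uses our destroy callback. */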
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &qxl_ttm_bo_destroy;
}

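/*
 * Translate a QXL_GEM_DOMAIN_* value into a TTM placement list.  An
 * unrecognised domain falls back to system memory; pinning is expressed
 * by OR-ing TTM_PL_FLAG_NO_EVICT into each placement flag word.
 */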
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;

	qbo->placement.fpfn = 0;
	qbo->placement.lpfn = 0;
	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	if (domain == QXL_GEM_DOMAIN_SURFACE)
		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
	if (!c)
		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
}

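/*
 * Allocate a qxl_bo of @size bytes (rounded up to whole pages) in @domain
 * and initialise the underlying GEM and TTM objects.  On ttm_bo_init()
 * failure the bo has already been freed via qxl_ttm_bo_destroy(), so only
 * the error is propagated.  A minimal usage sketch (arguments are
 * illustrative only):
 *
 *	struct qxl_bo *bo;
 *	int r = qxl_bo_create(qdev, PAGE_SIZE, true, false,
 *			      QXL_GEM_DOMAIN_VRAM, NULL, &bo);
 *	if (r)
 *		return r;
 */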
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	qxl_fence_init(qdev, &bo->fence);
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, NULL, size,
			NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

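/*
 * Map the whole bo into the kernel address space, caching the mapping in
 * bo->kptr so repeated calls are cheap.  Undo with qxl_bo_kunmap().
 */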
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

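/*
 * Map a single page of the bo for a short, non-sleeping access.  Bos in
 * VRAM or the surface aperture (TTM_PL_PRIV0) go through an atomic
 * io-mapping and must be released with qxl_bo_kunmap_atomic_page();
 * anything else falls back to the persistent kernel map.  Note that the
 * two paths treat @page_offset differently: the io-mapping path adds it
 * to the bus offset as-is, while the fallback scales it by PAGE_SIZE, and
 * the return value of ttm_mem_io_reserve() is not checked.
 */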
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

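/* Undo qxl_bo_kmap(); harmless if the bo was never mapped. */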
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

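/*
 * Release a mapping obtained from qxl_bo_kmap_atomic_page(): drop the
 * atomic io-mapping and its reservation for VRAM/surface bos, or the
 * kernel map for everything else.
 */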
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	io_mapping_unmap_atomic(pmap);

	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);
	return;
fallback:
	qxl_bo_kunmap(bo);
}

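/*
 * Drop a reference and clear the caller's pointer.  ttm_bo_unref() always
 * NULLs its argument, so *bo ends up cleared unconditionally.
 */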
void qxl_bo_unref(struct qxl_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

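/* Take an extra reference on @bo and return it for convenience. */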
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	ttm_bo_reference(&bo->tbo);
	return bo;
}

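/*
 * Pin the bo so it cannot be evicted, revalidating it into @domain with
 * TTM_PL_FLAG_NO_EVICT on the first pin; nested pins only bump the count.
 * The bo's GPU offset is optionally returned through @gpu_addr.
 */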
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = qxl_bo_gpu_offset(bo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, domain, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = qxl_bo_gpu_offset(bo);
	} else {
		dev_err(qdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

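/*
 * Drop one pin reference.  Once the count reaches zero, strip
 * TTM_PL_FLAG_NO_EVICT from every placement and revalidate so the bo
 * becomes evictable again.
 */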
int qxl_bo_unpin(struct qxl_bo *bo)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

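/*
 * Teardown helper for objects userspace leaked: unlink every bo still on
 * the device's GEM list and drop its GEM reference, which should in turn
 * release the underlying ttm bo.
 */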
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		mutex_lock(&qdev->ddev->struct_mutex);
		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&qdev->ddev->struct_mutex);
	}
}

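/* Buffer-object bring-up is just TTM initialisation for this driver. */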
int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

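/* Tear down the TTM state set up by qxl_bo_init(). */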
void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

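/*
 * Lazily allocate a surface id (and the hw surface itself) the first
 * time a surface bo is actually used.
 */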
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
		if (ret)
			return ret;
	}
	return 0;
}

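/* Evict everything from the surface aperture (TTM_PL_PRIV0). */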
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
}

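/* Evict everything from VRAM. */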
int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}