xref: /linux/drivers/gpu/drm/qxl/qxl_object.c (revision 24168c5e6dfbdd5b414f048f47f75d64533296ca)
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/iosys-map.h>
#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"

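/*
 * TTM destroy callback, run once the last reference to the BO is gone:
 * evict any hardware surface backed by the BO, drop it from the device's
 * GEM object list and release the GEM object itself.
 */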
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = to_qxl(bo->tbo.base.dev);

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &qxl_ttm_bo_destroy;
}

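/*
 * Build the TTM placement list for @domain. Single-page BOs are placed
 * top-down, presumably to keep small allocations from fragmenting the
 * bottom of the memory range; an unknown domain falls back to system
 * memory with no extra flags.
 */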
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM) {
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c].mem_type = TTM_PL_PRIV;
		qbo->placements[c++].flags = pflag;
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = pflag;
	}
	if (!c) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = 0;
	}
	qbo->placement.num_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

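/*
 * Usage sketch (illustrative, not part of the original file): an
 * already-reserved BO is moved into a new domain with the same pattern
 * qxl_bo_pin_locked() uses below -- rebuild the placement list, then let
 * TTM validate (and, if necessary, migrate) the object.
 * qxl_bo_move_to_domain_example() is a hypothetical helper.
 */
static inline int qxl_bo_move_to_domain_example(struct qxl_bo *bo, u32 domain)
{
	struct ttm_operation_ctx ctx = { false, false };

	dma_resv_assert_held(bo->tbo.base.resv);

	qxl_ttm_placement_from_domain(bo, domain);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
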
static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

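/*
 * Create a BO. @kernel selects a TTM kernel-internal object that
 * userspace cannot map, @pinned pins it right after creation, @domain
 * picks the initial placement, and @surf, when non-NULL, is copied into
 * the BO for surface objects. On success *@bo_ptr holds one reference.
 */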
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
		  bool kernel, bool pinned, u32 domain, u32 priority,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = { !kernel, false };
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain);

	bo->tbo.priority = priority;
	r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type,
				 &bo->placement, 0, &ctx, NULL, NULL,
				 &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	if (pinned)
		ttm_bo_pin(&bo->tbo);
	ttm_bo_unreserve(&bo->tbo);
	*bo_ptr = bo;
	return 0;
}

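/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * caller creating a pinned, kernel-internal, page-sized BO in VRAM.
 * Dropping the reference again via qxl_bo_unref() is up to the caller.
 * qxl_bo_create_example() is a hypothetical helper.
 */
static inline int qxl_bo_create_example(struct qxl_device *qdev,
					struct qxl_bo **bo_ptr)
{
	return qxl_bo_create(qdev, PAGE_SIZE, true /* kernel */,
			     true /* pinned */, QXL_GEM_DOMAIN_VRAM,
			     0 /* priority */, NULL /* no surface */, bo_ptr);
}

/*
 * Map the whole BO into the kernel's address space. Mappings are
 * reference counted via map_count, so nested vmaps of the same BO return
 * the cached mapping. The caller must hold the reservation lock.
 */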
int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr) {
		bo->map_count++;
		goto out;
	}

	r = ttm_bo_vmap(&bo->tbo, &bo->map);
	if (r)
		return r;
	bo->map_count = 1;

	/* TODO: Remove kptr in favor of map everywhere. */
	if (bo->map.is_iomem)
		bo->kptr = (void *)bo->map.vaddr_iomem;
	else
		bo->kptr = bo->map.vaddr;

out:
	*map = bo->map;
	return 0;
}

int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = qxl_bo_vmap_locked(bo, map);
	qxl_bo_unreserve(bo);
	return r;
}

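/*
 * Usage sketch (illustrative, not part of the original file): vmap and
 * vunmap come in pairs, and the map_count bookkeeping keeps nested
 * mappings of the same BO cheap. qxl_bo_vmap_example() is a hypothetical
 * helper.
 */
static inline void qxl_bo_vmap_example(struct qxl_bo *bo)
{
	struct iosys_map map;

	if (qxl_bo_vmap(bo, &map))
		return;
	/* ... access the BO contents through 'map' ... */
	qxl_bo_vunmap(bo);
}

/*
 * Map a single page of a BO for short-lived atomic access. BOs in VRAM
 * or surface memory go through the device's io_mapping; anything else
 * falls back to the cached vmap of the whole object.
 */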
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	unsigned long offset;
	void *rptr;
	int ret;
	struct io_mapping *map;
	struct iosys_map bo_map;

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	offset = bo->tbo.resource->start << PAGE_SHIFT;
	return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_vmap_locked(bo, &bo_map);
	if (ret)
		return NULL;
	rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_vunmap(&bo->tbo, &bo->map);
}

int qxl_bo_vunmap(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	qxl_bo_vunmap_locked(bo);
	qxl_bo_unreserve(bo);
	return 0;
}

void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.resource->mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
fallback:
	qxl_bo_vunmap_locked(bo);
}

void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

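/*
 * Validate an already-reserved BO into its native domain (bo->type) and
 * pin it there; pinning an already-pinned BO only raises the pin count.
 */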
int qxl_bo_pin_locked(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		ttm_bo_pin(&bo->tbo);
	else
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

void qxl_bo_unpin_locked(struct qxl_bo *bo)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	ttm_bo_unpin(&bo->tbo);
}

/*
 * Reserve the BO before pinning the object.  If the BO was reserved
 * beforehand, use qxl_bo_pin_locked() directly.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = qxl_bo_pin_locked(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning the object.  If the BO was reserved
 * beforehand, use qxl_bo_unpin_locked() directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	qxl_bo_unpin_locked(bo);
	qxl_bo_unreserve(bo);
	return 0;
}

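/*
 * Teardown helper: complain about GEM objects userspace leaked and drop
 * what should be the final reference on each, which in turn triggers
 * qxl_ttm_bo_destroy().
 */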
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

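/*
 * Surface BOs get their surface id lazily: the first time one is used,
 * allocate an id and the matching hardware surface.
 */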
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

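/*
 * Evict everything from the surface (TTM_PL_PRIV) and VRAM managers;
 * callers such as the suspend path use these to quiesce device memory.
 */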
int qxl_surf_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}