xref: /linux/drivers/gpu/drm/qxl/qxl_object.c (revision 1517d90cfafe0f95fd7863d04e1596f7beb7dfa8)
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>
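
/*
 * TTM destroy callback, run when the last reference to the underlying
 * ttm_buffer_object is dropped: evict any backing surface, take the BO off
 * the device's GEM object list, release the GEM object and free the qxl_bo.
 */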
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

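/* Identify a TTM BO as ours by checking for our destroy callback. */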
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

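/*
 * Build the TTM placement list for a QXL_GEM_DOMAIN_* value: VRAM maps to
 * TTM_PL_VRAM, the surface domain to TTM_PL_PRIV with VRAM as a second
 * choice, and CPU (or an unrecognised domain) to system memory.  For pinned
 * BOs, TTM_PL_FLAG_NO_EVICT keeps the buffer from being moved.
 */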
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
	unsigned int i;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
	if (!c)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

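/*
 * Allocate a qxl_bo of @size bytes (rounded up to a page) in the given
 * domain and initialise its GEM and TTM state.  On ttm_bo_init() failure
 * the BO is not freed here because TTM is expected to invoke the destroy
 * callback; only the error code is returned.
 */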
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

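/*
 * Map the whole BO into the kernel address space.  The mapping is cached
 * in bo->kptr and reference counted through bo->map_count, so repeated
 * kmap/kunmap pairs on a mapped BO are cheap.
 */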
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		bo->map_count++;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	bo->map_count = 1;
	return 0;
}

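/*
 * Return a mapping of a single page of the BO.  VRAM and surface memory
 * are mapped through the device's io_mapping with
 * io_mapping_map_atomic_wc(), so the caller must release the page promptly
 * via qxl_bo_kunmap_atomic_page().  Other placements fall back to a full
 * qxl_bo_kmap().
 */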
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

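/* Drop one kmap reference; the mapping is torn down when the count hits zero. */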
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

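/*
 * Undo qxl_bo_kmap_atomic_page(): unmap the atomic page for VRAM and
 * surface memory, or drop the fallback kmap reference otherwise.
 */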
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];

	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);

	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);
	return;
fallback:
	qxl_bo_kunmap(bo);
}

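/*
 * Drop the caller's reference and clear the pointer.  Actual freeing
 * happens through qxl_ttm_bo_destroy() once the last reference is gone.
 */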
void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put_unlocked(&(*bo)->tbo.base);
	*bo = NULL;
}

/* Take an additional reference on behalf of the caller. */
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

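/*
 * Pin helper; the BO must already be reserved.  Bumps the pin count, and
 * on the first pin revalidates the BO into its domain with
 * TTM_PL_FLAG_NO_EVICT set.
 */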
static int __qxl_bo_pin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		bo->pin_count = 1;
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

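/*
 * Unpin helper; the BO must already be reserved.  Drops the pin count, and
 * on the last unpin clears TTM_PL_FLAG_NO_EVICT from the placements and
 * revalidates so the BO becomes evictable again.
 */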
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

/*
 * Reserve the BO, then pin it.  If the BO is already reserved, call the
 * internal helper __qxl_bo_pin() directly instead.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_pin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO, then unpin it.  If the BO is already reserved, call the
 * internal helper __qxl_bo_unpin() directly instead.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

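/*
 * Teardown helper: warn about GEM objects userspace leaked and forcibly
 * drop the remaining references so the BOs get destroyed.
 */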
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put_unlocked(&bo->tbo.base);
	}
}

/* Set up the TTM memory managers for the device. */
int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

/* Tear down the TTM memory managers. */
void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

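/*
 * Make sure a surface BO has a surface id, allocating one and creating the
 * hardware surface on first use.
 */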
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

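/* Evict all BOs from surface memory (TTM_PL_PRIV). */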
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
}

/* Evict all BOs from VRAM. */
int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}
359