xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c (revision 092e0e7e520a1fca03e13c9f2d157432a8657ff2)
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);

	/*
	 * Called with the resource lock held. Drop it across the
	 * destructor callbacks, which may sleep, and retake it for
	 * the caller (vmw_resource_unreference).
	 */
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Once activated, the resource can be found by vmw_resource_lookup.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

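/*
 * Illustrative sketch only (not part of the original file): the lookup /
 * unreference pattern used throughout this driver. vmw_resource_lookup()
 * returns NULL for ids that are unknown or not yet activated; on success
 * it takes a reference that must be dropped with
 * vmw_resource_unreference(), which also clears the caller's pointer.
 */
static inline void vmw_resource_lookup_example(struct vmw_private *dev_priv,
					       int id)
{
	struct vmw_resource *res;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, id);
	if (unlikely(res == NULL))
		return;

	/* ... use the resource here ... */

	vmw_resource_unreference(&res);
}
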
/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
			container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	/*
	 * The define command is variable-length: the fixed header and
	 * body are followed by one SVGA3dSize per mip level.
	 */
	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("FIFO reserve failed for surface creation.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	/* The size array starts immediately after the fixed part. */
	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

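/*
 * Illustrative sketch only (not part of the original file): the FIFO
 * reservation size for a surface-define command, mirroring the
 * submit_size computation in vmw_surface_init() above. The anonymous
 * header/body struct is assumed to have no padding between members.
 */
static inline size_t vmw_surface_define_size_example(uint32_t num_sizes)
{
	return sizeof(SVGA3dCmdHeader) + sizeof(SVGA3dCmdDefineSurface) +
	       num_sizes * sizeof(SVGA3dSize);
}
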
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

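/*
 * Illustrative sketch only (not part of the original file): typical use
 * of vmw_user_surface_lookup_handle(). On success the surface resource
 * carries an extra reference that the caller must drop when done.
 */
static inline int vmw_surface_handle_example(struct vmw_private *dev_priv,
					     struct ttm_object_file *tfile,
					     uint32_t handle)
{
	struct vmw_surface *srf;
	struct vmw_resource *res;
	int ret;

	ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, &srf);
	if (unlikely(ret != 0))
		return ret;

	/* ... use srf->format, srf->sizes, etc. here ... */

	res = &srf->res;
	vmw_resource_unreference(&res);
	return 0;
}
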
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	/*
	 * A 64x64 A8R8G8B8 scanout surface is assumed to be a cursor;
	 * allocate a cleared snooper image for it.
	 */
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (srf->snooper.image == NULL) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;
	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
		container_of(bo->bdev, struct vmw_private, bdev);

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/*
		 * Free the buffer here, mirroring what ttm_bo_init()
		 * does on its own failure paths.
		 */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

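/*
 * Illustrative sketch only (not part of the original file): creating a
 * kernel-internal buffer with vmw_dmabuf_init(), paired with
 * vmw_dmabuf_bo_free() as its destructor. kzalloc() is used so the early
 * error path in vmw_dmabuf_init() never sees stale gmr state;
 * vmw_vram_sys_placement is assumed to be one of the placements this
 * driver exports.
 */
static inline struct vmw_dma_buffer *
vmw_dmabuf_example_alloc(struct vmw_private *dev_priv, size_t size)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	vmw_bo = kzalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (unlikely(vmw_bo == NULL))
		return NULL;

	/* On failure, vmw_dmabuf_init() has already freed vmw_bo. */
	ret = vmw_dmabuf_init(dev_priv, vmw_bo, size,
			      &vmw_vram_sys_placement, false,
			      &vmw_dmabuf_bo_free);
	return (ret == 0) ? vmw_bo : NULL;
}
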
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	/*
	 * On failure, vmw_dmabuf_init() has already freed vmw_user_bo,
	 * but the read lock must still be released.
	 */
	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		struct ttm_buffer_object *bo = &vmw_user_bo->dma.base;

		/*
		 * Drop both the temporary reference and the initial
		 * reference; no base object exists to release the latter.
		 */
		ttm_bo_unref(&tmp);
		ttm_bo_unref(&bo);
	} else {
		rep->handle = vmw_user_bo->base.hash.key;
		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
		rep->cur_gmr_offset = 0;
		ttm_bo_unref(&tmp);
	}

out_unlock:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

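/*
 * Illustrative sketch only (not part of the original file): resolving a
 * user-space buffer handle and releasing the buffer reference taken by
 * vmw_user_dmabuf_lookup() once the buffer is no longer needed.
 */
static inline int vmw_dmabuf_handle_example(struct ttm_object_file *tfile,
					    uint32_t handle)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_buffer_object *bo;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* ... reserve, validate or map vmw_bo->base here ... */

	bo = &vmw_bo->base;
	ttm_bo_unref(&bo);	/* Drops the lookup reference. */
	return 0;
}
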
/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}

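/*
 * Illustrative sketch only (not part of the original file): pairing
 * vmw_gmr_id_alloc() with vmw_dmabuf_set_gmr(). -EBUSY signals id
 * exhaustion, in which case callers fall back to VRAM placement as the
 * TODO above describes. The actual GMR page-table binding is done
 * elsewhere in the driver.
 */
static inline int vmw_gmr_bind_example(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *bo)
{
	uint32_t id;
	int ret;

	ret = vmw_gmr_id_alloc(dev_priv, &id);
	if (unlikely(ret != 0))
		return ret;	/* -EBUSY: out of ids; fall back to VRAM. */

	vmw_dmabuf_set_gmr(bo, id);
	return 0;
}
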
/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
				  arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
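
/*
 * Illustrative sketch only (not part of the original file): resolving a
 * user-space stream handle to the device stream id. The lookup returns
 * a referenced resource; callers hold it while the stream is in use and
 * drop it with vmw_resource_unreference() afterwards.
 */
static inline int vmw_stream_id_example(struct vmw_private *dev_priv,
					struct ttm_object_file *tfile,
					uint32_t handle, uint32_t *stream_id)
{
	struct vmw_resource *res;
	uint32_t id = handle;
	int ret;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &id, &res);
	if (unlikely(ret != 0))
		return ret;

	*stream_id = id;	/* Rewritten to the device stream id. */

	/* ... program the overlay here while holding the reference ... */

	vmw_resource_unreference(&res);
	return 0;
}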
1196