/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
	spinlock_t cotable_lock;
	struct vmw_dma_buffer *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/**
 * Context management:
 */

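/**
 * vmw_context_cotables_unref - Drop the context's references on its cotables
 *
 * @uctx: Pointer to the user context.
 *
 * Clears each cotable pointer under the cotable_lock and drops the
 * corresponding reference outside the lock.
 */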
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;

	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}

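/**
 * vmw_hw_context_destroy - Destroy the device context backing a resource
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts this tears down the command buffer
 * resource manager, kills all bindings and calls the resource's destroy
 * callback. For legacy contexts, a SVGA_3D_CMD_CONTEXT_DESTROY command is
 * written to the FIFO.
 */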
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;


	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}

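/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize the resource as a DX context.
 * @res: Pointer to the already allocated resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 *
 * Sets up the backup size, the command buffer resource manager, the
 * binding state and, for DX contexts, the cotable resources.
 */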
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
			    SVGA3D_CONTEXT_DATA_SIZE);
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (unlikely(uctx->cotables[i] == NULL)) {
				ret = -ENOMEM;
				goto out_cotables;
			}
		}
	}



	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_cotables:
	vmw_context_cotables_unref(uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

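/**
 * vmw_context_init - Initialize a context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the already allocated resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 * @dx: Whether the context is a DX context.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed hardware. Otherwise
 * a legacy context is defined directly through the FIFO.
 */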
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}


/*
 * GB context.
 */

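/**
 * vmw_gb_context_create - Create a hardware guest-backed context
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a context id and emits a SVGA_3D_CMD_DEFINE_GB_CONTEXT command.
 */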
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

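/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Emits a SVGA_3D_CMD_BIND_GB_CONTEXT command pointing the context at the
 * MOB backing it.
 */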
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}
	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

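/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to issue a readback before unbinding.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs all bindings, optionally reads the context state back into the
 * backup buffer, binds the context to SVGA3D_INVALID_ID and fences the
 * backup buffer.
 */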
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

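/**
 * vmw_gb_context_destroy - Destroy a hardware guest-backed context
 *
 * @res: Pointer to the context resource.
 *
 * Emits a SVGA_3D_CMD_DESTROY_GB_CONTEXT command and releases the
 * context id.
 */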
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */

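/**
 * vmw_dx_context_create - Create a hardware DX context
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a context id and emits a SVGA_3D_CMD_DX_DEFINE_CONTEXT command.
 */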
static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

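/**
 * vmw_dx_context_bind - Bind a DX context to its backup MOB
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Emits a SVGA_3D_CMD_DX_BIND_CONTEXT command pointing the context at the
 * MOB backing it.
 */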
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));


	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);
		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}

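/**
 * vmw_dx_context_unbind - Unbind a DX context from its backup MOB
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read back context and query state before unbinding.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs the cotables and bindings, optionally reads back the context and
 * any pending query states, binds the context to SVGA3D_INVALID_ID and
 * fences the backup buffer.
 */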
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

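/**
 * vmw_dx_context_destroy - Destroy a hardware DX context
 *
 * @res: Pointer to the context resource.
 *
 * Emits a SVGA_3D_CMD_DX_DESTROY_CONTEXT command and releases the
 * context id.
 */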
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/**
 * User-space context management:
 */

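/**
 * vmw_user_context_base_to_res - Convert a TTM base object to its resource
 *
 * @base: Pointer to the TTM base object embedded in a struct
 * vmw_user_context.
 */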
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

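/**
 * vmw_user_context_free - Resource destructor for user contexts
 *
 * @res: Pointer to the context resource.
 *
 * Frees the binding state, drops any DX query MOB binding, frees the
 * user-context structure and releases the accounted memory.
 */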
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - TTM base object release callback
 *
 * @p_base: Pointer to the TTM base object embedded in a user context.
 *
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

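/**
 * vmw_context_destroy_ioctl - Ioctl dropping a user-space context reference
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg identifying the context.
 * @file_priv: Pointer to the calling file private.
 */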
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

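/**
 * vmw_context_define - Create a user-space visible context
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg used to return the
 * context id.
 * @file_priv: Pointer to the calling file private.
 * @dx: Whether to create a DX context.
 *
 * Accounts the memory for the context, initializes the resource and
 * registers it as a TTM base object visible to the calling client.
 */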
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (!dev_priv->has_dx && dx) {
		DRM_ERROR("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

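/**
 * vmw_context_define_ioctl - Ioctl creating a legacy or guest-backed context
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg.
 * @file_priv: Pointer to the calling file private.
 */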
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

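/**
 * vmw_extended_context_define_ioctl - Ioctl creating a legacy or DX context
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a union drm_vmw_extended_context_arg selecting the
 * context type and returning the context id.
 * @file_priv: Pointer to the calling file private.
 */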
int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}
	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

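/**
 * vmw_context_res_man - Return the context's command buffer resource manager
 *
 * @ctx: The context resource
 */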
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

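/**
 * vmw_context_cotable - Return a referenced cotable of a context
 *
 * @ctx: The context resource
 * @cotable_type: The cotable type to look up
 *
 * Returns a reference to the requested cotable resource, or
 * ERR_PTR(-EINVAL) if @cotable_type is out of range.
 */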
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
		return ERR_PTR(-EINVAL);

	return vmw_resource_reference
		(container_of(ctx, struct vmw_user_context, res)->
		 cotables[cotable_type]);
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context.  If @mob is NULL, then this function will
 * remove the association between the MOB and the context.  This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter.  0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_dma_buffer *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_dmabuf_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx  = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_dmabuf_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}
938