// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions, where a command
 * is invalid if dx_ctx_node is not set.
 */
#define VMW_GET_CTX_NODE(__sw_context)                                        \
({                                                                            \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
		__sw_context->dx_ctx_node;                                    \
	});                                                                   \
})
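
/*
 * A minimal usage sketch, mirroring command verifiers such as
 * vmw_view_bindings_add() later in this file:
 *
 *	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */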

#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
	struct {                                                              \
		SVGA3dCmdHeader header;                                       \
		__type body;                                                  \
	} __var
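
/*
 * Usage sketch: a verifier declares a pointer to a header + typed-body pair
 * and recovers it from the command stream with container_of(), as done in
 * vmw_cmd_surface_copy_check() below:
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
 *
 *	cmd = container_of(header, typeof(*cmd), header);
 */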

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};
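
/*
 * The 29-bit @offset and 3-bit @rel_type bitfields above are sized to cover
 * SVGA_CB_MAX_SIZE and vmw_res_rel_max respectively; the BUILD_BUG_ON()
 * checks in vmw_resource_relocations_apply() verify those assumptions at
 * compile time.
 */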

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: The verifier function to call for this command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if guest-backed objects are available.
 * @cmd_name: The command's name, for debug purposes.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}
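
/*
 * Illustrative sketch: the command dispatch table defined further down in
 * this file is built from entries such as
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *
 * where the trailing booleans are user_allow, gb_disable and gb_enable.
 */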

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
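
/*
 * For example, with char *buf, vmw_ptr_diff(buf, buf + 16) == 16. Callers
 * below use it to compute the byte offset of an id location relative to
 * sw_context->buf_start.
 */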

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the context resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - Calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node, typically the binding manager's
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view, and the surface it points to, to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it points
 * to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}
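
/*
 * Callers typically compute @offset with vmw_ptr_diff() against the start of
 * the command buffer, as done in vmw_cmd_res_check() below:
 *
 *	vmw_resource_relocation_add(sw_context, res,
 *				    vmw_ptr_diff(sw_context->buf_start, id_loc),
 *				    vmw_res_rel_normal);
 */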

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type-specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to the resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

	return 0;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB.  In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}
1567 
1568 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1569 			struct vmw_sw_context *sw_context,
1570 			SVGA3dCmdHeader *header)
1571 {
1572 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1573 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1574 		(unsigned long)header + sizeof(*cmd));
1575 	SVGA3dPrimitiveRange *range;
1576 	uint32_t i;
1577 	uint32_t maxnum;
1578 	int ret;
1579 
1580 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1581 	if (unlikely(ret != 0))
1582 		return ret;
1583 
1584 	cmd = container_of(header, typeof(*cmd), header);
1585 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1586 
1587 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1588 		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1589 		return -EINVAL;
1590 	}
1591 
1592 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1593 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1594 					VMW_RES_DIRTY_NONE,
1595 					user_surface_converter,
1596 					&decl->array.surfaceId, NULL);
1597 		if (unlikely(ret != 0))
1598 			return ret;
1599 	}
1600 
1601 	maxnum = (header->size - sizeof(cmd->body) -
1602 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1603 	if (unlikely(cmd->body.numRanges > maxnum)) {
1604 		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1605 		return -EINVAL;
1606 	}
1607 
1608 	range = (SVGA3dPrimitiveRange *) decl;
1609 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1610 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1611 					VMW_RES_DIRTY_NONE,
1612 					user_surface_converter,
1613 					&range->indexArray.surfaceId, NULL);
1614 		if (unlikely(ret != 0))
1615 			return ret;
1616 	}
1617 	return 0;
1618 }
1619 
1620 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1621 			     struct vmw_sw_context *sw_context,
1622 			     SVGA3dCmdHeader *header)
1623 {
1624 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1625 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1626 	  ((unsigned long) header + header->size + sizeof(header));
1627 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1628 		((unsigned long) header + sizeof(*cmd));
1629 	struct vmw_resource *ctx;
1630 	struct vmw_resource *res;
1631 	int ret;
1632 
1633 	cmd = container_of(header, typeof(*cmd), header);
1634 
1635 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1636 				VMW_RES_DIRTY_SET, user_context_converter,
1637 				&cmd->body.cid, &ctx);
1638 	if (unlikely(ret != 0))
1639 		return ret;
1640 
1641 	for (; cur_state < last_state; ++cur_state) {
1642 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1643 			continue;
1644 
1645 		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1646 			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1647 				       (unsigned int) cur_state->stage);
1648 			return -EINVAL;
1649 		}
1650 
1651 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1652 					VMW_RES_DIRTY_NONE,
1653 					user_surface_converter,
1654 					&cur_state->value, &res);
1655 		if (unlikely(ret != 0))
1656 			return ret;
1657 
1658 		if (dev_priv->has_mob) {
1659 			struct vmw_ctx_bindinfo_tex binding;
1660 			struct vmw_ctx_validation_info *node;
1661 
1662 			node = vmw_execbuf_info_from_res(sw_context, ctx);
1663 			if (!node)
1664 				return -EINVAL;
1665 
1666 			binding.bi.ctx = ctx;
1667 			binding.bi.res = res;
1668 			binding.bi.bt = vmw_ctx_binding_tex;
1669 			binding.texture_stage = cur_state->stage;
1670 			vmw_binding_add(node->staged, &binding.bi, 0,
1671 					binding.texture_stage);
1672 		}
1673 	}
1674 
1675 	return 0;
1676 }
1677 
1678 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1679 				      struct vmw_sw_context *sw_context,
1680 				      void *buf)
1681 {
1682 	struct vmw_buffer_object *vmw_bo;
1683 
1684 	struct {
1685 		uint32_t header;
1686 		SVGAFifoCmdDefineGMRFB body;
1687 	} *cmd = buf;
1688 
1689 	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1690 				       &vmw_bo);
1691 }
1692 
1693 /**
1694  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1695  * switching
1696  *
1697  * @dev_priv: Pointer to a device private struct.
1698  * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource whose backup buffer is being switched.
1700  * @buf_id: Pointer to the user-space backup buffer handle in the command
1701  * stream.
1702  * @backup_offset: Offset of backup into MOB.
1703  *
1704  * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
1707  */
1708 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1709 				     struct vmw_sw_context *sw_context,
1710 				     struct vmw_resource *res, uint32_t *buf_id,
1711 				     unsigned long backup_offset)
1712 {
1713 	struct vmw_buffer_object *vbo;
1714 	void *info;
1715 	int ret;
1716 
1717 	info = vmw_execbuf_info_from_res(sw_context, res);
1718 	if (!info)
1719 		return -EINVAL;
1720 
1721 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1722 	if (ret)
1723 		return ret;
1724 
1725 	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1726 					 backup_offset);
1727 	return 0;
1728 }
1729 
1730 /**
1731  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1732  *
1733  * @dev_priv: Pointer to a device private struct.
1734  * @sw_context: The software context being used for this batch.
1735  * @res_type: The resource type.
1736  * @converter: Information about user-space binding for this resource type.
1737  * @res_id: Pointer to the user-space resource handle in the command stream.
1738  * @buf_id: Pointer to the user-space backup buffer handle in the command
1739  * stream.
1740  * @backup_offset: Offset of backup into MOB.
1741  *
1742  * This function prepares for registering a switch of backup buffers in the
1743  * resource metadata just prior to unreserving. It's basically a wrapper around
1744  * vmw_cmd_res_switch_backup with a different interface.
1745  */
1746 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1747 				 struct vmw_sw_context *sw_context,
1748 				 enum vmw_res_type res_type,
1749 				 const struct vmw_user_resource_conv
1750 				 *converter, uint32_t *res_id, uint32_t *buf_id,
1751 				 unsigned long backup_offset)
1752 {
1753 	struct vmw_resource *res;
1754 	int ret;
1755 
1756 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1757 				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1758 	if (ret)
1759 		return ret;
1760 
1761 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1762 					 backup_offset);
1763 }
1764 
1765 /**
1766  * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1767  *
1768  * @dev_priv: Pointer to a device private struct.
1769  * @sw_context: The software context being used for this batch.
1770  * @header: Pointer to the command header in the command stream.
1771  */
1772 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1773 				   struct vmw_sw_context *sw_context,
1774 				   SVGA3dCmdHeader *header)
1775 {
1776 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1777 		container_of(header, typeof(*cmd), header);
1778 
1779 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1780 				     user_surface_converter, &cmd->body.sid,
1781 				     &cmd->body.mobid, 0);
1782 }
1783 
1784 /**
1785  * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1786  *
1787  * @dev_priv: Pointer to a device private struct.
1788  * @sw_context: The software context being used for this batch.
1789  * @header: Pointer to the command header in the command stream.
1790  */
1791 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1792 				   struct vmw_sw_context *sw_context,
1793 				   SVGA3dCmdHeader *header)
1794 {
1795 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1796 		container_of(header, typeof(*cmd), header);
1797 
1798 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1799 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1800 				 &cmd->body.image.sid, NULL);
1801 }
1802 
1803 /**
1804  * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1805  *
1806  * @dev_priv: Pointer to a device private struct.
1807  * @sw_context: The software context being used for this batch.
1808  * @header: Pointer to the command header in the command stream.
1809  */
1810 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1811 				     struct vmw_sw_context *sw_context,
1812 				     SVGA3dCmdHeader *header)
1813 {
1814 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1815 		container_of(header, typeof(*cmd), header);
1816 
1817 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1818 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1819 				 &cmd->body.sid, NULL);
1820 }
1821 
1822 /**
1823  * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1824  *
1825  * @dev_priv: Pointer to a device private struct.
1826  * @sw_context: The software context being used for this batch.
1827  * @header: Pointer to the command header in the command stream.
1828  */
1829 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1830 				     struct vmw_sw_context *sw_context,
1831 				     SVGA3dCmdHeader *header)
1832 {
1833 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1834 		container_of(header, typeof(*cmd), header);
1835 
1836 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1837 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1838 				 &cmd->body.image.sid, NULL);
1839 }
1840 
1841 /**
1842  * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1843  * command
1844  *
1845  * @dev_priv: Pointer to a device private struct.
1846  * @sw_context: The software context being used for this batch.
1847  * @header: Pointer to the command header in the command stream.
1848  */
1849 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1850 				       struct vmw_sw_context *sw_context,
1851 				       SVGA3dCmdHeader *header)
1852 {
1853 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1854 		container_of(header, typeof(*cmd), header);
1855 
1856 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1857 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1858 				 &cmd->body.sid, NULL);
1859 }
1860 
1861 /**
1862  * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1863  * command
1864  *
1865  * @dev_priv: Pointer to a device private struct.
1866  * @sw_context: The software context being used for this batch.
1867  * @header: Pointer to the command header in the command stream.
1868  */
1869 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1870 				       struct vmw_sw_context *sw_context,
1871 				       SVGA3dCmdHeader *header)
1872 {
1873 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1874 		container_of(header, typeof(*cmd), header);
1875 
1876 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1877 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1878 				 &cmd->body.image.sid, NULL);
1879 }
1880 
1881 /**
1882  * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1883  * command
1884  *
1885  * @dev_priv: Pointer to a device private struct.
1886  * @sw_context: The software context being used for this batch.
1887  * @header: Pointer to the command header in the command stream.
1888  */
1889 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1890 					 struct vmw_sw_context *sw_context,
1891 					 SVGA3dCmdHeader *header)
1892 {
1893 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1894 		container_of(header, typeof(*cmd), header);
1895 
1896 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1897 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1898 				 &cmd->body.sid, NULL);
1899 }
1900 
1901 /**
1902  * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1903  *
1904  * @dev_priv: Pointer to a device private struct.
1905  * @sw_context: The software context being used for this batch.
1906  * @header: Pointer to the command header in the command stream.
1907  */
1908 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1909 				 struct vmw_sw_context *sw_context,
1910 				 SVGA3dCmdHeader *header)
1911 {
1912 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1913 	int ret;
1914 	size_t size;
1915 	struct vmw_resource *ctx;
1916 
1917 	cmd = container_of(header, typeof(*cmd), header);
1918 
1919 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1920 				VMW_RES_DIRTY_SET, user_context_converter,
1921 				&cmd->body.cid, &ctx);
1922 	if (unlikely(ret != 0))
1923 		return ret;
1924 
1925 	if (unlikely(!dev_priv->has_mob))
1926 		return 0;
1927 
1928 	size = cmd->header.size - sizeof(cmd->body);
1929 	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1930 				    cmd->body.shid, cmd + 1, cmd->body.type,
1931 				    size, &sw_context->staged_cmd_res);
1932 	if (unlikely(ret != 0))
1933 		return ret;
1934 
1935 	return vmw_resource_relocation_add(sw_context, NULL,
1936 					   vmw_ptr_diff(sw_context->buf_start,
1937 							&cmd->header.id),
1938 					   vmw_res_rel_nop);
1939 }
1940 
1941 /**
1942  * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1943  *
1944  * @dev_priv: Pointer to a device private struct.
1945  * @sw_context: The software context being used for this batch.
1946  * @header: Pointer to the command header in the command stream.
1947  */
1948 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1949 				  struct vmw_sw_context *sw_context,
1950 				  SVGA3dCmdHeader *header)
1951 {
1952 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1953 	int ret;
1954 	struct vmw_resource *ctx;
1955 
1956 	cmd = container_of(header, typeof(*cmd), header);
1957 
1958 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1959 				VMW_RES_DIRTY_SET, user_context_converter,
1960 				&cmd->body.cid, &ctx);
1961 	if (unlikely(ret != 0))
1962 		return ret;
1963 
1964 	if (unlikely(!dev_priv->has_mob))
1965 		return 0;
1966 
1967 	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1968 				cmd->body.type, &sw_context->staged_cmd_res);
1969 	if (unlikely(ret != 0))
1970 		return ret;
1971 
1972 	return vmw_resource_relocation_add(sw_context, NULL,
1973 					   vmw_ptr_diff(sw_context->buf_start,
1974 							&cmd->header.id),
1975 					   vmw_res_rel_nop);
1976 }
1977 
1978 /**
1979  * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1980  *
1981  * @dev_priv: Pointer to a device private struct.
1982  * @sw_context: The software context being used for this batch.
1983  * @header: Pointer to the command header in the command stream.
1984  */
1985 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1986 			      struct vmw_sw_context *sw_context,
1987 			      SVGA3dCmdHeader *header)
1988 {
1989 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1990 	struct vmw_ctx_bindinfo_shader binding;
1991 	struct vmw_resource *ctx, *res = NULL;
1992 	struct vmw_ctx_validation_info *ctx_info;
1993 	int ret;
1994 
1995 	cmd = container_of(header, typeof(*cmd), header);
1996 
1997 	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
1998 		VMW_DEBUG_USER("Illegal shader type %u.\n",
1999 			       (unsigned int) cmd->body.type);
2000 		return -EINVAL;
2001 	}
2002 
2003 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2004 				VMW_RES_DIRTY_SET, user_context_converter,
2005 				&cmd->body.cid, &ctx);
2006 	if (unlikely(ret != 0))
2007 		return ret;
2008 
2009 	if (!dev_priv->has_mob)
2010 		return 0;
2011 
2012 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2013 		/*
2014 		 * This is the compat shader path - Per device guest-backed
2015 		 * shaders, but user-space thinks it's per context host-
2016 		 * backed shaders.
2017 		 */
2018 		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2019 					cmd->body.shid, cmd->body.type);
2020 		if (!IS_ERR(res)) {
2021 			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2022 							    VMW_RES_DIRTY_NONE);
2023 			if (unlikely(ret != 0))
2024 				return ret;
2025 
2026 			ret = vmw_resource_relocation_add
2027 				(sw_context, res,
2028 				 vmw_ptr_diff(sw_context->buf_start,
2029 					      &cmd->body.shid),
2030 				 vmw_res_rel_normal);
2031 			if (unlikely(ret != 0))
2032 				return ret;
2033 		}
2034 	}
2035 
2036 	if (IS_ERR_OR_NULL(res)) {
2037 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2038 					VMW_RES_DIRTY_NONE,
2039 					user_shader_converter, &cmd->body.shid,
2040 					&res);
2041 		if (unlikely(ret != 0))
2042 			return ret;
2043 	}
2044 
2045 	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2046 	if (!ctx_info)
2047 		return -EINVAL;
2048 
2049 	binding.bi.ctx = ctx;
2050 	binding.bi.res = res;
2051 	binding.bi.bt = vmw_ctx_binding_shader;
2052 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2053 	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2054 
2055 	return 0;
2056 }
2057 
2058 /**
2059  * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2060  *
2061  * @dev_priv: Pointer to a device private struct.
2062  * @sw_context: The software context being used for this batch.
2063  * @header: Pointer to the command header in the command stream.
2064  */
2065 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2066 				    struct vmw_sw_context *sw_context,
2067 				    SVGA3dCmdHeader *header)
2068 {
2069 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2070 	int ret;
2071 
2072 	cmd = container_of(header, typeof(*cmd), header);
2073 
2074 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2075 				VMW_RES_DIRTY_SET, user_context_converter,
2076 				&cmd->body.cid, NULL);
2077 	if (unlikely(ret != 0))
2078 		return ret;
2079 
2080 	if (dev_priv->has_mob)
2081 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2082 
2083 	return 0;
2084 }
2085 
2086 /**
2087  * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2088  *
2089  * @dev_priv: Pointer to a device private struct.
2090  * @sw_context: The software context being used for this batch.
2091  * @header: Pointer to the command header in the command stream.
2092  */
2093 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2094 				  struct vmw_sw_context *sw_context,
2095 				  SVGA3dCmdHeader *header)
2096 {
2097 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2098 		container_of(header, typeof(*cmd), header);
2099 
2100 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2101 				     user_shader_converter, &cmd->body.shid,
2102 				     &cmd->body.mobid, cmd->body.offsetInBytes);
2103 }
2104 
2105 /**
2106  * vmw_cmd_dx_set_single_constant_buffer - Validate
2107  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2108  *
2109  * @dev_priv: Pointer to a device private struct.
2110  * @sw_context: The software context being used for this batch.
2111  * @header: Pointer to the command header in the command stream.
2112  */
2113 static int
2114 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2115 				      struct vmw_sw_context *sw_context,
2116 				      SVGA3dCmdHeader *header)
2117 {
2118 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2119 	struct vmw_resource *res = NULL;
2120 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2121 	struct vmw_ctx_bindinfo_cb binding;
2122 	int ret;
2123 
2124 	if (!ctx_node)
2125 		return -EINVAL;
2126 
2127 	cmd = container_of(header, typeof(*cmd), header);
2128 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2129 				VMW_RES_DIRTY_NONE, user_surface_converter,
2130 				&cmd->body.sid, &res);
2131 	if (unlikely(ret != 0))
2132 		return ret;
2133 
2134 	binding.bi.ctx = ctx_node->ctx;
2135 	binding.bi.res = res;
2136 	binding.bi.bt = vmw_ctx_binding_cb;
2137 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2138 	binding.offset = cmd->body.offsetInBytes;
2139 	binding.size = cmd->body.sizeInBytes;
2140 	binding.slot = cmd->body.slot;
2141 
2142 	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2143 	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2144 		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2145 			       (unsigned int) cmd->body.type,
2146 			       (unsigned int) binding.slot);
2147 		return -EINVAL;
2148 	}
2149 
2150 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2151 			binding.slot);
2152 
2153 	return 0;
2154 }
2155 
2156 /**
2157  * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2158  * command
2159  *
2160  * @dev_priv: Pointer to a device private struct.
2161  * @sw_context: The software context being used for this batch.
2162  * @header: Pointer to the command header in the command stream.
2163  */
2164 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2165 				     struct vmw_sw_context *sw_context,
2166 				     SVGA3dCmdHeader *header)
2167 {
2168 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2169 		container_of(header, typeof(*cmd), header);
2170 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2171 		sizeof(SVGA3dShaderResourceViewId);
2172 
2173 	if ((u64) cmd->body.startView + (u64) num_sr_view >
2174 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2175 	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2176 		VMW_DEBUG_USER("Invalid shader binding.\n");
2177 		return -EINVAL;
2178 	}
2179 
2180 	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2181 				     vmw_ctx_binding_sr,
2182 				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2183 				     (void *) &cmd[1], num_sr_view,
2184 				     cmd->body.startView);
2185 }
2186 
2187 /**
2188  * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2189  *
2190  * @dev_priv: Pointer to a device private struct.
2191  * @sw_context: The software context being used for this batch.
2192  * @header: Pointer to the command header in the command stream.
2193  */
2194 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2195 				 struct vmw_sw_context *sw_context,
2196 				 SVGA3dCmdHeader *header)
2197 {
2198 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2199 	struct vmw_resource *res = NULL;
2200 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2201 	struct vmw_ctx_bindinfo_shader binding;
2202 	int ret = 0;
2203 
2204 	if (!ctx_node)
2205 		return -EINVAL;
2206 
2207 	cmd = container_of(header, typeof(*cmd), header);
2208 
2209 	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
2210 	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2211 		VMW_DEBUG_USER("Illegal shader type %u.\n",
2212 			       (unsigned int) cmd->body.type);
2213 		return -EINVAL;
2214 	}
2215 
2216 	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2217 		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2218 		if (IS_ERR(res)) {
2219 			VMW_DEBUG_USER("Could not find shader for binding.\n");
2220 			return PTR_ERR(res);
2221 		}
2222 
2223 		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2224 						    VMW_RES_DIRTY_NONE);
2225 		if (ret)
2226 			return ret;
2227 	}
2228 
2229 	binding.bi.ctx = ctx_node->ctx;
2230 	binding.bi.res = res;
2231 	binding.bi.bt = vmw_ctx_binding_dx_shader;
2232 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2233 
2234 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2235 
2236 	return 0;
2237 }
2238 
2239 /**
 * vmw_cmd_dx_set_vertex_buffers - Validate SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2241  * command
2242  *
2243  * @dev_priv: Pointer to a device private struct.
2244  * @sw_context: The software context being used for this batch.
2245  * @header: Pointer to the command header in the command stream.
2246  */
2247 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2248 					 struct vmw_sw_context *sw_context,
2249 					 SVGA3dCmdHeader *header)
2250 {
2251 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2252 	struct vmw_ctx_bindinfo_vb binding;
2253 	struct vmw_resource *res;
2254 	struct {
2255 		SVGA3dCmdHeader header;
2256 		SVGA3dCmdDXSetVertexBuffers body;
2257 		SVGA3dVertexBuffer buf[];
2258 	} *cmd;
2259 	int i, ret, num;
2260 
2261 	if (!ctx_node)
2262 		return -EINVAL;
2263 
2264 	cmd = container_of(header, typeof(*cmd), header);
2265 	num = (cmd->header.size - sizeof(cmd->body)) /
2266 		sizeof(SVGA3dVertexBuffer);
2267 	if ((u64)num + (u64)cmd->body.startBuffer >
2268 	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2269 		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2270 		return -EINVAL;
2271 	}
2272 
2273 	for (i = 0; i < num; i++) {
2274 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2275 					VMW_RES_DIRTY_NONE,
2276 					user_surface_converter,
2277 					&cmd->buf[i].sid, &res);
2278 		if (unlikely(ret != 0))
2279 			return ret;
2280 
2281 		binding.bi.ctx = ctx_node->ctx;
2282 		binding.bi.bt = vmw_ctx_binding_vb;
2283 		binding.bi.res = res;
2284 		binding.offset = cmd->buf[i].offset;
2285 		binding.stride = cmd->buf[i].stride;
2286 		binding.slot = i + cmd->body.startBuffer;
2287 
2288 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2289 	}
2290 
2291 	return 0;
2292 }
2293 
2294 /**
 * vmw_cmd_dx_set_index_buffer - Validate SVGA_3D_CMD_DX_SET_INDEX_BUFFER
 * command.
2297  *
2298  * @dev_priv: Pointer to a device private struct.
2299  * @sw_context: The software context being used for this batch.
2300  * @header: Pointer to the command header in the command stream.
2301  */
2302 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2303 				       struct vmw_sw_context *sw_context,
2304 				       SVGA3dCmdHeader *header)
2305 {
2306 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2307 	struct vmw_ctx_bindinfo_ib binding;
2308 	struct vmw_resource *res;
2309 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2310 	int ret;
2311 
2312 	if (!ctx_node)
2313 		return -EINVAL;
2314 
2315 	cmd = container_of(header, typeof(*cmd), header);
2316 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2317 				VMW_RES_DIRTY_NONE, user_surface_converter,
2318 				&cmd->body.sid, &res);
2319 	if (unlikely(ret != 0))
2320 		return ret;
2321 
2322 	binding.bi.ctx = ctx_node->ctx;
2323 	binding.bi.res = res;
2324 	binding.bi.bt = vmw_ctx_binding_ib;
2325 	binding.offset = cmd->body.offset;
2326 	binding.format = cmd->body.format;
2327 
2328 	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2329 
2330 	return 0;
2331 }
2332 
2333 /**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2335  * command
2336  *
2337  * @dev_priv: Pointer to a device private struct.
2338  * @sw_context: The software context being used for this batch.
2339  * @header: Pointer to the command header in the command stream.
2340  */
2341 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2342 					struct vmw_sw_context *sw_context,
2343 					SVGA3dCmdHeader *header)
2344 {
2345 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2346 		container_of(header, typeof(*cmd), header);
2347 	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2348 		sizeof(SVGA3dRenderTargetViewId);
2349 	int ret;
2350 
2351 	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2352 		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2353 		return -EINVAL;
2354 	}
2355 
2356 	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2357 				    0, &cmd->body.depthStencilViewId, 1, 0);
2358 	if (ret)
2359 		return ret;
2360 
2361 	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2362 				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2363 				     num_rt_view, 0);
2364 }
2365 
2366 /**
2367  * vmw_cmd_dx_clear_rendertarget_view - Validate
2368  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2369  *
2370  * @dev_priv: Pointer to a device private struct.
2371  * @sw_context: The software context being used for this batch.
2372  * @header: Pointer to the command header in the command stream.
2373  */
2374 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2375 					      struct vmw_sw_context *sw_context,
2376 					      SVGA3dCmdHeader *header)
2377 {
2378 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2379 		container_of(header, typeof(*cmd), header);
2380 
	return PTR_ERR_OR_ZERO(vmw_view_id_val_add(sw_context, vmw_view_rt,
2382 					   cmd->body.renderTargetViewId));
2383 }
2384 
2385 /**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
2387  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2388  *
2389  * @dev_priv: Pointer to a device private struct.
2390  * @sw_context: The software context being used for this batch.
2391  * @header: Pointer to the command header in the command stream.
2392  */
2393 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2394 					      struct vmw_sw_context *sw_context,
2395 					      SVGA3dCmdHeader *header)
2396 {
2397 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2398 		container_of(header, typeof(*cmd), header);
2399 
	return PTR_ERR_OR_ZERO(vmw_view_id_val_add(sw_context, vmw_view_ds,
2401 					   cmd->body.depthStencilViewId));
2402 }
2403 
2404 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2405 				  struct vmw_sw_context *sw_context,
2406 				  SVGA3dCmdHeader *header)
2407 {
2408 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2409 	struct vmw_resource *srf;
2410 	struct vmw_resource *res;
2411 	enum vmw_view_type view_type;
2412 	int ret;
2413 	/*
2414 	 * This is based on the fact that all affected define commands have the
2415 	 * same initial command body layout.
2416 	 */
2417 	struct {
2418 		SVGA3dCmdHeader header;
2419 		uint32 defined_id;
2420 		uint32 sid;
2421 	} *cmd;
2422 
2423 	if (!ctx_node)
2424 		return -EINVAL;
2425 
2426 	view_type = vmw_view_cmd_to_type(header->id);
2427 	if (view_type == vmw_view_max)
2428 		return -EINVAL;
2429 
2430 	cmd = container_of(header, typeof(*cmd), header);
2431 	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2432 		VMW_DEBUG_USER("Invalid surface id.\n");
2433 		return -EINVAL;
2434 	}
2435 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2436 				VMW_RES_DIRTY_NONE, user_surface_converter,
2437 				&cmd->sid, &srf);
2438 	if (unlikely(ret != 0))
2439 		return ret;
2440 
2441 	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2442 	ret = vmw_cotable_notify(res, cmd->defined_id);
2443 	if (unlikely(ret != 0))
2444 		return ret;
2445 
2446 	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2447 			    cmd->defined_id, header,
2448 			    header->size + sizeof(*header),
2449 			    &sw_context->staged_cmd_res);
2450 }
2451 
2452 /**
2453  * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2454  *
2455  * @dev_priv: Pointer to a device private struct.
2456  * @sw_context: The software context being used for this batch.
2457  * @header: Pointer to the command header in the command stream.
2458  */
2459 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2460 				     struct vmw_sw_context *sw_context,
2461 				     SVGA3dCmdHeader *header)
2462 {
2463 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2464 	struct vmw_ctx_bindinfo_so binding;
2465 	struct vmw_resource *res;
2466 	struct {
2467 		SVGA3dCmdHeader header;
2468 		SVGA3dCmdDXSetSOTargets body;
2469 		SVGA3dSoTarget targets[];
2470 	} *cmd;
2471 	int i, ret, num;
2472 
2473 	if (!ctx_node)
2474 		return -EINVAL;
2475 
2476 	cmd = container_of(header, typeof(*cmd), header);
2477 	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2478 
2479 	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2480 		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2481 		return -EINVAL;
2482 	}
2483 
2484 	for (i = 0; i < num; i++) {
2485 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2486 					VMW_RES_DIRTY_SET,
2487 					user_surface_converter,
2488 					&cmd->targets[i].sid, &res);
2489 		if (unlikely(ret != 0))
2490 			return ret;
2491 
2492 		binding.bi.ctx = ctx_node->ctx;
2493 		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so;
2495 		binding.offset = cmd->targets[i].offset;
2496 		binding.size = cmd->targets[i].sizeInBytes;
2497 		binding.slot = i;
2498 
2499 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2500 	}
2501 
2502 	return 0;
2503 }
2504 
2505 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2506 				struct vmw_sw_context *sw_context,
2507 				SVGA3dCmdHeader *header)
2508 {
2509 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2510 	struct vmw_resource *res;
2511 	/*
2512 	 * This is based on the fact that all affected define commands have
2513 	 * the same initial command body layout.
2514 	 */
2515 	struct {
2516 		SVGA3dCmdHeader header;
2517 		uint32 defined_id;
2518 	} *cmd;
	enum vmw_so_type so_type;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cotable_notify(res, cmd->defined_id);
2531 }
2532 
2533 /**
2534  * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2535  * command
2536  *
2537  * @dev_priv: Pointer to a device private struct.
2538  * @sw_context: The software context being used for this batch.
2539  * @header: Pointer to the command header in the command stream.
2540  */
2541 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2542 					struct vmw_sw_context *sw_context,
2543 					SVGA3dCmdHeader *header)
2544 {
2545 	struct {
2546 		SVGA3dCmdHeader header;
2547 		union {
2548 			SVGA3dCmdDXReadbackSubResource r_body;
2549 			SVGA3dCmdDXInvalidateSubResource i_body;
2550 			SVGA3dCmdDXUpdateSubResource u_body;
2551 			SVGA3dSurfaceId sid;
2552 		};
2553 	} *cmd;
2554 
2555 	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2556 		     offsetof(typeof(*cmd), sid));
2557 	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2558 		     offsetof(typeof(*cmd), sid));
2559 	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2560 		     offsetof(typeof(*cmd), sid));
2561 
2562 	cmd = container_of(header, typeof(*cmd), header);
2563 
2564 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2565 				 VMW_RES_DIRTY_NONE, user_surface_converter,
2566 				 &cmd->sid, NULL);
2567 }
2568 
2569 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2570 				struct vmw_sw_context *sw_context,
2571 				SVGA3dCmdHeader *header)
2572 {
2573 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2574 
2575 	if (!ctx_node)
2576 		return -EINVAL;
2577 
2578 	return 0;
2579 }
2580 
2581 /**
 * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the view
2583  * resource for removal.
2584  *
2585  * @dev_priv: Pointer to a device private struct.
2586  * @sw_context: The software context being used for this batch.
2587  * @header: Pointer to the command header in the command stream.
2588  *
2589  * Check that the view exists, and if it was not created using this command
2590  * batch, conditionally make this command a NOP.
2591  */
2592 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2593 				  struct vmw_sw_context *sw_context,
2594 				  SVGA3dCmdHeader *header)
2595 {
2596 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2597 	struct {
2598 		SVGA3dCmdHeader header;
2599 		union vmw_view_destroy body;
2600 	} *cmd = container_of(header, typeof(*cmd), header);
2601 	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2602 	struct vmw_resource *view;
2603 	int ret;
2604 
2605 	if (!ctx_node)
2606 		return -EINVAL;
2607 
2608 	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2609 			      &sw_context->staged_cmd_res, &view);
2610 	if (ret || !view)
2611 		return ret;
2612 
2613 	/*
2614 	 * If the view wasn't created during this command batch, it might
2615 	 * have been removed due to a context swapout, so add a
2616 	 * relocation to conditionally make this command a NOP to avoid
2617 	 * device errors.
2618 	 */
2619 	return vmw_resource_relocation_add(sw_context, view,
2620 					   vmw_ptr_diff(sw_context->buf_start,
2621 							&cmd->header.id),
2622 					   vmw_res_rel_cond_nop);
2623 }
2624 
2625 /**
2626  * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2627  *
2628  * @dev_priv: Pointer to a device private struct.
2629  * @sw_context: The software context being used for this batch.
2630  * @header: Pointer to the command header in the command stream.
2631  */
2632 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2633 				    struct vmw_sw_context *sw_context,
2634 				    SVGA3dCmdHeader *header)
2635 {
2636 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2637 	struct vmw_resource *res;
2638 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2639 		container_of(header, typeof(*cmd), header);
2640 	int ret;
2641 
2642 	if (!ctx_node)
2643 		return -EINVAL;
2644 
2645 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2646 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2647 	if (ret)
2648 		return ret;
2649 
2650 	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2651 				 cmd->body.shaderId, cmd->body.type,
2652 				 &sw_context->staged_cmd_res);
2653 }
2654 
2655 /**
2656  * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2657  *
2658  * @dev_priv: Pointer to a device private struct.
2659  * @sw_context: The software context being used for this batch.
2660  * @header: Pointer to the command header in the command stream.
2661  */
2662 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2663 				     struct vmw_sw_context *sw_context,
2664 				     SVGA3dCmdHeader *header)
2665 {
2666 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2667 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2668 		container_of(header, typeof(*cmd), header);

	if (!ctx_node)
		return -EINVAL;

	return vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				 &sw_context->staged_cmd_res);
2678 }
2679 
2680 /**
2681  * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2682  *
2683  * @dev_priv: Pointer to a device private struct.
2684  * @sw_context: The software context being used for this batch.
2685  * @header: Pointer to the command header in the command stream.
2686  */
2687 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2688 				  struct vmw_sw_context *sw_context,
2689 				  SVGA3dCmdHeader *header)
2690 {
2691 	struct vmw_resource *ctx;
2692 	struct vmw_resource *res;
2693 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2694 		container_of(header, typeof(*cmd), header);
2695 	int ret;
2696 
2697 	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2698 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2699 					VMW_RES_DIRTY_SET,
2700 					user_context_converter, &cmd->body.cid,
2701 					&ctx);
2702 		if (ret)
2703 			return ret;
2704 	} else {
2705 		struct vmw_ctx_validation_info *ctx_node =
2706 			VMW_GET_CTX_NODE(sw_context);
2707 
2708 		if (!ctx_node)
2709 			return -EINVAL;
2710 
2711 		ctx = ctx_node->ctx;
2712 	}
2713 
2714 	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2715 	if (IS_ERR(res)) {
2716 		VMW_DEBUG_USER("Could not find shader to bind.\n");
2717 		return PTR_ERR(res);
2718 	}
2719 
2720 	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2721 					    VMW_RES_DIRTY_NONE);
2722 	if (ret) {
2723 		VMW_DEBUG_USER("Error creating resource validation node.\n");
2724 		return ret;
2725 	}
2726 
2727 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2728 					 &cmd->body.mobid,
2729 					 cmd->body.offsetInBytes);
2730 }
2731 
2732 /**
2733  * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2734  *
2735  * @dev_priv: Pointer to a device private struct.
2736  * @sw_context: The software context being used for this batch.
2737  * @header: Pointer to the command header in the command stream.
2738  */
2739 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2740 			      struct vmw_sw_context *sw_context,
2741 			      SVGA3dCmdHeader *header)
2742 {
2743 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2744 		container_of(header, typeof(*cmd), header);
2745 
	return PTR_ERR_OR_ZERO(vmw_view_id_val_add(sw_context, vmw_view_sr,
2747 					   cmd->body.shaderResourceViewId));
2748 }
2749 
2750 /**
2751  * vmw_cmd_dx_transfer_from_buffer - Validate
2752  * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2753  *
2754  * @dev_priv: Pointer to a device private struct.
2755  * @sw_context: The software context being used for this batch.
2756  * @header: Pointer to the command header in the command stream.
2757  */
2758 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2759 					   struct vmw_sw_context *sw_context,
2760 					   SVGA3dCmdHeader *header)
2761 {
2762 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2763 		container_of(header, typeof(*cmd), header);
2764 	int ret;
2765 
2766 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2767 				VMW_RES_DIRTY_NONE, user_surface_converter,
2768 				&cmd->body.srcSid, NULL);
2769 	if (ret != 0)
2770 		return ret;
2771 
2772 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2773 				 VMW_RES_DIRTY_SET, user_surface_converter,
2774 				 &cmd->body.destSid, NULL);
2775 }
2776 
2777 /**
2778  * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2779  *
2780  * @dev_priv: Pointer to a device private struct.
2781  * @sw_context: The software context being used for this batch.
2782  * @header: Pointer to the command header in the command stream.
2783  */
2784 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
2787 {
2788 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2789 		container_of(header, typeof(*cmd), header);
2790 
2791 	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2792 		return -EINVAL;
2793 
2794 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2795 				 VMW_RES_DIRTY_SET, user_surface_converter,
2796 				 &cmd->body.surface.sid, NULL);
2797 }
2798 
2799 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
2800 				struct vmw_sw_context *sw_context,
2801 				void *buf, uint32_t *size)
2802 {
2803 	uint32_t size_remaining = *size;
2804 	uint32_t cmd_id;
2805 
2806 	cmd_id = ((uint32_t *)buf)[0];
2807 	switch (cmd_id) {
2808 	case SVGA_CMD_UPDATE:
2809 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
2810 		break;
2811 	case SVGA_CMD_DEFINE_GMRFB:
2812 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
2813 		break;
2814 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
2815 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2816 		break;
2817 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
2819 		break;
2820 	default:
2821 		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
2822 		return -EINVAL;
2823 	}
2824 
2825 	if (*size > size_remaining) {
2826 		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
2827 			       cmd_id);
2828 		return -EINVAL;
2829 	}
2830 
2831 	if (unlikely(!sw_context->kernel)) {
2832 		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
2833 		return -EPERM;
2834 	}
2835 
2836 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
2837 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
2838 
2839 	return 0;
2840 }
2841 
2842 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
2843 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
2844 		    false, false, false),
2845 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
2846 		    false, false, false),
2847 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
2848 		    true, false, false),
2849 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
2850 		    true, false, false),
2851 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
2852 		    true, false, false),
2853 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
2854 		    false, false, false),
2855 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
2856 		    false, false, false),
2857 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
2858 		    true, false, false),
2859 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
2860 		    true, false, false),
2861 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
2862 		    true, false, false),
2863 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
2864 		    &vmw_cmd_set_render_target_check, true, false, false),
2865 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
2866 		    true, false, false),
2867 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
2868 		    true, false, false),
2869 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
2870 		    true, false, false),
2871 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
2872 		    true, false, false),
2873 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
2874 		    true, false, false),
2875 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
2876 		    true, false, false),
2877 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
2878 		    true, false, false),
2879 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
2880 		    false, false, false),
2881 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
2882 		    true, false, false),
2883 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
2884 		    true, false, false),
2885 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
2886 		    true, false, false),
2887 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
2888 		    true, false, false),
2889 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
2890 		    true, false, false),
2891 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
2892 		    true, false, false),
2893 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
2894 		    true, false, false),
2895 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
2896 		    true, false, false),
2897 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
2898 		    true, false, false),
2899 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
2900 		    true, false, false),
2901 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
2902 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
2903 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
2904 		    false, false, false),
2905 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
2906 		    false, false, false),
2907 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
2908 		    false, false, false),
2909 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
2910 		    false, false, false),
2911 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
2912 		    false, false, false),
2913 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
2914 		    false, false, false),
2915 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
2916 		    false, false, false),
2917 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
2918 		    false, false, false),
2919 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
2920 		    false, false, false),
2921 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
2922 		    false, false, false),
2923 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
2924 		    false, false, false),
2925 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
2926 		    false, false, false),
2927 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
2928 		    false, false, false),
2929 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
2930 		    false, false, true),
2931 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
2932 		    false, false, true),
2933 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
2934 		    false, false, true),
2935 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
2936 		    false, false, true),
2937 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
2938 		    false, false, true),
2939 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
2940 		    false, false, true),
2941 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
2942 		    false, false, true),
2943 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
2944 		    false, false, true),
2945 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
2946 		    true, false, true),
2947 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
2948 		    false, false, true),
2949 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
2950 		    true, false, true),
2951 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
2952 		    &vmw_cmd_update_gb_surface, true, false, true),
2953 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2954 		    &vmw_cmd_readback_gb_image, true, false, true),
2955 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2956 		    &vmw_cmd_readback_gb_surface, true, false, true),
2957 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2958 		    &vmw_cmd_invalidate_gb_image, true, false, true),
2959 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2960 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
2961 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2962 		    false, false, true),
2963 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2964 		    false, false, true),
2965 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2966 		    false, false, true),
2967 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2968 		    false, false, true),
2969 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2970 		    false, false, true),
2971 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2972 		    false, false, true),
2973 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2974 		    true, false, true),
2975 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2976 		    false, false, true),
2977 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2978 		    false, false, false),
2979 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2980 		    true, false, true),
2981 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2982 		    true, false, true),
2983 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2984 		    true, false, true),
2985 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2986 		    true, false, true),
2987 	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
2988 		    true, false, true),
2989 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2990 		    false, false, true),
2991 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2992 		    false, false, true),
2993 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2994 		    false, false, true),
2995 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2996 		    false, false, true),
2997 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2998 		    false, false, true),
2999 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3000 		    false, false, true),
3001 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3002 		    false, false, true),
3003 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3004 		    false, false, true),
3005 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3006 		    false, false, true),
3007 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3008 		    false, false, true),
3009 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3010 		    true, false, true),
3011 	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3012 		    false, false, true),
3013 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3014 		    false, false, true),
3015 	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3016 		    false, false, true),
3017 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3018 		    false, false, true),
3019 
3020 	/* SM commands */
3021 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3022 		    false, false, true),
3023 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3024 		    false, false, true),
3025 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3026 		    false, false, true),
3027 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3028 		    false, false, true),
3029 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3030 		    false, false, true),
3031 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3032 		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3033 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3034 		    &vmw_cmd_dx_set_shader_res, true, false, true),
3035 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3036 		    true, false, true),
3037 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3038 		    true, false, true),
3039 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3040 		    true, false, true),
3041 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3042 		    true, false, true),
3043 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3044 		    true, false, true),
3045 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3046 		    &vmw_cmd_dx_cid_check, true, false, true),
3047 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3048 		    true, false, true),
3049 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3050 		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3051 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3052 		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3053 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3054 		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3055 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3056 		    true, false, true),
3057 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3058 		    &vmw_cmd_dx_cid_check, true, false, true),
3059 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3060 		    &vmw_cmd_dx_cid_check, true, false, true),
3061 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3062 		    true, false, true),
3063 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3064 		    true, false, true),
3065 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3066 		    true, false, true),
3067 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3068 		    &vmw_cmd_dx_cid_check, true, false, true),
3069 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3070 		    true, false, true),
3071 	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3072 		    true, false, true),
3073 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3074 		    true, false, true),
3075 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3076 		    true, false, true),
3077 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3078 		    true, false, true),
3079 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3080 		    true, false, true),
3081 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3082 		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3083 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3084 		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3085 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3086 		    true, false, true),
3087 	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3088 		    true, false, true),
3089 	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3090 		    &vmw_cmd_dx_check_subresource, true, false, true),
3091 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3092 		    &vmw_cmd_dx_check_subresource, true, false, true),
3093 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3094 		    &vmw_cmd_dx_check_subresource, true, false, true),
3095 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3096 		    &vmw_cmd_dx_view_define, true, false, true),
3097 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3098 		    &vmw_cmd_dx_view_remove, true, false, true),
3099 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3100 		    &vmw_cmd_dx_view_define, true, false, true),
3101 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3102 		    &vmw_cmd_dx_view_remove, true, false, true),
3103 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3104 		    &vmw_cmd_dx_view_define, true, false, true),
3105 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3106 		    &vmw_cmd_dx_view_remove, true, false, true),
3107 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3108 		    &vmw_cmd_dx_so_define, true, false, true),
3109 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3110 		    &vmw_cmd_dx_cid_check, true, false, true),
3111 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3112 		    &vmw_cmd_dx_so_define, true, false, true),
3113 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3114 		    &vmw_cmd_dx_cid_check, true, false, true),
3115 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3116 		    &vmw_cmd_dx_so_define, true, false, true),
3117 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3118 		    &vmw_cmd_dx_cid_check, true, false, true),
3119 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3120 		    &vmw_cmd_dx_so_define, true, false, true),
3121 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3122 		    &vmw_cmd_dx_cid_check, true, false, true),
3123 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3124 		    &vmw_cmd_dx_so_define, true, false, true),
3125 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3126 		    &vmw_cmd_dx_cid_check, true, false, true),
3127 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3128 		    &vmw_cmd_dx_define_shader, true, false, true),
3129 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3130 		    &vmw_cmd_dx_destroy_shader, true, false, true),
3131 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3132 		    &vmw_cmd_dx_bind_shader, true, false, true),
3133 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3134 		    &vmw_cmd_dx_so_define, true, false, true),
3135 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3136 		    &vmw_cmd_dx_cid_check, true, false, true),
3137 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3138 		    true, false, true),
3139 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3140 		    &vmw_cmd_dx_set_so_targets, true, false, true),
3141 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3142 		    &vmw_cmd_dx_cid_check, true, false, true),
3143 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3144 		    &vmw_cmd_dx_cid_check, true, false, true),
3145 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3146 		    &vmw_cmd_buffer_copy_check, true, false, true),
3147 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3148 		    &vmw_cmd_pred_copy_check, true, false, true),
3149 	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3150 		    &vmw_cmd_dx_transfer_from_buffer,
3151 		    true, false, true),
3152 	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3153 		    true, false, true),
3154 };
3155 
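/**
 * vmw_cmd_describe - Return the name and size of the command starting at @buf
 *
 * @buf: Pointer to the start of the command.
 * @size: Set to the size of the command, including any header.
 * @cmd: Set to a string naming the command.
 *
 * Returns true if the command was recognized, false otherwise. Intended as
 * a debugging aid when dumping a command stream; a caller could, for
 * example, walk a stream of @buf_len bytes (an illustrative variable, not
 * defined here) as follows:
 *
 *	const char *name;
 *	u32 pos = 0, cmd_size;
 *
 *	while (pos < buf_len &&
 *	       vmw_cmd_describe(buf + pos, &cmd_size, &name)) {
 *		pr_debug("cmd %s, size %u\n", name, cmd_size);
 *		pos += cmd_size;
 *	}
 */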
3156 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3157 {
3158 	u32 cmd_id = ((u32 *) buf)[0];
3159 
3160 	if (cmd_id >= SVGA_CMD_MAX) {
3161 		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3162 		const struct vmw_cmd_entry *entry;
3163 
3164 		*size = header->size + sizeof(SVGA3dCmdHeader);
3165 		cmd_id = header->id;
3166 		if (cmd_id >= SVGA_3D_CMD_MAX)
3167 			return false;
3168 
3169 		cmd_id -= SVGA_3D_CMD_BASE;
3170 		entry = &vmw_cmd_entries[cmd_id];
3171 		*cmd = entry->cmd_name;
3172 		return true;
3173 	}
3174 
3175 	switch (cmd_id) {
3176 	case SVGA_CMD_UPDATE:
3177 		*cmd = "SVGA_CMD_UPDATE";
3178 		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3179 		break;
3180 	case SVGA_CMD_DEFINE_GMRFB:
3181 		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3182 		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3183 		break;
3184 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3185 		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3186 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3187 		break;
3188 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3189 		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3191 		break;
3192 	default:
3193 		*cmd = "UNKNOWN";
3194 		*size = 0;
3195 		return false;
3196 	}
3197 
3198 	return true;
3199 }
3200 
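/**
 * vmw_cmd_check - Verify a single command in a command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The command submission context.
 * @buf: Pointer to the start of the command.
 * @size: On input, the number of bytes remaining in the command stream. On
 * successful return, set to the size of the verified command, including its
 * header.
 *
 * Looks the command up in the command table and calls its verifier function.
 * Commands may be rejected because they are unknown, larger than the
 * remaining stream, privileged, or incompatible with the device's
 * guest-backed-object capability. Returns 0 on success, negative error code
 * on failure.
 */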
3201 static int vmw_cmd_check(struct vmw_private *dev_priv,
3202 			 struct vmw_sw_context *sw_context, void *buf,
3203 			 uint32_t *size)
3204 {
3205 	uint32_t cmd_id;
3206 	uint32_t size_remaining = *size;
3207 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3208 	int ret;
3209 	const struct vmw_cmd_entry *entry;
3210 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3211 
3212 	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
3214 	if (unlikely(cmd_id < SVGA_CMD_MAX))
3215 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3216 
3218 	cmd_id = header->id;
3219 	*size = header->size + sizeof(SVGA3dCmdHeader);
3220 
3221 	cmd_id -= SVGA_3D_CMD_BASE;
3222 	if (unlikely(*size > size_remaining))
3223 		goto out_invalid;
3224 
3225 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3226 		goto out_invalid;
3227 
3228 	entry = &vmw_cmd_entries[cmd_id];
3229 	if (unlikely(!entry->func))
3230 		goto out_invalid;
3231 
3232 	if (unlikely(!entry->user_allow && !sw_context->kernel))
3233 		goto out_privileged;
3234 
3235 	if (unlikely(entry->gb_disable && gb))
3236 		goto out_old;
3237 
3238 	if (unlikely(entry->gb_enable && !gb))
3239 		goto out_new;
3240 
3241 	ret = entry->func(dev_priv, sw_context, header);
3242 	if (unlikely(ret != 0)) {
3243 		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3244 			       cmd_id + SVGA_3D_CMD_BASE, ret);
3245 		return ret;
3246 	}
3247 
3248 	return 0;
3249 out_invalid:
3250 	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3251 		       cmd_id + SVGA_3D_CMD_BASE);
3252 	return -EINVAL;
3253 out_privileged:
3254 	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3255 		       cmd_id + SVGA_3D_CMD_BASE);
3256 	return -EPERM;
3257 out_old:
3258 	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3259 		       cmd_id + SVGA_3D_CMD_BASE);
3260 	return -EINVAL;
3261 out_new:
3262 	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3263 		       cmd_id + SVGA_3D_CMD_BASE);
3264 	return -EINVAL;
3265 }
3266 
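/**
 * vmw_cmd_check_all - Verify all commands in a command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The command submission context.
 * @buf: Pointer to the start of the command stream.
 * @size: Size of the command stream in bytes.
 *
 * Walks the command stream, verifying each command in turn with
 * vmw_cmd_check(). Returns 0 on success, negative error code on failure.
 */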
3267 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3268 			     struct vmw_sw_context *sw_context, void *buf,
3269 			     uint32_t size)
3270 {
3271 	int32_t cur_size = size;
3272 	int ret;
3273 
3274 	sw_context->buf_start = buf;
3275 
3276 	while (cur_size > 0) {
3277 		size = cur_size;
3278 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3279 		if (unlikely(ret != 0))
3280 			return ret;
3281 		buf = (void *)((unsigned long) buf + size);
3282 		cur_size -= size;
3283 	}
3284 
3285 	if (unlikely(cur_size != 0)) {
3286 		VMW_DEBUG_USER("Command verifier out of sync.\n");
3287 		return -EINVAL;
3288 	}
3289 
3290 	return 0;
3291 }
3292 
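/**
 * vmw_free_relocations - Drop the command submission context's relocation list
 *
 * @sw_context: The command submission context.
 *
 * The list entries themselves are validation context memory, so only the
 * list head needs to be reinitialized here.
 */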
3293 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3294 {
3295 	/* Memory is validation context memory, so no need to free it */
3296 	INIT_LIST_HEAD(&sw_context->bo_relocations);
3297 }
3298 
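/**
 * vmw_apply_relocations - Patch buffer object addresses in the command stream
 *
 * @sw_context: The command submission context holding the relocation list.
 *
 * For each recorded relocation, rewrite the guest pointer or MOB id in the
 * command stream to match the final placement of the buffer object, then
 * free the relocation list.
 */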
3299 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3300 {
3301 	struct vmw_relocation *reloc;
3302 	struct ttm_buffer_object *bo;
3303 
3304 	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3305 		bo = &reloc->vbo->base;
3306 		switch (bo->mem.mem_type) {
3307 		case TTM_PL_VRAM:
3308 			reloc->location->offset += bo->offset;
3309 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3310 			break;
3311 		case VMW_PL_GMR:
3312 			reloc->location->gmrId = bo->mem.start;
3313 			break;
3314 		case VMW_PL_MOB:
3315 			*reloc->mob_loc = bo->mem.start;
3316 			break;
3317 		default:
3318 			BUG();
3319 		}
3320 	}
3321 	vmw_free_relocations(sw_context);
3322 }
3323 
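/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough
 *
 * @sw_context: The command submission context.
 * @size: The requested minimum buffer size in bytes.
 *
 * Grows the bounce buffer size by roughly 1.5x, page-aligned, until it can
 * hold @size bytes, then reallocates the buffer with vmalloc(). As an
 * illustration, assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is 32768 and PAGE_SIZE
 * is 4096, a request for 100000 bytes grows the size as
 * 32768 -> 49152 -> 73728 -> 110592 before the allocation is made.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */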
3324 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3325 				 uint32_t size)
3326 {
3327 	if (likely(sw_context->cmd_bounce_size >= size))
3328 		return 0;
3329 
3330 	if (sw_context->cmd_bounce_size == 0)
3331 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3332 
3333 	while (sw_context->cmd_bounce_size < size) {
3334 		sw_context->cmd_bounce_size =
3335 			PAGE_ALIGN(sw_context->cmd_bounce_size +
3336 				   (sw_context->cmd_bounce_size >> 1));
3337 	}
3338 
3339 	vfree(sw_context->cmd_bounce);
3340 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3341 
3342 	if (sw_context->cmd_bounce == NULL) {
3343 		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3344 		sw_context->cmd_bounce_size = 0;
3345 		return -ENOMEM;
3346 	}
3347 
3348 	return 0;
3349 }
3350 
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: Identifies the calling file. Must be non-NULL if @p_handle is
 * non-NULL.
 * @dev_priv: Pointer to a device private struct.
 * @p_fence: Returns a ref-counted pointer to the created fence object.
 * @p_handle: If non-NULL, a user-space handle for the fence is created and
 * returned here.
 *
 * Creates a fence object and submits a command stream marker. If this fails
 * for some reason, we sync the fifo and return NULL. It is then safe to
 * fence buffers with a NULL pointer.
 */
3362 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3363 			       struct vmw_private *dev_priv,
3364 			       struct vmw_fence_obj **p_fence,
3365 			       uint32_t *p_handle)
3366 {
3367 	uint32_t sequence;
3368 	int ret;
3369 	bool synced = false;
3370 
3371 	/* p_handle implies file_priv. */
3372 	BUG_ON(p_handle != NULL && file_priv == NULL);
3373 
3374 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
3375 	if (unlikely(ret != 0)) {
3376 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3377 		synced = true;
3378 	}
3379 
3380 	if (p_handle != NULL)
3381 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3382 					    sequence, p_fence, p_handle);
3383 	else
3384 		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3385 
3386 	if (unlikely(ret != 0 && !synced)) {
3387 		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3388 					 false, VMW_FENCE_WAIT_TIMEOUT);
3389 		*p_fence = NULL;
3390 	}
3391 
3392 	return ret;
3393 }
3394 
3395 /**
3396  * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3397  *
3398  * @dev_priv: Pointer to a vmw_private struct.
3399  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3400  * @ret: Return value from fence object creation.
3401  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3402  * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is expected to be left
 * untouched, and since user-space typically preloads that member with -EFAULT,
 * the copy failure can then be detected.
 *
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the user-space
 * reference.
3415  */
3416 void
3417 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3418 			    struct vmw_fpriv *vmw_fp, int ret,
3419 			    struct drm_vmw_fence_rep __user *user_fence_rep,
3420 			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3421 			    int32_t out_fence_fd, struct sync_file *sync_file)
3422 {
3423 	struct drm_vmw_fence_rep fence_rep;
3424 
3425 	if (user_fence_rep == NULL)
3426 		return;
3427 
3428 	memset(&fence_rep, 0, sizeof(fence_rep));
3429 
3430 	fence_rep.error = ret;
3431 	fence_rep.fd = out_fence_fd;
3432 	if (ret == 0) {
3433 		BUG_ON(fence == NULL);
3434 
3435 		fence_rep.handle = fence_handle;
3436 		fence_rep.seqno = fence->base.seqno;
3437 		vmw_update_seqno(dev_priv, &dev_priv->fifo);
3438 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3439 	}
3440 
3441 	/*
3442 	 * copy_to_user errors will be detected by user space not seeing
3443 	 * fence_rep::error filled in. Typically user-space would have pre-set
3444 	 * that member to -EFAULT.
3445 	 */
3446 	ret = copy_to_user(user_fence_rep, &fence_rep,
3447 			   sizeof(fence_rep));
3448 
3449 	/*
3450 	 * User-space lost the fence object. We need to sync and unreference the
3451 	 * handle.
3452 	 */
3453 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3454 		if (sync_file)
3455 			fput(sync_file->file);
3456 
3457 		if (fence_rep.fd != -1) {
3458 			put_unused_fd(fence_rep.fd);
3459 			fence_rep.fd = -1;
3460 		}
3461 
3462 		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3463 					  TTM_REF_USAGE);
3464 		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3465 		(void) vmw_fence_obj_wait(fence, false, false,
3466 					  VMW_FENCE_WAIT_TIMEOUT);
3467 	}
3468 }
3469 
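/*
 * A minimal sketch of the user-space side of the convention described
 * above, assuming an execbuf argument struct @arg has already been set up
 * (illustrative only; recover() stands in for application error handling):
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (unsigned long) &rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error == -EFAULT)
 *		recover();
 */
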
3470 /**
3471  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3472  *
3473  * @dev_priv: Pointer to a device private structure.
3474  * @kernel_commands: Pointer to the unpatched command batch.
3475  * @command_size: Size of the unpatched command batch.
3476  * @sw_context: Structure holding the relocation lists.
3477  *
3478  * Side effects: If this function returns 0, then the command batch pointed to
3479  * by @kernel_commands will have been modified.
3480  */
3481 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3482 				   void *kernel_commands, u32 command_size,
3483 				   struct vmw_sw_context *sw_context)
3484 {
3485 	void *cmd;
3486 
3487 	if (sw_context->dx_ctx_node)
3488 		cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
3489 					  sw_context->dx_ctx_node->ctx->id);
3490 	else
3491 		cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
3492 
3493 	if (!cmd)
3494 		return -ENOMEM;
3495 
3496 	vmw_apply_relocations(sw_context);
3497 	memcpy(cmd, kernel_commands, command_size);
3498 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3499 	vmw_resource_relocations_free(&sw_context->res_relocations);
3500 	vmw_fifo_commit(dev_priv, command_size);
3501 
3502 	return 0;
3503 }
3504 
3505 /**
3506  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3507  * command buffer manager.
3508  *
3509  * @dev_priv: Pointer to a device private structure.
3510  * @header: Opaque handle to the command buffer allocation.
3511  * @command_size: Size of the unpatched command batch.
3512  * @sw_context: Structure holding the relocation lists.
3513  *
3514  * Side effects: If this function returns 0, then the command buffer represented
3515  * by @header will have been modified.
3516  */
3517 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3518 				     struct vmw_cmdbuf_header *header,
3519 				     u32 command_size,
3520 				     struct vmw_sw_context *sw_context)
3521 {
3522 	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3523 		  SVGA3D_INVALID_ID);
3524 	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3525 				       header);
3526 
3527 	vmw_apply_relocations(sw_context);
3528 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3529 	vmw_resource_relocations_free(&sw_context->res_relocations);
3530 	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3531 
3532 	return 0;
3533 }
3534 
3535 /**
3536  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3537  * submission using a command buffer.
3538  *
3539  * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-space pointer to preexisting commands, or NULL if
 * the commands reside in user-space only.
 * @command_size: Size of the unpatched command batch.
3542  * @header: Out parameter returning the opaque pointer to the command buffer.
3543  *
3544  * This function checks whether we can use the command buffer manager for
3545  * submission and if so, creates a command buffer of suitable size and copies
3546  * the user data into that buffer.
3547  *
3548  * On successful return, the function returns a pointer to the data in the
3549  * command buffer and *@header is set to non-NULL.
3550  *
 * If command buffers could not be used, the function returns the value of
 * @kernel_commands as passed in. That value may be NULL. In that case, the
 * value of *@header will be set to NULL.
3554  *
3555  * If an error is encountered, the function will return a pointer error value.
3556  * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
3558  */
3559 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3560 				void __user *user_commands,
3561 				void *kernel_commands, u32 command_size,
3562 				struct vmw_cmdbuf_header **header)
3563 {
3564 	size_t cmdbuf_size;
3565 	int ret;
3566 
3567 	*header = NULL;
3568 	if (command_size > SVGA_CB_MAX_SIZE) {
3569 		VMW_DEBUG_USER("Command buffer is too large.\n");
3570 		return ERR_PTR(-EINVAL);
3571 	}
3572 
3573 	if (!dev_priv->cman || kernel_commands)
3574 		return kernel_commands;
3575 
3576 	/* If possible, add a little space for fencing. */
3577 	cmdbuf_size = command_size + 512;
3578 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3579 	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3580 					   header);
3581 	if (IS_ERR(kernel_commands))
3582 		return kernel_commands;
3583 
3584 	ret = copy_from_user(kernel_commands, user_commands, command_size);
3585 	if (ret) {
3586 		VMW_DEBUG_USER("Failed copying commands.\n");
3587 		vmw_cmdbuf_header_free(*header);
3588 		*header = NULL;
3589 		return ERR_PTR(-EFAULT);
3590 	}
3591 
3592 	return kernel_commands;
3593 }
3594 
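/**
 * vmw_execbuf_tie_context - Set up a DX context for command submission
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The command submission context.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID.
 *
 * Unless @handle is SVGA3D_INVALID_ID, look up the identified context, add
 * it to the validation list and make it the active submission context by
 * setting up @sw_context->dx_ctx_node and @sw_context->man. Returns 0 on
 * success, negative error code on failure.
 */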
3595 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3596 				   struct vmw_sw_context *sw_context,
3597 				   uint32_t handle)
3598 {
3599 	struct vmw_resource *res;
3600 	int ret;
3601 	unsigned int size;
3602 
3603 	if (handle == SVGA3D_INVALID_ID)
3604 		return 0;
3605 
3606 	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
3607 	ret = vmw_validation_preload_res(sw_context->ctx, size);
3608 	if (ret)
3609 		return ret;
3610 
3611 	res = vmw_user_resource_noref_lookup_handle
3612 		(dev_priv, sw_context->fp->tfile, handle,
3613 		 user_context_converter);
3614 	if (IS_ERR(res)) {
3615 		VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
3616 			       (unsigned int) handle);
3617 		return PTR_ERR(res);
3618 	}
3619 
3620 	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
3621 	if (unlikely(ret != 0))
3622 		return ret;
3623 
3624 	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
3625 	sw_context->man = vmw_context_res_man(res);
3626 
3627 	return 0;
3628 }
3629 
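/**
 * vmw_execbuf_process - Verify, patch and submit a command stream
 *
 * @file_priv: Identifies the calling file.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command stream, or NULL.
 * @kernel_commands: Kernel-space pointer to the command stream, or NULL.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: Command throttling in microseconds. Zero disables throttling.
 * @dx_context_handle: Handle of the DX context to submit against, or
 * SVGA3D_INVALID_ID for none.
 * @user_fence_rep: User-space address to receive fence information, or NULL.
 * @out_fence: If non-NULL, the created fence object is returned here and the
 * caller assumes its reference.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags for this submission.
 *
 * Copies in the command stream if necessary, verifies and patches it,
 * validates all resources and buffer objects it references, submits it to
 * the device and fences the submission. Returns 0 on success, negative
 * error code on failure.
 */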
3630 int vmw_execbuf_process(struct drm_file *file_priv,
3631 			struct vmw_private *dev_priv,
3632 			void __user *user_commands, void *kernel_commands,
3633 			uint32_t command_size, uint64_t throttle_us,
3634 			uint32_t dx_context_handle,
3635 			struct drm_vmw_fence_rep __user *user_fence_rep,
3636 			struct vmw_fence_obj **out_fence, uint32_t flags)
3637 {
3638 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
3639 	struct vmw_fence_obj *fence = NULL;
3640 	struct vmw_cmdbuf_header *header;
3641 	uint32_t handle = 0;
3642 	int ret;
3643 	int32_t out_fence_fd = -1;
3644 	struct sync_file *sync_file = NULL;
3645 	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
3646 
3647 	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
3648 
3649 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3650 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3651 		if (out_fence_fd < 0) {
3652 			VMW_DEBUG_USER("Failed to get a fence fd.\n");
3653 			return out_fence_fd;
3654 		}
3655 	}
3656 
3657 	if (throttle_us) {
3658 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3659 				   throttle_us);
3660 
3661 		if (ret)
3662 			goto out_free_fence_fd;
3663 	}
3664 
3665 	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3666 					     kernel_commands, command_size,
3667 					     &header);
3668 	if (IS_ERR(kernel_commands)) {
3669 		ret = PTR_ERR(kernel_commands);
3670 		goto out_free_fence_fd;
3671 	}
3672 
3673 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3674 	if (ret) {
3675 		ret = -ERESTARTSYS;
3676 		goto out_free_header;
3677 	}
3678 
3679 	sw_context->kernel = false;
3680 	if (kernel_commands == NULL) {
3681 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
3682 		if (unlikely(ret != 0))
3683 			goto out_unlock;
3684 
3685 		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
3686 				     command_size);
3687 		if (unlikely(ret != 0)) {
3688 			ret = -EFAULT;
3689 			VMW_DEBUG_USER("Failed copying commands.\n");
3690 			goto out_unlock;
3691 		}
3692 
3693 		kernel_commands = sw_context->cmd_bounce;
3694 	} else if (!header) {
3695 		sw_context->kernel = true;
3696 	}
3697 
3698 	sw_context->fp = vmw_fpriv(file_priv);
3699 	INIT_LIST_HEAD(&sw_context->ctx_list);
3700 	sw_context->cur_query_bo = dev_priv->pinned_bo;
3701 	sw_context->last_query_ctx = NULL;
3702 	sw_context->needs_post_query_barrier = false;
3703 	sw_context->dx_ctx_node = NULL;
3704 	sw_context->dx_query_mob = NULL;
3705 	sw_context->dx_query_ctx = NULL;
3706 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3707 	INIT_LIST_HEAD(&sw_context->res_relocations);
3708 	INIT_LIST_HEAD(&sw_context->bo_relocations);
3709 
3710 	if (sw_context->staged_bindings)
3711 		vmw_binding_state_reset(sw_context->staged_bindings);
3712 
3713 	if (!sw_context->res_ht_initialized) {
3714 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3715 		if (unlikely(ret != 0))
3716 			goto out_unlock;
3717 
3718 		sw_context->res_ht_initialized = true;
3719 	}
3720 
3721 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3722 	sw_context->ctx = &val_ctx;
3723 	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3724 	if (unlikely(ret != 0))
3725 		goto out_err_nores;
3726 
3727 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3728 				command_size);
3729 	if (unlikely(ret != 0))
3730 		goto out_err_nores;
3731 
3732 	ret = vmw_resources_reserve(sw_context);
3733 	if (unlikely(ret != 0))
3734 		goto out_err_nores;
3735 
3736 	ret = vmw_validation_bo_reserve(&val_ctx, true);
3737 	if (unlikely(ret != 0))
3738 		goto out_err_nores;
3739 
3740 	ret = vmw_validation_bo_validate(&val_ctx, true);
3741 	if (unlikely(ret != 0))
3742 		goto out_err;
3743 
3744 	ret = vmw_validation_res_validate(&val_ctx, true);
3745 	if (unlikely(ret != 0))
3746 		goto out_err;
3747 
3748 	vmw_validation_drop_ht(&val_ctx);
3749 
3750 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
3751 	if (unlikely(ret != 0)) {
3752 		ret = -ERESTARTSYS;
3753 		goto out_err;
3754 	}
3755 
3756 	if (dev_priv->has_mob) {
3757 		ret = vmw_rebind_contexts(sw_context);
3758 		if (unlikely(ret != 0))
3759 			goto out_unlock_binding;
3760 	}
3761 
3762 	if (!header) {
3763 		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
3764 					      command_size, sw_context);
3765 	} else {
3766 		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
3767 						sw_context);
3768 		header = NULL;
3769 	}
3770 	mutex_unlock(&dev_priv->binding_mutex);
3771 	if (ret)
3772 		goto out_err;
3773 
3774 	vmw_query_bo_switch_commit(dev_priv, sw_context);
3775 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
3776 					 (user_fence_rep) ? &handle : NULL);
3777 	/*
3778 	 * This error is harmless, because if fence submission fails,
3779 	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
3781 	 */
3782 	if (ret != 0)
3783 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3784 
3785 	vmw_execbuf_bindings_commit(sw_context, false);
3786 	vmw_bind_dx_query_mob(sw_context);
3787 	vmw_validation_res_unreserve(&val_ctx, false);
3788 
3789 	vmw_validation_bo_fence(sw_context->ctx, fence);
3790 
3791 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3792 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
3793 
3794 	/*
3795 	 * If anything fails here, give up trying to export the fence and do a
3796 	 * sync since the user mode will not be able to sync the fence itself.
3797 	 * This ensures we are still functionally correct.
3798 	 */
3799 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3801 		sync_file = sync_file_create(&fence->base);
3802 		if (!sync_file) {
3803 			VMW_DEBUG_USER("Sync file create failed for fence\n");
3804 			put_unused_fd(out_fence_fd);
3805 			out_fence_fd = -1;
3806 
3807 			(void) vmw_fence_obj_wait(fence, false, false,
3808 						  VMW_FENCE_WAIT_TIMEOUT);
3809 		} else {
3810 			/* Link the fence with the FD created earlier */
3811 			fd_install(out_fence_fd, sync_file->file);
3812 		}
3813 	}
3814 
3815 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
3816 				    user_fence_rep, fence, handle, out_fence_fd,
3817 				    sync_file);
3818 
3819 	/* Don't unreference when handing fence out */
3820 	if (unlikely(out_fence != NULL)) {
3821 		*out_fence = fence;
3822 		fence = NULL;
3823 	} else if (likely(fence != NULL)) {
3824 		vmw_fence_obj_unreference(&fence);
3825 	}
3826 
3827 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
3828 	mutex_unlock(&dev_priv->cmdbuf_mutex);
3829 
3830 	/*
3831 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3832 	 * in resource destruction paths.
3833 	 */
3834 	vmw_validation_unref_lists(&val_ctx);
3835 
3836 	return 0;
3837 
3838 out_unlock_binding:
3839 	mutex_unlock(&dev_priv->binding_mutex);
3840 out_err:
3841 	vmw_validation_bo_backoff(&val_ctx);
3842 out_err_nores:
3843 	vmw_execbuf_bindings_commit(sw_context, true);
3844 	vmw_validation_res_unreserve(&val_ctx, true);
3845 	vmw_resource_relocations_free(&sw_context->res_relocations);
3846 	vmw_free_relocations(sw_context);
3847 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3848 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
3849 out_unlock:
3850 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
3851 	vmw_validation_drop_ht(&val_ctx);
3852 	WARN_ON(!list_empty(&sw_context->ctx_list));
3853 	mutex_unlock(&dev_priv->cmdbuf_mutex);
3854 
3855 	/*
3856 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3857 	 * in resource destruction paths.
3858 	 */
3859 	vmw_validation_unref_lists(&val_ctx);
3860 out_free_header:
3861 	if (header)
3862 		vmw_cmdbuf_header_free(header);
3863 out_free_fence_fd:
3864 	if (out_fence_fd >= 0)
3865 		put_unused_fd(out_fence_fd);
3866 
3867 	return ret;
3868 }
3869 
3870 /**
3871  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
3872  *
3873  * @dev_priv: The device private structure.
3874  *
3875  * This function is called to idle the fifo and unpin the query buffer if the
3876  * normal way to do this hits an error, which should typically be extremely
3877  * rare.
3878  */
3879 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
3880 {
3881 	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
3882 
3883 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
3884 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3885 	if (dev_priv->dummy_query_bo_pinned) {
3886 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3887 		dev_priv->dummy_query_bo_pinned = false;
3888 	}
3889 }
3890 
3892 /**
3893  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
3894  * bo.
3895  *
3896  * @dev_priv: The device private structure.
3897  * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
3898  * query barrier that flushes all queries touching the current buffer pointed to
3899  * by @dev_priv->pinned_bo
3900  *
3901  * This function should be used to unpin the pinned query bo, or as a query
3902  * barrier when we need to make sure that all queries have finished before the
 * next fifo command (for example on hardware context destruction, where the
 * hardware may otherwise leak unfinished queries).
3905  *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
3908  *
3909  * The function will synchronize on the previous query barrier, and will thus
3910  * not finish until that barrier has executed.
3911  *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
3913  * calling this function.
3914  */
3915 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
3916 				     struct vmw_fence_obj *fence)
3917 {
3918 	int ret = 0;
3919 	struct vmw_fence_obj *lfence = NULL;
3920 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
3921 
3922 	if (dev_priv->pinned_bo == NULL)
3923 		goto out_unlock;
3924 
3925 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
3926 				    false);
3927 	if (ret)
3928 		goto out_no_reserve;
3929 
3930 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
3931 				    false);
3932 	if (ret)
3933 		goto out_no_reserve;
3934 
3935 	ret = vmw_validation_bo_reserve(&val_ctx, false);
3936 	if (ret)
3937 		goto out_no_reserve;
3938 
3939 	if (dev_priv->query_cid_valid) {
3940 		BUG_ON(fence != NULL);
3941 		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
3942 		if (ret)
3943 			goto out_no_emit;
3944 		dev_priv->query_cid_valid = false;
3945 	}
3946 
3947 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3948 	if (dev_priv->dummy_query_bo_pinned) {
3949 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3950 		dev_priv->dummy_query_bo_pinned = false;
3951 	}
3952 	if (fence == NULL) {
3953 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
3954 						  NULL);
3955 		fence = lfence;
3956 	}
3957 	vmw_validation_bo_fence(&val_ctx, fence);
3958 	if (lfence != NULL)
3959 		vmw_fence_obj_unreference(&lfence);
3960 
3961 	vmw_validation_unref_lists(&val_ctx);
3962 	vmw_bo_unreference(&dev_priv->pinned_bo);
3963 
3964 out_unlock:
3965 	return;
3966 out_no_emit:
3967 	vmw_validation_bo_backoff(&val_ctx);
3968 out_no_reserve:
3969 	vmw_validation_unref_lists(&val_ctx);
3970 	vmw_execbuf_unpin_panic(dev_priv);
3971 	vmw_bo_unreference(&dev_priv->pinned_bo);
3972 }
3973 
3974 /**
3975  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
3976  *
3977  * @dev_priv: The device private structure.
3978  *
3979  * This function should be used to unpin the pinned query bo, or as a query
3980  * barrier when we need to make sure that all queries have finished before the
 * next fifo command (for example on hardware context destruction, where the
 * hardware may otherwise leak unfinished queries).
3983  *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
3986  *
3987  * The function will synchronize on the previous query barrier, and will thus
3988  * not finish until that barrier has executed.
3989  */
3990 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
3991 {
3992 	mutex_lock(&dev_priv->cmdbuf_mutex);
3993 	if (dev_priv->query_cid_valid)
3994 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
3995 	mutex_unlock(&dev_priv->cmdbuf_mutex);
3996 }
3997 
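/**
 * vmw_execbuf_ioctl - Entry point for the execbuf ioctl
 *
 * @dev: Pointer to the drm device.
 * @data: User-space address of the ioctl argument.
 * @file_priv: Identifies the calling file.
 * @size: Size of the user-space argument in bytes.
 *
 * Copies in the versioned ioctl argument, optionally waits on an imported
 * fence fd, and hands the command stream to vmw_execbuf_process(). Returns
 * 0 on success, negative error code on failure.
 */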
3998 int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
3999 		      struct drm_file *file_priv, size_t size)
4000 {
4001 	struct vmw_private *dev_priv = vmw_priv(dev);
4002 	struct drm_vmw_execbuf_arg arg;
4003 	int ret;
4004 	static const size_t copy_offset[] = {
4005 		offsetof(struct drm_vmw_execbuf_arg, context_handle),
4006 		sizeof(struct drm_vmw_execbuf_arg)};
4007 	struct dma_fence *in_fence = NULL;
4008 
4009 	if (unlikely(size < copy_offset[0])) {
4010 		VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
4011 			       DRM_VMW_EXECBUF);
4012 		return -EINVAL;
4013 	}
4014 
4015 	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4016 		return -EFAULT;
4017 
4018 	/*
4019 	 * Extend the ioctl argument while maintaining backwards compatibility:
4020 	 * We take different code paths depending on the value of arg.version.
4021 	 */
4022 	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4023 		     arg.version == 0)) {
4024 		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4025 		return -EINVAL;
4026 	}
4027 
4028 	if (arg.version > 1 &&
4029 	    copy_from_user(&arg.context_handle,
4030 			   (void __user *) (data + copy_offset[0]),
4031 			   copy_offset[arg.version - 1] - copy_offset[0]) != 0)
4032 		return -EFAULT;
4033 
4034 	switch (arg.version) {
4035 	case 1:
4036 		arg.context_handle = (uint32_t) -1;
4037 		break;
4038 	case 2:
4039 	default:
4040 		break;
4041 	}
4042 
	/* If a fence fd was imported from elsewhere, wait on it */
4044 	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4045 		in_fence = sync_file_get_fence(arg.imported_fence_fd);
4046 
4047 		if (!in_fence) {
4048 			VMW_DEBUG_USER("Cannot get imported fence\n");
4049 			return -EINVAL;
4050 		}
4051 
4052 		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4053 		if (ret)
4054 			goto out;
4055 	}
4056 
4057 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4058 	if (unlikely(ret != 0))
		goto out;
4060 
4061 	ret = vmw_execbuf_process(file_priv, dev_priv,
4062 				  (void __user *)(unsigned long)arg.commands,
4063 				  NULL, arg.command_size, arg.throttle_us,
4064 				  arg.context_handle,
4065 				  (void __user *)(unsigned long)arg.fence_rep,
4066 				  NULL, arg.flags);
4067 
4068 	ttm_read_unlock(&dev_priv->reservation_sem);
4069 	if (unlikely(ret != 0))
4070 		goto out;
4071 
4072 	vmw_kms_cursor_post_execbuf(dev_priv);
4073 
4074 out:
4075 	if (in_fence)
4076 		dma_fence_put(in_fence);
4077 	return ret;
4078 }
4079