// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_so.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/sync_file.h>
#include <linux/hashtable.h>

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)                                        \
({                                                                            \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
		__sw_context->dx_ctx_node;                                    \
	});                                                                   \
})
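
/*
 * Illustrative use in a command verifier (matching the pattern used by the
 * DX verifiers further down in this file):
 *
 *	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */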

#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
	struct {                                                              \
		SVGA3dCmdHeader header;                                       \
		__type body;                                                  \
	} __var
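
/*
 * Example, as used by the verifiers below: declare a pointer to a
 * header/body pair and recover it from a command header:
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
 *
 *	cmd = container_of(header, typeof(*cmd), header);
 */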

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_bo *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}
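
/*
 * Sketch of a command table entry built with VMW_CMD_DEF, mirroring the
 * command table further down in this file:
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *
 * This maps the command id to its verifier together with the user_allow,
 * gb_disable and gb_enable flags and a printable command name.
 */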

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
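
/*
 * For example, the verifiers below record resource relocation offsets
 * relative to the start of the command buffer as
 *
 *	vmw_ptr_diff(sw_context->buf_start, id_loc)
 */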

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validation list.
 *
 * @dev_priv: Pointer to the device private struct.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - Calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node, typically the binding manager's
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

enum vmw_val_add_flags {
	vmw_val_add_flag_none  =      0,
	vmw_val_add_flag_noctx = 1 << 0,
};

/**
 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 * @flags: Flags from &enum vmw_val_add_flags; vmw_val_add_flag_noctx skips
 * the per-context first-usage setup.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   u32 dirty,
				   u32 flags)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	if ((flags & vmw_val_add_flag_noctx) != 0) {
		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
						  (void **)&ctx_info, NULL);
		if (ret)
			return ret;

	} else {
		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
						  dirty, (void **)&ctx_info,
						  &first_usage);
		if (ret)
			return ret;

		if (priv_size && first_usage) {
			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
						      ctx_info);
			if (ret) {
				VMW_DEBUG_USER("Failed first usage context setup.\n");
				return ret;
			}
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}
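
/*
 * Illustrative call, adding a resource without extra context setup and
 * marking it dirty (the pattern used for cotables in
 * vmw_resource_context_res_add() below):
 *
 *	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
 *				      vmw_val_add_flag_noctx);
 */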

/**
 * vmw_view_res_val_add - Add a view, and the surface it points to, to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
	if (ret)
		return ret;

	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
				       vmw_val_add_flag_noctx);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it points
 * to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state re-emission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_SET,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
						      vmw_binding_dirtying(entry->bt),
						      vmw_val_add_flag_noctx);
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_bo *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob) {
			vmw_bo_placement_set(dx_query_mob,
					     VMW_BO_DOMAIN_MOB,
					     VMW_BO_DOMAIN_MOB);
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob);
		}
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}
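
/*
 * Sketch of a conditional-NOP relocation: a command that must be turned into
 * a NOP when its resource ends up with id -1 records the offset of its
 * header id, as some DX verifiers in this file do:
 *
 *	ret = vmw_resource_relocation_add(sw_context, res,
 *					  vmw_ptr_diff(sw_context->buf_start,
 *						       &cmd->header.id),
 *					  vmw_res_rel_cond_nop);
 */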

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission is currently protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at a time will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_bo *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validation list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to the resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret = 0;
	bool needs_unref = false;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		ret = vmw_user_resource_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
		if (ret != 0) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return ret;
		}
		needs_unref = true;

		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
		if (unlikely(ret != 0))
			goto res_check_done;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

res_check_done:
	if (needs_unref)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_bo *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->tbo.resource->start;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}
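
/*
 * Illustrative caller (sketch; view_ids, num_views and first_slot are
 * placeholder names): a DX set-shader-resources verifier would bind a run of
 * view ids starting at a given slot with
 *
 *	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *				    vmw_ctx_binding_sr, cmd->body.type,
 *				    view_ids, num_views, first_slot);
 */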

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_bo *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and if so, issue
 * a dummy occlusion query wait used as a query barrier. When the fence object
 * following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be
 * fenced using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	ttm_bo_put(&vmw_bo->tbo);
	drm_gem_object_put(&vmw_bo->tbo.base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
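
/*
 * Typical use, as in vmw_cmd_dx_bind_query() below:
 *
 *	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
 *				    &vmw_bo);
 */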

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	ttm_bo_put(&vmw_bo->tbo);
	drm_gem_object_put(&vmw_bo->tbo.base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB.  In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_bo *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->tbo.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);

	return 0;
}
1565 
1566 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1567 			struct vmw_sw_context *sw_context,
1568 			SVGA3dCmdHeader *header)
1569 {
1570 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1571 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1572 		(unsigned long)header + sizeof(*cmd));
1573 	SVGA3dPrimitiveRange *range;
1574 	uint32_t i;
1575 	uint32_t maxnum;
1576 	int ret;
1577 
1578 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1579 	if (unlikely(ret != 0))
1580 		return ret;
1581 
1582 	cmd = container_of(header, typeof(*cmd), header);
1583 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1584 
1585 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1586 		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1587 		return -EINVAL;
1588 	}
1589 
1590 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1591 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1592 					VMW_RES_DIRTY_NONE,
1593 					user_surface_converter,
1594 					&decl->array.surfaceId, NULL);
1595 		if (unlikely(ret != 0))
1596 			return ret;
1597 	}
1598 
1599 	maxnum = (header->size - sizeof(cmd->body) -
1600 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1601 	if (unlikely(cmd->body.numRanges > maxnum)) {
1602 		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1603 		return -EINVAL;
1604 	}
1605 
1606 	range = (SVGA3dPrimitiveRange *) decl;
1607 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1608 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1609 					VMW_RES_DIRTY_NONE,
1610 					user_surface_converter,
1611 					&range->indexArray.surfaceId, NULL);
1612 		if (unlikely(ret != 0))
1613 			return ret;
1614 	}
1615 	return 0;
1616 }
1617 
1618 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1619 			     struct vmw_sw_context *sw_context,
1620 			     SVGA3dCmdHeader *header)
1621 {
1622 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1623 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1624 	  ((unsigned long) header + header->size + sizeof(header));
1625 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1626 		((unsigned long) header + sizeof(*cmd));
1627 	struct vmw_resource *ctx;
1628 	struct vmw_resource *res;
1629 	int ret;
1630 
1631 	cmd = container_of(header, typeof(*cmd), header);
1632 
1633 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1634 				VMW_RES_DIRTY_SET, user_context_converter,
1635 				&cmd->body.cid, &ctx);
1636 	if (unlikely(ret != 0))
1637 		return ret;
1638 
1639 	for (; cur_state < last_state; ++cur_state) {
1640 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1641 			continue;
1642 
1643 		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1644 			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1645 				       (unsigned int) cur_state->stage);
1646 			return -EINVAL;
1647 		}
1648 
1649 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1650 					VMW_RES_DIRTY_NONE,
1651 					user_surface_converter,
1652 					&cur_state->value, &res);
1653 		if (unlikely(ret != 0))
1654 			return ret;
1655 
1656 		if (dev_priv->has_mob) {
1657 			struct vmw_ctx_bindinfo_tex binding;
1658 			struct vmw_ctx_validation_info *node;
1659 
1660 			node = vmw_execbuf_info_from_res(sw_context, ctx);
1661 			if (!node)
1662 				return -EINVAL;
1663 
1664 			binding.bi.ctx = ctx;
1665 			binding.bi.res = res;
1666 			binding.bi.bt = vmw_ctx_binding_tex;
1667 			binding.texture_stage = cur_state->stage;
1668 			vmw_binding_add(node->staged, &binding.bi, 0,
1669 					binding.texture_stage);
1670 		}
1671 	}
1672 
1673 	return 0;
1674 }
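
/*
 * Only SVGA3D_TS_BIND_TEXTURE entries in the state list above need work:
 * each names a surface that must be validated, and on guest-backed (MOB)
 * devices it is also recorded as a per-stage texture binding on the
 * context, so the binding state can be restored when the context is
 * rebound. All other texture states pass through unmodified.
 */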
1675 
1676 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1677 				      struct vmw_sw_context *sw_context,
1678 				      void *buf)
1679 {
1680 	struct vmw_bo *vmw_bo;
1681 
1682 	struct {
1683 		uint32_t header;
1684 		SVGAFifoCmdDefineGMRFB body;
1685 	} *cmd = buf;
1686 
1687 	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1688 				       &vmw_bo);
1689 }
1690 
1691 /**
1692  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1693  * switching
1694  *
1695  * @dev_priv: Pointer to a device private struct.
1696  * @sw_context: The software context being used for this batch.
1697  * @res: Pointer to the resource.
1698  * @buf_id: Pointer to the user-space backup buffer handle in the command
1699  * stream.
1700  * @backup_offset: Offset of backup into MOB.
1701  *
1702  * This function prepares for registering a switch of backup buffers in the
1703  * resource metadata just prior to unreserving: it looks up the validation
1704  * info for @res, translates @buf_id to a buffer object, and registers the switch.
1705  */
1706 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1707 				     struct vmw_sw_context *sw_context,
1708 				     struct vmw_resource *res, uint32_t *buf_id,
1709 				     unsigned long backup_offset)
1710 {
1711 	struct vmw_bo *vbo;
1712 	void *info;
1713 	int ret;
1714 
1715 	info = vmw_execbuf_info_from_res(sw_context, res);
1716 	if (!info)
1717 		return -EINVAL;
1718 
1719 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1720 	if (ret)
1721 		return ret;
1722 
1723 	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1724 					 backup_offset);
1725 	return 0;
1726 }
1727 
1728 /**
1729  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1730  *
1731  * @dev_priv: Pointer to a device private struct.
1732  * @sw_context: The software context being used for this batch.
1733  * @res_type: The resource type.
1734  * @converter: Information about user-space binding for this resource type.
1735  * @res_id: Pointer to the user-space resource handle in the command stream.
1736  * @buf_id: Pointer to the user-space backup buffer handle in the command
1737  * stream.
1738  * @backup_offset: Offset of backup into MOB.
1739  *
1740  * This function prepares for registering a switch of backup buffers in the
1741  * resource metadata just prior to unreserving. It's basically a wrapper around
1742  * vmw_cmd_res_switch_backup with a different interface.
1743  */
1744 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1745 				 struct vmw_sw_context *sw_context,
1746 				 enum vmw_res_type res_type,
1747 				 const struct vmw_user_resource_conv
1748 				 *converter, uint32_t *res_id, uint32_t *buf_id,
1749 				 unsigned long backup_offset)
1750 {
1751 	struct vmw_resource *res;
1752 	int ret;
1753 
1754 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1755 				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1756 	if (ret)
1757 		return ret;
1758 
1759 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1760 					 backup_offset);
1761 }
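
/*
 * A sketch of how bind-style validators use the helper above (this is
 * the pattern, with placeholders in angle brackets, not new code):
 *
 *	return vmw_cmd_switch_backup(dev_priv, sw_context, <res_type>,
 *				     <converter>, &cmd->body.<res_id>,
 *				     &cmd->body.mobid, <backup_offset>);
 *
 * vmw_cmd_bind_gb_surface() and vmw_cmd_bind_gb_shader() below are
 * concrete instances of exactly this call.
 */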
1762 
1763 /**
1764  * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1765  *
1766  * @dev_priv: Pointer to a device private struct.
1767  * @sw_context: The software context being used for this batch.
1768  * @header: Pointer to the command header in the command stream.
1769  */
1770 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1771 				   struct vmw_sw_context *sw_context,
1772 				   SVGA3dCmdHeader *header)
1773 {
1774 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1775 		container_of(header, typeof(*cmd), header);
1776 
1777 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1778 				     user_surface_converter, &cmd->body.sid,
1779 				     &cmd->body.mobid, 0);
1780 }
1781 
1782 /**
1783  * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1784  *
1785  * @dev_priv: Pointer to a device private struct.
1786  * @sw_context: The software context being used for this batch.
1787  * @header: Pointer to the command header in the command stream.
1788  */
1789 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1790 				   struct vmw_sw_context *sw_context,
1791 				   SVGA3dCmdHeader *header)
1792 {
1793 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1794 		container_of(header, typeof(*cmd), header);
1795 
1796 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1797 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1798 				 &cmd->body.image.sid, NULL);
1799 }
1800 
1801 /**
1802  * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1803  *
1804  * @dev_priv: Pointer to a device private struct.
1805  * @sw_context: The software context being used for this batch.
1806  * @header: Pointer to the command header in the command stream.
1807  */
1808 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1809 				     struct vmw_sw_context *sw_context,
1810 				     SVGA3dCmdHeader *header)
1811 {
1812 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1813 		container_of(header, typeof(*cmd), header);
1814 
1815 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1816 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1817 				 &cmd->body.sid, NULL);
1818 }
1819 
1820 /**
1821  * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1822  *
1823  * @dev_priv: Pointer to a device private struct.
1824  * @sw_context: The software context being used for this batch.
1825  * @header: Pointer to the command header in the command stream.
1826  */
1827 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1828 				     struct vmw_sw_context *sw_context,
1829 				     SVGA3dCmdHeader *header)
1830 {
1831 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1832 		container_of(header, typeof(*cmd), header);
1833 
1834 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1835 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1836 				 &cmd->body.image.sid, NULL);
1837 }
1838 
1839 /**
1840  * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1841  * command
1842  *
1843  * @dev_priv: Pointer to a device private struct.
1844  * @sw_context: The software context being used for this batch.
1845  * @header: Pointer to the command header in the command stream.
1846  */
1847 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1848 				       struct vmw_sw_context *sw_context,
1849 				       SVGA3dCmdHeader *header)
1850 {
1851 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1852 		container_of(header, typeof(*cmd), header);
1853 
1854 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1855 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1856 				 &cmd->body.sid, NULL);
1857 }
1858 
1859 /**
1860  * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1861  * command
1862  *
1863  * @dev_priv: Pointer to a device private struct.
1864  * @sw_context: The software context being used for this batch.
1865  * @header: Pointer to the command header in the command stream.
1866  */
1867 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1868 				       struct vmw_sw_context *sw_context,
1869 				       SVGA3dCmdHeader *header)
1870 {
1871 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1872 		container_of(header, typeof(*cmd), header);
1873 
1874 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1875 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1876 				 &cmd->body.image.sid, NULL);
1877 }
1878 
1879 /**
1880  * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1881  * command
1882  *
1883  * @dev_priv: Pointer to a device private struct.
1884  * @sw_context: The software context being used for this batch.
1885  * @header: Pointer to the command header in the command stream.
1886  */
1887 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1888 					 struct vmw_sw_context *sw_context,
1889 					 SVGA3dCmdHeader *header)
1890 {
1891 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1892 		container_of(header, typeof(*cmd), header);
1893 
1894 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1895 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1896 				 &cmd->body.sid, NULL);
1897 }
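
/*
 * Note the pattern in the six GB image/surface validators above: the
 * whole-surface variants (UPDATE/READBACK/INVALIDATE_GB_SURFACE) pass
 * VMW_RES_DIRTY_CLEAR, while the per-image variants pass
 * VMW_RES_DIRTY_NONE, presumably because only an operation covering the
 * whole resource can bring the backup MOB and the surface fully in sync
 * and thus clear the dirty tracking.
 */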
1898 
1899 /**
1900  * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1901  *
1902  * @dev_priv: Pointer to a device private struct.
1903  * @sw_context: The software context being used for this batch.
1904  * @header: Pointer to the command header in the command stream.
1905  */
1906 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1907 				 struct vmw_sw_context *sw_context,
1908 				 SVGA3dCmdHeader *header)
1909 {
1910 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1911 	int ret;
1912 	size_t size;
1913 	struct vmw_resource *ctx;
1914 
1915 	cmd = container_of(header, typeof(*cmd), header);
1916 
1917 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1918 				VMW_RES_DIRTY_SET, user_context_converter,
1919 				&cmd->body.cid, &ctx);
1920 	if (unlikely(ret != 0))
1921 		return ret;
1922 
1923 	if (unlikely(!dev_priv->has_mob))
1924 		return 0;
1925 
1926 	size = cmd->header.size - sizeof(cmd->body);
1927 	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1928 				    cmd->body.shid, cmd + 1, cmd->body.type,
1929 				    size, &sw_context->staged_cmd_res);
1930 	if (unlikely(ret != 0))
1931 		return ret;
1932 
1933 	return vmw_resource_relocation_add(sw_context, NULL,
1934 					   vmw_ptr_diff(sw_context->buf_start,
1935 							&cmd->header.id),
1936 					   vmw_res_rel_nop);
1937 }
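
/*
 * On MOB-enabled devices the SHADER_DEFINE payload has already been
 * absorbed by vmw_compat_shader_add() above, so a vmw_res_rel_nop
 * relocation is queued at &cmd->header.id to patch the legacy command
 * out of the stream before the device ever sees it.
 */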
1938 
1939 /**
1940  * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1941  *
1942  * @dev_priv: Pointer to a device private struct.
1943  * @sw_context: The software context being used for this batch.
1944  * @header: Pointer to the command header in the command stream.
1945  */
1946 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1947 				  struct vmw_sw_context *sw_context,
1948 				  SVGA3dCmdHeader *header)
1949 {
1950 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1951 	int ret;
1952 	struct vmw_resource *ctx;
1953 
1954 	cmd = container_of(header, typeof(*cmd), header);
1955 
1956 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1957 				VMW_RES_DIRTY_SET, user_context_converter,
1958 				&cmd->body.cid, &ctx);
1959 	if (unlikely(ret != 0))
1960 		return ret;
1961 
1962 	if (unlikely(!dev_priv->has_mob))
1963 		return 0;
1964 
1965 	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1966 				cmd->body.type, &sw_context->staged_cmd_res);
1967 	if (unlikely(ret != 0))
1968 		return ret;
1969 
1970 	return vmw_resource_relocation_add(sw_context, NULL,
1971 					   vmw_ptr_diff(sw_context->buf_start,
1972 							&cmd->header.id),
1973 					   vmw_res_rel_nop);
1974 }
1975 
1976 /**
1977  * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1978  *
1979  * @dev_priv: Pointer to a device private struct.
1980  * @sw_context: The software context being used for this batch.
1981  * @header: Pointer to the command header in the command stream.
1982  */
1983 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1984 			      struct vmw_sw_context *sw_context,
1985 			      SVGA3dCmdHeader *header)
1986 {
1987 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1988 	struct vmw_ctx_bindinfo_shader binding;
1989 	struct vmw_resource *ctx, *res = NULL;
1990 	struct vmw_ctx_validation_info *ctx_info;
1991 	int ret;
1992 
1993 	cmd = container_of(header, typeof(*cmd), header);
1994 
1995 	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
1996 		VMW_DEBUG_USER("Illegal shader type %u.\n",
1997 			       (unsigned int) cmd->body.type);
1998 		return -EINVAL;
1999 	}
2000 
2001 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2002 				VMW_RES_DIRTY_SET, user_context_converter,
2003 				&cmd->body.cid, &ctx);
2004 	if (unlikely(ret != 0))
2005 		return ret;
2006 
2007 	if (!dev_priv->has_mob)
2008 		return 0;
2009 
2010 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2011 		/*
2012 		 * This is the compat shader path: the shaders are per-device
2013 		 * guest-backed, but user-space thinks they are per-context
2014 		 * host-backed shaders.
2015 		 */
2016 		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2017 					cmd->body.shid, cmd->body.type);
2018 		if (!IS_ERR(res)) {
2019 			ret = vmw_execbuf_res_val_add(sw_context, res,
2020 						      VMW_RES_DIRTY_NONE,
2021 						      vmw_val_add_flag_noctx);
2022 			if (unlikely(ret != 0))
2023 				return ret;
2024 
2025 			ret = vmw_resource_relocation_add
2026 				(sw_context, res,
2027 				 vmw_ptr_diff(sw_context->buf_start,
2028 					      &cmd->body.shid),
2029 				 vmw_res_rel_normal);
2030 			if (unlikely(ret != 0))
2031 				return ret;
2032 		}
2033 	}
2034 
2035 	if (IS_ERR_OR_NULL(res)) {
2036 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2037 					VMW_RES_DIRTY_NONE,
2038 					user_shader_converter, &cmd->body.shid,
2039 					&res);
2040 		if (unlikely(ret != 0))
2041 			return ret;
2042 	}
2043 
2044 	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2045 	if (!ctx_info)
2046 		return -EINVAL;
2047 
2048 	binding.bi.ctx = ctx;
2049 	binding.bi.res = res;
2050 	binding.bi.bt = vmw_ctx_binding_shader;
2051 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2052 	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2053 
2054 	return 0;
2055 }
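
/*
 * Shader lookup order in vmw_cmd_set_shader() on MOB devices, as a
 * summary of the code above:
 *
 *	1. the per-context compat shader manager (vmw_shader_lookup());
 *	2. on a miss, the user-visible shader resources
 *	   (vmw_cmd_res_check()).
 *
 * A compat hit additionally needs a vmw_res_rel_normal relocation,
 * because the id the device must see is only known after validation.
 */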
2056 
2057 /**
2058  * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2059  *
2060  * @dev_priv: Pointer to a device private struct.
2061  * @sw_context: The software context being used for this batch.
2062  * @header: Pointer to the command header in the command stream.
2063  */
2064 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2065 				    struct vmw_sw_context *sw_context,
2066 				    SVGA3dCmdHeader *header)
2067 {
2068 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2069 	int ret;
2070 
2071 	cmd = container_of(header, typeof(*cmd), header);
2072 
2073 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2074 				VMW_RES_DIRTY_SET, user_context_converter,
2075 				&cmd->body.cid, NULL);
2076 	if (unlikely(ret != 0))
2077 		return ret;
2078 
2079 	if (dev_priv->has_mob)
2080 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2081 
2082 	return 0;
2083 }
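
/*
 * vmw_cmd_set_shader_const() shows the in-place rewrite technique: on
 * MOB devices the command id in the stream is patched to
 * SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, so the same payload is
 * consumed through the guest-backed path without any relocation.
 */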
2084 
2085 /**
2086  * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2087  *
2088  * @dev_priv: Pointer to a device private struct.
2089  * @sw_context: The software context being used for this batch.
2090  * @header: Pointer to the command header in the command stream.
2091  */
2092 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2093 				  struct vmw_sw_context *sw_context,
2094 				  SVGA3dCmdHeader *header)
2095 {
2096 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2097 		container_of(header, typeof(*cmd), header);
2098 
2099 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2100 				     user_shader_converter, &cmd->body.shid,
2101 				     &cmd->body.mobid, cmd->body.offsetInBytes);
2102 }
2103 
2104 /**
2105  * vmw_cmd_dx_set_single_constant_buffer - Validate
2106  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2107  *
2108  * @dev_priv: Pointer to a device private struct.
2109  * @sw_context: The software context being used for this batch.
2110  * @header: Pointer to the command header in the command stream.
2111  */
2112 static int
2113 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2114 				      struct vmw_sw_context *sw_context,
2115 				      SVGA3dCmdHeader *header)
2116 {
2117 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2118 	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
2119 		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
2120 
2121 	struct vmw_resource *res = NULL;
2122 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2123 	struct vmw_ctx_bindinfo_cb binding;
2124 	int ret;
2125 
2126 	if (!ctx_node)
2127 		return -EINVAL;
2128 
2129 	cmd = container_of(header, typeof(*cmd), header);
2130 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2131 				VMW_RES_DIRTY_NONE, user_surface_converter,
2132 				&cmd->body.sid, &res);
2133 	if (unlikely(ret != 0))
2134 		return ret;
2135 
2136 	binding.bi.ctx = ctx_node->ctx;
2137 	binding.bi.res = res;
2138 	binding.bi.bt = vmw_ctx_binding_cb;
2139 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2140 	binding.offset = cmd->body.offsetInBytes;
2141 	binding.size = cmd->body.sizeInBytes;
2142 	binding.slot = cmd->body.slot;
2143 
2144 	if (binding.shader_slot >= max_shader_num ||
2145 	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2146 		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2147 			       (unsigned int) cmd->body.type,
2148 			       (unsigned int) binding.slot);
2149 		return -EINVAL;
2150 	}
2151 
2152 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2153 			binding.slot);
2154 
2155 	return 0;
2156 }
2157 
2158 /**
2159  * vmw_cmd_dx_set_constant_buffer_offset - Validate
2160  * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
2161  *
2162  * @dev_priv: Pointer to a device private struct.
2163  * @sw_context: The software context being used for this batch.
2164  * @header: Pointer to the command header in the command stream.
2165  */
2166 static int
2167 vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
2168 				      struct vmw_sw_context *sw_context,
2169 				      SVGA3dCmdHeader *header)
2170 {
2171 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
2172 
2173 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2174 	u32 shader_slot;
2175 
2176 	if (!has_sm5_context(dev_priv))
2177 		return -EINVAL;
2178 
2179 	if (!ctx_node)
2180 		return -EINVAL;
2181 
2182 	cmd = container_of(header, typeof(*cmd), header);
2183 	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2184 		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
2185 			       (unsigned int) cmd->body.slot);
2186 		return -EINVAL;
2187 	}
2188 
2189 	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
2190 	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
2191 				     cmd->body.slot, cmd->body.offsetInBytes);
2192 
2193 	return 0;
2194 }
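
/*
 * The shader_slot computation above relies on the
 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command
 * ids being contiguous, so the distance from the VS variant doubles as
 * the shader-stage index passed to the binding code.
 */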
2195 
2196 /**
2197  * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2198  * command
2199  *
2200  * @dev_priv: Pointer to a device private struct.
2201  * @sw_context: The software context being used for this batch.
2202  * @header: Pointer to the command header in the command stream.
2203  */
2204 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2205 				     struct vmw_sw_context *sw_context,
2206 				     SVGA3dCmdHeader *header)
2207 {
2208 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2209 		container_of(header, typeof(*cmd), header);
2210 	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2211 		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2212 
2213 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2214 		sizeof(SVGA3dShaderResourceViewId);
2215 
2216 	if ((u64) cmd->body.startView + (u64) num_sr_view >
2217 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2218 	    cmd->body.type >= max_allowed) {
2219 		VMW_DEBUG_USER("Invalid shader binding.\n");
2220 		return -EINVAL;
2221 	}
2222 
2223 	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2224 				     vmw_ctx_binding_sr,
2225 				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2226 				     (void *) &cmd[1], num_sr_view,
2227 				     cmd->body.startView);
2228 }
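
/*
 * Variable-length DX binding commands derive their element count from
 * the header rather than trusting a count field, e.g. (pattern sketch):
 *
 *	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(element);
 *
 * and range-check start + num in u64 arithmetic, as above, so an
 * attacker-controlled 32-bit start value cannot wrap the comparison.
 */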
2229 
2230 /**
2231  * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2232  *
2233  * @dev_priv: Pointer to a device private struct.
2234  * @sw_context: The software context being used for this batch.
2235  * @header: Pointer to the command header in the command stream.
2236  */
2237 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2238 				 struct vmw_sw_context *sw_context,
2239 				 SVGA3dCmdHeader *header)
2240 {
2241 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2242 	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2243 		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2244 	struct vmw_resource *res = NULL;
2245 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2246 	struct vmw_ctx_bindinfo_shader binding;
2247 	int ret = 0;
2248 
2249 	if (!ctx_node)
2250 		return -EINVAL;
2251 
2252 	cmd = container_of(header, typeof(*cmd), header);
2253 
2254 	if (cmd->body.type >= max_allowed ||
2255 	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2256 		VMW_DEBUG_USER("Illegal shader type %u.\n",
2257 			       (unsigned int) cmd->body.type);
2258 		return -EINVAL;
2259 	}
2260 
2261 	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2262 		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2263 		if (IS_ERR(res)) {
2264 			VMW_DEBUG_USER("Could not find shader for binding.\n");
2265 			return PTR_ERR(res);
2266 		}
2267 
2268 		ret = vmw_execbuf_res_val_add(sw_context, res,
2269 					      VMW_RES_DIRTY_NONE,
2270 					      vmw_val_add_flag_noctx);
2271 		if (ret)
2272 			return ret;
2273 	}
2274 
2275 	binding.bi.ctx = ctx_node->ctx;
2276 	binding.bi.res = res;
2277 	binding.bi.bt = vmw_ctx_binding_dx_shader;
2278 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2279 
2280 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2281 
2282 	return 0;
2283 }
2284 
2285 /**
2286  * vmw_cmd_dx_set_vertex_buffers - Validate SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2287  * command
2288  *
2289  * @dev_priv: Pointer to a device private struct.
2290  * @sw_context: The software context being used for this batch.
2291  * @header: Pointer to the command header in the command stream.
2292  */
2293 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2294 					 struct vmw_sw_context *sw_context,
2295 					 SVGA3dCmdHeader *header)
2296 {
2297 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2298 	struct vmw_ctx_bindinfo_vb binding;
2299 	struct vmw_resource *res;
2300 	struct {
2301 		SVGA3dCmdHeader header;
2302 		SVGA3dCmdDXSetVertexBuffers body;
2303 		SVGA3dVertexBuffer buf[];
2304 	} *cmd;
2305 	int i, ret, num;
2306 
2307 	if (!ctx_node)
2308 		return -EINVAL;
2309 
2310 	cmd = container_of(header, typeof(*cmd), header);
2311 	num = (cmd->header.size - sizeof(cmd->body)) /
2312 		sizeof(SVGA3dVertexBuffer);
2313 	if ((u64)num + (u64)cmd->body.startBuffer >
2314 	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2315 		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2316 		return -EINVAL;
2317 	}
2318 
2319 	for (i = 0; i < num; i++) {
2320 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2321 					VMW_RES_DIRTY_NONE,
2322 					user_surface_converter,
2323 					&cmd->buf[i].sid, &res);
2324 		if (unlikely(ret != 0))
2325 			return ret;
2326 
2327 		binding.bi.ctx = ctx_node->ctx;
2328 		binding.bi.bt = vmw_ctx_binding_vb;
2329 		binding.bi.res = res;
2330 		binding.offset = cmd->buf[i].offset;
2331 		binding.stride = cmd->buf[i].stride;
2332 		binding.slot = i + cmd->body.startBuffer;
2333 
2334 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2335 	}
2336 
2337 	return 0;
2338 }
2339 
2340 /**
2341  * vmw_cmd_dx_set_index_buffer - Validate
2342  * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2343  *
2344  * @dev_priv: Pointer to a device private struct.
2345  * @sw_context: The software context being used for this batch.
2346  * @header: Pointer to the command header in the command stream.
2347  */
2348 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2349 				       struct vmw_sw_context *sw_context,
2350 				       SVGA3dCmdHeader *header)
2351 {
2352 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2353 	struct vmw_ctx_bindinfo_ib binding;
2354 	struct vmw_resource *res;
2355 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2356 	int ret;
2357 
2358 	if (!ctx_node)
2359 		return -EINVAL;
2360 
2361 	cmd = container_of(header, typeof(*cmd), header);
2362 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2363 				VMW_RES_DIRTY_NONE, user_surface_converter,
2364 				&cmd->body.sid, &res);
2365 	if (unlikely(ret != 0))
2366 		return ret;
2367 
2368 	binding.bi.ctx = ctx_node->ctx;
2369 	binding.bi.res = res;
2370 	binding.bi.bt = vmw_ctx_binding_ib;
2371 	binding.offset = cmd->body.offset;
2372 	binding.format = cmd->body.format;
2373 
2374 	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2375 
2376 	return 0;
2377 }
2378 
2379 /**
2380  * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2381  * command
2382  *
2383  * @dev_priv: Pointer to a device private struct.
2384  * @sw_context: The software context being used for this batch.
2385  * @header: Pointer to the command header in the command stream.
2386  */
2387 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2388 					struct vmw_sw_context *sw_context,
2389 					SVGA3dCmdHeader *header)
2390 {
2391 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2392 		container_of(header, typeof(*cmd), header);
2393 	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2394 		sizeof(SVGA3dRenderTargetViewId);
2395 	int ret;
2396 
2397 	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
2398 		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2399 		return -EINVAL;
2400 	}
2401 
2402 	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2403 				    0, &cmd->body.depthStencilViewId, 1, 0);
2404 	if (ret)
2405 		return ret;
2406 
2407 	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2408 				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2409 				     num_rt_view, 0);
2410 }
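
/*
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS carries a single depth-stencil view
 * id in the body followed by a trailing array of render-target view
 * ids, hence the two vmw_view_bindings_add() calls above: one for the
 * DS view and one for the num_rt_view entries starting at &cmd[1].
 */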
2411 
2412 /**
2413  * vmw_cmd_dx_clear_rendertarget_view - Validate
2414  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2415  *
2416  * @dev_priv: Pointer to a device private struct.
2417  * @sw_context: The software context being used for this batch.
2418  * @header: Pointer to the command header in the command stream.
2419  */
2420 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2421 					      struct vmw_sw_context *sw_context,
2422 					      SVGA3dCmdHeader *header)
2423 {
2424 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2425 		container_of(header, typeof(*cmd), header);
2426 	struct vmw_resource *ret;
2427 
2428 	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2429 				  cmd->body.renderTargetViewId);
2430 
2431 	return PTR_ERR_OR_ZERO(ret);
2432 }
2433 
2434 /**
2435  * vmw_cmd_dx_clear_depthstencil_view - Validate
2436  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2437  *
2438  * @dev_priv: Pointer to a device private struct.
2439  * @sw_context: The software context being used for this batch.
2440  * @header: Pointer to the command header in the command stream.
2441  */
2442 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2443 					      struct vmw_sw_context *sw_context,
2444 					      SVGA3dCmdHeader *header)
2445 {
2446 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2447 		container_of(header, typeof(*cmd), header);
2448 	struct vmw_resource *ret;
2449 
2450 	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2451 				  cmd->body.depthStencilViewId);
2452 
2453 	return PTR_ERR_OR_ZERO(ret);
2454 }
2455 
2456 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2457 				  struct vmw_sw_context *sw_context,
2458 				  SVGA3dCmdHeader *header)
2459 {
2460 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2461 	struct vmw_resource *srf;
2462 	struct vmw_resource *res;
2463 	enum vmw_view_type view_type;
2464 	int ret;
2465 	/*
2466 	 * This is based on the fact that all affected define commands have the
2467 	 * same initial command body layout.
2468 	 */
2469 	struct {
2470 		SVGA3dCmdHeader header;
2471 		uint32 defined_id;
2472 		uint32 sid;
2473 	} *cmd;
2474 
2475 	if (!ctx_node)
2476 		return -EINVAL;
2477 
2478 	view_type = vmw_view_cmd_to_type(header->id);
2479 	if (view_type == vmw_view_max)
2480 		return -EINVAL;
2481 
2482 	cmd = container_of(header, typeof(*cmd), header);
2483 	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2484 		VMW_DEBUG_USER("Invalid surface id.\n");
2485 		return -EINVAL;
2486 	}
2487 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2488 				VMW_RES_DIRTY_NONE, user_surface_converter,
2489 				&cmd->sid, &srf);
2490 	if (unlikely(ret != 0))
2491 		return ret;
2492 
2493 	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2494 	ret = vmw_cotable_notify(res, cmd->defined_id);
2495 	if (unlikely(ret != 0))
2496 		return ret;
2497 
2498 	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2499 			    cmd->defined_id, header,
2500 			    header->size + sizeof(*header),
2501 			    &sw_context->staged_cmd_res);
2502 }
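
/*
 * Define-style DX view commands follow a two-step pattern, visible
 * above: vmw_cotable_notify() ensures the context's cotable can hold
 * the new id, and only then is the view added, together with a copy of
 * the full command (header->size + sizeof(*header) bytes) so it can be
 * re-emitted when the view has to be recreated.
 */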
2503 
2504 /**
2505  * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2506  *
2507  * @dev_priv: Pointer to a device private struct.
2508  * @sw_context: The software context being used for this batch.
2509  * @header: Pointer to the command header in the command stream.
2510  */
2511 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2512 				     struct vmw_sw_context *sw_context,
2513 				     SVGA3dCmdHeader *header)
2514 {
2515 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2516 	struct vmw_ctx_bindinfo_so_target binding;
2517 	struct vmw_resource *res;
2518 	struct {
2519 		SVGA3dCmdHeader header;
2520 		SVGA3dCmdDXSetSOTargets body;
2521 		SVGA3dSoTarget targets[];
2522 	} *cmd;
2523 	int i, ret, num;
2524 
2525 	if (!ctx_node)
2526 		return -EINVAL;
2527 
2528 	cmd = container_of(header, typeof(*cmd), header);
2529 	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2530 
2531 	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2532 		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2533 		return -EINVAL;
2534 	}
2535 
2536 	for (i = 0; i < num; i++) {
2537 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2538 					VMW_RES_DIRTY_SET,
2539 					user_surface_converter,
2540 					&cmd->targets[i].sid, &res);
2541 		if (unlikely(ret != 0))
2542 			return ret;
2543 
2544 		binding.bi.ctx = ctx_node->ctx;
2545 		binding.bi.res = res;
2546 		binding.bi.bt = vmw_ctx_binding_so_target;
2547 		binding.offset = cmd->targets[i].offset;
2548 		binding.size = cmd->targets[i].sizeInBytes;
2549 		binding.slot = i;
2550 
2551 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2552 	}
2553 
2554 	return 0;
2555 }
2556 
2557 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2558 				struct vmw_sw_context *sw_context,
2559 				SVGA3dCmdHeader *header)
2560 {
2561 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2562 	struct vmw_resource *res;
2563 	/*
2564 	 * This is based on the fact that all affected define commands have
2565 	 * the same initial command body layout.
2566 	 */
2567 	struct {
2568 		SVGA3dCmdHeader header;
2569 		uint32 defined_id;
2570 	} *cmd;
2571 	enum vmw_so_type so_type;
2572 	int ret;
2573 
2574 	if (!ctx_node)
2575 		return -EINVAL;
2576 
2577 	so_type = vmw_so_cmd_to_type(header->id);
2578 	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2579 	if (IS_ERR(res))
2580 		return PTR_ERR(res);
2581 	cmd = container_of(header, typeof(*cmd), header);
2582 	ret = vmw_cotable_notify(res, cmd->defined_id);
2583 
2584 	return ret;
2585 }
2586 
2587 /**
2588  * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2589  * command
2590  *
2591  * @dev_priv: Pointer to a device private struct.
2592  * @sw_context: The software context being used for this batch.
2593  * @header: Pointer to the command header in the command stream.
2594  */
2595 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2596 					struct vmw_sw_context *sw_context,
2597 					SVGA3dCmdHeader *header)
2598 {
2599 	struct {
2600 		SVGA3dCmdHeader header;
2601 		union {
2602 			SVGA3dCmdDXReadbackSubResource r_body;
2603 			SVGA3dCmdDXInvalidateSubResource i_body;
2604 			SVGA3dCmdDXUpdateSubResource u_body;
2605 			SVGA3dSurfaceId sid;
2606 		};
2607 	} *cmd;
2608 
2609 	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2610 		     offsetof(typeof(*cmd), sid));
2611 	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2612 		     offsetof(typeof(*cmd), sid));
2613 	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2614 		     offsetof(typeof(*cmd), sid));
2615 
2616 	cmd = container_of(header, typeof(*cmd), header);
2617 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2618 				 VMW_RES_DIRTY_NONE, user_surface_converter,
2619 				 &cmd->sid, NULL);
2620 }
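
/*
 * The anonymous union above lets a single validator serve the
 * readback, invalidate and update subresource commands: all three
 * bodies start with the surface id, and the BUILD_BUG_ON()s promote
 * that layout assumption to a compile-time check.
 */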
2621 
2622 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2623 				struct vmw_sw_context *sw_context,
2624 				SVGA3dCmdHeader *header)
2625 {
2626 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2627 
2628 	if (!ctx_node)
2629 		return -EINVAL;
2630 
2631 	return 0;
2632 }
2633 
2634 /**
2635  * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the view
2636  * resource for removal.
2637  *
2638  * @dev_priv: Pointer to a device private struct.
2639  * @sw_context: The software context being used for this batch.
2640  * @header: Pointer to the command header in the command stream.
2641  *
2642  * Check that the view exists, and if it was not created using this command
2643  * batch, conditionally make this command a NOP.
2644  */
2645 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2646 				  struct vmw_sw_context *sw_context,
2647 				  SVGA3dCmdHeader *header)
2648 {
2649 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2650 	struct {
2651 		SVGA3dCmdHeader header;
2652 		union vmw_view_destroy body;
2653 	} *cmd = container_of(header, typeof(*cmd), header);
2654 	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2655 	struct vmw_resource *view;
2656 	int ret;
2657 
2658 	if (!ctx_node)
2659 		return -EINVAL;
2660 
2661 	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2662 			      &sw_context->staged_cmd_res, &view);
2663 	if (ret || !view)
2664 		return ret;
2665 
2666 	/*
2667 	 * If the view wasn't created during this command batch, it might
2668 	 * have been removed due to a context swapout, so add a
2669 	 * relocation to conditionally make this command a NOP to avoid
2670 	 * device errors.
2671 	 */
2672 	return vmw_resource_relocation_add(sw_context, view,
2673 					   vmw_ptr_diff(sw_context->buf_start,
2674 							&cmd->header.id),
2675 					   vmw_res_rel_cond_nop);
2676 }
2677 
2678 /**
2679  * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2680  *
2681  * @dev_priv: Pointer to a device private struct.
2682  * @sw_context: The software context being used for this batch.
2683  * @header: Pointer to the command header in the command stream.
2684  */
2685 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2686 				    struct vmw_sw_context *sw_context,
2687 				    SVGA3dCmdHeader *header)
2688 {
2689 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2690 	struct vmw_resource *res;
2691 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2692 		container_of(header, typeof(*cmd), header);
2693 	int ret;
2694 
2695 	if (!ctx_node)
2696 		return -EINVAL;
2697 
2698 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2699 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2700 	if (ret)
2701 		return ret;
2702 
2703 	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2704 				 cmd->body.shaderId, cmd->body.type,
2705 				 &sw_context->staged_cmd_res);
2706 }
2707 
2708 /**
2709  * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2710  *
2711  * @dev_priv: Pointer to a device private struct.
2712  * @sw_context: The software context being used for this batch.
2713  * @header: Pointer to the command header in the command stream.
2714  */
2715 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2716 				     struct vmw_sw_context *sw_context,
2717 				     SVGA3dCmdHeader *header)
2718 {
2719 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2720 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2721 		container_of(header, typeof(*cmd), header);
2722 	int ret;
2723 
2724 	if (!ctx_node)
2725 		return -EINVAL;
2726 
2727 	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2728 				&sw_context->staged_cmd_res);
2729 
2730 	return ret;
2731 }
2732 
2733 /**
2734  * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2735  *
2736  * @dev_priv: Pointer to a device private struct.
2737  * @sw_context: The software context being used for this batch.
2738  * @header: Pointer to the command header in the command stream.
2739  */
2740 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2741 				  struct vmw_sw_context *sw_context,
2742 				  SVGA3dCmdHeader *header)
2743 {
2744 	struct vmw_resource *ctx;
2745 	struct vmw_resource *res;
2746 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2747 		container_of(header, typeof(*cmd), header);
2748 	int ret;
2749 
2750 	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2751 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2752 					VMW_RES_DIRTY_SET,
2753 					user_context_converter, &cmd->body.cid,
2754 					&ctx);
2755 		if (ret)
2756 			return ret;
2757 	} else {
2758 		struct vmw_ctx_validation_info *ctx_node =
2759 			VMW_GET_CTX_NODE(sw_context);
2760 
2761 		if (!ctx_node)
2762 			return -EINVAL;
2763 
2764 		ctx = ctx_node->ctx;
2765 	}
2766 
2767 	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2768 	if (IS_ERR(res)) {
2769 		VMW_DEBUG_USER("Could not find shader to bind.\n");
2770 		return PTR_ERR(res);
2771 	}
2772 
2773 	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
2774 				      vmw_val_add_flag_noctx);
2775 	if (ret) {
2776 		VMW_DEBUG_USER("Error creating resource validation node.\n");
2777 		return ret;
2778 	}
2779 
2780 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2781 					 &cmd->body.mobid,
2782 					 cmd->body.offsetInBytes);
2783 }
2784 
2785 /**
2786  * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2787  *
2788  * @dev_priv: Pointer to a device private struct.
2789  * @sw_context: The software context being used for this batch.
2790  * @header: Pointer to the command header in the command stream.
2791  */
2792 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2793 			      struct vmw_sw_context *sw_context,
2794 			      SVGA3dCmdHeader *header)
2795 {
2796 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2797 		container_of(header, typeof(*cmd), header);
2798 	struct vmw_resource *view;
2799 	struct vmw_res_cache_entry *rcache;
2800 
2801 	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2802 				   cmd->body.shaderResourceViewId);
2803 	if (IS_ERR(view))
2804 		return PTR_ERR(view);
2805 
2806 	/*
2807 	 * Normally the shader-resource view is not gpu-dirtying, but for
2808 	 * this particular command it is...
2809 	 * So mark the last looked-up surface, which is the surface
2810 	 * the view points to, gpu-dirty.
2811 	 */
2812 	rcache = &sw_context->res_cache[vmw_res_surface];
2813 	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2814 				     VMW_RES_DIRTY_SET);
2815 	return 0;
2816 }
2817 
2818 /**
2819  * vmw_cmd_dx_transfer_from_buffer - Validate
2820  * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2821  *
2822  * @dev_priv: Pointer to a device private struct.
2823  * @sw_context: The software context being used for this batch.
2824  * @header: Pointer to the command header in the command stream.
2825  */
2826 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2827 					   struct vmw_sw_context *sw_context,
2828 					   SVGA3dCmdHeader *header)
2829 {
2830 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2831 		container_of(header, typeof(*cmd), header);
2832 	int ret;
2833 
2834 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2835 				VMW_RES_DIRTY_NONE, user_surface_converter,
2836 				&cmd->body.srcSid, NULL);
2837 	if (ret != 0)
2838 		return ret;
2839 
2840 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2841 				 VMW_RES_DIRTY_SET, user_surface_converter,
2842 				 &cmd->body.destSid, NULL);
2843 }
2844 
2845 /**
2846  * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2847  *
2848  * @dev_priv: Pointer to a device private struct.
2849  * @sw_context: The software context being used for this batch.
2850  * @header: Pointer to the command header in the command stream.
2851  */
2852 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2853 				      struct vmw_sw_context *sw_context,
2854 				      SVGA3dCmdHeader *header)
2855 {
2856 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2857 		container_of(header, typeof(*cmd), header);
2858 
2859 	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2860 		return -EINVAL;
2861 
2862 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2863 				 VMW_RES_DIRTY_SET, user_surface_converter,
2864 				 &cmd->body.surface.sid, NULL);
2865 }
2866 
2867 static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2868 		       struct vmw_sw_context *sw_context,
2869 		       SVGA3dCmdHeader *header)
2870 {
2871 	if (!has_sm5_context(dev_priv))
2872 		return -EINVAL;
2873 
2874 	return 0;
2875 }
2876 
2877 static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2878 				   struct vmw_sw_context *sw_context,
2879 				   SVGA3dCmdHeader *header)
2880 {
2881 	if (!has_sm5_context(dev_priv))
2882 		return -EINVAL;
2883 
2884 	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2885 }
2886 
2887 static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2888 				   struct vmw_sw_context *sw_context,
2889 				   SVGA3dCmdHeader *header)
2890 {
2891 	if (!has_sm5_context(dev_priv))
2892 		return -EINVAL;
2893 
2894 	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2895 }
2896 
2897 static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2898 				  struct vmw_sw_context *sw_context,
2899 				  SVGA3dCmdHeader *header)
2900 {
2901 	struct {
2902 		SVGA3dCmdHeader header;
2903 		SVGA3dCmdDXClearUAViewUint body;
2904 	} *cmd = container_of(header, typeof(*cmd), header);
2905 	struct vmw_resource *ret;
2906 
2907 	if (!has_sm5_context(dev_priv))
2908 		return -EINVAL;
2909 
2910 	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2911 				  cmd->body.uaViewId);
2912 
2913 	return PTR_ERR_OR_ZERO(ret);
2914 }
2915 
2916 static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2917 				   struct vmw_sw_context *sw_context,
2918 				   SVGA3dCmdHeader *header)
2919 {
2920 	struct {
2921 		SVGA3dCmdHeader header;
2922 		SVGA3dCmdDXClearUAViewFloat body;
2923 	} *cmd = container_of(header, typeof(*cmd), header);
2924 	struct vmw_resource *ret;
2925 
2926 	if (!has_sm5_context(dev_priv))
2927 		return -EINVAL;
2928 
2929 	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2930 				  cmd->body.uaViewId);
2931 
2932 	return PTR_ERR_OR_ZERO(ret);
2933 }
2934 
2935 static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2936 			   struct vmw_sw_context *sw_context,
2937 			   SVGA3dCmdHeader *header)
2938 {
2939 	struct {
2940 		SVGA3dCmdHeader header;
2941 		SVGA3dCmdDXSetUAViews body;
2942 	} *cmd = container_of(header, typeof(*cmd), header);
2943 	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2944 		sizeof(SVGA3dUAViewId);
2945 	int ret;
2946 
2947 	if (!has_sm5_context(dev_priv))
2948 		return -EINVAL;
2949 
2950 	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2951 		VMW_DEBUG_USER("Invalid UAV binding.\n");
2952 		return -EINVAL;
2953 	}
2954 
2955 	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2956 				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2957 				    num_uav, 0);
2958 	if (ret)
2959 		return ret;
2960 
2961 	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2962 				  cmd->body.uavSpliceIndex);
2963 
2964 	return ret;
2965 }
2966 
2967 static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2968 			      struct vmw_sw_context *sw_context,
2969 			      SVGA3dCmdHeader *header)
2970 {
2971 	struct {
2972 		SVGA3dCmdHeader header;
2973 		SVGA3dCmdDXSetCSUAViews body;
2974 	} *cmd = container_of(header, typeof(*cmd), header);
2975 	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2976 		sizeof(SVGA3dUAViewId);
2977 	int ret;
2978 
2979 	if (!has_sm5_context(dev_priv))
2980 		return -EINVAL;
2981 
2982 	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2983 		VMW_DEBUG_USER("Invalid UAV binding.\n");
2984 		return -EINVAL;
2985 	}
2986 
2987 	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2988 				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2989 				    num_uav, 0);
2990 	if (ret)
2991 		return ret;
2992 
2993 	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2994 				  cmd->body.startIndex);
2995 
2996 	return ret;
2997 }
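
/*
 * The two UAV validators above differ only in the binding type and in
 * the first index passed to vmw_binding_add_uav_index(): 0 with the
 * graphics uavSpliceIndex, 1 with the compute-shader startIndex,
 * keeping the two UAV sets separate on the context.
 */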
2998 
2999 static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
3000 					  struct vmw_sw_context *sw_context,
3001 					  SVGA3dCmdHeader *header)
3002 {
3003 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3004 	struct vmw_resource *res;
3005 	struct {
3006 		SVGA3dCmdHeader header;
3007 		SVGA3dCmdDXDefineStreamOutputWithMob body;
3008 	} *cmd = container_of(header, typeof(*cmd), header);
3009 	int ret;
3010 
3011 	if (!has_sm5_context(dev_priv))
3012 		return -EINVAL;
3013 
3014 	if (!ctx_node) {
3015 		DRM_ERROR("DX Context not set.\n");
3016 		return -EINVAL;
3017 	}
3018 
3019 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
3020 	ret = vmw_cotable_notify(res, cmd->body.soid);
3021 	if (ret)
3022 		return ret;
3023 
3024 	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
3025 				       cmd->body.soid,
3026 				       &sw_context->staged_cmd_res);
3027 }
3028 
3029 static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3030 					   struct vmw_sw_context *sw_context,
3031 					   SVGA3dCmdHeader *header)
3032 {
3033 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3034 	struct vmw_resource *res;
3035 	struct {
3036 		SVGA3dCmdHeader header;
3037 		SVGA3dCmdDXDestroyStreamOutput body;
3038 	} *cmd = container_of(header, typeof(*cmd), header);
3039 
3040 	if (!ctx_node) {
3041 		DRM_ERROR("DX Context not set.\n");
3042 		return -EINVAL;
3043 	}
3044 
3045 	/*
3046 	 * When the device does not support SM5, the streamoutput-with-mob
3047 	 * commands are not available to user-space. Simply return in this case.
3048 	 */
3049 	if (!has_sm5_context(dev_priv))
3050 		return 0;
3051 
3052 	/*
3053 	 * On an SM5-capable device, a failed lookup means user-space probably
3054 	 * used the old streamoutput define command. Return without an error.
3055 	 */
3056 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3057 					 cmd->body.soid);
3058 	if (IS_ERR(res))
3059 		return 0;
3060 
3061 	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3062 					  &sw_context->staged_cmd_res);
3063 }
3064 
3065 static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3066 					struct vmw_sw_context *sw_context,
3067 					SVGA3dCmdHeader *header)
3068 {
3069 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3070 	struct vmw_resource *res;
3071 	struct {
3072 		SVGA3dCmdHeader header;
3073 		SVGA3dCmdDXBindStreamOutput body;
3074 	} *cmd = container_of(header, typeof(*cmd), header);
3075 	int ret;
3076 
3077 	if (!has_sm5_context(dev_priv))
3078 		return -EINVAL;
3079 
3080 	if (!ctx_node) {
3081 		DRM_ERROR("DX Context not set.\n");
3082 		return -EINVAL;
3083 	}
3084 
3085 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3086 					 cmd->body.soid);
3087 	if (IS_ERR(res)) {
3088 		DRM_ERROR("Could not find streamoutput to bind.\n");
3089 		return PTR_ERR(res);
3090 	}
3091 
3092 	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3093 
3094 	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3095 				      vmw_val_add_flag_noctx);
3096 	if (ret) {
3097 		DRM_ERROR("Error creating resource validation node.\n");
3098 		return ret;
3099 	}
3100 
3101 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3102 					 &cmd->body.mobid,
3103 					 cmd->body.offsetInBytes);
3104 }
3105 
3106 static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3107 				       struct vmw_sw_context *sw_context,
3108 				       SVGA3dCmdHeader *header)
3109 {
3110 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3111 	struct vmw_resource *res;
3112 	struct vmw_ctx_bindinfo_so binding;
3113 	struct {
3114 		SVGA3dCmdHeader header;
3115 		SVGA3dCmdDXSetStreamOutput body;
3116 	} *cmd = container_of(header, typeof(*cmd), header);
3117 	int ret;
3118 
3119 	if (!ctx_node) {
3120 		DRM_ERROR("DX Context not set.\n");
3121 		return -EINVAL;
3122 	}
3123 
3124 	if (cmd->body.soid == SVGA3D_INVALID_ID)
3125 		return 0;
3126 
3127 	/*
3128 	 * When the device does not support SM5, the streamoutput-with-mob
3129 	 * commands are not available to user-space. Simply return in this case.
3130 	 */
3131 	if (!has_sm5_context(dev_priv))
3132 		return 0;
3133 
3134 	/*
3135 	 * On an SM5-capable device, a failed lookup means user-space probably
3136 	 * used the old streamoutput define command. Return without an error.
3137 	 */
3138 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3139 					 cmd->body.soid);
3140 	if (IS_ERR(res))
3141 		return 0;
3143 
3144 	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3145 				      vmw_val_add_flag_noctx);
3146 	if (ret) {
3147 		DRM_ERROR("Error creating resource validation node.\n");
3148 		return ret;
3149 	}
3150 
3151 	binding.bi.ctx = ctx_node->ctx;
3152 	binding.bi.res = res;
3153 	binding.bi.bt = vmw_ctx_binding_so;
3154 	binding.slot = 0; /* Only one SO set to context at a time. */
3155 
3156 	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3157 			binding.slot);
3158 
3159 	return ret;
3160 }
3161 
3162 static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3163 					      struct vmw_sw_context *sw_context,
3164 					      SVGA3dCmdHeader *header)
3165 {
3166 	struct vmw_draw_indexed_instanced_indirect_cmd {
3167 		SVGA3dCmdHeader header;
3168 		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3169 	} *cmd = container_of(header, typeof(*cmd), header);
3170 
3171 	if (!has_sm5_context(dev_priv))
3172 		return -EINVAL;
3173 
3174 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3175 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3176 				 &cmd->body.argsBufferSid, NULL);
3177 }
3178 
3179 static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3180 				      struct vmw_sw_context *sw_context,
3181 				      SVGA3dCmdHeader *header)
3182 {
3183 	struct vmw_draw_instanced_indirect_cmd {
3184 		SVGA3dCmdHeader header;
3185 		SVGA3dCmdDXDrawInstancedIndirect body;
3186 	} *cmd = container_of(header, typeof(*cmd), header);
3187 
3188 	if (!has_sm5_context(dev_priv))
3189 		return -EINVAL;
3190 
3191 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3192 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3193 				 &cmd->body.argsBufferSid, NULL);
3194 }
3195 
3196 static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3197 				     struct vmw_sw_context *sw_context,
3198 				     SVGA3dCmdHeader *header)
3199 {
3200 	struct vmw_dispatch_indirect_cmd {
3201 		SVGA3dCmdHeader header;
3202 		SVGA3dCmdDXDispatchIndirect body;
3203 	} *cmd = container_of(header, typeof(*cmd), header);
3204 
3205 	if (!has_sm5_context(dev_priv))
3206 		return -EINVAL;
3207 
3208 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3209 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3210 				 &cmd->body.argsBufferSid, NULL);
3211 }
3212 
3213 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3214 				struct vmw_sw_context *sw_context,
3215 				void *buf, uint32_t *size)
3216 {
3217 	uint32_t size_remaining = *size;
3218 	uint32_t cmd_id;
3219 
3220 	cmd_id = ((uint32_t *)buf)[0];
3221 	switch (cmd_id) {
3222 	case SVGA_CMD_UPDATE:
3223 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3224 		break;
3225 	case SVGA_CMD_DEFINE_GMRFB:
3226 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3227 		break;
3228 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3229 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3230 		break;
3231 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3232 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3233 		break;
3234 	default:
3235 		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3236 		return -EINVAL;
3237 	}
3238 
3239 	if (*size > size_remaining) {
3240 		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3241 			       cmd_id);
3242 		return -EINVAL;
3243 	}
3244 
3245 	if (unlikely(!sw_context->kernel)) {
3246 		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3247 		return -EPERM;
3248 	}
3249 
3250 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3251 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3252 
3253 	return 0;
3254 }
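
/*
 * vmw_cmd_check_not_3d() handles the legacy 2D FIFO path: every
 * accepted command has a fixed size, so *size is computed up front,
 * checked against the bytes remaining in the batch, and restricted to
 * kernel submitters; only DEFINE_GMRFB needs further translation of
 * its embedded guest pointer.
 */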
3255 
3256 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3257 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3258 		    false, false, false),
3259 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3260 		    false, false, false),
3261 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3262 		    true, false, false),
3263 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3264 		    true, false, false),
3265 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3266 		    true, false, false),
3267 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3268 		    false, false, false),
3269 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3270 		    false, false, false),
3271 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3272 		    true, false, false),
3273 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3274 		    true, false, false),
3275 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3276 		    true, false, false),
3277 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3278 		    &vmw_cmd_set_render_target_check, true, false, false),
3279 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3280 		    true, false, false),
3281 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3282 		    true, false, false),
3283 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3284 		    true, false, false),
3285 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3286 		    true, false, false),
3287 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3288 		    true, false, false),
3289 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3290 		    true, false, false),
3291 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3292 		    true, false, false),
3293 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3294 		    false, false, false),
3295 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3296 		    true, false, false),
3297 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3298 		    true, false, false),
3299 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3300 		    true, false, false),
3301 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3302 		    true, false, false),
3303 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3304 		    true, false, false),
3305 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3306 		    true, false, false),
3307 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3308 		    true, false, false),
3309 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3310 		    true, false, false),
3311 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3312 		    true, false, false),
3313 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3314 		    true, false, false),
3315 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3316 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3317 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3318 		    false, false, false),
3319 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3320 		    false, false, false),
3321 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3322 		    false, false, false),
3323 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3324 		    false, false, false),
3325 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3326 		    false, false, false),
3327 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3328 		    false, false, false),
3329 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3330 		    false, false, false),
3331 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3332 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3333 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3334 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3335 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3336 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3337 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3338 		    false, false, true),
3339 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3340 		    false, false, true),
3341 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3342 		    false, false, true),
3343 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3344 		    false, false, true),
3345 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3346 		    false, false, true),
3347 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3348 		    false, false, true),
3349 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3350 		    false, false, true),
3351 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3352 		    false, false, true),
3353 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3354 		    true, false, true),
3355 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3356 		    false, false, true),
3357 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3358 		    true, false, true),
3359 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3360 		    &vmw_cmd_update_gb_surface, true, false, true),
3361 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3362 		    &vmw_cmd_readback_gb_image, true, false, true),
3363 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3364 		    &vmw_cmd_readback_gb_surface, true, false, true),
3365 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3366 		    &vmw_cmd_invalidate_gb_image, true, false, true),
3367 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3368 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3369 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3370 		    false, false, true),
3371 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3372 		    false, false, true),
3373 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3374 		    false, false, true),
3375 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3376 		    false, false, true),
3377 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3378 		    false, false, true),
3379 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3380 		    false, false, true),
3381 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3382 		    true, false, true),
3383 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3384 		    false, false, true),
3385 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3386 		    false, false, false),
3387 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3388 		    true, false, true),
3389 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3390 		    true, false, true),
3391 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3392 		    true, false, true),
3393 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3394 		    true, false, true),
3395 	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3396 		    true, false, true),
3397 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3398 		    false, false, true),
3399 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3400 		    false, false, true),
3401 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3402 		    false, false, true),
3403 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3404 		    false, false, true),
3405 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3406 		    false, false, true),
3407 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3408 		    false, false, true),
3409 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3410 		    false, false, true),
3411 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3412 		    false, false, true),
3413 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3414 		    false, false, true),
3415 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3416 		    false, false, true),
3417 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3418 		    true, false, true),
3419 	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3420 		    false, false, true),
3421 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3422 		    false, false, true),
3423 	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3424 		    false, false, true),
3425 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3426 		    false, false, true),
3427 
3428 	/* SM commands */
3429 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3430 		    false, false, true),
3431 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3432 		    false, false, true),
3433 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3434 		    false, false, true),
3435 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3436 		    false, false, true),
3437 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3438 		    false, false, true),
3439 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3440 		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3441 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3442 		    &vmw_cmd_dx_set_shader_res, true, false, true),
3443 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3444 		    true, false, true),
3445 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3446 		    true, false, true),
3447 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3448 		    true, false, true),
3449 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3450 		    true, false, true),
3451 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3452 		    true, false, true),
3453 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3454 		    &vmw_cmd_dx_cid_check, true, false, true),
3455 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3456 		    true, false, true),
3457 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3458 		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3459 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3460 		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3461 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3462 		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3463 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3464 		    true, false, true),
3465 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3466 		    &vmw_cmd_dx_cid_check, true, false, true),
3467 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3468 		    &vmw_cmd_dx_cid_check, true, false, true),
3469 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3470 		    true, false, true),
3471 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3472 		    true, false, true),
3473 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3474 		    true, false, true),
3475 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3476 		    &vmw_cmd_dx_cid_check, true, false, true),
3477 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3478 		    true, false, true),
3479 	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3480 		    true, false, true),
3481 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3482 		    true, false, true),
3483 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3484 		    true, false, true),
3485 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3486 		    true, false, true),
3487 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3488 		    true, false, true),
3489 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3490 		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3491 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3492 		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3493 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3494 		    true, false, true),
3495 	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3496 		    true, false, true),
3497 	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3498 		    &vmw_cmd_dx_check_subresource, true, false, true),
3499 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3500 		    &vmw_cmd_dx_check_subresource, true, false, true),
3501 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3502 		    &vmw_cmd_dx_check_subresource, true, false, true),
3503 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3504 		    &vmw_cmd_dx_view_define, true, false, true),
3505 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3506 		    &vmw_cmd_dx_view_remove, true, false, true),
3507 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3508 		    &vmw_cmd_dx_view_define, true, false, true),
3509 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3510 		    &vmw_cmd_dx_view_remove, true, false, true),
3511 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3512 		    &vmw_cmd_dx_view_define, true, false, true),
3513 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3514 		    &vmw_cmd_dx_view_remove, true, false, true),
3515 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3516 		    &vmw_cmd_dx_so_define, true, false, true),
3517 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3518 		    &vmw_cmd_dx_cid_check, true, false, true),
3519 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3520 		    &vmw_cmd_dx_so_define, true, false, true),
3521 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3522 		    &vmw_cmd_dx_cid_check, true, false, true),
3523 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3524 		    &vmw_cmd_dx_so_define, true, false, true),
3525 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3526 		    &vmw_cmd_dx_cid_check, true, false, true),
3527 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3528 		    &vmw_cmd_dx_so_define, true, false, true),
3529 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3530 		    &vmw_cmd_dx_cid_check, true, false, true),
3531 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3532 		    &vmw_cmd_dx_so_define, true, false, true),
3533 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3534 		    &vmw_cmd_dx_cid_check, true, false, true),
3535 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3536 		    &vmw_cmd_dx_define_shader, true, false, true),
3537 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3538 		    &vmw_cmd_dx_destroy_shader, true, false, true),
3539 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3540 		    &vmw_cmd_dx_bind_shader, true, false, true),
3541 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3542 		    &vmw_cmd_dx_so_define, true, false, true),
3543 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3544 		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3545 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3546 		    &vmw_cmd_dx_set_streamoutput, true, false, true),
3547 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3548 		    &vmw_cmd_dx_set_so_targets, true, false, true),
3549 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3550 		    &vmw_cmd_dx_cid_check, true, false, true),
3551 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3552 		    &vmw_cmd_dx_cid_check, true, false, true),
3553 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3554 		    &vmw_cmd_buffer_copy_check, true, false, true),
3555 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3556 		    &vmw_cmd_pred_copy_check, true, false, true),
3557 	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3558 		    &vmw_cmd_dx_transfer_from_buffer,
3559 		    true, false, true),
3560 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
3561 		    &vmw_cmd_dx_set_constant_buffer_offset,
3562 		    true, false, true),
3563 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
3564 		    &vmw_cmd_dx_set_constant_buffer_offset,
3565 		    true, false, true),
3566 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
3567 		    &vmw_cmd_dx_set_constant_buffer_offset,
3568 		    true, false, true),
3569 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
3570 		    &vmw_cmd_dx_set_constant_buffer_offset,
3571 		    true, false, true),
3572 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
3573 		    &vmw_cmd_dx_set_constant_buffer_offset,
3574 		    true, false, true),
3575 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
3576 		    &vmw_cmd_dx_set_constant_buffer_offset,
3577 		    true, false, true),
3578 	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3579 		    true, false, true),
3580 
3581 	/* SM5 commands */
3584 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3585 		    true, false, true),
3586 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3587 		    true, false, true),
3588 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3589 		    true, false, true),
3590 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3591 		    &vmw_cmd_clear_uav_float, true, false, true),
3592 	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3593 		    false, true),
3594 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3595 		    true),
3596 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3597 		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
3598 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3599 		    &vmw_cmd_instanced_indirect, true, false, true),
3600 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3601 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3602 		    &vmw_cmd_dispatch_indirect, true, false, true),
3603 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3604 		    false, true),
3605 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3606 		    &vmw_cmd_sm5_view_define, true, false, true),
3607 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3608 		    &vmw_cmd_dx_define_streamoutput, true, false, true),
3609 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3610 		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
3611 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
3612 		    &vmw_cmd_dx_so_define, true, false, true),
3613 };
3614 
3615 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3616 {
3617 	u32 cmd_id = ((const u32 *)buf)[0];
3618 
3619 	if (cmd_id >= SVGA_CMD_MAX) {
3620 		const SVGA3dCmdHeader *header = (const SVGA3dCmdHeader *)buf;
3621 		const struct vmw_cmd_entry *entry;
3622 
3623 		*size = header->size + sizeof(SVGA3dCmdHeader);
3624 		cmd_id = header->id;
3625 		if (cmd_id < SVGA_3D_CMD_BASE || cmd_id >= SVGA_3D_CMD_MAX)
3626 			return false;
3627 
3628 		cmd_id -= SVGA_3D_CMD_BASE;
3629 		entry = &vmw_cmd_entries[cmd_id];
3630 		*cmd = entry->cmd_name;
3631 		return true;
3632 	}
3633 
3634 	switch (cmd_id) {
3635 	case SVGA_CMD_UPDATE:
3636 		*cmd = "SVGA_CMD_UPDATE";
3637 		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3638 		break;
3639 	case SVGA_CMD_DEFINE_GMRFB:
3640 		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3641 		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3642 		break;
3643 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3644 		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3645 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3646 		break;
3647 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3648 		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3649 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3650 		break;
3651 	default:
3652 		*cmd = "UNKNOWN";
3653 		*size = 0;
3654 		return false;
3655 	}
3656 
3657 	return true;
3658 }
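
/*
 * vmw_cmd_describe() is meant for debug paths that want to name the commands
 * of a batch without validating them. A minimal sketch of such a walk
 * (assuming buf/bytes describe a well-formed batch; the loop itself is
 * illustrative, not an existing helper):
 *
 *	const u8 *buf = ...;
 *	u32 bytes = ..., size;
 *	const char *name;
 *
 *	while (bytes > 0 && vmw_cmd_describe(buf, &size, &name) &&
 *	       size > 0 && size <= bytes) {
 *		pr_debug("cmd: %s (%u bytes)\n", name ?: "?", size);
 *		buf += size;
 *		bytes -= size;
 *	}
 */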
3659 
3660 static int vmw_cmd_check(struct vmw_private *dev_priv,
3661 			 struct vmw_sw_context *sw_context, void *buf,
3662 			 uint32_t *size)
3663 {
3664 	uint32_t cmd_id;
3665 	uint32_t size_remaining = *size;
3666 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3667 	int ret;
3668 	const struct vmw_cmd_entry *entry;
3669 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3670 
3671 	cmd_id = ((uint32_t *)buf)[0];
3672 	/* Handle any non-3D commands */
3673 	if (unlikely(cmd_id < SVGA_CMD_MAX))
3674 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3675 
3677 	cmd_id = header->id;
3678 	*size = header->size + sizeof(SVGA3dCmdHeader);
3679 
3680 	cmd_id -= SVGA_3D_CMD_BASE;
3681 	if (unlikely(*size > size_remaining))
3682 		goto out_invalid;
3683 
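	/* cmd_id is unsigned, so ids below SVGA_3D_CMD_BASE wrap and fail here. */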
3684 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3685 		goto out_invalid;
3686 
3687 	entry = &vmw_cmd_entries[cmd_id];
3688 	if (unlikely(!entry->func))
3689 		goto out_invalid;
3690 
3691 	if (unlikely(!entry->user_allow && !sw_context->kernel))
3692 		goto out_privileged;
3693 
3694 	if (unlikely(entry->gb_disable && gb))
3695 		goto out_old;
3696 
3697 	if (unlikely(entry->gb_enable && !gb))
3698 		goto out_new;
3699 
3700 	ret = entry->func(dev_priv, sw_context, header);
3701 	if (unlikely(ret != 0)) {
3702 		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3703 			       cmd_id + SVGA_3D_CMD_BASE, ret);
3704 		return ret;
3705 	}
3706 
3707 	return 0;
3708 out_invalid:
3709 	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3710 		       cmd_id + SVGA_3D_CMD_BASE);
3711 	return -EINVAL;
3712 out_privileged:
3713 	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3714 		       cmd_id + SVGA_3D_CMD_BASE);
3715 	return -EPERM;
3716 out_old:
3717 	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3718 		       cmd_id + SVGA_3D_CMD_BASE);
3719 	return -EINVAL;
3720 out_new:
3721 	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3722 		       cmd_id + SVGA_3D_CMD_BASE);
3723 	return -EINVAL;
3724 }
3725 
3726 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3727 			     struct vmw_sw_context *sw_context, void *buf,
3728 			     uint32_t size)
3729 {
3730 	int32_t cur_size = size;
3731 	int ret;
3732 
3733 	sw_context->buf_start = buf;
3734 
3735 	while (cur_size > 0) {
3736 		size = cur_size;
3737 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3738 		if (unlikely(ret != 0))
3739 			return ret;
3740 		buf = (void *)((unsigned long) buf + size);
3741 		cur_size -= size;
3742 	}
3743 
3744 	if (unlikely(cur_size != 0)) {
3745 		VMW_DEBUG_USER("Command verifier out of sync.\n");
3746 		return -EINVAL;
3747 	}
3748 
3749 	return 0;
3750 }
3751 
3752 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3753 {
3754 	/* Memory is validation context memory, so no need to free it */
3755 	INIT_LIST_HEAD(&sw_context->bo_relocations);
3756 }
3757 
3758 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3759 {
3760 	struct vmw_relocation *reloc;
3761 	struct ttm_buffer_object *bo;
3762 
3763 	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3764 		bo = &reloc->vbo->tbo;
3765 		switch (bo->resource->mem_type) {
3766 		case TTM_PL_VRAM:
3767 			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3768 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3769 			break;
3770 		case VMW_PL_GMR:
3771 			reloc->location->gmrId = bo->resource->start;
3772 			break;
3773 		case VMW_PL_MOB:
3774 			*reloc->mob_loc = bo->resource->start;
3775 			break;
3776 		default:
3777 			BUG();
3778 		}
3779 	}
3780 	vmw_free_relocations(sw_context);
3781 }
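
/*
 * A worked example with assumed numbers: for a buffer whose resource starts at
 * page 0x10 of VRAM, the VRAM case above patches the guest pointer to
 * gmrId = SVGA_GMR_FRAMEBUFFER and adds 0x10 << PAGE_SHIFT to the offset,
 * i.e. the buffer's byte offset within the framebuffer "GMR". For a MOB
 * placement, only the 32-bit MOB id (the resource's start value) is written.
 */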
3782 
3783 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3784 				 uint32_t size)
3785 {
3786 	if (likely(sw_context->cmd_bounce_size >= size))
3787 		return 0;
3788 
3789 	if (sw_context->cmd_bounce_size == 0)
3790 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3791 
3792 	while (sw_context->cmd_bounce_size < size) {
3793 		sw_context->cmd_bounce_size =
3794 			PAGE_ALIGN(sw_context->cmd_bounce_size +
3795 				   (sw_context->cmd_bounce_size >> 1));
3796 	}
3797 
3798 	vfree(sw_context->cmd_bounce);
3799 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3800 
3801 	if (sw_context->cmd_bounce == NULL) {
3802 		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3803 		sw_context->cmd_bounce_size = 0;
3804 		return -ENOMEM;
3805 	}
3806 
3807 	return 0;
3808 }
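
/*
 * The bounce buffer grows geometrically: each step page-aligns 1.5x the
 * current size until the requested size fits. Assuming a 32 KiB
 * VMWGFX_CMD_BOUNCE_INIT_SIZE and 4 KiB pages, a 70000-byte batch grows the
 * buffer 32768 -> 49152 -> 73728 bytes; the old buffer is freed before the
 * new vmalloc() so at most one copy is resident at a time.
 */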
3809 
3810 /*
3811  * vmw_execbuf_fence_commands - create and submit a command stream fence
3812  *
3813  * Creates a fence object and submits a command stream marker.
3814  * If this fails for some reason, we sync the fifo and return NULL.
3815  * It is then safe to fence buffers with a NULL pointer.
3816  *
3817  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3818  * user-space fence handle is created; otherwise no handle is created.
3819  */
3820 
3821 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3822 			       struct vmw_private *dev_priv,
3823 			       struct vmw_fence_obj **p_fence,
3824 			       uint32_t *p_handle)
3825 {
3826 	uint32_t sequence;
3827 	int ret;
3828 	bool synced = false;
3829 
3830 	/* p_handle implies file_priv. */
3831 	BUG_ON(p_handle != NULL && file_priv == NULL);
3832 
3833 	ret = vmw_cmd_send_fence(dev_priv, &sequence);
3834 	if (unlikely(ret != 0)) {
3835 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3836 		synced = true;
3837 	}
3838 
3839 	if (p_handle != NULL)
3840 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3841 					    sequence, p_fence, p_handle);
3842 	else
3843 		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3844 
3845 	if (unlikely(ret != 0 && !synced)) {
3846 		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3847 					 false, VMW_FENCE_WAIT_TIMEOUT);
3848 		*p_fence = NULL;
3849 	}
3850 
3851 	return ret;
3852 }
3853 
3854 /**
3855  * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3856  *
3857  * @dev_priv: Pointer to a vmw_private struct.
3858  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3859  * @ret: Return value from fence object creation.
3860  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3861  * the information should be copied.
3862  * @fence: Pointer to the fence object.
3863  * @fence_handle: User-space fence handle.
3864  * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3865  *
3866  * This function copies fence information to user-space. If copying fails, the
3867  * user-space struct drm_vmw_fence_rep::error member is most likely left
3868  * untouched; user-space will typically have preloaded it with -EFAULT, in
3869  * which case the copy failure can be detected there.
3870  *
3871  * Also if copying fails, user-space will be unable to signal the fence object
3872  * so we wait for it immediately, and then unreference the user-space reference.
3873  */
3874 int
3875 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3876 			    struct vmw_fpriv *vmw_fp, int ret,
3877 			    struct drm_vmw_fence_rep __user *user_fence_rep,
3878 			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3879 			    int32_t out_fence_fd)
3880 {
3881 	struct drm_vmw_fence_rep fence_rep;
3882 
3883 	if (user_fence_rep == NULL)
3884 		return 0;
3885 
3886 	memset(&fence_rep, 0, sizeof(fence_rep));
3887 
3888 	fence_rep.error = ret;
3889 	fence_rep.fd = out_fence_fd;
3890 	if (ret == 0) {
3891 		BUG_ON(fence == NULL);
3892 
3893 		fence_rep.handle = fence_handle;
3894 		fence_rep.seqno = fence->base.seqno;
3895 		vmw_update_seqno(dev_priv);
3896 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3897 	}
3898 
3899 	/*
3900 	 * copy_to_user errors will be detected by user space not seeing
3901 	 * fence_rep::error filled in. Typically user-space would have pre-set
3902 	 * that member to -EFAULT.
3903 	 */
3904 	ret = copy_to_user(user_fence_rep, &fence_rep,
3905 			   sizeof(fence_rep));
3906 
3907 	/*
3908 	 * User-space lost the fence object. We need to sync and unreference the
3909 	 * handle.
3910 	 */
3911 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3912 		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
3913 		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3914 		(void) vmw_fence_obj_wait(fence, false, false,
3915 					  VMW_FENCE_WAIT_TIMEOUT);
3916 	}
3917 
3918 	return ret ? -EFAULT : 0;
3919 }
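
/*
 * The matching user-space convention, sketched (names are illustrative;
 * fence_rep is the struct pointed to by drm_vmw_execbuf_arg::fence_rep):
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (uintptr_t)&rep;
 *	drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
 *	if (rep.error == -EFAULT) {
 *		// rep was never written: either the submission failed early or
 *		// the kernel's copy_to_user() failed, in which case the fence
 *		// has already been waited on and released on our behalf.
 *	} else if (rep.error == 0) {
 *		// rep.handle, rep.seqno and (if requested) rep.fd are valid.
 *	}
 */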
3920 
3921 /**
3922  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3923  *
3924  * @dev_priv: Pointer to a device private structure.
3925  * @kernel_commands: Pointer to the unpatched command batch.
3926  * @command_size: Size of the unpatched command batch.
3927  * @sw_context: Structure holding the relocation lists.
3928  *
3929  * Side effects: If this function returns 0, then the command batch pointed to
3930  * by @kernel_commands will have been modified.
3931  */
3932 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3933 				   void *kernel_commands, u32 command_size,
3934 				   struct vmw_sw_context *sw_context)
3935 {
3936 	void *cmd;
3937 
3938 	if (sw_context->dx_ctx_node)
3939 		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3940 					  sw_context->dx_ctx_node->ctx->id);
3941 	else
3942 		cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3943 
3944 	if (!cmd)
3945 		return -ENOMEM;
3946 
3947 	vmw_apply_relocations(sw_context);
3948 	memcpy(cmd, kernel_commands, command_size);
3949 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3950 	vmw_resource_relocations_free(&sw_context->res_relocations);
3951 	vmw_cmd_commit(dev_priv, command_size);
3952 
3953 	return 0;
3954 }
3955 
3956 /**
3957  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3958  * command buffer manager.
3959  *
3960  * @dev_priv: Pointer to a device private structure.
3961  * @header: Opaque handle to the command buffer allocation.
3962  * @command_size: Size of the unpatched command batch.
3963  * @sw_context: Structure holding the relocation lists.
3964  *
3965  * Side effects: If this function returns 0, then the command buffer represented
3966  * by @header will have been modified.
3967  */
3968 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3969 				     struct vmw_cmdbuf_header *header,
3970 				     u32 command_size,
3971 				     struct vmw_sw_context *sw_context)
3972 {
3973 	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3974 		  SVGA3D_INVALID_ID);
3975 	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3976 				       header);
3977 
3978 	vmw_apply_relocations(sw_context);
3979 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3980 	vmw_resource_relocations_free(&sw_context->res_relocations);
3981 	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3982 
3983 	return 0;
3984 }
3985 
3986 /**
3987  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3988  * submission using a command buffer.
3989  *
3990  * @dev_priv: Pointer to a device private structure.
3991  * @user_commands: User-space pointer to the commands to be submitted.
3992  * @kernel_commands: Kernel-space copy of the command batch, if one already
3993  * exists. May be NULL.
3994  * @command_size: Size of the unpatched command batch.
3995  * @header: Out parameter returning the opaque pointer to the command buffer.
3996  *
3997  * This function checks whether we can use the command buffer manager for
3998  * submission and if so, creates a command buffer of suitable size and copies
3999  * the user data into that buffer.
4000  *
4001  * On successful return, the function returns a pointer to the data in the
4002  * command buffer and *@header is set to non-NULL.
4003  *
4004  * If command buffers could not be used, the function returns the value of
4005  * @kernel_commands on entry; that value may be NULL, in which case *@header is
4006  * set to NULL as well.
4007  *
4008  * If an error is encountered, the function returns a pointer error value. If
4009  * interrupted by a signal while sleeping, it returns -ERESTARTSYS cast to a
4010  * pointer error value.
4009  */
4010 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
4011 				void __user *user_commands,
4012 				void *kernel_commands, u32 command_size,
4013 				struct vmw_cmdbuf_header **header)
4014 {
4015 	size_t cmdbuf_size;
4016 	int ret;
4017 
4018 	*header = NULL;
4019 	if (command_size > SVGA_CB_MAX_SIZE) {
4020 		VMW_DEBUG_USER("Command buffer is too large.\n");
4021 		return ERR_PTR(-EINVAL);
4022 	}
4023 
4024 	if (!dev_priv->cman || kernel_commands)
4025 		return kernel_commands;
4026 
4027 	/* If possible, add a little space for fencing. */
4028 	cmdbuf_size = command_size + 512;
4029 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4030 	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
4031 					   header);
4032 	if (IS_ERR(kernel_commands))
4033 		return kernel_commands;
4034 
4035 	ret = copy_from_user(kernel_commands, user_commands, command_size);
4036 	if (ret) {
4037 		VMW_DEBUG_USER("Failed copying commands.\n");
4038 		vmw_cmdbuf_header_free(*header);
4039 		*header = NULL;
4040 		return ERR_PTR(-EFAULT);
4041 	}
4042 
4043 	return kernel_commands;
4044 }
4045 
4046 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4047 				   struct vmw_sw_context *sw_context,
4048 				   uint32_t handle)
4049 {
4050 	struct vmw_resource *res;
4051 	int ret;
4052 	unsigned int size;
4053 
4054 	if (handle == SVGA3D_INVALID_ID)
4055 		return 0;
4056 
4057 	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4058 	ret = vmw_validation_preload_res(sw_context->ctx, size);
4059 	if (ret)
4060 		return ret;
4061 
4062 	ret = vmw_user_resource_lookup_handle
4063 		(dev_priv, sw_context->fp->tfile, handle,
4064 		 user_context_converter, &res);
4065 	if (ret != 0) {
4066 		VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
4067 			       (unsigned int) handle);
4068 		return ret;
4069 	}
4070 
4071 	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
4072 				      vmw_val_add_flag_none);
4073 	if (unlikely(ret != 0)) {
4074 		vmw_resource_unreference(&res);
4075 		return ret;
4076 	}
4077 
4078 	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4079 	sw_context->man = vmw_context_res_man(res);
4080 
4081 	vmw_resource_unreference(&res);
4082 	return 0;
4083 }
4084 
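/**
 * vmw_execbuf_process - Validate, patch and submit a command batch.
 *
 * @file_priv: Pointer to the calling file.
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the command batch, or NULL when
 * @kernel_commands is supplied.
 * @kernel_commands: Kernel-space copy of the command batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: Historical throttling hint; throttling is no longer supported.
 * @dx_context_handle: DX context to tie the batch to, or SVGA3D_INVALID_ID.
 * @user_fence_rep: Optional user-space address of a struct drm_vmw_fence_rep
 * to receive fence information.
 * @out_fence: If non-NULL, the created fence object is handed out here rather
 * than unreferenced.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags.
 */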
4085 int vmw_execbuf_process(struct drm_file *file_priv,
4086 			struct vmw_private *dev_priv,
4087 			void __user *user_commands, void *kernel_commands,
4088 			uint32_t command_size, uint64_t throttle_us,
4089 			uint32_t dx_context_handle,
4090 			struct drm_vmw_fence_rep __user *user_fence_rep,
4091 			struct vmw_fence_obj **out_fence, uint32_t flags)
4092 {
4093 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
4094 	struct vmw_fence_obj *fence = NULL;
4095 	struct vmw_cmdbuf_header *header;
4096 	uint32_t handle = 0;
4097 	int ret;
4098 	int32_t out_fence_fd = -1;
4099 	struct sync_file *sync_file = NULL;
4100 	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
4101 
4102 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4103 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4104 		if (out_fence_fd < 0) {
4105 			VMW_DEBUG_USER("Failed to get a fence fd.\n");
4106 			return out_fence_fd;
4107 		}
4108 	}
4109 
4110 	if (throttle_us)
4111 		VMW_DEBUG_USER("Throttling is no longer supported.\n");
4113 
4114 	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4115 					     kernel_commands, command_size,
4116 					     &header);
4117 	if (IS_ERR(kernel_commands)) {
4118 		ret = PTR_ERR(kernel_commands);
4119 		goto out_free_fence_fd;
4120 	}
4121 
4122 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4123 	if (ret) {
4124 		ret = -ERESTARTSYS;
4125 		goto out_free_header;
4126 	}
4127 
4128 	sw_context->kernel = false;
4129 	if (kernel_commands == NULL) {
4130 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
4131 		if (unlikely(ret != 0))
4132 			goto out_unlock;
4133 
4134 		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4135 				     command_size);
4136 		if (unlikely(ret != 0)) {
4137 			ret = -EFAULT;
4138 			VMW_DEBUG_USER("Failed copying commands.\n");
4139 			goto out_unlock;
4140 		}
4141 
4142 		kernel_commands = sw_context->cmd_bounce;
4143 	} else if (!header) {
4144 		sw_context->kernel = true;
4145 	}
4146 
4147 	sw_context->filp = file_priv;
4148 	sw_context->fp = vmw_fpriv(file_priv);
4149 	INIT_LIST_HEAD(&sw_context->ctx_list);
4150 	sw_context->cur_query_bo = dev_priv->pinned_bo;
4151 	sw_context->last_query_ctx = NULL;
4152 	sw_context->needs_post_query_barrier = false;
4153 	sw_context->dx_ctx_node = NULL;
4154 	sw_context->dx_query_mob = NULL;
4155 	sw_context->dx_query_ctx = NULL;
4156 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4157 	INIT_LIST_HEAD(&sw_context->res_relocations);
4158 	INIT_LIST_HEAD(&sw_context->bo_relocations);
4159 
4160 	if (sw_context->staged_bindings)
4161 		vmw_binding_state_reset(sw_context->staged_bindings);
4162 
4163 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4164 	sw_context->ctx = &val_ctx;
4165 	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4166 	if (unlikely(ret != 0))
4167 		goto out_err_nores;
4168 
4169 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4170 				command_size);
4171 	if (unlikely(ret != 0))
4172 		goto out_err_nores;
4173 
4174 	ret = vmw_resources_reserve(sw_context);
4175 	if (unlikely(ret != 0))
4176 		goto out_err_nores;
4177 
4178 	ret = vmw_validation_bo_reserve(&val_ctx, true);
4179 	if (unlikely(ret != 0))
4180 		goto out_err_nores;
4181 
4182 	ret = vmw_validation_bo_validate(&val_ctx, true);
4183 	if (unlikely(ret != 0))
4184 		goto out_err;
4185 
4186 	ret = vmw_validation_res_validate(&val_ctx, true);
4187 	if (unlikely(ret != 0))
4188 		goto out_err;
4189 
4190 	vmw_validation_drop_ht(&val_ctx);
4191 
4192 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4193 	if (unlikely(ret != 0)) {
4194 		ret = -ERESTARTSYS;
4195 		goto out_err;
4196 	}
4197 
4198 	if (dev_priv->has_mob) {
4199 		ret = vmw_rebind_contexts(sw_context);
4200 		if (unlikely(ret != 0))
4201 			goto out_unlock_binding;
4202 	}
4203 
4204 	if (!header) {
4205 		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4206 					      command_size, sw_context);
4207 	} else {
4208 		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4209 						sw_context);
4210 		header = NULL;
4211 	}
4212 	mutex_unlock(&dev_priv->binding_mutex);
4213 	if (ret)
4214 		goto out_err;
4215 
4216 	vmw_query_bo_switch_commit(dev_priv, sw_context);
4217 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4218 					 (user_fence_rep) ? &handle : NULL);
4219 	/*
4220 	 * This error is harmless, because if fence submission fails,
4221 	 * vmw_execbuf_fence_commands() falls back to syncing the fifo, and
4222 	 * the error is propagated to user-space in @user_fence_rep.
4223 	 */
4224 	if (ret != 0)
4225 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4226 
4227 	vmw_execbuf_bindings_commit(sw_context, false);
4228 	vmw_bind_dx_query_mob(sw_context);
4229 	vmw_validation_res_unreserve(&val_ctx, false);
4230 
4231 	vmw_validation_bo_fence(sw_context->ctx, fence);
4232 
4233 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4234 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4235 
4236 	/*
4237 	 * If anything fails here, give up trying to export the fence and do a
4238 	 * sync, since user-space will not be able to sync the fence itself.
4239 	 * This ensures we remain functionally correct.
4240 	 */
4241 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4243 		sync_file = sync_file_create(&fence->base);
4244 		if (!sync_file) {
4245 			VMW_DEBUG_USER("Sync file create failed for fence\n");
4246 			put_unused_fd(out_fence_fd);
4247 			out_fence_fd = -1;
4248 
4249 			(void) vmw_fence_obj_wait(fence, false, false,
4250 						  VMW_FENCE_WAIT_TIMEOUT);
4251 		}
4252 	}
4253 
4254 	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4255 					  user_fence_rep, fence, handle, out_fence_fd);
4256 
4257 	if (sync_file) {
4258 		if (ret) {
4259 			/* usercopy of fence failed, put the file object */
4260 			fput(sync_file->file);
4261 			put_unused_fd(out_fence_fd);
4262 		} else {
4263 			/* Link the fence with the FD created earlier */
4264 			fd_install(out_fence_fd, sync_file->file);
4265 		}
4266 	}
4267 
4268 	/* Don't unreference when handing fence out */
4269 	if (unlikely(out_fence != NULL)) {
4270 		*out_fence = fence;
4271 		fence = NULL;
4272 	} else if (likely(fence != NULL)) {
4273 		vmw_fence_obj_unreference(&fence);
4274 	}
4275 
4276 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4277 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4278 
4279 	/*
4280 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4281 	 * in resource destruction paths.
4282 	 */
4283 	vmw_validation_unref_lists(&val_ctx);
4284 
4285 	return ret;
4286 
4287 out_unlock_binding:
4288 	mutex_unlock(&dev_priv->binding_mutex);
4289 out_err:
4290 	vmw_validation_bo_backoff(&val_ctx);
4291 out_err_nores:
4292 	vmw_execbuf_bindings_commit(sw_context, true);
4293 	vmw_validation_res_unreserve(&val_ctx, true);
4294 	vmw_resource_relocations_free(&sw_context->res_relocations);
4295 	vmw_free_relocations(sw_context);
4296 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4297 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4298 out_unlock:
4299 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4300 	vmw_validation_drop_ht(&val_ctx);
4301 	WARN_ON(!list_empty(&sw_context->ctx_list));
4302 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4303 
4304 	/*
4305 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4306 	 * in resource destruction paths.
4307 	 */
4308 	vmw_validation_unref_lists(&val_ctx);
4309 out_free_header:
4310 	if (header)
4311 		vmw_cmdbuf_header_free(header);
4312 out_free_fence_fd:
4313 	if (out_fence_fd >= 0)
4314 		put_unused_fd(out_fence_fd);
4315 
4316 	return ret;
4317 }
4318 
4319 /**
4320  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4321  *
4322  * @dev_priv: The device private structure.
4323  *
4324  * This function is called to idle the fifo and unpin the query buffer if the
4325  * normal way to do this hits an error, which should typically be extremely
4326  * rare.
4327  */
4328 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4329 {
4330 	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4331 
4332 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4333 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4334 	if (dev_priv->dummy_query_bo_pinned) {
4335 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4336 		dev_priv->dummy_query_bo_pinned = false;
4337 	}
4338 }
4339 
4341 /**
4342  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4343  * bo.
4344  *
4345  * @dev_priv: The device private structure.
4346  * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4347  * query barrier that flushes all queries touching the current buffer pointed to
4348  * by @dev_priv->pinned_bo
4349  *
4350  * This function should be used to unpin the pinned query bo, or as a query
4351  * barrier when we need to make sure that all queries have finished before the
4352  * next fifo command. (For example on hardware context destructions where the
4353  * hardware may otherwise leak unfinished queries).
4354  *
4355  * This function does not return any failure codes, but makes attempts to do
4356  * safe unpinning in case of errors.
4357  *
4358  * The function will synchronize on the previous query barrier, and will thus
4359  * not finish until that barrier has executed.
4360  *
4361  * The @dev_priv->cmdbuf_mutex must be held by the current thread before
4362  * calling this function.
4363  */
4364 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4365 				     struct vmw_fence_obj *fence)
4366 {
4367 	int ret = 0;
4368 	struct vmw_fence_obj *lfence = NULL;
4369 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4370 
4371 	if (dev_priv->pinned_bo == NULL)
4372 		goto out_unlock;
4373 
4374 	vmw_bo_placement_set(dev_priv->pinned_bo,
4375 			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4376 			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4377 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
4378 	if (ret)
4379 		goto out_no_reserve;
4380 
4381 	vmw_bo_placement_set(dev_priv->dummy_query_bo,
4382 			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4383 			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4384 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
4385 	if (ret)
4386 		goto out_no_reserve;
4387 
4388 	ret = vmw_validation_bo_reserve(&val_ctx, false);
4389 	if (ret)
4390 		goto out_no_reserve;
4391 
4392 	if (dev_priv->query_cid_valid) {
4393 		BUG_ON(fence != NULL);
4394 		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4395 		if (ret)
4396 			goto out_no_emit;
4397 		dev_priv->query_cid_valid = false;
4398 	}
4399 
4400 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4401 	if (dev_priv->dummy_query_bo_pinned) {
4402 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4403 		dev_priv->dummy_query_bo_pinned = false;
4404 	}
4405 	if (fence == NULL) {
4406 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4407 						  NULL);
4408 		fence = lfence;
4409 	}
4410 	vmw_validation_bo_fence(&val_ctx, fence);
4411 	if (lfence != NULL)
4412 		vmw_fence_obj_unreference(&lfence);
4413 
4414 	vmw_validation_unref_lists(&val_ctx);
4415 	vmw_bo_unreference(&dev_priv->pinned_bo);
4416 
4417 out_unlock:
4418 	return;
4419 out_no_emit:
4420 	vmw_validation_bo_backoff(&val_ctx);
4421 out_no_reserve:
4422 	vmw_validation_unref_lists(&val_ctx);
4423 	vmw_execbuf_unpin_panic(dev_priv);
4424 	vmw_bo_unreference(&dev_priv->pinned_bo);
4425 }
4426 
4427 /**
4428  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4429  *
4430  * @dev_priv: The device private structure.
4431  *
4432  * This function should be used to unpin the pinned query bo, or as a query
4433  * barrier when we need to make sure that all queries have finished before the
4434  * next fifo command. (For example on hardware context destructions where the
4435  * hardware may otherwise leak unfinished queries).
4436  *
4437  * This function does not return any failure codes, but makes attempts to do
4438  * safe unpinning in case of errors.
4439  *
4440  * The function will synchronize on the previous query barrier, and will thus
4441  * not finish until that barrier has executed.
4442  */
4443 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4444 {
4445 	mutex_lock(&dev_priv->cmdbuf_mutex);
4446 	if (dev_priv->query_cid_valid)
4447 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4448 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4449 }
4450 
4451 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4452 		      struct drm_file *file_priv)
4453 {
4454 	struct vmw_private *dev_priv = vmw_priv(dev);
4455 	struct drm_vmw_execbuf_arg *arg = data;
4456 	int ret;
4457 	struct dma_fence *in_fence = NULL;
4458 
4459 	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4460 	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4461 
4462 	/*
4463 	 * Extend the ioctl argument while maintaining backwards compatibility:
4464 	 * We take different code paths depending on the value of arg->version.
4465 	 *
4466 	 * Note: The ioctl argument is extended and zeropadded by core DRM.
4467 	 */
4468 	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4469 		     arg->version == 0)) {
4470 		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4471 		ret = -EINVAL;
4472 		goto mksstats_out;
4473 	}
4474 
4475 	switch (arg->version) {
4476 	case 1:
4477 		/* For v1, core DRM has extended and zero-padded the data */
4478 		arg->context_handle = (uint32_t) -1;
4479 		break;
4480 	case 2:
4481 	default:
4482 		/* For v2 and later, core DRM has copied it correctly */
4483 		break;
4484 	}
4485 
4486 	/* If a fence FD was imported from elsewhere, wait on it */
4487 	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4488 		in_fence = sync_file_get_fence(arg->imported_fence_fd);
4489 
4490 		if (!in_fence) {
4491 			VMW_DEBUG_USER("Cannot get imported fence\n");
4492 			ret = -EINVAL;
4493 			goto mksstats_out;
4494 		}
4495 
4496 		ret = dma_fence_wait(in_fence, true);
4497 		if (ret)
4498 			goto out;
4499 	}
4500 
4501 	ret = vmw_execbuf_process(file_priv, dev_priv,
4502 				  (void __user *)(unsigned long)arg->commands,
4503 				  NULL, arg->command_size, arg->throttle_us,
4504 				  arg->context_handle,
4505 				  (void __user *)(unsigned long)arg->fence_rep,
4506 				  NULL, arg->flags);
4507 
4508 	if (unlikely(ret != 0))
4509 		goto out;
4510 
4511 	vmw_kms_cursor_post_execbuf(dev_priv);
4512 
4513 out:
4514 	if (in_fence)
4515 		dma_fence_put(in_fence);
4516 
4517 mksstats_out:
4518 	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
4519 	return ret;
4520 }
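
/*
 * A minimal user-space submission, sketched (error handling elided; fd,
 * cmd_buf and cmd_bytes are assumed):
 *
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (uintptr_t)cmd_buf,
 *		.command_size = cmd_bytes,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.context_handle = SVGA3D_INVALID_ID,	// no DX context
 *		.fence_rep = 0,				// no fence info wanted
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
 */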
4521