1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 #include "vmwgfx_binding.h"
28 #include "vmwgfx_bo.h"
29 #include "vmwgfx_drv.h"
30 #include "vmwgfx_mksstat.h"
31 #include "vmwgfx_so.h"
32 
33 #include <drm/ttm/ttm_bo.h>
34 #include <drm/ttm/ttm_placement.h>
35 
36 #include <linux/sync_file.h>
37 #include <linux/hashtable.h>
38 
39 /*
40  * Helper macro to get dx_ctx_node if available, otherwise print an error
41  * message. This is for use in command verifier functions where, if dx_ctx_node
42  * is not set, the command is invalid.
43  */
44 #define VMW_GET_CTX_NODE(__sw_context)                                        \
45 ({                                                                            \
46 	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
47 		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
48 		__sw_context->dx_ctx_node;                                    \
49 	});                                                                   \
50 })
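
/*
 * A minimal usage sketch, mirroring the command verifiers later in this
 * file (e.g. vmw_view_bindings_add()):
 *
 *	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */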
51 
52 #define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
53 	struct {                                                              \
54 		SVGA3dCmdHeader header;                                       \
55 		__type body;                                                  \
56 	} __var
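
/*
 * For example, VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy) declares a
 * pointer 'cmd' to an anonymous struct holding an SVGA3dCmdHeader followed
 * by an SVGA3dCmdSurfaceCopy body. The verifiers below typically initialize
 * it with:
 *
 *	cmd = container_of(header, typeof(*cmd), header);
 */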
57 
58 /**
59  * struct vmw_relocation - Buffer object relocation
60  *
61  * @head: List head for the command submission context's relocation list
62  * @vbo: Non ref-counted pointer to buffer object
63  * @mob_loc: Pointer to location for mob id to be modified
64  * @location: Pointer to location for guest pointer to be modified
65  */
66 struct vmw_relocation {
67 	struct list_head head;
68 	struct vmw_bo *vbo;
69 	union {
70 		SVGAMobId *mob_loc;
71 		SVGAGuestPtr *location;
72 	};
73 };
74 
75 /**
76  * enum vmw_resource_relocation_type - Relocation type for resources
77  *
78  * @vmw_res_rel_normal: Traditional relocation. The resource id in the
79  * command stream is replaced with the actual id after validation.
80  * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
81  * with a NOP.
82  * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
83  * validation is -1, the command is replaced with a NOP. Otherwise no action.
84  * @vmw_res_rel_max: Last value in the enum - used for error checking
85  */
86 enum vmw_resource_relocation_type {
87 	vmw_res_rel_normal,
88 	vmw_res_rel_nop,
89 	vmw_res_rel_cond_nop,
90 	vmw_res_rel_max
91 };
92 
93 /**
94  * struct vmw_resource_relocation - Relocation info for resources
95  *
96  * @head: List head for the software context's relocation list.
97  * @res: Non-ref-counted pointer to the resource.
98  * @offset: Byte offset into the command buffer where the id that needs
99  * fixup is located.
100  * @rel_type: Type of relocation.
101  */
102 struct vmw_resource_relocation {
103 	struct list_head head;
104 	const struct vmw_resource *res;
105 	u32 offset:29;
106 	enum vmw_resource_relocation_type rel_type:3;
107 };
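
/*
 * The 29-bit @offset and 3-bit @rel_type widths are checked against
 * SVGA_CB_MAX_SIZE and vmw_res_rel_max by the BUILD_BUG_ON()s in
 * vmw_resource_relocations_apply() below.
 */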
108 
109 /**
110  * struct vmw_ctx_validation_info - Extra validation metadata for contexts
111  *
112  * @head: List head of context list
113  * @ctx: The context resource
114  * @cur: The context's persistent binding state
115  * @staged: The binding state changes of this command buffer
116  */
117 struct vmw_ctx_validation_info {
118 	struct list_head head;
119 	struct vmw_resource *ctx;
120 	struct vmw_ctx_binding_state *cur;
121 	struct vmw_ctx_binding_state *staged;
122 };
123 
124 /**
125  * struct vmw_cmd_entry - Describe a command for the verifier
126  *
127  * @func: Call-back to handle the command.
128  * @user_allow: Whether allowed from the execbuf ioctl.
129  * @gb_disable: Whether disabled if guest-backed objects are available.
130  * @gb_enable: Whether enabled iff guest-backed objects are available.
131  * @cmd_name: Name of the command.
132  */
133 struct vmw_cmd_entry {
134 	int (*func) (struct vmw_private *, struct vmw_sw_context *,
135 		     SVGA3dCmdHeader *);
136 	bool user_allow;
137 	bool gb_disable;
138 	bool gb_enable;
139 	const char *cmd_name;
140 };
141 
142 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
143 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
144 				       (_gb_disable), (_gb_enable), #_cmd}
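
/*
 * Sketch of a dispatch-table entry built with VMW_CMD_DEF(), assuming the
 * command description table defined further down in this file:
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 */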
145 
146 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
147 					struct vmw_sw_context *sw_context,
148 					struct vmw_resource *ctx);
149 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
150 				 struct vmw_sw_context *sw_context,
151 				 SVGAMobId *id,
152 				 struct vmw_bo **vmw_bo_p);
153 /**
154  * vmw_ptr_diff - Compute the offset from a to b in bytes
155  *
156  * @a: A starting pointer.
157  * @b: A pointer offset in the same address space.
158  *
159  * Returns: The offset in bytes between the two pointers.
160  */
161 static size_t vmw_ptr_diff(void *a, void *b)
162 {
163 	return (unsigned long) b - (unsigned long) a;
164 }
165 
166 /**
167  * vmw_execbuf_bindings_commit - Commit modified binding state
168  *
169  * @sw_context: The command submission context
170  * @backoff: Whether this is part of the error path and binding state changes
171  * should be ignored
172  */
173 static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
174 					bool backoff)
175 {
176 	struct vmw_ctx_validation_info *entry;
177 
178 	list_for_each_entry(entry, &sw_context->ctx_list, head) {
179 		if (!backoff)
180 			vmw_binding_state_commit(entry->cur, entry->staged);
181 
182 		if (entry->staged != sw_context->staged_bindings)
183 			vmw_binding_state_free(entry->staged);
184 		else
185 			sw_context->staged_bindings_inuse = false;
186 	}
187 
188 	/* List entries are freed with the validation context */
189 	INIT_LIST_HEAD(&sw_context->ctx_list);
190 }
191 
192 /**
193  * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
194  *
195  * @sw_context: The command submission context
196  */
197 static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
198 {
199 	if (sw_context->dx_query_mob)
200 		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
201 					  sw_context->dx_query_mob);
202 }
203 
204 /**
205  * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
206  * the validation list.
207  *
208  * @dev_priv: Pointer to the device private.
209  * @sw_context: The command submission context
210  * @res: Pointer to the resource
211  * @node: The validation node holding the context resource metadata
212  */
213 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
214 				   struct vmw_sw_context *sw_context,
215 				   struct vmw_resource *res,
216 				   struct vmw_ctx_validation_info *node)
217 {
218 	int ret;
219 
220 	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
221 	if (unlikely(ret != 0))
222 		goto out_err;
223 
224 	if (!sw_context->staged_bindings) {
225 		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
226 		if (IS_ERR(sw_context->staged_bindings)) {
227 			ret = PTR_ERR(sw_context->staged_bindings);
228 			sw_context->staged_bindings = NULL;
229 			goto out_err;
230 		}
231 	}
232 
233 	if (sw_context->staged_bindings_inuse) {
234 		node->staged = vmw_binding_state_alloc(dev_priv);
235 		if (IS_ERR(node->staged)) {
236 			ret = PTR_ERR(node->staged);
237 			node->staged = NULL;
238 			goto out_err;
239 		}
240 	} else {
241 		node->staged = sw_context->staged_bindings;
242 		sw_context->staged_bindings_inuse = true;
243 	}
244 
245 	node->ctx = res;
246 	node->cur = vmw_context_binding_state(res);
247 	list_add_tail(&node->head, &sw_context->ctx_list);
248 
249 	return 0;
250 
251 out_err:
252 	return ret;
253 }
254 
255 /**
256  * vmw_execbuf_res_size - Calculate extra size for the resource validation node
257  *
258  * @dev_priv: Pointer to the device private struct.
259  * @res_type: The resource type.
260  *
261  * Guest-backed contexts and DX contexts require extra size to store execbuf
262  * private information in the validation node, typically the binding manager's
263  * associated data structures.
264  *
265  * Returns: The extra size requirement based on resource type.
266  */
267 static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
268 					 enum vmw_res_type res_type)
269 {
270 	return (res_type == vmw_res_dx_context ||
271 		(res_type == vmw_res_context && dev_priv->has_mob)) ?
272 		sizeof(struct vmw_ctx_validation_info) : 0;
273 }
274 
275 /**
276  * vmw_execbuf_rcache_update - Update a resource-node cache entry
277  *
278  * @rcache: Pointer to the entry to update.
279  * @res: Pointer to the resource.
280  * @private: Pointer to the execbuf-private space in the resource validation
281  * node.
282  */
283 static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
284 				      struct vmw_resource *res,
285 				      void *private)
286 {
287 	rcache->res = res;
288 	rcache->private = private;
289 	rcache->valid = 1;
290 	rcache->valid_handle = 0;
291 }
292 
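/*
 * Flags for vmw_execbuf_res_val_add(). vmw_val_add_flag_noctx adds the
 * resource without reserving space for, or setting up, execbuf-private
 * context data in its validation node.
 */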
293 enum vmw_val_add_flags {
294 	vmw_val_add_flag_none  =      0,
295 	vmw_val_add_flag_noctx = 1 << 0,
296 };
297 
298 /**
299  * vmw_execbuf_res_val_add - Add a resource to the validation list.
300  *
301  * @sw_context: Pointer to the software context.
302  * @res: Unreferenced rcu-protected pointer to the resource.
303  * @dirty: Whether to change dirty status.
304  * @flags: Flags from enum vmw_val_add_flags controlling context setup.
305  *
306  * Returns: 0 on success. Negative error code on failure. Typical error codes
307  * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
308  */
309 static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
310 				   struct vmw_resource *res,
311 				   u32 dirty,
312 				   u32 flags)
313 {
314 	struct vmw_private *dev_priv = res->dev_priv;
315 	int ret;
316 	enum vmw_res_type res_type = vmw_res_type(res);
317 	struct vmw_res_cache_entry *rcache;
318 	struct vmw_ctx_validation_info *ctx_info;
319 	bool first_usage;
320 	unsigned int priv_size;
321 
322 	rcache = &sw_context->res_cache[res_type];
323 	if (likely(rcache->valid && rcache->res == res)) {
324 		if (dirty)
325 			vmw_validation_res_set_dirty(sw_context->ctx,
326 						     rcache->private, dirty);
327 		return 0;
328 	}
329 
330 	if ((flags & vmw_val_add_flag_noctx) != 0) {
331 		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
332 						  (void **)&ctx_info, NULL);
333 		if (ret)
334 			return ret;
335 
336 	} else {
337 		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
338 		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
339 						  dirty, (void **)&ctx_info,
340 						  &first_usage);
341 		if (ret)
342 			return ret;
343 
344 		if (priv_size && first_usage) {
345 			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
346 						      ctx_info);
347 			if (ret) {
348 				VMW_DEBUG_USER("Failed first usage context setup.\n");
349 				return ret;
350 			}
351 		}
352 	}
353 
354 	vmw_execbuf_rcache_update(rcache, res, ctx_info);
355 	return 0;
356 }
357 
358 /**
359  * vmw_view_res_val_add - Add a view, and the surface it points to, to the
360  * validation list
361  *
362  * @sw_context: The software context holding the validation list.
363  * @view: Pointer to the view resource.
364  *
365  * Returns 0 if success, negative error code otherwise.
366  */
367 static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
368 				struct vmw_resource *view)
369 {
370 	int ret;
371 
372 	/*
373 	 * First add the resource the view is pointing to, otherwise it may be
374 	 * swapped out when the view is validated.
375 	 */
376 	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
377 				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
378 	if (ret)
379 		return ret;
380 
381 	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
382 				       vmw_val_add_flag_noctx);
383 }
384 
385 /**
386  * vmw_view_id_val_add - Look up a view and add it, and the surface it points
387  * to, to the validation list.
388  *
389  * @sw_context: The software context holding the validation list.
390  * @view_type: The view type to look up.
391  * @id: view id of the view.
392  *
393  * The view is represented by a view id and the DX context it's created on, or
394  * scheduled for creation on. If there is no DX context set, the function will
395  * return an -EINVAL error pointer.
396  *
397  * Returns: Unreferenced pointer to the resource on success, negative error
398  * pointer on failure.
399  */
400 static struct vmw_resource *
401 vmw_view_id_val_add(struct vmw_sw_context *sw_context,
402 		    enum vmw_view_type view_type, u32 id)
403 {
404 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
405 	struct vmw_resource *view;
406 	int ret;
407 
408 	if (!ctx_node)
409 		return ERR_PTR(-EINVAL);
410 
411 	view = vmw_view_lookup(sw_context->man, view_type, id);
412 	if (IS_ERR(view))
413 		return view;
414 
415 	ret = vmw_view_res_val_add(sw_context, view);
416 	if (ret)
417 		return ERR_PTR(ret);
418 
419 	return view;
420 }
421 
422 /**
423  * vmw_resource_context_res_add - Put resources previously bound to a context on
424  * the validation list
425  *
426  * @dev_priv: Pointer to a device private structure
427  * @sw_context: Pointer to a software context used for this command submission
428  * @ctx: Pointer to the context resource
429  *
430  * This function puts all resources that were previously bound to @ctx on the
431  * resource validation list. This is part of the context state re-emission.
432  */
433 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
434 					struct vmw_sw_context *sw_context,
435 					struct vmw_resource *ctx)
436 {
437 	struct list_head *binding_list;
438 	struct vmw_ctx_bindinfo *entry;
439 	int ret = 0;
440 	struct vmw_resource *res;
441 	u32 i;
442 	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
443 		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
444 
445 	/* Add all cotables to the validation list. */
446 	if (has_sm4_context(dev_priv) &&
447 	    vmw_res_type(ctx) == vmw_res_dx_context) {
448 		for (i = 0; i < cotable_max; ++i) {
449 			res = vmw_context_cotable(ctx, i);
450 			if (IS_ERR(res))
451 				continue;
452 
453 			ret = vmw_execbuf_res_val_add(sw_context, res,
454 						      VMW_RES_DIRTY_SET,
455 						      vmw_val_add_flag_noctx);
456 			if (unlikely(ret != 0))
457 				return ret;
458 		}
459 	}
460 
461 	/* Add all resources bound to the context to the validation list */
462 	mutex_lock(&dev_priv->binding_mutex);
463 	binding_list = vmw_context_binding_list(ctx);
464 
465 	list_for_each_entry(entry, binding_list, ctx_list) {
466 		if (vmw_res_type(entry->res) == vmw_res_view)
467 			ret = vmw_view_res_val_add(sw_context, entry->res);
468 		else
469 			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
470 						      vmw_binding_dirtying(entry->bt),
471 						      vmw_val_add_flag_noctx);
472 		if (unlikely(ret != 0))
473 			break;
474 	}
475 
476 	if (has_sm4_context(dev_priv) &&
477 	    vmw_res_type(ctx) == vmw_res_dx_context) {
478 		struct vmw_bo *dx_query_mob;
479 
480 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
481 		if (dx_query_mob) {
482 			vmw_bo_placement_set(dx_query_mob,
483 					     VMW_BO_DOMAIN_MOB,
484 					     VMW_BO_DOMAIN_MOB);
485 			ret = vmw_validation_add_bo(sw_context->ctx,
486 						    dx_query_mob);
487 		}
488 	}
489 
490 	mutex_unlock(&dev_priv->binding_mutex);
491 	return ret;
492 }
493 
494 /**
495  * vmw_resource_relocation_add - Add a relocation to the relocation list
496  *
497  * @sw_context: Pointer to the software context.
498  * @res: The resource.
499  * @offset: Offset into the command buffer currently being parsed where the id
500  * that needs fixup is located. Granularity is one byte.
501  * @rel_type: Relocation type.
502  */
503 static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
504 				       const struct vmw_resource *res,
505 				       unsigned long offset,
506 				       enum vmw_resource_relocation_type
507 				       rel_type)
508 {
509 	struct vmw_resource_relocation *rel;
510 
511 	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
512 	if (unlikely(!rel)) {
513 		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
514 		return -ENOMEM;
515 	}
516 
517 	rel->res = res;
518 	rel->offset = offset;
519 	rel->rel_type = rel_type;
520 	list_add_tail(&rel->head, &sw_context->res_relocations);
521 
522 	return 0;
523 }
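
/*
 * Callers record the id location relative to the start of the command
 * buffer, as vmw_cmd_res_check() below does:
 *
 *	ret = vmw_resource_relocation_add(sw_context, res,
 *					  vmw_ptr_diff(sw_context->buf_start,
 *						       id_loc),
 *					  vmw_res_rel_normal);
 */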
524 
525 /**
526  * vmw_resource_relocations_free - Free all relocations on a list
527  *
528  * @list: Pointer to the head of the relocation list
529  */
530 static void vmw_resource_relocations_free(struct list_head *list)
531 {
532 	/* Memory is validation context memory, so no need to free it */
533 	INIT_LIST_HEAD(list);
534 }
535 
536 /**
537  * vmw_resource_relocations_apply - Apply all relocations on a list
538  *
539  * @cb: Pointer to the start of the command buffer being patched. This need not be
540  * the same buffer as the one being parsed when the relocation list was built,
541  * but the contents must be the same modulo the resource ids.
542  * @list: Pointer to the head of the relocation list.
543  */
544 static void vmw_resource_relocations_apply(uint32_t *cb,
545 					   struct list_head *list)
546 {
547 	struct vmw_resource_relocation *rel;
548 
549 	/* Validate the struct vmw_resource_relocation member size */
550 	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
551 	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
552 
553 	list_for_each_entry(rel, list, head) {
554 		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
555 		switch (rel->rel_type) {
556 		case vmw_res_rel_normal:
557 			*addr = rel->res->id;
558 			break;
559 		case vmw_res_rel_nop:
560 			*addr = SVGA_3D_CMD_NOP;
561 			break;
562 		default:
563 			if (rel->res->id == -1)
564 				*addr = SVGA_3D_CMD_NOP;
565 			break;
566 		}
567 	}
568 }
569 
570 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
571 			   struct vmw_sw_context *sw_context,
572 			   SVGA3dCmdHeader *header)
573 {
574 	return -EINVAL;
575 }
576 
577 static int vmw_cmd_ok(struct vmw_private *dev_priv,
578 		      struct vmw_sw_context *sw_context,
579 		      SVGA3dCmdHeader *header)
580 {
581 	return 0;
582 }
583 
584 /**
585  * vmw_resources_reserve - Reserve all resources on the sw_context's resource
586  * list.
587  *
588  * @sw_context: Pointer to the software context.
589  *
590  * Note that since VMware's command submission currently is protected by the
591  * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
592  * only a single thread at a time will attempt this.
593  */
594 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
595 {
596 	int ret;
597 
598 	ret = vmw_validation_res_reserve(sw_context->ctx, true);
599 	if (ret)
600 		return ret;
601 
602 	if (sw_context->dx_query_mob) {
603 		struct vmw_bo *expected_dx_query_mob;
604 
605 		expected_dx_query_mob =
606 			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
607 		if (expected_dx_query_mob &&
608 		    expected_dx_query_mob != sw_context->dx_query_mob) {
609 			ret = -EINVAL;
610 		}
611 	}
612 
613 	return ret;
614 }
615 
616 /**
617  * vmw_cmd_res_check - Check that a resource is present and, if so, put it on
618  * the resource validation list unless it's already there.
619  *
620  * @dev_priv: Pointer to a device private structure.
621  * @sw_context: Pointer to the software context.
622  * @res_type: Resource type.
623  * @dirty: Whether to change dirty status.
624  * @converter: User-space visible type specific information.
625  * @id_loc: Pointer to the location in the command buffer currently being parsed
626  * where the user-space resource id handle is located.
627  * @p_res: Pointer to pointer to the resource validation node. Populated on
628  * exit.
629  */
630 static int
631 vmw_cmd_res_check(struct vmw_private *dev_priv,
632 		  struct vmw_sw_context *sw_context,
633 		  enum vmw_res_type res_type,
634 		  u32 dirty,
635 		  const struct vmw_user_resource_conv *converter,
636 		  uint32_t *id_loc,
637 		  struct vmw_resource **p_res)
638 {
639 	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
640 	struct vmw_resource *res;
641 	int ret = 0;
642 	bool needs_unref = false;
643 
644 	if (p_res)
645 		*p_res = NULL;
646 
647 	if (*id_loc == SVGA3D_INVALID_ID) {
648 		if (res_type == vmw_res_context) {
649 			VMW_DEBUG_USER("Illegal context invalid id.\n");
650 			return -EINVAL;
651 		}
652 		return 0;
653 	}
654 
655 	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
656 		res = rcache->res;
657 		if (dirty)
658 			vmw_validation_res_set_dirty(sw_context->ctx,
659 						     rcache->private, dirty);
660 	} else {
661 		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
662 
663 		ret = vmw_validation_preload_res(sw_context->ctx, size);
664 		if (ret)
665 			return ret;
666 
667 		ret = vmw_user_resource_lookup_handle
668 			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
669 		if (ret != 0) {
670 			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
671 				       (unsigned int) *id_loc);
672 			return ret;
673 		}
674 		needs_unref = true;
675 
676 		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
677 		if (unlikely(ret != 0))
678 			goto res_check_done;
679 
680 		if (rcache->valid && rcache->res == res) {
681 			rcache->valid_handle = true;
682 			rcache->handle = *id_loc;
683 		}
684 	}
685 
686 	ret = vmw_resource_relocation_add(sw_context, res,
687 					  vmw_ptr_diff(sw_context->buf_start,
688 						       id_loc),
689 					  vmw_res_rel_normal);
690 	if (p_res)
691 		*p_res = res;
692 
693 res_check_done:
694 	if (needs_unref)
695 		vmw_resource_unreference(&res);
696 
697 	return ret;
698 }
699 
700 /**
701  * vmw_rebind_all_dx_query - Rebind DX query associated with the context
702  *
703  * @ctx_res: context the query belongs to
704  *
705  * This function assumes binding_mutex is held.
706  */
707 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
708 {
709 	struct vmw_private *dev_priv = ctx_res->dev_priv;
710 	struct vmw_bo *dx_query_mob;
711 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
712 
713 	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
714 
715 	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
716 		return 0;
717 
718 	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
719 	if (cmd == NULL)
720 		return -ENOMEM;
721 
722 	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
723 	cmd->header.size = sizeof(cmd->body);
724 	cmd->body.cid = ctx_res->id;
725 	cmd->body.mobid = dx_query_mob->tbo.resource->start;
726 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
727 
728 	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
729 
730 	return 0;
731 }
732 
733 /**
734  * vmw_rebind_contexts - Rebind all resources previously bound to referenced
735  * contexts.
736  *
737  * @sw_context: Pointer to the software context.
738  *
739  * Rebind context binding points that have been scrubbed because of eviction.
740  */
741 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
742 {
743 	struct vmw_ctx_validation_info *val;
744 	int ret;
745 
746 	list_for_each_entry(val, &sw_context->ctx_list, head) {
747 		ret = vmw_binding_rebind_all(val->cur);
748 		if (unlikely(ret != 0)) {
749 			if (ret != -ERESTARTSYS)
750 				VMW_DEBUG_USER("Failed to rebind context.\n");
751 			return ret;
752 		}
753 
754 		ret = vmw_rebind_all_dx_query(val->ctx);
755 		if (ret != 0) {
756 			VMW_DEBUG_USER("Failed to rebind queries.\n");
757 			return ret;
758 		}
759 	}
760 
761 	return 0;
762 }
763 
764 /**
765  * vmw_view_bindings_add - Add an array of view bindings to a context binding
766  * state tracker.
767  *
768  * @sw_context: The execbuf state used for this command.
769  * @view_type: View type for the bindings.
770  * @binding_type: Binding type for the bindings.
771  * @shader_slot: The shader slot to use for the bindings.
772  * @view_ids: Array of view ids to be bound.
773  * @num_views: Number of view ids in @view_ids.
774  * @first_slot: The binding slot to be used for the first view id in @view_ids.
775  */
776 static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
777 				 enum vmw_view_type view_type,
778 				 enum vmw_ctx_binding_type binding_type,
779 				 uint32 shader_slot,
780 				 uint32 view_ids[], u32 num_views,
781 				 u32 first_slot)
782 {
783 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
784 	u32 i;
785 
786 	if (!ctx_node)
787 		return -EINVAL;
788 
789 	for (i = 0; i < num_views; ++i) {
790 		struct vmw_ctx_bindinfo_view binding;
791 		struct vmw_resource *view = NULL;
792 
793 		if (view_ids[i] != SVGA3D_INVALID_ID) {
794 			view = vmw_view_id_val_add(sw_context, view_type,
795 						   view_ids[i]);
796 			if (IS_ERR(view)) {
797 				VMW_DEBUG_USER("View not found.\n");
798 				return PTR_ERR(view);
799 			}
800 		}
801 		binding.bi.ctx = ctx_node->ctx;
802 		binding.bi.res = view;
803 		binding.bi.bt = binding_type;
804 		binding.shader_slot = shader_slot;
805 		binding.slot = first_slot + i;
806 		vmw_binding_add(ctx_node->staged, &binding.bi,
807 				shader_slot, binding.slot);
808 	}
809 
810 	return 0;
811 }
812 
813 /**
814  * vmw_cmd_cid_check - Check a command header for valid context information.
815  *
816  * @dev_priv: Pointer to a device private structure.
817  * @sw_context: Pointer to the software context.
818  * @header: A command header with an embedded user-space context handle.
819  *
820  * Convenience function: Call vmw_cmd_res_check with the user-space context
821  * handle embedded in @header.
822  */
823 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
824 			     struct vmw_sw_context *sw_context,
825 			     SVGA3dCmdHeader *header)
826 {
827 	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
828 		container_of(header, typeof(*cmd), header);
829 
830 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
831 				 VMW_RES_DIRTY_SET, user_context_converter,
832 				 &cmd->body, NULL);
833 }
834 
835 /**
836  * vmw_execbuf_info_from_res - Get the private validation metadata for a
837  * recently validated resource
838  *
839  * @sw_context: Pointer to the command submission context
840  * @res: The resource
841  *
842  * The resource pointed to by @res needs to be present in the command submission
843  * context's resource cache and hence be the last resource of that type to be
844  * processed by the validation code.
845  *
846  * Return: a pointer to the private metadata of the resource, or NULL if it
847  * wasn't found
848  */
849 static struct vmw_ctx_validation_info *
850 vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
851 			  struct vmw_resource *res)
852 {
853 	struct vmw_res_cache_entry *rcache =
854 		&sw_context->res_cache[vmw_res_type(res)];
855 
856 	if (rcache->valid && rcache->res == res)
857 		return rcache->private;
858 
859 	WARN_ON_ONCE(true);
860 	return NULL;
861 }
862 
863 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
864 					   struct vmw_sw_context *sw_context,
865 					   SVGA3dCmdHeader *header)
866 {
867 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
868 	struct vmw_resource *ctx;
869 	struct vmw_resource *res;
870 	int ret;
871 
872 	cmd = container_of(header, typeof(*cmd), header);
873 
874 	if (cmd->body.type >= SVGA3D_RT_MAX) {
875 		VMW_DEBUG_USER("Illegal render target type %u.\n",
876 			       (unsigned int) cmd->body.type);
877 		return -EINVAL;
878 	}
879 
880 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
881 				VMW_RES_DIRTY_SET, user_context_converter,
882 				&cmd->body.cid, &ctx);
883 	if (unlikely(ret != 0))
884 		return ret;
885 
886 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
887 				VMW_RES_DIRTY_SET, user_surface_converter,
888 				&cmd->body.target.sid, &res);
889 	if (unlikely(ret))
890 		return ret;
891 
892 	if (dev_priv->has_mob) {
893 		struct vmw_ctx_bindinfo_view binding;
894 		struct vmw_ctx_validation_info *node;
895 
896 		node = vmw_execbuf_info_from_res(sw_context, ctx);
897 		if (!node)
898 			return -EINVAL;
899 
900 		binding.bi.ctx = ctx;
901 		binding.bi.res = res;
902 		binding.bi.bt = vmw_ctx_binding_rt;
903 		binding.slot = cmd->body.type;
904 		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
905 	}
906 
907 	return 0;
908 }
909 
910 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
911 				      struct vmw_sw_context *sw_context,
912 				      SVGA3dCmdHeader *header)
913 {
914 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
915 	int ret;
916 
917 	cmd = container_of(header, typeof(*cmd), header);
918 
919 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
920 				VMW_RES_DIRTY_NONE, user_surface_converter,
921 				&cmd->body.src.sid, NULL);
922 	if (ret)
923 		return ret;
924 
925 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
926 				 VMW_RES_DIRTY_SET, user_surface_converter,
927 				 &cmd->body.dest.sid, NULL);
928 }
929 
930 static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
931 				     struct vmw_sw_context *sw_context,
932 				     SVGA3dCmdHeader *header)
933 {
934 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
935 	int ret;
936 
937 	cmd = container_of(header, typeof(*cmd), header);
938 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
939 				VMW_RES_DIRTY_NONE, user_surface_converter,
940 				&cmd->body.src, NULL);
941 	if (ret != 0)
942 		return ret;
943 
944 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
945 				 VMW_RES_DIRTY_SET, user_surface_converter,
946 				 &cmd->body.dest, NULL);
947 }
948 
949 static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
950 				   struct vmw_sw_context *sw_context,
951 				   SVGA3dCmdHeader *header)
952 {
953 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
954 	int ret;
955 
956 	cmd = container_of(header, typeof(*cmd), header);
957 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
958 				VMW_RES_DIRTY_NONE, user_surface_converter,
959 				&cmd->body.srcSid, NULL);
960 	if (ret != 0)
961 		return ret;
962 
963 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
964 				 VMW_RES_DIRTY_SET, user_surface_converter,
965 				 &cmd->body.dstSid, NULL);
966 }
967 
968 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
969 				     struct vmw_sw_context *sw_context,
970 				     SVGA3dCmdHeader *header)
971 {
972 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
973 	int ret;
974 
975 	cmd = container_of(header, typeof(*cmd), header);
976 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
977 				VMW_RES_DIRTY_NONE, user_surface_converter,
978 				&cmd->body.src.sid, NULL);
979 	if (unlikely(ret != 0))
980 		return ret;
981 
982 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
983 				 VMW_RES_DIRTY_SET, user_surface_converter,
984 				 &cmd->body.dest.sid, NULL);
985 }
986 
987 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
988 					 struct vmw_sw_context *sw_context,
989 					 SVGA3dCmdHeader *header)
990 {
991 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
992 		container_of(header, typeof(*cmd), header);
993 
994 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
995 				 VMW_RES_DIRTY_NONE, user_surface_converter,
996 				 &cmd->body.srcImage.sid, NULL);
997 }
998 
999 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1000 				 struct vmw_sw_context *sw_context,
1001 				 SVGA3dCmdHeader *header)
1002 {
1003 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1004 		container_of(header, typeof(*cmd), header);
1005 
1006 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1007 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1008 				 &cmd->body.sid, NULL);
1009 }
1010 
1011 /**
1012  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1013  *
1014  * @dev_priv: The device private structure.
1015  * @new_query_bo: The new buffer holding query results.
1016  * @sw_context: The software context used for this command submission.
1017  *
1018  * This function checks whether @new_query_bo is suitable for holding query
1019  * results, and if another buffer currently is pinned for query results. If so,
1020  * the function prepares the state of @sw_context for switching pinned buffers
1021  * after successful submission of the current command batch.
1022  */
1023 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1024 				       struct vmw_bo *new_query_bo,
1025 				       struct vmw_sw_context *sw_context)
1026 {
1027 	struct vmw_res_cache_entry *ctx_entry =
1028 		&sw_context->res_cache[vmw_res_context];
1029 	int ret;
1030 
1031 	BUG_ON(!ctx_entry->valid);
1032 	sw_context->last_query_ctx = ctx_entry->res;
1033 
1034 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1035 
1036 		if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
1037 			VMW_DEBUG_USER("Query buffer too large.\n");
1038 			return -EINVAL;
1039 		}
1040 
1041 		if (unlikely(sw_context->cur_query_bo != NULL)) {
1042 			sw_context->needs_post_query_barrier = true;
1043 			vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
1044 			ret = vmw_validation_add_bo(sw_context->ctx,
1045 						    sw_context->cur_query_bo);
1046 			if (unlikely(ret != 0))
1047 				return ret;
1048 		}
1049 		sw_context->cur_query_bo = new_query_bo;
1050 
1051 		vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
1052 		ret = vmw_validation_add_bo(sw_context->ctx,
1053 					    dev_priv->dummy_query_bo);
1054 		if (unlikely(ret != 0))
1055 			return ret;
1056 	}
1057 
1058 	return 0;
1059 }
1060 
1061 /**
1062  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1063  *
1064  * @dev_priv: The device private structure.
1065  * @sw_context: The software context used for this command submission batch.
1066  *
1067  * This function will check if we're switching query buffers, and will then
1068  * issue a dummy occlusion query wait used as a query barrier. When the fence
1069  * object following that query wait has signaled, we are sure that all preceding
1070  * queries have finished, and the old query buffer can be unpinned. However,
1071  * since both the new query buffer and the old one are fenced with that fence,
1072  * we can do an asynchronous unpin now, and be sure that the old query buffer
1073  * won't be moved until the fence has signaled.
1074  *
1075  * As mentioned above, both the new and the old query buffers need to be fenced
1076  * using a sequence emitted *after* calling this function.
1077  */
1078 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1079 				     struct vmw_sw_context *sw_context)
1080 {
1081 	/*
1082 	 * The validate list should still hold references to all
1083 	 * contexts here.
1084 	 */
1085 	if (sw_context->needs_post_query_barrier) {
1086 		struct vmw_res_cache_entry *ctx_entry =
1087 			&sw_context->res_cache[vmw_res_context];
1088 		struct vmw_resource *ctx;
1089 		int ret;
1090 
1091 		BUG_ON(!ctx_entry->valid);
1092 		ctx = ctx_entry->res;
1093 
1094 		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
1095 
1096 		if (unlikely(ret != 0))
1097 			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1098 	}
1099 
1100 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1101 		if (dev_priv->pinned_bo) {
1102 			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1103 			vmw_bo_unreference(&dev_priv->pinned_bo);
1104 		}
1105 
1106 		if (!sw_context->needs_post_query_barrier) {
1107 			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1108 
1109 			/*
1110 			 * We pin also the dummy_query_bo buffer so that we
1111 			 * don't need to validate it when emitting dummy queries
1112 			 * in context destroy paths.
1113 			 */
1114 			if (!dev_priv->dummy_query_bo_pinned) {
1115 				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1116 						    true);
1117 				dev_priv->dummy_query_bo_pinned = true;
1118 			}
1119 
1120 			BUG_ON(sw_context->last_query_ctx == NULL);
1121 			dev_priv->query_cid = sw_context->last_query_ctx->id;
1122 			dev_priv->query_cid_valid = true;
1123 			dev_priv->pinned_bo =
1124 				vmw_bo_reference(sw_context->cur_query_bo);
1125 		}
1126 	}
1127 }
1128 
1129 /**
1130  * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
1131  * to a MOB id.
1132  *
1133  * @dev_priv: Pointer to a device private structure.
1134  * @sw_context: The software context used for this command batch validation.
1135  * @id: Pointer to the user-space handle to be translated.
1136  * @vmw_bo_p: Points to a location that, on successful return will carry a
1137  * non-reference-counted pointer to the buffer object identified by the
1138  * user-space handle in @id.
1139  *
1140  * This function saves information needed to translate a user-space buffer
1141  * handle to a MOB id. The translation does not take place immediately, but
1142  * during a call to vmw_apply_relocations().
1143  *
1144  * This function builds a relocation list and a list of buffers to validate. The
1145  * former needs to be freed using either vmw_apply_relocations() or
1146  * vmw_free_relocations(). The latter needs to be freed using
1147  * vmw_clear_validations.
1148  */
1149 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1150 				 struct vmw_sw_context *sw_context,
1151 				 SVGAMobId *id,
1152 				 struct vmw_bo **vmw_bo_p)
1153 {
1154 	struct vmw_bo *vmw_bo;
1155 	uint32_t handle = *id;
1156 	struct vmw_relocation *reloc;
1157 	int ret;
1158 
1159 	vmw_validation_preload_bo(sw_context->ctx);
1160 	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
1161 	if (ret != 0) {
1162 		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
1163 		return ret;
1164 	}
1165 	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
1166 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
1167 	vmw_user_bo_unref(vmw_bo);
1168 	if (unlikely(ret != 0))
1169 		return ret;
1170 
1171 	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1172 	if (!reloc)
1173 		return -ENOMEM;
1174 
1175 	reloc->mob_loc = id;
1176 	reloc->vbo = vmw_bo;
1177 
1178 	*vmw_bo_p = vmw_bo;
1179 	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1180 
1181 	return 0;
1182 }
1183 
1184 /**
1185  * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
1186  * to a valid SVGAGuestPtr
1187  *
1188  * @dev_priv: Pointer to a device private structure.
1189  * @sw_context: The software context used for this command batch validation.
1190  * @ptr: Pointer to the user-space handle to be translated.
1191  * @vmw_bo_p: Points to a location that, on successful return will carry a
1192  * non-reference-counted pointer to the DMA buffer identified by the user-space
1193  * handle in @ptr.
1194  *
1195  * This function saves information needed to translate a user-space buffer
1196  * handle to a valid SVGAGuestPtr. The translation does not take place
1197  * immediately, but during a call to vmw_apply_relocations().
1198  *
1199  * This function builds a relocation list and a list of buffers to validate.
1200  * The former needs to be freed using either vmw_apply_relocations() or
1201  * vmw_free_relocations(). The latter needs to be freed using
1202  * vmw_clear_validations.
1203  */
1204 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1205 				   struct vmw_sw_context *sw_context,
1206 				   SVGAGuestPtr *ptr,
1207 				   struct vmw_bo **vmw_bo_p)
1208 {
1209 	struct vmw_bo *vmw_bo;
1210 	uint32_t handle = ptr->gmrId;
1211 	struct vmw_relocation *reloc;
1212 	int ret;
1213 
1214 	vmw_validation_preload_bo(sw_context->ctx);
1215 	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
1216 	if (ret != 0) {
1217 		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
1218 		return PTR_ERR(vmw_bo);
1219 	}
1220 	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
1221 			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
1222 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
1223 	vmw_user_bo_unref(vmw_bo);
1224 	if (unlikely(ret != 0))
1225 		return ret;
1226 
1227 	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1228 	if (!reloc)
1229 		return -ENOMEM;
1230 
1231 	reloc->location = ptr;
1232 	reloc->vbo = vmw_bo;
1233 	*vmw_bo_p = vmw_bo;
1234 	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1235 
1236 	return 0;
1237 }
1238 
1239 /**
1240  * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1241  *
1242  * @dev_priv: Pointer to a device private struct.
1243  * @sw_context: The software context used for this command submission.
1244  * @header: Pointer to the command header in the command stream.
1245  *
1246  * This function adds the new query into the query COTABLE
1247  */
1248 static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1249 				   struct vmw_sw_context *sw_context,
1250 				   SVGA3dCmdHeader *header)
1251 {
1252 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1253 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1254 	struct vmw_resource *cotable_res;
1255 	int ret;
1256 
1257 	if (!ctx_node)
1258 		return -EINVAL;
1259 
1260 	cmd = container_of(header, typeof(*cmd), header);
1261 
1262 	if (cmd->body.type <  SVGA3D_QUERYTYPE_MIN ||
1263 	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1264 		return -EINVAL;
1265 
1266 	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1267 	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1268 
1269 	return ret;
1270 }
1271 
1272 /**
1273  * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1274  *
1275  * @dev_priv: Pointer to a device private struct.
1276  * @sw_context: The software context used for this command submission.
1277  * @header: Pointer to the command header in the command stream.
1278  *
1279  * The query bind operation will eventually associate the query ID with its
1280  * backing MOB.  In this function, we take the user mode MOB ID and use
1281  * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1282  */
1283 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1284 				 struct vmw_sw_context *sw_context,
1285 				 SVGA3dCmdHeader *header)
1286 {
1287 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1288 	struct vmw_bo *vmw_bo;
1289 	int ret;
1290 
1291 	cmd = container_of(header, typeof(*cmd), header);
1292 
1293 	/*
1294 	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1295 	 * list so its kernel mode MOB ID can be filled in later
1296 	 */
1297 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1298 				    &vmw_bo);
1299 
1300 	if (ret != 0)
1301 		return ret;
1302 
1303 	sw_context->dx_query_mob = vmw_bo;
1304 	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1305 	return 0;
1306 }
1307 
1308 /**
1309  * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1310  *
1311  * @dev_priv: Pointer to a device private struct.
1312  * @sw_context: The software context used for this command submission.
1313  * @header: Pointer to the command header in the command stream.
1314  */
1315 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1316 				  struct vmw_sw_context *sw_context,
1317 				  SVGA3dCmdHeader *header)
1318 {
1319 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1320 		container_of(header, typeof(*cmd), header);
1321 
1322 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1323 				 VMW_RES_DIRTY_SET, user_context_converter,
1324 				 &cmd->body.cid, NULL);
1325 }
1326 
1327 /**
1328  * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1329  *
1330  * @dev_priv: Pointer to a device private struct.
1331  * @sw_context: The software context used for this command submission.
1332  * @header: Pointer to the command header in the command stream.
1333  */
1334 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1335 			       struct vmw_sw_context *sw_context,
1336 			       SVGA3dCmdHeader *header)
1337 {
1338 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1339 		container_of(header, typeof(*cmd), header);
1340 
1341 	if (unlikely(dev_priv->has_mob)) {
1342 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1343 
1344 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1345 
1346 		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1347 		gb_cmd.header.size = cmd->header.size;
1348 		gb_cmd.body.cid = cmd->body.cid;
1349 		gb_cmd.body.type = cmd->body.type;
1350 
1351 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1352 		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1353 	}
1354 
1355 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1356 				 VMW_RES_DIRTY_SET, user_context_converter,
1357 				 &cmd->body.cid, NULL);
1358 }
1359 
1360 /**
1361  * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1362  *
1363  * @dev_priv: Pointer to a device private struct.
1364  * @sw_context: The software context used for this command submission.
1365  * @header: Pointer to the command header in the command stream.
1366  */
1367 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1368 				struct vmw_sw_context *sw_context,
1369 				SVGA3dCmdHeader *header)
1370 {
1371 	struct vmw_bo *vmw_bo;
1372 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1373 	int ret;
1374 
1375 	cmd = container_of(header, typeof(*cmd), header);
1376 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1377 	if (unlikely(ret != 0))
1378 		return ret;
1379 
1380 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1381 				    &vmw_bo);
1382 	if (unlikely(ret != 0))
1383 		return ret;
1384 
1385 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1386 
1387 	return ret;
1388 }
1389 
1390 /**
1391  * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1392  *
1393  * @dev_priv: Pointer to a device private struct.
1394  * @sw_context: The software context used for this command submission.
1395  * @header: Pointer to the command header in the command stream.
1396  */
1397 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1398 			     struct vmw_sw_context *sw_context,
1399 			     SVGA3dCmdHeader *header)
1400 {
1401 	struct vmw_bo *vmw_bo;
1402 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1403 	int ret;
1404 
1405 	cmd = container_of(header, typeof(*cmd), header);
1406 	if (dev_priv->has_mob) {
1407 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1408 
1409 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1410 
1411 		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1412 		gb_cmd.header.size = cmd->header.size;
1413 		gb_cmd.body.cid = cmd->body.cid;
1414 		gb_cmd.body.type = cmd->body.type;
1415 		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1416 		gb_cmd.body.offset = cmd->body.guestResult.offset;
1417 
1418 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1419 		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1420 	}
1421 
1422 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1423 	if (unlikely(ret != 0))
1424 		return ret;
1425 
1426 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1427 				      &cmd->body.guestResult, &vmw_bo);
1428 	if (unlikely(ret != 0))
1429 		return ret;
1430 
1431 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1432 
1433 	return ret;
1434 }
1435 
1436 /**
1437  * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1438  *
1439  * @dev_priv: Pointer to a device private struct.
1440  * @sw_context: The software context used for this command submission.
1441  * @header: Pointer to the command header in the command stream.
1442  */
1443 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1444 				 struct vmw_sw_context *sw_context,
1445 				 SVGA3dCmdHeader *header)
1446 {
1447 	struct vmw_bo *vmw_bo;
1448 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1449 	int ret;
1450 
1451 	cmd = container_of(header, typeof(*cmd), header);
1452 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1453 	if (unlikely(ret != 0))
1454 		return ret;
1455 
1456 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1457 				    &vmw_bo);
1458 	if (unlikely(ret != 0))
1459 		return ret;
1460 
1461 	return 0;
1462 }
1463 
1464 /**
1465  * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1466  *
1467  * @dev_priv: Pointer to a device private struct.
1468  * @sw_context: The software context used for this command submission.
1469  * @header: Pointer to the command header in the command stream.
1470  */
1471 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1472 			      struct vmw_sw_context *sw_context,
1473 			      SVGA3dCmdHeader *header)
1474 {
1475 	struct vmw_bo *vmw_bo;
1476 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1477 	int ret;
1478 
1479 	cmd = container_of(header, typeof(*cmd), header);
1480 	if (dev_priv->has_mob) {
1481 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1482 
1483 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1484 
1485 		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1486 		gb_cmd.header.size = cmd->header.size;
1487 		gb_cmd.body.cid = cmd->body.cid;
1488 		gb_cmd.body.type = cmd->body.type;
1489 		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1490 		gb_cmd.body.offset = cmd->body.guestResult.offset;
1491 
1492 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1493 		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1494 	}
1495 
1496 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1497 	if (unlikely(ret != 0))
1498 		return ret;
1499 
1500 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1501 				      &cmd->body.guestResult, &vmw_bo);
1502 	if (unlikely(ret != 0))
1503 		return ret;
1504 
1505 	return 0;
1506 }
1507 
1508 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1509 		       struct vmw_sw_context *sw_context,
1510 		       SVGA3dCmdHeader *header)
1511 {
1512 	struct vmw_bo *vmw_bo = NULL;
1513 	struct vmw_surface *srf = NULL;
1514 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1515 	int ret;
1516 	SVGA3dCmdSurfaceDMASuffix *suffix;
1517 	uint32_t bo_size;
1518 	u32 dirty;
1519 
1520 	cmd = container_of(header, typeof(*cmd), header);
1521 	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1522 					       header->size - sizeof(*suffix));
1523 
1524 	/* Make sure the device and verifier stay in sync. */
1525 	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1526 		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1527 		return -EINVAL;
1528 	}
1529 
1530 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1531 				      &cmd->body.guest.ptr, &vmw_bo);
1532 	if (unlikely(ret != 0))
1533 		return ret;
1534 
1535 	/* Make sure DMA doesn't cross BO boundaries. */
1536 	bo_size = vmw_bo->tbo.base.size;
1537 	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1538 		VMW_DEBUG_USER("Invalid DMA offset.\n");
1539 		return -EINVAL;
1540 	}
1541 
1542 	bo_size -= cmd->body.guest.ptr.offset;
1543 	if (unlikely(suffix->maximumOffset > bo_size))
1544 		suffix->maximumOffset = bo_size;
1545 
1546 	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1547 		VMW_RES_DIRTY_SET : 0;
1548 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1549 				dirty, user_surface_converter,
1550 				&cmd->body.host.sid, NULL);
1551 	if (unlikely(ret != 0)) {
1552 		if (unlikely(ret != -ERESTARTSYS))
1553 			VMW_DEBUG_USER("could not find surface for DMA.\n");
1554 		return ret;
1555 	}
1556 
1557 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1558 
1559 	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
1560 
1561 	return 0;
1562 }
1563 
1564 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1565 			struct vmw_sw_context *sw_context,
1566 			SVGA3dCmdHeader *header)
1567 {
1568 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1569 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1570 		(unsigned long)header + sizeof(*cmd));
1571 	SVGA3dPrimitiveRange *range;
1572 	uint32_t i;
1573 	uint32_t maxnum;
1574 	int ret;
1575 
1576 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1577 	if (unlikely(ret != 0))
1578 		return ret;
1579 
1580 	cmd = container_of(header, typeof(*cmd), header);
1581 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1582 
1583 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1584 		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1585 		return -EINVAL;
1586 	}
1587 
1588 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1589 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1590 					VMW_RES_DIRTY_NONE,
1591 					user_surface_converter,
1592 					&decl->array.surfaceId, NULL);
1593 		if (unlikely(ret != 0))
1594 			return ret;
1595 	}
1596 
1597 	maxnum = (header->size - sizeof(cmd->body) -
1598 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1599 	if (unlikely(cmd->body.numRanges > maxnum)) {
1600 		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1601 		return -EINVAL;
1602 	}
1603 
1604 	range = (SVGA3dPrimitiveRange *) decl;
1605 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1606 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1607 					VMW_RES_DIRTY_NONE,
1608 					user_surface_converter,
1609 					&range->indexArray.surfaceId, NULL);
1610 		if (unlikely(ret != 0))
1611 			return ret;
1612 	}
1613 	return 0;
1614 }
1615 
1616 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1617 			     struct vmw_sw_context *sw_context,
1618 			     SVGA3dCmdHeader *header)
1619 {
1620 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1621 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1622 	  ((unsigned long) header + header->size + sizeof(*header));
1623 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1624 		((unsigned long) header + sizeof(*cmd));
1625 	struct vmw_resource *ctx;
1626 	struct vmw_resource *res;
1627 	int ret;
1628 
1629 	cmd = container_of(header, typeof(*cmd), header);
1630 
1631 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1632 				VMW_RES_DIRTY_SET, user_context_converter,
1633 				&cmd->body.cid, &ctx);
1634 	if (unlikely(ret != 0))
1635 		return ret;
1636 
1637 	for (; cur_state < last_state; ++cur_state) {
1638 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1639 			continue;
1640 
1641 		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1642 			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1643 				       (unsigned int) cur_state->stage);
1644 			return -EINVAL;
1645 		}
1646 
1647 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1648 					VMW_RES_DIRTY_NONE,
1649 					user_surface_converter,
1650 					&cur_state->value, &res);
1651 		if (unlikely(ret != 0))
1652 			return ret;
1653 
1654 		if (dev_priv->has_mob) {
1655 			struct vmw_ctx_bindinfo_tex binding;
1656 			struct vmw_ctx_validation_info *node;
1657 
1658 			node = vmw_execbuf_info_from_res(sw_context, ctx);
1659 			if (!node)
1660 				return -EINVAL;
1661 
1662 			binding.bi.ctx = ctx;
1663 			binding.bi.res = res;
1664 			binding.bi.bt = vmw_ctx_binding_tex;
1665 			binding.texture_stage = cur_state->stage;
1666 			vmw_binding_add(node->staged, &binding.bi, 0,
1667 					binding.texture_stage);
1668 		}
1669 	}
1670 
1671 	return 0;
1672 }
1673 
1674 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1675 				      struct vmw_sw_context *sw_context,
1676 				      void *buf)
1677 {
1678 	struct vmw_bo *vmw_bo;
1679 
1680 	struct {
1681 		uint32_t header;
1682 		SVGAFifoCmdDefineGMRFB body;
1683 	} *cmd = buf;
1684 
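	/* Only the GMRFB guest pointer needs translation and validation. */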
1685 	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1686 				       &vmw_bo);
1687 }
1688 
1689 /**
1690  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1691  * switching
1692  *
1693  * @dev_priv: Pointer to a device private struct.
1694  * @sw_context: The software context being used for this batch.
1695  * @res: Pointer to the resource.
1696  * @buf_id: Pointer to the user-space backup buffer handle in the command
1697  * stream.
1698  * @backup_offset: Offset of backup into MOB.
1699  *
1700  * This function prepares for registering a switch of backup buffers in the
1701  * resource metadata just prior to unreserving. It's the low-level helper
1702  * wrapped by vmw_cmd_switch_backup.
1703  */
1704 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1705 				     struct vmw_sw_context *sw_context,
1706 				     struct vmw_resource *res, uint32_t *buf_id,
1707 				     unsigned long backup_offset)
1708 {
1709 	struct vmw_bo *vbo;
1710 	void *info;
1711 	int ret;
1712 
1713 	info = vmw_execbuf_info_from_res(sw_context, res);
1714 	if (!info)
1715 		return -EINVAL;
1716 
1717 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1718 	if (ret)
1719 		return ret;
1720 
1721 	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1722 					 backup_offset);
1723 	return 0;
1724 }
1725 
1726 /**
1727  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1728  *
1729  * @dev_priv: Pointer to a device private struct.
1730  * @sw_context: The software context being used for this batch.
1731  * @res_type: The resource type.
1732  * @converter: Information about user-space binding for this resource type.
1733  * @res_id: Pointer to the user-space resource handle in the command stream.
1734  * @buf_id: Pointer to the user-space backup buffer handle in the command
1735  * stream.
1736  * @backup_offset: Offset of backup into MOB.
1737  *
1738  * This function prepares for registering a switch of backup buffers in the
1739  * resource metadata just prior to unreserving. It's basically a wrapper around
1740  * vmw_cmd_res_switch_backup with a different interface.
1741  */
1742 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1743 				 struct vmw_sw_context *sw_context,
1744 				 enum vmw_res_type res_type,
1745 				 const struct vmw_user_resource_conv
1746 				 *converter, uint32_t *res_id, uint32_t *buf_id,
1747 				 unsigned long backup_offset)
1748 {
1749 	struct vmw_resource *res;
1750 	int ret;
1751 
1752 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1753 				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1754 	if (ret)
1755 		return ret;
1756 
1757 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1758 					 backup_offset);
1759 }
1760 
1761 /**
1762  * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1763  *
1764  * @dev_priv: Pointer to a device private struct.
1765  * @sw_context: The software context being used for this batch.
1766  * @header: Pointer to the command header in the command stream.
1767  */
1768 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1769 				   struct vmw_sw_context *sw_context,
1770 				   SVGA3dCmdHeader *header)
1771 {
1772 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1773 		container_of(header, typeof(*cmd), header);
1774 
1775 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1776 				     user_surface_converter, &cmd->body.sid,
1777 				     &cmd->body.mobid, 0);
1778 }
1779 
1780 /**
1781  * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1782  *
1783  * @dev_priv: Pointer to a device private struct.
1784  * @sw_context: The software context being used for this batch.
1785  * @header: Pointer to the command header in the command stream.
1786  */
1787 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1788 				   struct vmw_sw_context *sw_context,
1789 				   SVGA3dCmdHeader *header)
1790 {
1791 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1792 		container_of(header, typeof(*cmd), header);
1793 
1794 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1795 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1796 				 &cmd->body.image.sid, NULL);
1797 }
1798 
1799 /**
1800  * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1801  *
1802  * @dev_priv: Pointer to a device private struct.
1803  * @sw_context: The software context being used for this batch.
1804  * @header: Pointer to the command header in the command stream.
1805  */
1806 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1807 				     struct vmw_sw_context *sw_context,
1808 				     SVGA3dCmdHeader *header)
1809 {
1810 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1811 		container_of(header, typeof(*cmd), header);
1812 
1813 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1814 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1815 				 &cmd->body.sid, NULL);
1816 }
1817 
1818 /**
1819  * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1820  *
1821  * @dev_priv: Pointer to a device private struct.
1822  * @sw_context: The software context being used for this batch.
1823  * @header: Pointer to the command header in the command stream.
1824  */
1825 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1826 				     struct vmw_sw_context *sw_context,
1827 				     SVGA3dCmdHeader *header)
1828 {
1829 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1830 		container_of(header, typeof(*cmd), header);
1831 
1832 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1833 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1834 				 &cmd->body.image.sid, NULL);
1835 }
1836 
1837 /**
1838  * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1839  * command
1840  *
1841  * @dev_priv: Pointer to a device private struct.
1842  * @sw_context: The software context being used for this batch.
1843  * @header: Pointer to the command header in the command stream.
1844  */
1845 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1846 				       struct vmw_sw_context *sw_context,
1847 				       SVGA3dCmdHeader *header)
1848 {
1849 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1850 		container_of(header, typeof(*cmd), header);
1851 
1852 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1853 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1854 				 &cmd->body.sid, NULL);
1855 }
1856 
1857 /**
1858  * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1859  * command
1860  *
1861  * @dev_priv: Pointer to a device private struct.
1862  * @sw_context: The software context being used for this batch.
1863  * @header: Pointer to the command header in the command stream.
1864  */
1865 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1866 				       struct vmw_sw_context *sw_context,
1867 				       SVGA3dCmdHeader *header)
1868 {
1869 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1870 		container_of(header, typeof(*cmd), header);
1871 
1872 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1873 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1874 				 &cmd->body.image.sid, NULL);
1875 }
1876 
1877 /**
1878  * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1879  * command
1880  *
1881  * @dev_priv: Pointer to a device private struct.
1882  * @sw_context: The software context being used for this batch.
1883  * @header: Pointer to the command header in the command stream.
1884  */
1885 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1886 					 struct vmw_sw_context *sw_context,
1887 					 SVGA3dCmdHeader *header)
1888 {
1889 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1890 		container_of(header, typeof(*cmd), header);
1891 
1892 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1893 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1894 				 &cmd->body.sid, NULL);
1895 }
1896 
1897 /**
1898  * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1899  *
1900  * @dev_priv: Pointer to a device private struct.
1901  * @sw_context: The software context being used for this batch.
1902  * @header: Pointer to the command header in the command stream.
1903  */
1904 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1905 				 struct vmw_sw_context *sw_context,
1906 				 SVGA3dCmdHeader *header)
1907 {
1908 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1909 	int ret;
1910 	size_t size;
1911 	struct vmw_resource *ctx;
1912 
1913 	cmd = container_of(header, typeof(*cmd), header);
1914 
1915 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1916 				VMW_RES_DIRTY_SET, user_context_converter,
1917 				&cmd->body.cid, &ctx);
1918 	if (unlikely(ret != 0))
1919 		return ret;
1920 
1921 	if (unlikely(!dev_priv->has_mob))
1922 		return 0;
1923 
1924 	size = cmd->header.size - sizeof(cmd->body);
1925 	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1926 				    cmd->body.shid, cmd + 1, cmd->body.type,
1927 				    size, &sw_context->staged_cmd_res);
1928 	if (unlikely(ret != 0))
1929 		return ret;
1930 
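	/*
	 * The shader is now backed by a kernel-managed compat shader, so
	 * replace the legacy define command with a NOP in the device stream.
	 */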
1931 	return vmw_resource_relocation_add(sw_context, NULL,
1932 					   vmw_ptr_diff(sw_context->buf_start,
1933 							&cmd->header.id),
1934 					   vmw_res_rel_nop);
1935 }
1936 
1937 /**
1938  * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1939  *
1940  * @dev_priv: Pointer to a device private struct.
1941  * @sw_context: The software context being used for this batch.
1942  * @header: Pointer to the command header in the command stream.
1943  */
1944 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1945 				  struct vmw_sw_context *sw_context,
1946 				  SVGA3dCmdHeader *header)
1947 {
1948 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1949 	int ret;
1950 	struct vmw_resource *ctx;
1951 
1952 	cmd = container_of(header, typeof(*cmd), header);
1953 
1954 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1955 				VMW_RES_DIRTY_SET, user_context_converter,
1956 				&cmd->body.cid, &ctx);
1957 	if (unlikely(ret != 0))
1958 		return ret;
1959 
1960 	if (unlikely(!dev_priv->has_mob))
1961 		return 0;
1962 
1963 	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1964 				cmd->body.type, &sw_context->staged_cmd_res);
1965 	if (unlikely(ret != 0))
1966 		return ret;
1967 
1968 	return vmw_resource_relocation_add(sw_context, NULL,
1969 					   vmw_ptr_diff(sw_context->buf_start,
1970 							&cmd->header.id),
1971 					   vmw_res_rel_nop);
1972 }
1973 
1974 /**
1975  * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1976  *
1977  * @dev_priv: Pointer to a device private struct.
1978  * @sw_context: The software context being used for this batch.
1979  * @header: Pointer to the command header in the command stream.
1980  */
1981 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1982 			      struct vmw_sw_context *sw_context,
1983 			      SVGA3dCmdHeader *header)
1984 {
1985 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1986 	struct vmw_ctx_bindinfo_shader binding;
1987 	struct vmw_resource *ctx, *res = NULL;
1988 	struct vmw_ctx_validation_info *ctx_info;
1989 	int ret;
1990 
1991 	cmd = container_of(header, typeof(*cmd), header);
1992 
1993 	if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
1994 		VMW_DEBUG_USER("Illegal shader type %u.\n",
1995 			       (unsigned int) cmd->body.type);
1996 		return -EINVAL;
1997 	}
1998 
1999 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2000 				VMW_RES_DIRTY_SET, user_context_converter,
2001 				&cmd->body.cid, &ctx);
2002 	if (unlikely(ret != 0))
2003 		return ret;
2004 
2005 	if (!dev_priv->has_mob)
2006 		return 0;
2007 
2008 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2009 		/*
2010 		 * This is the compat shader path - Per device guest-backed
2011 		 * shaders, but user-space thinks it's per context host-
2012 		 * backed shaders.
2013 		 */
2014 		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2015 					cmd->body.shid, cmd->body.type);
2016 		if (!IS_ERR(res)) {
2017 			ret = vmw_execbuf_res_val_add(sw_context, res,
2018 						      VMW_RES_DIRTY_NONE,
2019 						      vmw_val_add_flag_noctx);
2020 			if (unlikely(ret != 0))
2021 				return ret;
2022 
2023 			ret = vmw_resource_relocation_add
2024 				(sw_context, res,
2025 				 vmw_ptr_diff(sw_context->buf_start,
2026 					      &cmd->body.shid),
2027 				 vmw_res_rel_normal);
2028 			if (unlikely(ret != 0))
2029 				return ret;
2030 		}
2031 	}
2032 
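	/* No compat shader was found; look the id up as a user shader handle. */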
2033 	if (IS_ERR_OR_NULL(res)) {
2034 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2035 					VMW_RES_DIRTY_NONE,
2036 					user_shader_converter, &cmd->body.shid,
2037 					&res);
2038 		if (unlikely(ret != 0))
2039 			return ret;
2040 	}
2041 
2042 	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2043 	if (!ctx_info)
2044 		return -EINVAL;
2045 
2046 	binding.bi.ctx = ctx;
2047 	binding.bi.res = res;
2048 	binding.bi.bt = vmw_ctx_binding_shader;
2049 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2050 	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2051 
2052 	return 0;
2053 }
2054 
2055 /**
2056  * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2057  *
2058  * @dev_priv: Pointer to a device private struct.
2059  * @sw_context: The software context being used for this batch.
2060  * @header: Pointer to the command header in the command stream.
2061  */
2062 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2063 				    struct vmw_sw_context *sw_context,
2064 				    SVGA3dCmdHeader *header)
2065 {
2066 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2067 	int ret;
2068 
2069 	cmd = container_of(header, typeof(*cmd), header);
2070 
2071 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2072 				VMW_RES_DIRTY_SET, user_context_converter,
2073 				&cmd->body.cid, NULL);
2074 	if (unlikely(ret != 0))
2075 		return ret;
2076 
2077 	if (dev_priv->has_mob)
2078 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2079 
2080 	return 0;
2081 }
2082 
2083 /**
2084  * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2085  *
2086  * @dev_priv: Pointer to a device private struct.
2087  * @sw_context: The software context being used for this batch.
2088  * @header: Pointer to the command header in the command stream.
2089  */
2090 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2091 				  struct vmw_sw_context *sw_context,
2092 				  SVGA3dCmdHeader *header)
2093 {
2094 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2095 		container_of(header, typeof(*cmd), header);
2096 
2097 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2098 				     user_shader_converter, &cmd->body.shid,
2099 				     &cmd->body.mobid, cmd->body.offsetInBytes);
2100 }
2101 
2102 /**
2103  * vmw_cmd_dx_set_single_constant_buffer - Validate
2104  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2105  *
2106  * @dev_priv: Pointer to a device private struct.
2107  * @sw_context: The software context being used for this batch.
2108  * @header: Pointer to the command header in the command stream.
2109  */
2110 static int
2111 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2112 				      struct vmw_sw_context *sw_context,
2113 				      SVGA3dCmdHeader *header)
2114 {
2115 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2116 
2117 	struct vmw_resource *res = NULL;
2118 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2119 	struct vmw_ctx_bindinfo_cb binding;
2120 	int ret;
2121 
2122 	if (!ctx_node)
2123 		return -EINVAL;
2124 
2125 	cmd = container_of(header, typeof(*cmd), header);
2126 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2127 				VMW_RES_DIRTY_NONE, user_surface_converter,
2128 				&cmd->body.sid, &res);
2129 	if (unlikely(ret != 0))
2130 		return ret;
2131 
2132 	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
2133 	    cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2134 		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2135 			       (unsigned int) cmd->body.type,
2136 			       (unsigned int) cmd->body.slot);
2137 		return -EINVAL;
2138 	}
2139 
2140 	binding.bi.ctx = ctx_node->ctx;
2141 	binding.bi.res = res;
2142 	binding.bi.bt = vmw_ctx_binding_cb;
2143 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2144 	binding.offset = cmd->body.offsetInBytes;
2145 	binding.size = cmd->body.sizeInBytes;
2146 	binding.slot = cmd->body.slot;
2147 
2148 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2149 			binding.slot);
2150 
2151 	return 0;
2152 }
2153 
2154 /**
2155  * vmw_cmd_dx_set_constant_buffer_offset - Validate
2156  * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
2157  *
2158  * @dev_priv: Pointer to a device private struct.
2159  * @sw_context: The software context being used for this batch.
2160  * @header: Pointer to the command header in the command stream.
2161  */
2162 static int
2163 vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
2164 				      struct vmw_sw_context *sw_context,
2165 				      SVGA3dCmdHeader *header)
2166 {
2167 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
2168 
2169 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2170 	u32 shader_slot;
2171 
2172 	if (!has_sm5_context(dev_priv))
2173 		return -EINVAL;
2174 
2175 	if (!ctx_node)
2176 		return -EINVAL;
2177 
2178 	cmd = container_of(header, typeof(*cmd), header);
2179 	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2180 		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
2181 			       (unsigned int) cmd->body.slot);
2182 		return -EINVAL;
2183 	}
2184 
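	/*
	 * The VS/PS/GS/HS/DS/CS variants of this command have contiguous
	 * command ids, so the shader slot falls out of the id arithmetic.
	 */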
2185 	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
2186 	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
2187 				     cmd->body.slot, cmd->body.offsetInBytes);
2188 
2189 	return 0;
2190 }
2191 
2192 /**
2193  * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2194  * command
2195  *
2196  * @dev_priv: Pointer to a device private struct.
2197  * @sw_context: The software context being used for this batch.
2198  * @header: Pointer to the command header in the command stream.
2199  */
2200 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2201 				     struct vmw_sw_context *sw_context,
2202 				     SVGA3dCmdHeader *header)
2203 {
2204 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2205 		container_of(header, typeof(*cmd), header);
2206 
2207 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2208 		sizeof(SVGA3dShaderResourceViewId);
2209 
2210 	if ((u64) cmd->body.startView + (u64) num_sr_view >
2211 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2212 	    !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2213 		VMW_DEBUG_USER("Invalid shader binding.\n");
2214 		return -EINVAL;
2215 	}
2216 
2217 	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2218 				     vmw_ctx_binding_sr,
2219 				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2220 				     (void *) &cmd[1], num_sr_view,
2221 				     cmd->body.startView);
2222 }
2223 
2224 /**
2225  * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2226  *
2227  * @dev_priv: Pointer to a device private struct.
2228  * @sw_context: The software context being used for this batch.
2229  * @header: Pointer to the command header in the command stream.
2230  */
2231 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2232 				 struct vmw_sw_context *sw_context,
2233 				 SVGA3dCmdHeader *header)
2234 {
2235 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2236 	struct vmw_resource *res = NULL;
2237 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2238 	struct vmw_ctx_bindinfo_shader binding;
2239 	int ret = 0;
2240 
2241 	if (!ctx_node)
2242 		return -EINVAL;
2243 
2244 	cmd = container_of(header, typeof(*cmd), header);
2245 
2246 	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2247 		VMW_DEBUG_USER("Illegal shader type %u.\n",
2248 			       (unsigned int) cmd->body.type);
2249 		return -EINVAL;
2250 	}
2251 
2252 	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2253 		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2254 		if (IS_ERR(res)) {
2255 			VMW_DEBUG_USER("Could not find shader for binding.\n");
2256 			return PTR_ERR(res);
2257 		}
2258 
2259 		ret = vmw_execbuf_res_val_add(sw_context, res,
2260 					      VMW_RES_DIRTY_NONE,
2261 					      vmw_val_add_flag_noctx);
2262 		if (ret)
2263 			return ret;
2264 	}
2265 
2266 	binding.bi.ctx = ctx_node->ctx;
2267 	binding.bi.res = res;
2268 	binding.bi.bt = vmw_ctx_binding_dx_shader;
2269 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2270 
2271 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2272 
2273 	return 0;
2274 }
2275 
2276 /**
2277  * vmw_cmd_dx_set_vertex_buffers - Validate SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2278  * command
2279  *
2280  * @dev_priv: Pointer to a device private struct.
2281  * @sw_context: The software context being used for this batch.
2282  * @header: Pointer to the command header in the command stream.
2283  */
2284 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2285 					 struct vmw_sw_context *sw_context,
2286 					 SVGA3dCmdHeader *header)
2287 {
2288 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2289 	struct vmw_ctx_bindinfo_vb binding;
2290 	struct vmw_resource *res;
2291 	struct {
2292 		SVGA3dCmdHeader header;
2293 		SVGA3dCmdDXSetVertexBuffers body;
2294 		SVGA3dVertexBuffer buf[];
2295 	} *cmd;
2296 	int i, ret, num;
2297 
2298 	if (!ctx_node)
2299 		return -EINVAL;
2300 
2301 	cmd = container_of(header, typeof(*cmd), header);
2302 	num = (cmd->header.size - sizeof(cmd->body)) /
2303 		sizeof(SVGA3dVertexBuffer);
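	/* Do the range check in 64 bits so the sum cannot wrap. */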
2304 	if ((u64)num + (u64)cmd->body.startBuffer >
2305 	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2306 		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2307 		return -EINVAL;
2308 	}
2309 
2310 	for (i = 0; i < num; i++) {
2311 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2312 					VMW_RES_DIRTY_NONE,
2313 					user_surface_converter,
2314 					&cmd->buf[i].sid, &res);
2315 		if (unlikely(ret != 0))
2316 			return ret;
2317 
2318 		binding.bi.ctx = ctx_node->ctx;
2319 		binding.bi.bt = vmw_ctx_binding_vb;
2320 		binding.bi.res = res;
2321 		binding.offset = cmd->buf[i].offset;
2322 		binding.stride = cmd->buf[i].stride;
2323 		binding.slot = i + cmd->body.startBuffer;
2324 
2325 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2326 	}
2327 
2328 	return 0;
2329 }
2330 
2331 /**
2332  * vmw_cmd_dx_set_index_buffer - Validate
2333  * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2334  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2335  * @dev_priv: Pointer to a device private struct.
2336  * @sw_context: The software context being used for this batch.
2337  * @header: Pointer to the command header in the command stream.
2338  */
2339 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2340 				       struct vmw_sw_context *sw_context,
2341 				       SVGA3dCmdHeader *header)
2342 {
2343 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2344 	struct vmw_ctx_bindinfo_ib binding;
2345 	struct vmw_resource *res;
2346 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2347 	int ret;
2348 
2349 	if (!ctx_node)
2350 		return -EINVAL;
2351 
2352 	cmd = container_of(header, typeof(*cmd), header);
2353 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2354 				VMW_RES_DIRTY_NONE, user_surface_converter,
2355 				&cmd->body.sid, &res);
2356 	if (unlikely(ret != 0))
2357 		return ret;
2358 
2359 	binding.bi.ctx = ctx_node->ctx;
2360 	binding.bi.res = res;
2361 	binding.bi.bt = vmw_ctx_binding_ib;
2362 	binding.offset = cmd->body.offset;
2363 	binding.format = cmd->body.format;
2364 
2365 	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2366 
2367 	return 0;
2368 }
2369 
2370 /**
2371  * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2372  * command
2373  *
2374  * @dev_priv: Pointer to a device private struct.
2375  * @sw_context: The software context being used for this batch.
2376  * @header: Pointer to the command header in the command stream.
2377  */
2378 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2379 					struct vmw_sw_context *sw_context,
2380 					SVGA3dCmdHeader *header)
2381 {
2382 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2383 		container_of(header, typeof(*cmd), header);
2384 	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2385 		sizeof(SVGA3dRenderTargetViewId);
2386 	int ret;
2387 
2388 	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
2389 		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2390 		return -EINVAL;
2391 	}
2392 
2393 	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2394 				    0, &cmd->body.depthStencilViewId, 1, 0);
2395 	if (ret)
2396 		return ret;
2397 
2398 	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2399 				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2400 				     num_rt_view, 0);
2401 }
2402 
2403 /**
2404  * vmw_cmd_dx_clear_rendertarget_view - Validate
2405  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2406  *
2407  * @dev_priv: Pointer to a device private struct.
2408  * @sw_context: The software context being used for this batch.
2409  * @header: Pointer to the command header in the command stream.
2410  */
2411 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2412 					      struct vmw_sw_context *sw_context,
2413 					      SVGA3dCmdHeader *header)
2414 {
2415 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2416 		container_of(header, typeof(*cmd), header);
2417 	struct vmw_resource *ret;
2418 
2419 	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2420 				  cmd->body.renderTargetViewId);
2421 
2422 	return PTR_ERR_OR_ZERO(ret);
2423 }
2424 
2425 /**
2426  * vmw_cmd_dx_clear_depthstencil_view - Validate
2427  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2428  *
2429  * @dev_priv: Pointer to a device private struct.
2430  * @sw_context: The software context being used for this batch.
2431  * @header: Pointer to the command header in the command stream.
2432  */
2433 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2434 					      struct vmw_sw_context *sw_context,
2435 					      SVGA3dCmdHeader *header)
2436 {
2437 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2438 		container_of(header, typeof(*cmd), header);
2439 	struct vmw_resource *ret;
2440 
2441 	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2442 				  cmd->body.depthStencilViewId);
2443 
2444 	return PTR_ERR_OR_ZERO(ret);
2445 }
2446 
2447 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2448 				  struct vmw_sw_context *sw_context,
2449 				  SVGA3dCmdHeader *header)
2450 {
2451 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2452 	struct vmw_resource *srf;
2453 	struct vmw_resource *res;
2454 	enum vmw_view_type view_type;
2455 	int ret;
2456 	/*
2457 	 * This is based on the fact that all affected define commands have the
2458 	 * same initial command body layout.
2459 	 */
2460 	struct {
2461 		SVGA3dCmdHeader header;
2462 		uint32 defined_id;
2463 		uint32 sid;
2464 	} *cmd;
2465 
2466 	if (!ctx_node)
2467 		return -EINVAL;
2468 
2469 	view_type = vmw_view_cmd_to_type(header->id);
2470 	if (view_type == vmw_view_max)
2471 		return -EINVAL;
2472 
2473 	cmd = container_of(header, typeof(*cmd), header);
2474 	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2475 		VMW_DEBUG_USER("Invalid surface id.\n");
2476 		return -EINVAL;
2477 	}
2478 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2479 				VMW_RES_DIRTY_NONE, user_surface_converter,
2480 				&cmd->sid, &srf);
2481 	if (unlikely(ret != 0))
2482 		return ret;
2483 
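	/* Notify the view cotable so it can be resized to fit the new id. */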
2484 	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2485 	ret = vmw_cotable_notify(res, cmd->defined_id);
2486 	if (unlikely(ret != 0))
2487 		return ret;
2488 
2489 	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2490 			    cmd->defined_id, header,
2491 			    header->size + sizeof(*header),
2492 			    &sw_context->staged_cmd_res);
2493 }
2494 
2495 /**
2496  * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2497  *
2498  * @dev_priv: Pointer to a device private struct.
2499  * @sw_context: The software context being used for this batch.
2500  * @header: Pointer to the command header in the command stream.
2501  */
2502 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2503 				     struct vmw_sw_context *sw_context,
2504 				     SVGA3dCmdHeader *header)
2505 {
2506 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2507 	struct vmw_ctx_bindinfo_so_target binding;
2508 	struct vmw_resource *res;
2509 	struct {
2510 		SVGA3dCmdHeader header;
2511 		SVGA3dCmdDXSetSOTargets body;
2512 		SVGA3dSoTarget targets[];
2513 	} *cmd;
2514 	int i, ret, num;
2515 
2516 	if (!ctx_node)
2517 		return -EINVAL;
2518 
2519 	cmd = container_of(header, typeof(*cmd), header);
2520 	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2521 
2522 	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2523 		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2524 		return -EINVAL;
2525 	}
2526 
2527 	for (i = 0; i < num; i++) {
2528 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2529 					VMW_RES_DIRTY_SET,
2530 					user_surface_converter,
2531 					&cmd->targets[i].sid, &res);
2532 		if (unlikely(ret != 0))
2533 			return ret;
2534 
2535 		binding.bi.ctx = ctx_node->ctx;
2536 		binding.bi.res = res;
2537 		binding.bi.bt = vmw_ctx_binding_so_target;
2538 		binding.offset = cmd->targets[i].offset;
2539 		binding.size = cmd->targets[i].sizeInBytes;
2540 		binding.slot = i;
2541 
2542 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2543 	}
2544 
2545 	return 0;
2546 }
2547 
2548 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2549 				struct vmw_sw_context *sw_context,
2550 				SVGA3dCmdHeader *header)
2551 {
2552 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2553 	struct vmw_resource *res;
2554 	/*
2555 	 * This is based on the fact that all affected define commands have
2556 	 * the same initial command body layout.
2557 	 */
2558 	struct {
2559 		SVGA3dCmdHeader header;
2560 		uint32 defined_id;
2561 	} *cmd;
2562 	enum vmw_so_type so_type;
2563 	int ret;
2564 
2565 	if (!ctx_node)
2566 		return -EINVAL;
2567 
2568 	so_type = vmw_so_cmd_to_type(header->id);
2569 	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2570 	if (IS_ERR(res))
2571 		return PTR_ERR(res);
2572 	cmd = container_of(header, typeof(*cmd), header);
2573 	ret = vmw_cotable_notify(res, cmd->defined_id);
2574 
2575 	return ret;
2576 }
2577 
2578 /**
2579  * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2580  * command
2581  *
2582  * @dev_priv: Pointer to a device private struct.
2583  * @sw_context: The software context being used for this batch.
2584  * @header: Pointer to the command header in the command stream.
2585  */
2586 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2587 					struct vmw_sw_context *sw_context,
2588 					SVGA3dCmdHeader *header)
2589 {
2590 	struct {
2591 		SVGA3dCmdHeader header;
2592 		union {
2593 			SVGA3dCmdDXReadbackSubResource r_body;
2594 			SVGA3dCmdDXInvalidateSubResource i_body;
2595 			SVGA3dCmdDXUpdateSubResource u_body;
2596 			SVGA3dSurfaceId sid;
2597 		};
2598 	} *cmd;
2599 
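	/* All command bodies must place the surface id at the same offset. */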
2600 	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2601 		     offsetof(typeof(*cmd), sid));
2602 	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2603 		     offsetof(typeof(*cmd), sid));
2604 	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2605 		     offsetof(typeof(*cmd), sid));
2606 
2607 	cmd = container_of(header, typeof(*cmd), header);
2608 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2609 				 VMW_RES_DIRTY_NONE, user_surface_converter,
2610 				 &cmd->sid, NULL);
2611 }
2612 
2613 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2614 				struct vmw_sw_context *sw_context,
2615 				SVGA3dCmdHeader *header)
2616 {
2617 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2618 
2619 	if (!ctx_node)
2620 		return -EINVAL;
2621 
2622 	return 0;
2623 }
2624 
2625 /**
2626  * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the
2627  * view resource for removal.
2628  *
2629  * @dev_priv: Pointer to a device private struct.
2630  * @sw_context: The software context being used for this batch.
2631  * @header: Pointer to the command header in the command stream.
2632  *
2633  * Check that the view exists, and if it was not created using this command
2634  * batch, conditionally make this command a NOP.
2635  */
2636 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2637 				  struct vmw_sw_context *sw_context,
2638 				  SVGA3dCmdHeader *header)
2639 {
2640 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2641 	struct {
2642 		SVGA3dCmdHeader header;
2643 		union vmw_view_destroy body;
2644 	} *cmd = container_of(header, typeof(*cmd), header);
2645 	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2646 	struct vmw_resource *view;
2647 	int ret;
2648 
2649 	if (!ctx_node)
2650 		return -EINVAL;
2651 
2652 	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2653 			      &sw_context->staged_cmd_res, &view);
2654 	if (ret || !view)
2655 		return ret;
2656 
2657 	/*
2658 	 * If the view wasn't created during this command batch, it might
2659 	 * have been removed due to a context swapout, so add a
2660 	 * relocation to conditionally make this command a NOP to avoid
2661 	 * device errors.
2662 	 */
2663 	return vmw_resource_relocation_add(sw_context, view,
2664 					   vmw_ptr_diff(sw_context->buf_start,
2665 							&cmd->header.id),
2666 					   vmw_res_rel_cond_nop);
2667 }
2668 
2669 /**
2670  * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2671  *
2672  * @dev_priv: Pointer to a device private struct.
2673  * @sw_context: The software context being used for this batch.
2674  * @header: Pointer to the command header in the command stream.
2675  */
2676 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2677 				    struct vmw_sw_context *sw_context,
2678 				    SVGA3dCmdHeader *header)
2679 {
2680 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2681 	struct vmw_resource *res;
2682 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2683 		container_of(header, typeof(*cmd), header);
2684 	int ret;
2685 
2686 	if (!ctx_node)
2687 		return -EINVAL;
2688 
2689 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2690 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2691 	if (ret)
2692 		return ret;
2693 
2694 	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2695 				 cmd->body.shaderId, cmd->body.type,
2696 				 &sw_context->staged_cmd_res);
2697 }
2698 
2699 /**
2700  * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2701  *
2702  * @dev_priv: Pointer to a device private struct.
2703  * @sw_context: The software context being used for this batch.
2704  * @header: Pointer to the command header in the command stream.
2705  */
2706 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2707 				     struct vmw_sw_context *sw_context,
2708 				     SVGA3dCmdHeader *header)
2709 {
2710 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2711 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2712 		container_of(header, typeof(*cmd), header);
2713 	int ret;
2714 
2715 	if (!ctx_node)
2716 		return -EINVAL;
2717 
2718 	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2719 				&sw_context->staged_cmd_res);
2720 
2721 	return ret;
2722 }
2723 
2724 /**
2725  * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2726  *
2727  * @dev_priv: Pointer to a device private struct.
2728  * @sw_context: The software context being used for this batch.
2729  * @header: Pointer to the command header in the command stream.
2730  */
2731 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2732 				  struct vmw_sw_context *sw_context,
2733 				  SVGA3dCmdHeader *header)
2734 {
2735 	struct vmw_resource *ctx;
2736 	struct vmw_resource *res;
2737 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2738 		container_of(header, typeof(*cmd), header);
2739 	int ret;
2740 
2741 	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2742 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2743 					VMW_RES_DIRTY_SET,
2744 					user_context_converter, &cmd->body.cid,
2745 					&ctx);
2746 		if (ret)
2747 			return ret;
2748 	} else {
2749 		struct vmw_ctx_validation_info *ctx_node =
2750 			VMW_GET_CTX_NODE(sw_context);
2751 
2752 		if (!ctx_node)
2753 			return -EINVAL;
2754 
2755 		ctx = ctx_node->ctx;
2756 	}
2757 
2758 	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2759 	if (IS_ERR(res)) {
2760 		VMW_DEBUG_USER("Could not find shader to bind.\n");
2761 		return PTR_ERR(res);
2762 	}
2763 
2764 	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
2765 				      vmw_val_add_flag_noctx);
2766 	if (ret) {
2767 		VMW_DEBUG_USER("Error creating resource validation node.\n");
2768 		return ret;
2769 	}
2770 
2771 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2772 					 &cmd->body.mobid,
2773 					 cmd->body.offsetInBytes);
2774 }
2775 
2776 /**
2777  * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2778  *
2779  * @dev_priv: Pointer to a device private struct.
2780  * @sw_context: The software context being used for this batch.
2781  * @header: Pointer to the command header in the command stream.
2782  */
2783 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2784 			      struct vmw_sw_context *sw_context,
2785 			      SVGA3dCmdHeader *header)
2786 {
2787 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2788 		container_of(header, typeof(*cmd), header);
2789 	struct vmw_resource *view;
2790 	struct vmw_res_cache_entry *rcache;
2791 
2792 	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2793 				   cmd->body.shaderResourceViewId);
2794 	if (IS_ERR(view))
2795 		return PTR_ERR(view);
2796 
2797 	/*
2798 	 * Normally the shader-resource view is not gpu-dirtying, but for
2799 	 * this particular command it is...
2800 	 * So mark the last looked-up surface, which is the surface
2801 	 * the view points to, gpu-dirty.
2802 	 */
2803 	rcache = &sw_context->res_cache[vmw_res_surface];
2804 	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2805 				     VMW_RES_DIRTY_SET);
2806 	return 0;
2807 }
2808 
2809 /**
2810  * vmw_cmd_dx_transfer_from_buffer - Validate
2811  * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2812  *
2813  * @dev_priv: Pointer to a device private struct.
2814  * @sw_context: The software context being used for this batch.
2815  * @header: Pointer to the command header in the command stream.
2816  */
2817 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2818 					   struct vmw_sw_context *sw_context,
2819 					   SVGA3dCmdHeader *header)
2820 {
2821 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2822 		container_of(header, typeof(*cmd), header);
2823 	int ret;
2824 
2825 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2826 				VMW_RES_DIRTY_NONE, user_surface_converter,
2827 				&cmd->body.srcSid, NULL);
2828 	if (ret != 0)
2829 		return ret;
2830 
2831 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2832 				 VMW_RES_DIRTY_SET, user_surface_converter,
2833 				 &cmd->body.destSid, NULL);
2834 }
2835 
2836 /**
2837  * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2838  *
2839  * @dev_priv: Pointer to a device private struct.
2840  * @sw_context: The software context being used for this batch.
2841  * @header: Pointer to the command header in the command stream.
2842  */
2843 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2844 				      struct vmw_sw_context *sw_context,
2845 				      SVGA3dCmdHeader *header)
2846 {
2847 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2848 		container_of(header, typeof(*cmd), header);
2849 
2850 	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2851 		return -EINVAL;
2852 
2853 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2854 				 VMW_RES_DIRTY_SET, user_surface_converter,
2855 				 &cmd->body.surface.sid, NULL);
2856 }
2857 
2858 static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2859 		       struct vmw_sw_context *sw_context,
2860 		       SVGA3dCmdHeader *header)
2861 {
2862 	if (!has_sm5_context(dev_priv))
2863 		return -EINVAL;
2864 
2865 	return 0;
2866 }
2867 
2868 static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2869 				   struct vmw_sw_context *sw_context,
2870 				   SVGA3dCmdHeader *header)
2871 {
2872 	if (!has_sm5_context(dev_priv))
2873 		return -EINVAL;
2874 
2875 	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2876 }
2877 
2878 static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2879 				   struct vmw_sw_context *sw_context,
2880 				   SVGA3dCmdHeader *header)
2881 {
2882 	if (!has_sm5_context(dev_priv))
2883 		return -EINVAL;
2884 
2885 	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2886 }
2887 
2888 static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2889 				  struct vmw_sw_context *sw_context,
2890 				  SVGA3dCmdHeader *header)
2891 {
2892 	struct {
2893 		SVGA3dCmdHeader header;
2894 		SVGA3dCmdDXClearUAViewUint body;
2895 	} *cmd = container_of(header, typeof(*cmd), header);
2896 	struct vmw_resource *ret;
2897 
2898 	if (!has_sm5_context(dev_priv))
2899 		return -EINVAL;
2900 
2901 	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2902 				  cmd->body.uaViewId);
2903 
2904 	return PTR_ERR_OR_ZERO(ret);
2905 }
2906 
2907 static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2908 				   struct vmw_sw_context *sw_context,
2909 				   SVGA3dCmdHeader *header)
2910 {
2911 	struct {
2912 		SVGA3dCmdHeader header;
2913 		SVGA3dCmdDXClearUAViewFloat body;
2914 	} *cmd = container_of(header, typeof(*cmd), header);
2915 	struct vmw_resource *ret;
2916 
2917 	if (!has_sm5_context(dev_priv))
2918 		return -EINVAL;
2919 
2920 	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2921 				  cmd->body.uaViewId);
2922 
2923 	return PTR_ERR_OR_ZERO(ret);
2924 }
2925 
2926 static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2927 			   struct vmw_sw_context *sw_context,
2928 			   SVGA3dCmdHeader *header)
2929 {
2930 	struct {
2931 		SVGA3dCmdHeader header;
2932 		SVGA3dCmdDXSetUAViews body;
2933 	} *cmd = container_of(header, typeof(*cmd), header);
2934 	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2935 		sizeof(SVGA3dUAViewId);
2936 	int ret;
2937 
2938 	if (!has_sm5_context(dev_priv))
2939 		return -EINVAL;
2940 
2941 	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2942 		VMW_DEBUG_USER("Invalid UAV binding.\n");
2943 		return -EINVAL;
2944 	}
2945 
2946 	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2947 				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2948 				    num_uav, 0);
2949 	if (ret)
2950 		return ret;
2951 
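	/* Graphics UAVs use uav index 0; the compute path below uses 1. */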
2952 	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2953 				  cmd->body.uavSpliceIndex);
2954 
2955 	return ret;
2956 }
2957 
2958 static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2959 			      struct vmw_sw_context *sw_context,
2960 			      SVGA3dCmdHeader *header)
2961 {
2962 	struct {
2963 		SVGA3dCmdHeader header;
2964 		SVGA3dCmdDXSetCSUAViews body;
2965 	} *cmd = container_of(header, typeof(*cmd), header);
2966 	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2967 		sizeof(SVGA3dUAViewId);
2968 	int ret;
2969 
2970 	if (!has_sm5_context(dev_priv))
2971 		return -EINVAL;
2972 
2973 	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2974 		VMW_DEBUG_USER("Invalid UAV binding.\n");
2975 		return -EINVAL;
2976 	}
2977 
2978 	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2979 				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2980 				    num_uav, 0);
2981 	if (ret)
2982 		return ret;
2983 
2984 	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2985 				  cmd->body.startIndex);
2986 
2987 	return ret;
2988 }
2989 
2990 static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2991 					  struct vmw_sw_context *sw_context,
2992 					  SVGA3dCmdHeader *header)
2993 {
2994 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2995 	struct vmw_resource *res;
2996 	struct {
2997 		SVGA3dCmdHeader header;
2998 		SVGA3dCmdDXDefineStreamOutputWithMob body;
2999 	} *cmd = container_of(header, typeof(*cmd), header);
3000 	int ret;
3001 
3002 	if (!has_sm5_context(dev_priv))
3003 		return -EINVAL;
3004 
3005 	if (!ctx_node) {
3006 		DRM_ERROR("DX Context not set.\n");
3007 		return -EINVAL;
3008 	}
3009 
3010 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
3011 	ret = vmw_cotable_notify(res, cmd->body.soid);
3012 	if (ret)
3013 		return ret;
3014 
3015 	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
3016 				       cmd->body.soid,
3017 				       &sw_context->staged_cmd_res);
3018 }
3019 
3020 static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3021 					   struct vmw_sw_context *sw_context,
3022 					   SVGA3dCmdHeader *header)
3023 {
3024 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3025 	struct vmw_resource *res;
3026 	struct {
3027 		SVGA3dCmdHeader header;
3028 		SVGA3dCmdDXDestroyStreamOutput body;
3029 	} *cmd = container_of(header, typeof(*cmd), header);
3030 
3031 	if (!ctx_node) {
3032 		DRM_ERROR("DX Context not set.\n");
3033 		return -EINVAL;
3034 	}
3035 
3036 	/*
3037 	 * When the device does not support SM5, the streamoutput-with-mob
3038 	 * commands are not available to user-space. Simply return in this case.
3039 	 */
3040 	if (!has_sm5_context(dev_priv))
3041 		return 0;
3042 
3043 	/*
3044 	 * On an SM5-capable device, a failed lookup means user-space probably
3045 	 * used the old streamoutput define command. Return without an error.
3046 	 */
3047 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3048 					 cmd->body.soid);
3049 	if (IS_ERR(res))
3050 		return 0;
3051 
3052 	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3053 					  &sw_context->staged_cmd_res);
3054 }
3055 
3056 static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3057 					struct vmw_sw_context *sw_context,
3058 					SVGA3dCmdHeader *header)
3059 {
3060 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3061 	struct vmw_resource *res;
3062 	struct {
3063 		SVGA3dCmdHeader header;
3064 		SVGA3dCmdDXBindStreamOutput body;
3065 	} *cmd = container_of(header, typeof(*cmd), header);
3066 	int ret;
3067 
3068 	if (!has_sm5_context(dev_priv))
3069 		return -EINVAL;
3070 
3071 	if (!ctx_node) {
3072 		DRM_ERROR("DX Context not set.\n");
3073 		return -EINVAL;
3074 	}
3075 
3076 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3077 					 cmd->body.soid);
3078 	if (IS_ERR(res)) {
3079 		DRM_ERROR("Could not find streamoutput to bind.\n");
3080 		return PTR_ERR(res);
3081 	}
3082 
3083 	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3084 
3085 	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3086 				      vmw_val_add_flag_noctx);
3087 	if (ret) {
3088 		DRM_ERROR("Error creating resource validation node.\n");
3089 		return ret;
3090 	}
3091 
3092 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3093 					 &cmd->body.mobid,
3094 					 cmd->body.offsetInBytes);
3095 }
3096 
3097 static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3098 				       struct vmw_sw_context *sw_context,
3099 				       SVGA3dCmdHeader *header)
3100 {
3101 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3102 	struct vmw_resource *res;
3103 	struct vmw_ctx_bindinfo_so binding;
3104 	struct {
3105 		SVGA3dCmdHeader header;
3106 		SVGA3dCmdDXSetStreamOutput body;
3107 	} *cmd = container_of(header, typeof(*cmd), header);
3108 	int ret;
3109 
3110 	if (!ctx_node) {
3111 		DRM_ERROR("DX Context not set.\n");
3112 		return -EINVAL;
3113 	}
3114 
3115 	if (cmd->body.soid == SVGA3D_INVALID_ID)
3116 		return 0;
3117 
3118 	/*
3119 	 * When the device does not support SM5, the streamoutput-with-mob
3120 	 * commands are not available to user-space. Simply return in this case.
3121 	 */
3122 	if (!has_sm5_context(dev_priv))
3123 		return 0;
3124 
3125 	/*
3126 	 * On an SM5-capable device, a failed lookup means user-space probably
3127 	 * used the old streamoutput define command. Return without an error.
3128 	 */
3129 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3130 					 cmd->body.soid);
3131 	if (IS_ERR(res))
3132 		return 0;
3134 
3135 	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3136 				      vmw_val_add_flag_noctx);
3137 	if (ret) {
3138 		DRM_ERROR("Error creating resource validation node.\n");
3139 		return ret;
3140 	}
3141 
3142 	binding.bi.ctx = ctx_node->ctx;
3143 	binding.bi.res = res;
3144 	binding.bi.bt = vmw_ctx_binding_so;
3145 	binding.slot = 0; /* Only one SO set to context at a time. */
3146 
3147 	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3148 			binding.slot);
3149 
3150 	return ret;
3151 }
3152 
3153 static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3154 					      struct vmw_sw_context *sw_context,
3155 					      SVGA3dCmdHeader *header)
3156 {
3157 	struct vmw_draw_indexed_instanced_indirect_cmd {
3158 		SVGA3dCmdHeader header;
3159 		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3160 	} *cmd = container_of(header, typeof(*cmd), header);
3161 
3162 	if (!has_sm5_context(dev_priv))
3163 		return -EINVAL;
3164 
3165 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3166 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3167 				 &cmd->body.argsBufferSid, NULL);
3168 }
3169 
3170 static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3171 				      struct vmw_sw_context *sw_context,
3172 				      SVGA3dCmdHeader *header)
3173 {
3174 	struct vmw_draw_instanced_indirect_cmd {
3175 		SVGA3dCmdHeader header;
3176 		SVGA3dCmdDXDrawInstancedIndirect body;
3177 	} *cmd = container_of(header, typeof(*cmd), header);
3178 
3179 	if (!has_sm5_context(dev_priv))
3180 		return -EINVAL;
3181 
3182 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3183 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3184 				 &cmd->body.argsBufferSid, NULL);
3185 }
3186 
3187 static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3188 				     struct vmw_sw_context *sw_context,
3189 				     SVGA3dCmdHeader *header)
3190 {
3191 	struct vmw_dispatch_indirect_cmd {
3192 		SVGA3dCmdHeader header;
3193 		SVGA3dCmdDXDispatchIndirect body;
3194 	} *cmd = container_of(header, typeof(*cmd), header);
3195 
3196 	if (!has_sm5_context(dev_priv))
3197 		return -EINVAL;
3198 
3199 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3200 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3201 				 &cmd->body.argsBufferSid, NULL);
3202 }
3203 
3204 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3205 				struct vmw_sw_context *sw_context,
3206 				void *buf, uint32_t *size)
3207 {
3208 	uint32_t size_remaining = *size;
3209 	uint32_t cmd_id;
3210 
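	/* 2D SVGA commands start with a plain 32-bit id, not a 3D header. */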
3211 	cmd_id = ((uint32_t *)buf)[0];
3212 	switch (cmd_id) {
3213 	case SVGA_CMD_UPDATE:
3214 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3215 		break;
3216 	case SVGA_CMD_DEFINE_GMRFB:
3217 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3218 		break;
3219 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3220 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3221 		break;
3222 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3223 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3224 		break;
3225 	default:
3226 		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3227 		return -EINVAL;
3228 	}
3229 
3230 	if (*size > size_remaining) {
3231 		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3232 			       cmd_id);
3233 		return -EINVAL;
3234 	}
3235 
3236 	if (unlikely(!sw_context->kernel)) {
3237 		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3238 		return -EPERM;
3239 	}
3240 
3241 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3242 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3243 
3244 	return 0;
3245 }
3246 
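/*
 * Dispatch table for SVGA 3D commands. The booleans following each handler
 * are, in order, user_allow, gb_disable and gb_enable: whether user-space may
 * issue the command, and whether it is disabled or enabled on guest-backed
 * (mob-capable) devices.
 */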
3247 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3248 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3249 		    false, false, false),
3250 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3251 		    false, false, false),
3252 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3253 		    true, false, false),
3254 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3255 		    true, false, false),
3256 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3257 		    true, false, false),
3258 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3259 		    false, false, false),
3260 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3261 		    false, false, false),
3262 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3263 		    true, false, false),
3264 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3265 		    true, false, false),
3266 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3267 		    true, false, false),
3268 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3269 		    &vmw_cmd_set_render_target_check, true, false, false),
3270 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3271 		    true, false, false),
3272 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3273 		    true, false, false),
3274 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3275 		    true, false, false),
3276 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3277 		    true, false, false),
3278 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3279 		    true, false, false),
3280 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3281 		    true, false, false),
3282 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3283 		    true, false, false),
3284 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3285 		    false, false, false),
3286 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3287 		    true, false, false),
3288 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3289 		    true, false, false),
3290 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3291 		    true, false, false),
3292 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3293 		    true, false, false),
3294 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3295 		    true, false, false),
3296 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3297 		    true, false, false),
3298 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3299 		    true, false, false),
3300 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3301 		    true, false, false),
3302 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3303 		    true, false, false),
3304 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3305 		    true, false, false),
3306 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3307 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3308 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3309 		    false, false, false),
3310 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3311 		    false, false, false),
3312 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3313 		    false, false, false),
3314 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3315 		    false, false, false),
3316 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3317 		    false, false, false),
3318 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3319 		    false, false, false),
3320 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3321 		    false, false, false),
3322 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3323 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3324 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3325 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3326 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3327 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3328 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3329 		    false, false, true),
3330 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3331 		    false, false, true),
3332 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3333 		    false, false, true),
3334 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3335 		    false, false, true),
3336 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3337 		    false, false, true),
3338 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3339 		    false, false, true),
3340 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3341 		    false, false, true),
3342 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3343 		    false, false, true),
3344 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3345 		    true, false, true),
3346 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3347 		    false, false, true),
3348 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3349 		    true, false, true),
3350 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3351 		    &vmw_cmd_update_gb_surface, true, false, true),
3352 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3353 		    &vmw_cmd_readback_gb_image, true, false, true),
3354 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3355 		    &vmw_cmd_readback_gb_surface, true, false, true),
3356 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3357 		    &vmw_cmd_invalidate_gb_image, true, false, true),
3358 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3359 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3360 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3361 		    false, false, true),
3362 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3363 		    false, false, true),
3364 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3365 		    false, false, true),
3366 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3367 		    false, false, true),
3368 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3369 		    false, false, true),
3370 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3371 		    false, false, true),
3372 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3373 		    true, false, true),
3374 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3375 		    false, false, true),
3376 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3377 		    false, false, false),
3378 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3379 		    true, false, true),
3380 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3381 		    true, false, true),
3382 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3383 		    true, false, true),
3384 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3385 		    true, false, true),
3386 	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3387 		    true, false, true),
3388 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3389 		    false, false, true),
3390 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3391 		    false, false, true),
3392 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3393 		    false, false, true),
3394 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3395 		    false, false, true),
3396 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3397 		    false, false, true),
3398 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3399 		    false, false, true),
3400 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3401 		    false, false, true),
3402 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3403 		    false, false, true),
3404 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3405 		    false, false, true),
3406 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3407 		    false, false, true),
3408 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3409 		    true, false, true),
3410 	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3411 		    false, false, true),
3412 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3413 		    false, false, true),
3414 	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3415 		    false, false, true),
3416 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3417 		    false, false, true),
3418 
3419 	/* SM commands */
3420 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3421 		    false, false, true),
3422 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3423 		    false, false, true),
3424 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3425 		    false, false, true),
3426 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3427 		    false, false, true),
3428 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3429 		    false, false, true),
3430 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3431 		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3432 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3433 		    &vmw_cmd_dx_set_shader_res, true, false, true),
3434 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3435 		    true, false, true),
3436 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3437 		    true, false, true),
3438 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3439 		    true, false, true),
3440 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3441 		    true, false, true),
3442 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3443 		    true, false, true),
3444 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3445 		    &vmw_cmd_dx_cid_check, true, false, true),
3446 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3447 		    true, false, true),
3448 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3449 		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3450 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3451 		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3452 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3453 		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3454 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3455 		    true, false, true),
3456 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3457 		    &vmw_cmd_dx_cid_check, true, false, true),
3458 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3459 		    &vmw_cmd_dx_cid_check, true, false, true),
3460 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3461 		    true, false, true),
3462 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3463 		    true, false, true),
3464 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3465 		    true, false, true),
3466 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3467 		    &vmw_cmd_dx_cid_check, true, false, true),
3468 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3469 		    true, false, true),
3470 	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3471 		    true, false, true),
3472 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3473 		    true, false, true),
3474 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3475 		    true, false, true),
3476 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3477 		    true, false, true),
3478 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3479 		    true, false, true),
3480 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3481 		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3482 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3483 		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3484 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3485 		    true, false, true),
3486 	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3487 		    true, false, true),
3488 	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3489 		    &vmw_cmd_dx_check_subresource, true, false, true),
3490 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3491 		    &vmw_cmd_dx_check_subresource, true, false, true),
3492 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3493 		    &vmw_cmd_dx_check_subresource, true, false, true),
3494 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3495 		    &vmw_cmd_dx_view_define, true, false, true),
3496 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3497 		    &vmw_cmd_dx_view_remove, true, false, true),
3498 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3499 		    &vmw_cmd_dx_view_define, true, false, true),
3500 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3501 		    &vmw_cmd_dx_view_remove, true, false, true),
3502 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3503 		    &vmw_cmd_dx_view_define, true, false, true),
3504 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3505 		    &vmw_cmd_dx_view_remove, true, false, true),
3506 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3507 		    &vmw_cmd_dx_so_define, true, false, true),
3508 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3509 		    &vmw_cmd_dx_cid_check, true, false, true),
3510 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3511 		    &vmw_cmd_dx_so_define, true, false, true),
3512 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3513 		    &vmw_cmd_dx_cid_check, true, false, true),
3514 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3515 		    &vmw_cmd_dx_so_define, true, false, true),
3516 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3517 		    &vmw_cmd_dx_cid_check, true, false, true),
3518 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3519 		    &vmw_cmd_dx_so_define, true, false, true),
3520 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3521 		    &vmw_cmd_dx_cid_check, true, false, true),
3522 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3523 		    &vmw_cmd_dx_so_define, true, false, true),
3524 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3525 		    &vmw_cmd_dx_cid_check, true, false, true),
3526 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3527 		    &vmw_cmd_dx_define_shader, true, false, true),
3528 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3529 		    &vmw_cmd_dx_destroy_shader, true, false, true),
3530 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3531 		    &vmw_cmd_dx_bind_shader, true, false, true),
3532 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3533 		    &vmw_cmd_dx_so_define, true, false, true),
3534 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3535 		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3536 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3537 		    &vmw_cmd_dx_set_streamoutput, true, false, true),
3538 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3539 		    &vmw_cmd_dx_set_so_targets, true, false, true),
3540 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3541 		    &vmw_cmd_dx_cid_check, true, false, true),
3542 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3543 		    &vmw_cmd_dx_cid_check, true, false, true),
3544 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3545 		    &vmw_cmd_buffer_copy_check, true, false, true),
3546 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3547 		    &vmw_cmd_pred_copy_check, true, false, true),
3548 	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3549 		    &vmw_cmd_dx_transfer_from_buffer,
3550 		    true, false, true),
3551 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
3552 		    &vmw_cmd_dx_set_constant_buffer_offset,
3553 		    true, false, true),
3554 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
3555 		    &vmw_cmd_dx_set_constant_buffer_offset,
3556 		    true, false, true),
3557 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
3558 		    &vmw_cmd_dx_set_constant_buffer_offset,
3559 		    true, false, true),
3560 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
3561 		    &vmw_cmd_dx_set_constant_buffer_offset,
3562 		    true, false, true),
3563 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
3564 		    &vmw_cmd_dx_set_constant_buffer_offset,
3565 		    true, false, true),
3566 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
3567 		    &vmw_cmd_dx_set_constant_buffer_offset,
3568 		    true, false, true),
3569 	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3570 		    true, false, true),
3571 
	/* SM5 commands */
3575 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3576 		    true, false, true),
3577 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3578 		    true, false, true),
3579 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3580 		    true, false, true),
3581 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3582 		    &vmw_cmd_clear_uav_float, true, false, true),
3583 	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3584 		    false, true),
3585 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3586 		    true),
3587 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3588 		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
3589 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3590 		    &vmw_cmd_instanced_indirect, true, false, true),
3591 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3592 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3593 		    &vmw_cmd_dispatch_indirect, true, false, true),
3594 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3595 		    false, true),
3596 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3597 		    &vmw_cmd_sm5_view_define, true, false, true),
3598 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3599 		    &vmw_cmd_dx_define_streamoutput, true, false, true),
3600 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3601 		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
3602 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
3603 		    &vmw_cmd_dx_so_define, true, false, true),
3604 };
3605 
3606 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3607 {
3608 	u32 cmd_id = ((u32 *) buf)[0];
3609 
3610 	if (cmd_id >= SVGA_CMD_MAX) {
3611 		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3612 		const struct vmw_cmd_entry *entry;
3613 
3614 		*size = header->size + sizeof(SVGA3dCmdHeader);
3615 		cmd_id = header->id;
3616 		if (cmd_id >= SVGA_3D_CMD_MAX)
3617 			return false;
3618 
3619 		cmd_id -= SVGA_3D_CMD_BASE;
3620 		entry = &vmw_cmd_entries[cmd_id];
3621 		*cmd = entry->cmd_name;
3622 		return true;
3623 	}
3624 
3625 	switch (cmd_id) {
3626 	case SVGA_CMD_UPDATE:
3627 		*cmd = "SVGA_CMD_UPDATE";
3628 		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3629 		break;
3630 	case SVGA_CMD_DEFINE_GMRFB:
3631 		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3632 		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3633 		break;
3634 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3635 		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3636 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3637 		break;
3638 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3639 		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3640 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3641 		break;
3642 	default:
3643 		*cmd = "UNKNOWN";
3644 		*size = 0;
3645 		return false;
3646 	}
3647 
3648 	return true;
3649 }
3650 
3651 static int vmw_cmd_check(struct vmw_private *dev_priv,
3652 			 struct vmw_sw_context *sw_context, void *buf,
3653 			 uint32_t *size)
3654 {
3655 	uint32_t cmd_id;
3656 	uint32_t size_remaining = *size;
3657 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3658 	int ret;
3659 	const struct vmw_cmd_entry *entry;
3660 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3661 
3662 	cmd_id = ((uint32_t *)buf)[0];
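	/*
	 * The first 32-bit word of the batch is either a non-3D command id
	 * below SVGA_CMD_MAX or the id field of an SVGA3dCmdHeader.
	 */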
	/* Handle any non-3D commands */
3664 	if (unlikely(cmd_id < SVGA_CMD_MAX))
3665 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3666 
3668 	cmd_id = header->id;
3669 	*size = header->size + sizeof(SVGA3dCmdHeader);
3670 
3671 	cmd_id -= SVGA_3D_CMD_BASE;
3672 	if (unlikely(*size > size_remaining))
3673 		goto out_invalid;
3674 
3675 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3676 		goto out_invalid;
3677 
3678 	entry = &vmw_cmd_entries[cmd_id];
3679 	if (unlikely(!entry->func))
3680 		goto out_invalid;
3681 
3682 	if (unlikely(!entry->user_allow && !sw_context->kernel))
3683 		goto out_privileged;
3684 
3685 	if (unlikely(entry->gb_disable && gb))
3686 		goto out_old;
3687 
3688 	if (unlikely(entry->gb_enable && !gb))
3689 		goto out_new;
3690 
3691 	ret = entry->func(dev_priv, sw_context, header);
3692 	if (unlikely(ret != 0)) {
3693 		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3694 			       cmd_id + SVGA_3D_CMD_BASE, ret);
3695 		return ret;
3696 	}
3697 
3698 	return 0;
3699 out_invalid:
3700 	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3701 		       cmd_id + SVGA_3D_CMD_BASE);
3702 	return -EINVAL;
3703 out_privileged:
3704 	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3705 		       cmd_id + SVGA_3D_CMD_BASE);
3706 	return -EPERM;
3707 out_old:
3708 	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3709 		       cmd_id + SVGA_3D_CMD_BASE);
3710 	return -EINVAL;
3711 out_new:
3712 	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3713 		       cmd_id + SVGA_3D_CMD_BASE);
3714 	return -EINVAL;
3715 }
3716 
3717 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3718 			     struct vmw_sw_context *sw_context, void *buf,
3719 			     uint32_t size)
3720 {
3721 	int32_t cur_size = size;
3722 	int ret;
3723 
3724 	sw_context->buf_start = buf;
3725 
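	/*
	 * Walk the batch one command at a time. vmw_cmd_check() validates
	 * each command and updates @size to the command's actual length so
	 * that we can advance to the next one.
	 */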
3726 	while (cur_size > 0) {
3727 		size = cur_size;
3728 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3729 		if (unlikely(ret != 0))
3730 			return ret;
3731 		buf = (void *)((unsigned long) buf + size);
3732 		cur_size -= size;
3733 	}
3734 
3735 	if (unlikely(cur_size != 0)) {
3736 		VMW_DEBUG_USER("Command verifier out of sync.\n");
3737 		return -EINVAL;
3738 	}
3739 
3740 	return 0;
3741 }
3742 
3743 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3744 {
3745 	/* Memory is validation context memory, so no need to free it */
3746 	INIT_LIST_HEAD(&sw_context->bo_relocations);
3747 }
3748 
3749 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3750 {
3751 	struct vmw_relocation *reloc;
3752 	struct ttm_buffer_object *bo;
3753 
3754 	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3755 		bo = &reloc->vbo->tbo;
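		/*
		 * Patch the command stream according to where the buffer
		 * ended up after validation: VRAM placements become an
		 * offset into the framebuffer GMR, while GMR and MOB
		 * placements are referenced by their resource start, which
		 * is the id.
		 */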
3756 		switch (bo->resource->mem_type) {
3757 		case TTM_PL_VRAM:
3758 			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3759 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3760 			break;
3761 		case VMW_PL_GMR:
3762 			reloc->location->gmrId = bo->resource->start;
3763 			break;
3764 		case VMW_PL_MOB:
3765 			*reloc->mob_loc = bo->resource->start;
3766 			break;
3767 		default:
3768 			BUG();
3769 		}
3770 	}
3771 	vmw_free_relocations(sw_context);
3772 }
3773 
3774 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3775 				 uint32_t size)
3776 {
3777 	if (likely(sw_context->cmd_bounce_size >= size))
3778 		return 0;
3779 
3780 	if (sw_context->cmd_bounce_size == 0)
3781 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3782 
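	/* Grow by roughly 50% at a time, page-aligned, until large enough. */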
3783 	while (sw_context->cmd_bounce_size < size) {
3784 		sw_context->cmd_bounce_size =
3785 			PAGE_ALIGN(sw_context->cmd_bounce_size +
3786 				   (sw_context->cmd_bounce_size >> 1));
3787 	}
3788 
3789 	vfree(sw_context->cmd_bounce);
3790 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3791 
3792 	if (sw_context->cmd_bounce == NULL) {
3793 		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3794 		sw_context->cmd_bounce_size = 0;
3795 		return -ENOMEM;
3796 	}
3797 
3798 	return 0;
3799 }
3800 
/*
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and set *@p_fence to
 * NULL. It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is non-NULL, a user-space handle is created for the fence,
 * and in that case @file_priv must also be non-NULL.
 */
3812 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3813 			       struct vmw_private *dev_priv,
3814 			       struct vmw_fence_obj **p_fence,
3815 			       uint32_t *p_handle)
3816 {
3817 	uint32_t sequence;
3818 	int ret;
3819 	bool synced = false;
3820 
3821 	/* p_handle implies file_priv. */
3822 	BUG_ON(p_handle != NULL && file_priv == NULL);
3823 
3824 	ret = vmw_cmd_send_fence(dev_priv, &sequence);
3825 	if (unlikely(ret != 0)) {
3826 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3827 		synced = true;
3828 	}
3829 
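	/* Create a user-visible fence object only if a handle was requested. */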
3830 	if (p_handle != NULL)
3831 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3832 					    sequence, p_fence, p_handle);
3833 	else
3834 		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3835 
3836 	if (unlikely(ret != 0 && !synced)) {
3837 		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3838 					 false, VMW_FENCE_WAIT_TIMEOUT);
3839 		*p_fence = NULL;
3840 	}
3841 
3842 	return ret;
3843 }
3844 
3845 /**
3846  * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3847  *
3848  * @dev_priv: Pointer to a vmw_private struct.
3849  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3850  * @ret: Return value from fence object creation.
3851  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3852  * the information should be copied.
 * @fence: Pointer to the fence object.
3854  * @fence_handle: User-space fence handle.
3855  * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3856  *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member should be left
 * untouched, and if user-space has preloaded it with -EFAULT, the failure
 * can be detected there.
 *
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space handle.
3864  */
3865 int
3866 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3867 			    struct vmw_fpriv *vmw_fp, int ret,
3868 			    struct drm_vmw_fence_rep __user *user_fence_rep,
3869 			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3870 			    int32_t out_fence_fd)
3871 {
3872 	struct drm_vmw_fence_rep fence_rep;
3873 
3874 	if (user_fence_rep == NULL)
3875 		return 0;
3876 
3877 	memset(&fence_rep, 0, sizeof(fence_rep));
3878 
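	/* Report the creation status, and on success the handle and seqno. */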
3879 	fence_rep.error = ret;
3880 	fence_rep.fd = out_fence_fd;
3881 	if (ret == 0) {
3882 		BUG_ON(fence == NULL);
3883 
3884 		fence_rep.handle = fence_handle;
3885 		fence_rep.seqno = fence->base.seqno;
3886 		vmw_update_seqno(dev_priv);
3887 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3888 	}
3889 
3890 	/*
3891 	 * copy_to_user errors will be detected by user space not seeing
3892 	 * fence_rep::error filled in. Typically user-space would have pre-set
3893 	 * that member to -EFAULT.
3894 	 */
3895 	ret = copy_to_user(user_fence_rep, &fence_rep,
3896 			   sizeof(fence_rep));
3897 
3898 	/*
3899 	 * User-space lost the fence object. We need to sync and unreference the
3900 	 * handle.
3901 	 */
3902 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3903 		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
3904 		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3905 		(void) vmw_fence_obj_wait(fence, false, false,
3906 					  VMW_FENCE_WAIT_TIMEOUT);
3907 	}
3908 
3909 	return ret ? -EFAULT : 0;
3910 }
3911 
3912 /**
3913  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3914  *
3915  * @dev_priv: Pointer to a device private structure.
3916  * @kernel_commands: Pointer to the unpatched command batch.
3917  * @command_size: Size of the unpatched command batch.
3918  * @sw_context: Structure holding the relocation lists.
3919  *
3920  * Side effects: If this function returns 0, then the command batch pointed to
3921  * by @kernel_commands will have been modified.
3922  */
3923 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3924 				   void *kernel_commands, u32 command_size,
3925 				   struct vmw_sw_context *sw_context)
3926 {
3927 	void *cmd;
3928 
3929 	if (sw_context->dx_ctx_node)
3930 		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3931 					  sw_context->dx_ctx_node->ctx->id);
3932 	else
3933 		cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3934 
3935 	if (!cmd)
3936 		return -ENOMEM;
3937 
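	/*
	 * Patch buffer-object locations in the staged batch, copy it into
	 * the reserved fifo space and then patch resource ids in place
	 * before committing.
	 */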
3938 	vmw_apply_relocations(sw_context);
3939 	memcpy(cmd, kernel_commands, command_size);
3940 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3941 	vmw_resource_relocations_free(&sw_context->res_relocations);
3942 	vmw_cmd_commit(dev_priv, command_size);
3943 
3944 	return 0;
3945 }
3946 
3947 /**
3948  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3949  * command buffer manager.
3950  *
3951  * @dev_priv: Pointer to a device private structure.
3952  * @header: Opaque handle to the command buffer allocation.
3953  * @command_size: Size of the unpatched command batch.
3954  * @sw_context: Structure holding the relocation lists.
3955  *
3956  * Side effects: If this function returns 0, then the command buffer represented
3957  * by @header will have been modified.
3958  */
3959 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3960 				     struct vmw_cmdbuf_header *header,
3961 				     u32 command_size,
3962 				     struct vmw_sw_context *sw_context)
3963 {
3964 	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3965 		  SVGA3D_INVALID_ID);
3966 	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3967 				       header);
3968 
3969 	vmw_apply_relocations(sw_context);
3970 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3971 	vmw_resource_relocations_free(&sw_context->res_relocations);
3972 	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3973 
3974 	return 0;
3975 }
3976 
/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-space pointer to a command batch already in
 * kernel memory, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If command buffers could not be used, the function returns the value of
 * @kernel_commands as passed in. That value may be NULL, in which case the
 * value of *@header will also be set to NULL.
 *
 * If an error is encountered, the function returns an error pointer. If it
 * is interrupted by a signal while sleeping, it returns -ERESTARTSYS cast to
 * an error pointer.
 */
4001 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
4002 				void __user *user_commands,
4003 				void *kernel_commands, u32 command_size,
4004 				struct vmw_cmdbuf_header **header)
4005 {
4006 	size_t cmdbuf_size;
4007 	int ret;
4008 
4009 	*header = NULL;
4010 	if (command_size > SVGA_CB_MAX_SIZE) {
4011 		VMW_DEBUG_USER("Command buffer is too large.\n");
4012 		return ERR_PTR(-EINVAL);
4013 	}
4014 
4015 	if (!dev_priv->cman || kernel_commands)
4016 		return kernel_commands;
4017 
4018 	/* If possible, add a little space for fencing. */
4019 	cmdbuf_size = command_size + 512;
4020 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4021 	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
4022 					   header);
4023 	if (IS_ERR(kernel_commands))
4024 		return kernel_commands;
4025 
4026 	ret = copy_from_user(kernel_commands, user_commands, command_size);
4027 	if (ret) {
4028 		VMW_DEBUG_USER("Failed copying commands.\n");
4029 		vmw_cmdbuf_header_free(*header);
4030 		*header = NULL;
4031 		return ERR_PTR(-EFAULT);
4032 	}
4033 
4034 	return kernel_commands;
4035 }
4036 
4037 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4038 				   struct vmw_sw_context *sw_context,
4039 				   uint32_t handle)
4040 {
4041 	struct vmw_resource *res;
4042 	int ret;
4043 	unsigned int size;
4044 
4045 	if (handle == SVGA3D_INVALID_ID)
4046 		return 0;
4047 
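	/*
	 * Preload validation-context memory for the context resource so the
	 * validation node can be set up without further allocations.
	 */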
4048 	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4049 	ret = vmw_validation_preload_res(sw_context->ctx, size);
4050 	if (ret)
4051 		return ret;
4052 
4053 	ret = vmw_user_resource_lookup_handle
4054 		(dev_priv, sw_context->fp->tfile, handle,
4055 		 user_context_converter, &res);
4056 	if (ret != 0) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
4058 			       (unsigned int) handle);
4059 		return ret;
4060 	}
4061 
4062 	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
4063 				      vmw_val_add_flag_none);
4064 	if (unlikely(ret != 0)) {
4065 		vmw_resource_unreference(&res);
4066 		return ret;
4067 	}
4068 
4069 	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4070 	sw_context->man = vmw_context_res_man(res);
4071 
4072 	vmw_resource_unreference(&res);
4073 	return 0;
4074 }
4075 
4076 int vmw_execbuf_process(struct drm_file *file_priv,
4077 			struct vmw_private *dev_priv,
4078 			void __user *user_commands, void *kernel_commands,
4079 			uint32_t command_size, uint64_t throttle_us,
4080 			uint32_t dx_context_handle,
4081 			struct drm_vmw_fence_rep __user *user_fence_rep,
4082 			struct vmw_fence_obj **out_fence, uint32_t flags)
4083 {
4084 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
4085 	struct vmw_fence_obj *fence = NULL;
4086 	struct vmw_cmdbuf_header *header;
4087 	uint32_t handle = 0;
4088 	int ret;
4089 	int32_t out_fence_fd = -1;
4090 	struct sync_file *sync_file = NULL;
4091 	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
4092 
4093 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4094 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4095 		if (out_fence_fd < 0) {
4096 			VMW_DEBUG_USER("Failed to get a fence fd.\n");
4097 			return out_fence_fd;
4098 		}
4099 	}
4100 
	if (throttle_us)
		VMW_DEBUG_USER("Throttling is no longer supported.\n");
4104 
4105 	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4106 					     kernel_commands, command_size,
4107 					     &header);
4108 	if (IS_ERR(kernel_commands)) {
4109 		ret = PTR_ERR(kernel_commands);
4110 		goto out_free_fence_fd;
4111 	}
4112 
4113 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4114 	if (ret) {
4115 		ret = -ERESTARTSYS;
4116 		goto out_free_header;
4117 	}
4118 
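	/*
	 * Three submission cases: user-space commands bounced into the
	 * bounce buffer (kernel = false), commands already staged in a
	 * command buffer (header != NULL), and trusted in-kernel commands
	 * (kernel = true).
	 */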
4119 	sw_context->kernel = false;
4120 	if (kernel_commands == NULL) {
4121 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
4122 		if (unlikely(ret != 0))
4123 			goto out_unlock;
4124 
4125 		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4126 				     command_size);
4127 		if (unlikely(ret != 0)) {
4128 			ret = -EFAULT;
4129 			VMW_DEBUG_USER("Failed copying commands.\n");
4130 			goto out_unlock;
4131 		}
4132 
4133 		kernel_commands = sw_context->cmd_bounce;
4134 	} else if (!header) {
4135 		sw_context->kernel = true;
4136 	}
4137 
4138 	sw_context->filp = file_priv;
4139 	sw_context->fp = vmw_fpriv(file_priv);
4140 	INIT_LIST_HEAD(&sw_context->ctx_list);
4141 	sw_context->cur_query_bo = dev_priv->pinned_bo;
4142 	sw_context->last_query_ctx = NULL;
4143 	sw_context->needs_post_query_barrier = false;
4144 	sw_context->dx_ctx_node = NULL;
4145 	sw_context->dx_query_mob = NULL;
4146 	sw_context->dx_query_ctx = NULL;
4147 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4148 	INIT_LIST_HEAD(&sw_context->res_relocations);
4149 	INIT_LIST_HEAD(&sw_context->bo_relocations);
4150 
4151 	if (sw_context->staged_bindings)
4152 		vmw_binding_state_reset(sw_context->staged_bindings);
4153 
4154 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4155 	sw_context->ctx = &val_ctx;
4156 	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4157 	if (unlikely(ret != 0))
4158 		goto out_err_nores;
4159 
4160 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4161 				command_size);
4162 	if (unlikely(ret != 0))
4163 		goto out_err_nores;
4164 
4165 	ret = vmw_resources_reserve(sw_context);
4166 	if (unlikely(ret != 0))
4167 		goto out_err_nores;
4168 
4169 	ret = vmw_validation_bo_reserve(&val_ctx, true);
4170 	if (unlikely(ret != 0))
4171 		goto out_err_nores;
4172 
4173 	ret = vmw_validation_bo_validate(&val_ctx, true);
4174 	if (unlikely(ret != 0))
4175 		goto out_err;
4176 
4177 	ret = vmw_validation_res_validate(&val_ctx, true);
4178 	if (unlikely(ret != 0))
4179 		goto out_err;
4180 
4181 	vmw_validation_drop_ht(&val_ctx);
4182 
4183 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4184 	if (unlikely(ret != 0)) {
4185 		ret = -ERESTARTSYS;
4186 		goto out_err;
4187 	}
4188 
4189 	if (dev_priv->has_mob) {
4190 		ret = vmw_rebind_contexts(sw_context);
4191 		if (unlikely(ret != 0))
4192 			goto out_unlock_binding;
4193 	}
4194 
4195 	if (!header) {
4196 		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4197 					      command_size, sw_context);
4198 	} else {
4199 		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4200 						sw_context);
4201 		header = NULL;
4202 	}
4203 	mutex_unlock(&dev_priv->binding_mutex);
4204 	if (ret)
4205 		goto out_err;
4206 
4207 	vmw_query_bo_switch_commit(dev_priv, sw_context);
4208 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4209 					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_execbuf_fence_commands() will sync the fifo. The error is
	 * propagated to user-space in @user_fence_rep.
	 */
4215 	if (ret != 0)
4216 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4217 
4218 	vmw_execbuf_bindings_commit(sw_context, false);
4219 	vmw_bind_dx_query_mob(sw_context);
4220 	vmw_validation_res_unreserve(&val_ctx, false);
4221 
4222 	vmw_validation_bo_fence(sw_context->ctx, fence);
4223 
4224 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4225 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4226 
4227 	/*
4228 	 * If anything fails here, give up trying to export the fence and do a
4229 	 * sync since the user mode will not be able to sync the fence itself.
4230 	 * This ensures we are still functionally correct.
4231 	 */
4232 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4233 
4234 		sync_file = sync_file_create(&fence->base);
4235 		if (!sync_file) {
4236 			VMW_DEBUG_USER("Sync file create failed for fence\n");
4237 			put_unused_fd(out_fence_fd);
4238 			out_fence_fd = -1;
4239 
4240 			(void) vmw_fence_obj_wait(fence, false, false,
4241 						  VMW_FENCE_WAIT_TIMEOUT);
4242 		}
4243 	}
4244 
4245 	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4246 				    user_fence_rep, fence, handle, out_fence_fd);
4247 
4248 	if (sync_file) {
4249 		if (ret) {
4250 			/* usercopy of fence failed, put the file object */
4251 			fput(sync_file->file);
4252 			put_unused_fd(out_fence_fd);
4253 		} else {
4254 			/* Link the fence with the FD created earlier */
4255 			fd_install(out_fence_fd, sync_file->file);
4256 		}
4257 	}
4258 
4259 	/* Don't unreference when handing fence out */
4260 	if (unlikely(out_fence != NULL)) {
4261 		*out_fence = fence;
4262 		fence = NULL;
4263 	} else if (likely(fence != NULL)) {
4264 		vmw_fence_obj_unreference(&fence);
4265 	}
4266 
4267 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4268 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4269 
4270 	/*
4271 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4272 	 * in resource destruction paths.
4273 	 */
4274 	vmw_validation_unref_lists(&val_ctx);
4275 
4276 	return ret;
4277 
4278 out_unlock_binding:
4279 	mutex_unlock(&dev_priv->binding_mutex);
4280 out_err:
4281 	vmw_validation_bo_backoff(&val_ctx);
4282 out_err_nores:
4283 	vmw_execbuf_bindings_commit(sw_context, true);
4284 	vmw_validation_res_unreserve(&val_ctx, true);
4285 	vmw_resource_relocations_free(&sw_context->res_relocations);
4286 	vmw_free_relocations(sw_context);
4287 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4288 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4289 out_unlock:
4290 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4291 	vmw_validation_drop_ht(&val_ctx);
4292 	WARN_ON(!list_empty(&sw_context->ctx_list));
4293 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4294 
4295 	/*
4296 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4297 	 * in resource destruction paths.
4298 	 */
4299 	vmw_validation_unref_lists(&val_ctx);
4300 out_free_header:
4301 	if (header)
4302 		vmw_cmdbuf_header_free(header);
4303 out_free_fence_fd:
4304 	if (out_fence_fd >= 0)
4305 		put_unused_fd(out_fence_fd);
4306 
4307 	return ret;
4308 }
4309 
4310 /**
4311  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4312  *
4313  * @dev_priv: The device private structure.
4314  *
4315  * This function is called to idle the fifo and unpin the query buffer if the
4316  * normal way to do this hits an error, which should typically be extremely
4317  * rare.
4318  */
4319 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4320 {
4321 	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4322 
4323 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4324 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4325 	if (dev_priv->dummy_query_bo_pinned) {
4326 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4327 		dev_priv->dummy_query_bo_pinned = false;
4328 	}
4329 }
4330 
4331 
4332 /**
4333  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4334  * bo.
4335  *
4336  * @dev_priv: The device private structure.
4337  * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4338  * query barrier that flushes all queries touching the current buffer pointed to
4339  * by @dev_priv->pinned_bo
4340  *
4341  * This function should be used to unpin the pinned query bo, or as a query
4342  * barrier when we need to make sure that all queries have finished before the
4343  * next fifo command. (For example on hardware context destructions where the
4344  * hardware may otherwise leak unfinished queries).
4345  *
 * This function does not return any failure codes, but makes an attempt to
 * unpin safely in case of errors.
4348  *
4349  * The function will synchronize on the previous query barrier, and will thus
4350  * not finish until that barrier has executed.
4351  *
 * The @dev_priv->cmdbuf_mutex must be held by the current thread when
 * calling this function.
4354  */
4355 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4356 				     struct vmw_fence_obj *fence)
4357 {
4358 	int ret = 0;
4359 	struct vmw_fence_obj *lfence = NULL;
4360 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4361 
4362 	if (dev_priv->pinned_bo == NULL)
4363 		goto out_unlock;
4364 
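	/*
	 * Reserve and validate both the pinned query bo and the dummy query
	 * bo so that a dummy query can be emitted and both buffers unpinned.
	 */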
4365 	vmw_bo_placement_set(dev_priv->pinned_bo,
4366 			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4367 			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4368 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
4369 	if (ret)
4370 		goto out_no_reserve;
4371 
4372 	vmw_bo_placement_set(dev_priv->dummy_query_bo,
4373 			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4374 			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4375 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
4376 	if (ret)
4377 		goto out_no_reserve;
4378 
4379 	ret = vmw_validation_bo_reserve(&val_ctx, false);
4380 	if (ret)
4381 		goto out_no_reserve;
4382 
4383 	if (dev_priv->query_cid_valid) {
4384 		BUG_ON(fence != NULL);
4385 		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4386 		if (ret)
4387 			goto out_no_emit;
4388 		dev_priv->query_cid_valid = false;
4389 	}
4390 
4391 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4392 	if (dev_priv->dummy_query_bo_pinned) {
4393 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4394 		dev_priv->dummy_query_bo_pinned = false;
4395 	}
4396 	if (fence == NULL) {
4397 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4398 						  NULL);
4399 		fence = lfence;
4400 	}
4401 	vmw_validation_bo_fence(&val_ctx, fence);
4402 	if (lfence != NULL)
4403 		vmw_fence_obj_unreference(&lfence);
4404 
4405 	vmw_validation_unref_lists(&val_ctx);
4406 	vmw_bo_unreference(&dev_priv->pinned_bo);
4407 
4408 out_unlock:
4409 	return;
4410 out_no_emit:
4411 	vmw_validation_bo_backoff(&val_ctx);
4412 out_no_reserve:
4413 	vmw_validation_unref_lists(&val_ctx);
4414 	vmw_execbuf_unpin_panic(dev_priv);
4415 	vmw_bo_unreference(&dev_priv->pinned_bo);
4416 }
4417 
4418 /**
4419  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4420  *
4421  * @dev_priv: The device private structure.
4422  *
4423  * This function should be used to unpin the pinned query bo, or as a query
4424  * barrier when we need to make sure that all queries have finished before the
4425  * next fifo command. (For example on hardware context destructions where the
4426  * hardware may otherwise leak unfinished queries).
4427  *
 * This function does not return any failure codes, but makes an attempt to
 * unpin safely in case of errors.
4430  *
4431  * The function will synchronize on the previous query barrier, and will thus
4432  * not finish until that barrier has executed.
4433  */
4434 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4435 {
4436 	mutex_lock(&dev_priv->cmdbuf_mutex);
4437 	if (dev_priv->query_cid_valid)
4438 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4439 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4440 }
4441 
4442 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4443 		      struct drm_file *file_priv)
4444 {
4445 	struct vmw_private *dev_priv = vmw_priv(dev);
4446 	struct drm_vmw_execbuf_arg *arg = data;
4447 	int ret;
4448 	struct dma_fence *in_fence = NULL;
4449 
4450 	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4451 	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4452 
4453 	/*
4454 	 * Extend the ioctl argument while maintaining backwards compatibility:
4455 	 * We take different code paths depending on the value of arg->version.
4456 	 *
	 * Note: The ioctl argument is extended and zero-padded by core DRM.
4458 	 */
4459 	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4460 		     arg->version == 0)) {
4461 		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4462 		ret = -EINVAL;
4463 		goto mksstats_out;
4464 	}
4465 
4466 	switch (arg->version) {
4467 	case 1:
		/* For v1, core DRM has extended and zero-padded the data */
4469 		arg->context_handle = (uint32_t) -1;
4470 		break;
4471 	case 2:
4472 	default:
		/* For v2 and later, core DRM has copied it correctly */
4474 		break;
4475 	}
4476 
4477 	/* If imported a fence FD from elsewhere, then wait on it */
4478 	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4479 		in_fence = sync_file_get_fence(arg->imported_fence_fd);
4480 
4481 		if (!in_fence) {
4482 			VMW_DEBUG_USER("Cannot get imported fence\n");
4483 			ret = -EINVAL;
4484 			goto mksstats_out;
4485 		}
4486 
4487 		ret = dma_fence_wait(in_fence, true);
4488 		if (ret)
4489 			goto out;
4490 	}
4491 
4492 	ret = vmw_execbuf_process(file_priv, dev_priv,
4493 				  (void __user *)(unsigned long)arg->commands,
4494 				  NULL, arg->command_size, arg->throttle_us,
4495 				  arg->context_handle,
4496 				  (void __user *)(unsigned long)arg->fence_rep,
4497 				  NULL, arg->flags);
4498 
4499 	if (unlikely(ret != 0))
4500 		goto out;
4501 
4502 	vmw_kms_cursor_post_execbuf(dev_priv);
4503 
4504 out:
4505 	if (in_fence)
4506 		dma_fence_put(in_fence);
4507 
4508 mksstats_out:
4509 	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
4510 	return ret;
4511 }
4512