// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available; otherwise print an error
 * message. This is for use in command verifier functions where, if dx_ctx_node
 * is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)                                        \
({                                                                            \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
		__sw_context->dx_ctx_node;                                    \
	});                                                                   \
})
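
/*
 * Illustrative sketch (not part of the driver): VMW_GET_CTX_NODE is intended
 * for the top of a DX command verifier, where a missing context node makes
 * the command invalid:
 *
 *	struct vmw_ctx_validation_info *ctx_node =
 *		VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */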

#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
	struct {                                                              \
		SVGA3dCmdHeader header;                                       \
		__type body;                                                  \
	} __var
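
/*
 * For example (a sketch of the expansion), VMW_DECLARE_CMD_VAR(*cmd,
 * SVGA3dCmdSurfaceCopy) declares roughly:
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdSurfaceCopy body;
 *	} *cmd;
 *
 * so a verifier can recover the full command from its header with
 * container_of(header, typeof(*cmd), header).
 */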

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Byte offset into the command buffer where the id that needs fixup
 * is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}
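
/*
 * Example (sketch) of a command table entry built with VMW_CMD_DEF; entries
 * are indexed by the command id relative to SVGA_3D_CMD_BASE:
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 */
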
144 
145 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
146 					struct vmw_sw_context *sw_context,
147 					struct vmw_resource *ctx);
148 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
149 				 struct vmw_sw_context *sw_context,
150 				 SVGAMobId *id,
151 				 struct vmw_buffer_object **vmw_bo_p);
152 /**
153  * vmw_ptr_diff - Compute the offset from a to b in bytes
154  *
155  * @a: A starting pointer.
156  * @b: A pointer offset in the same address space.
157  *
158  * Returns: The offset in bytes between the two pointers.
159  */
160 static size_t vmw_ptr_diff(void *a, void *b)
161 {
162 	return (unsigned long) b - (unsigned long) a;
163 }
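
/*
 * Sketch of typical use: vmw_ptr_diff() records how far into the submitted
 * command stream a fixup location sits, as done in vmw_cmd_res_check() below:
 *
 *	vmw_resource_relocation_add(sw_context, res,
 *				    vmw_ptr_diff(sw_context->buf_start, id_loc),
 *				    vmw_res_rel_normal);
 */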

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validation list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - Calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node, typically the data structures
 * associated with the binding manager.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view, and the surface it points to, to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it points
 * to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: View id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state re-emission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
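
/*
 * Sketch: for the NOP-style relocations the recorded offset points at the
 * command *header* id rather than at a resource id inside the body, so that
 * applying the relocation turns the whole command into a NOP. A verifier
 * would record such a relocation roughly like this (pattern used by DX
 * verifiers in this file):
 *
 *	ret = vmw_resource_relocation_add(sw_context, res,
 *					  vmw_ptr_diff(sw_context->buf_start,
 *						       &header->id),
 *					  vmw_res_rel_cond_nop);
 */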

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validation list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

	return 0;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}
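
/*
 * The commands routed through vmw_cmd_cid_check() all start their body with
 * a 32-bit context id, which is why the body can be declared as a bare
 * uint32_t above. Sketch of the assumed wire layout:
 *
 *	SVGA3dCmdHeader header;
 *	uint32_t cid;		// first body dword, validated above
 *	...			// remainder of the command body, if any
 */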

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query to the query cotable.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB.  In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}
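
/*
 * Sketch of the SVGA_3D_CMD_SURFACE_DMA wire layout assumed by the parsing
 * above (the copy-box array length is implied by header->size):
 *
 *	SVGA3dCmdHeader header;
 *	SVGA3dCmdSurfaceDMA body;
 *	SVGA3dCopyBox boxes[];			// variable-length
 *	SVGA3dCmdSurfaceDMASuffix suffix;	// last sizeof(suffix) bytes
 */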
1575 
1576 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1577 			struct vmw_sw_context *sw_context,
1578 			SVGA3dCmdHeader *header)
1579 {
1580 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1581 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1582 		(unsigned long)header + sizeof(*cmd));
1583 	SVGA3dPrimitiveRange *range;
1584 	uint32_t i;
1585 	uint32_t maxnum;
1586 	int ret;
1587 
1588 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1589 	if (unlikely(ret != 0))
1590 		return ret;
1591 
1592 	cmd = container_of(header, typeof(*cmd), header);
1593 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1594 
1595 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1596 		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1597 		return -EINVAL;
1598 	}
1599 
1600 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1601 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1602 					VMW_RES_DIRTY_NONE,
1603 					user_surface_converter,
1604 					&decl->array.surfaceId, NULL);
1605 		if (unlikely(ret != 0))
1606 			return ret;
1607 	}
1608 
1609 	maxnum = (header->size - sizeof(cmd->body) -
1610 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1611 	if (unlikely(cmd->body.numRanges > maxnum)) {
1612 		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1613 		return -EINVAL;
1614 	}
1615 
1616 	range = (SVGA3dPrimitiveRange *) decl;
1617 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1618 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1619 					VMW_RES_DIRTY_NONE,
1620 					user_surface_converter,
1621 					&range->indexArray.surfaceId, NULL);
1622 		if (unlikely(ret != 0))
1623 			return ret;
1624 	}
1625 	return 0;
1626 }
1627 
1628 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1629 			     struct vmw_sw_context *sw_context,
1630 			     SVGA3dCmdHeader *header)
1631 {
1632 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1633 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1634 	  ((unsigned long) header + header->size + sizeof(header));
1635 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1636 		((unsigned long) header + sizeof(*cmd));
1637 	struct vmw_resource *ctx;
1638 	struct vmw_resource *res;
1639 	int ret;
1640 
1641 	cmd = container_of(header, typeof(*cmd), header);
1642 
1643 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1644 				VMW_RES_DIRTY_SET, user_context_converter,
1645 				&cmd->body.cid, &ctx);
1646 	if (unlikely(ret != 0))
1647 		return ret;
1648 
1649 	for (; cur_state < last_state; ++cur_state) {
1650 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1651 			continue;
1652 
1653 		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1654 			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1655 				       (unsigned int) cur_state->stage);
1656 			return -EINVAL;
1657 		}
1658 
1659 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1660 					VMW_RES_DIRTY_NONE,
1661 					user_surface_converter,
1662 					&cur_state->value, &res);
1663 		if (unlikely(ret != 0))
1664 			return ret;
1665 
1666 		if (dev_priv->has_mob) {
1667 			struct vmw_ctx_bindinfo_tex binding;
1668 			struct vmw_ctx_validation_info *node;
1669 
1670 			node = vmw_execbuf_info_from_res(sw_context, ctx);
1671 			if (!node)
1672 				return -EINVAL;
1673 
1674 			binding.bi.ctx = ctx;
1675 			binding.bi.res = res;
1676 			binding.bi.bt = vmw_ctx_binding_tex;
1677 			binding.texture_stage = cur_state->stage;
1678 			vmw_binding_add(node->staged, &binding.bi, 0,
1679 					binding.texture_stage);
1680 		}
1681 	}
1682 
1683 	return 0;
1684 }
1685 
1686 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1687 				      struct vmw_sw_context *sw_context,
1688 				      void *buf)
1689 {
1690 	struct vmw_buffer_object *vmw_bo;
1691 
1692 	struct {
1693 		uint32_t header;
1694 		SVGAFifoCmdDefineGMRFB body;
1695 	} *cmd = buf;
1696 
1697 	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
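	/*
	 * vmw_translate_guest_ptr() both looks up the GMRFB buffer and queues
	 * a relocation so the guest pointer is patched at submission time;
	 * the returned buffer object pointer itself is not needed here.
	 */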
1698 				       &vmw_bo);
1699 }
1700 
1701 /**
1702  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1703  * switching
1704  *
1705  * @dev_priv: Pointer to a device private struct.
1706  * @sw_context: The software context being used for this batch.
1707  * @res: Pointer to the resource.
1708  * @buf_id: Pointer to the user-space backup buffer handle in the command
1709  * stream.
1710  * @backup_offset: Offset of backup into MOB.
1711  *
1712  * This function prepares for registering a switch of backup buffers in the
1713  * resource metadata just prior to unreserving. Unlike vmw_cmd_switch_backup(),
1714  * it takes the resource itself rather than a user-space resource handle.
1715  */
1716 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1717 				     struct vmw_sw_context *sw_context,
1718 				     struct vmw_resource *res, uint32_t *buf_id,
1719 				     unsigned long backup_offset)
1720 {
1721 	struct vmw_buffer_object *vbo;
1722 	void *info;
1723 	int ret;
1724 
1725 	info = vmw_execbuf_info_from_res(sw_context, res);
1726 	if (!info)
1727 		return -EINVAL;
1728 
1729 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1730 	if (ret)
1731 		return ret;
1732 
1733 	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1734 					 backup_offset);
1735 	return 0;
1736 }
1737 
1738 /**
1739  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1740  *
1741  * @dev_priv: Pointer to a device private struct.
1742  * @sw_context: The software context being used for this batch.
1743  * @res_type: The resource type.
1744  * @converter: Information about user-space binding for this resource type.
1745  * @res_id: Pointer to the user-space resource handle in the command stream.
1746  * @buf_id: Pointer to the user-space backup buffer handle in the command
1747  * stream.
1748  * @backup_offset: Offset of backup into MOB.
1749  *
1750  * This function prepares for registering a switch of backup buffers in the
1751  * resource metadata just prior to unreserving. It's basically a wrapper around
1752  * vmw_cmd_res_switch_backup with a different interface.
1753  */
1754 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1755 				 struct vmw_sw_context *sw_context,
1756 				 enum vmw_res_type res_type,
1757 				 const struct vmw_user_resource_conv
1758 				 *converter, uint32_t *res_id, uint32_t *buf_id,
1759 				 unsigned long backup_offset)
1760 {
1761 	struct vmw_resource *res;
1762 	int ret;
1763 
1764 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1765 				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1766 	if (ret)
1767 		return ret;
1768 
1769 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1770 					 backup_offset);
1771 }
1772 
1773 /**
1774  * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1775  *
1776  * @dev_priv: Pointer to a device private struct.
1777  * @sw_context: The software context being used for this batch.
1778  * @header: Pointer to the command header in the command stream.
1779  */
1780 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1781 				   struct vmw_sw_context *sw_context,
1782 				   SVGA3dCmdHeader *header)
1783 {
1784 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1785 		container_of(header, typeof(*cmd), header);
1786 
1787 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1788 				     user_surface_converter, &cmd->body.sid,
1789 				     &cmd->body.mobid, 0);
1790 }
1791 
1792 /**
1793  * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1794  *
1795  * @dev_priv: Pointer to a device private struct.
1796  * @sw_context: The software context being used for this batch.
1797  * @header: Pointer to the command header in the command stream.
1798  */
1799 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1800 				   struct vmw_sw_context *sw_context,
1801 				   SVGA3dCmdHeader *header)
1802 {
1803 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1804 		container_of(header, typeof(*cmd), header);
1805 
1806 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1807 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1808 				 &cmd->body.image.sid, NULL);
1809 }
1810 
1811 /**
1812  * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1813  *
1814  * @dev_priv: Pointer to a device private struct.
1815  * @sw_context: The software context being used for this batch.
1816  * @header: Pointer to the command header in the command stream.
1817  */
1818 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1819 				     struct vmw_sw_context *sw_context,
1820 				     SVGA3dCmdHeader *header)
1821 {
1822 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1823 		container_of(header, typeof(*cmd), header);
1824 
1825 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1826 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1827 				 &cmd->body.sid, NULL);
1828 }
1829 
1830 /**
1831  * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1832  *
1833  * @dev_priv: Pointer to a device private struct.
1834  * @sw_context: The software context being used for this batch.
1835  * @header: Pointer to the command header in the command stream.
1836  */
1837 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1838 				     struct vmw_sw_context *sw_context,
1839 				     SVGA3dCmdHeader *header)
1840 {
1841 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1842 		container_of(header, typeof(*cmd), header);
1843 
1844 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1845 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1846 				 &cmd->body.image.sid, NULL);
1847 }
1848 
1849 /**
1850  * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1851  * command
1852  *
1853  * @dev_priv: Pointer to a device private struct.
1854  * @sw_context: The software context being used for this batch.
1855  * @header: Pointer to the command header in the command stream.
1856  */
1857 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1858 				       struct vmw_sw_context *sw_context,
1859 				       SVGA3dCmdHeader *header)
1860 {
1861 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1862 		container_of(header, typeof(*cmd), header);
1863 
1864 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1865 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1866 				 &cmd->body.sid, NULL);
1867 }
1868 
1869 /**
1870  * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1871  * command
1872  *
1873  * @dev_priv: Pointer to a device private struct.
1874  * @sw_context: The software context being used for this batch.
1875  * @header: Pointer to the command header in the command stream.
1876  */
1877 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1878 				       struct vmw_sw_context *sw_context,
1879 				       SVGA3dCmdHeader *header)
1880 {
1881 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1882 		container_of(header, typeof(*cmd), header);
1883 
1884 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1885 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1886 				 &cmd->body.image.sid, NULL);
1887 }
1888 
1889 /**
1890  * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1891  * command
1892  *
1893  * @dev_priv: Pointer to a device private struct.
1894  * @sw_context: The software context being used for this batch.
1895  * @header: Pointer to the command header in the command stream.
1896  */
1897 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1898 					 struct vmw_sw_context *sw_context,
1899 					 SVGA3dCmdHeader *header)
1900 {
1901 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1902 		container_of(header, typeof(*cmd), header);
1903 
1904 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1905 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1906 				 &cmd->body.sid, NULL);
1907 }
1908 
1909 /**
1910  * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1911  *
1912  * @dev_priv: Pointer to a device private struct.
1913  * @sw_context: The software context being used for this batch.
1914  * @header: Pointer to the command header in the command stream.
1915  */
1916 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1917 				 struct vmw_sw_context *sw_context,
1918 				 SVGA3dCmdHeader *header)
1919 {
1920 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1921 	int ret;
1922 	size_t size;
1923 	struct vmw_resource *ctx;
1924 
1925 	cmd = container_of(header, typeof(*cmd), header);
1926 
1927 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1928 				VMW_RES_DIRTY_SET, user_context_converter,
1929 				&cmd->body.cid, &ctx);
1930 	if (unlikely(ret != 0))
1931 		return ret;
1932 
1933 	if (unlikely(!dev_priv->has_mob))
1934 		return 0;
1935 
1936 	size = cmd->header.size - sizeof(cmd->body);
1937 	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1938 				    cmd->body.shid, cmd + 1, cmd->body.type,
1939 				    size, &sw_context->staged_cmd_res);
1940 	if (unlikely(ret != 0))
1941 		return ret;
1942 
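	/*
	 * The shader is now managed as a kernel-side compat shader, so the
	 * legacy define command must not reach the device: the nop relocation
	 * patches the command out of the stream at submission time.
	 */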
1943 	return vmw_resource_relocation_add(sw_context, NULL,
1944 					   vmw_ptr_diff(sw_context->buf_start,
1945 							&cmd->header.id),
1946 					   vmw_res_rel_nop);
1947 }
1948 
1949 /**
1950  * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1951  *
1952  * @dev_priv: Pointer to a device private struct.
1953  * @sw_context: The software context being used for this batch.
1954  * @header: Pointer to the command header in the command stream.
1955  */
1956 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1957 				  struct vmw_sw_context *sw_context,
1958 				  SVGA3dCmdHeader *header)
1959 {
1960 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1961 	int ret;
1962 	struct vmw_resource *ctx;
1963 
1964 	cmd = container_of(header, typeof(*cmd), header);
1965 
1966 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1967 				VMW_RES_DIRTY_SET, user_context_converter,
1968 				&cmd->body.cid, &ctx);
1969 	if (unlikely(ret != 0))
1970 		return ret;
1971 
1972 	if (unlikely(!dev_priv->has_mob))
1973 		return 0;
1974 
1975 	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1976 				cmd->body.type, &sw_context->staged_cmd_res);
1977 	if (unlikely(ret != 0))
1978 		return ret;
1979 
1980 	return vmw_resource_relocation_add(sw_context, NULL,
1981 					   vmw_ptr_diff(sw_context->buf_start,
1982 							&cmd->header.id),
1983 					   vmw_res_rel_nop);
1984 }
1985 
1986 /**
1987  * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1988  *
1989  * @dev_priv: Pointer to a device private struct.
1990  * @sw_context: The software context being used for this batch.
1991  * @header: Pointer to the command header in the command stream.
1992  */
1993 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1994 			      struct vmw_sw_context *sw_context,
1995 			      SVGA3dCmdHeader *header)
1996 {
1997 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1998 	struct vmw_ctx_bindinfo_shader binding;
1999 	struct vmw_resource *ctx, *res = NULL;
2000 	struct vmw_ctx_validation_info *ctx_info;
2001 	int ret;
2002 
2003 	cmd = container_of(header, typeof(*cmd), header);
2004 
2005 	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2006 		VMW_DEBUG_USER("Illegal shader type %u.\n",
2007 			       (unsigned int) cmd->body.type);
2008 		return -EINVAL;
2009 	}
2010 
2011 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2012 				VMW_RES_DIRTY_SET, user_context_converter,
2013 				&cmd->body.cid, &ctx);
2014 	if (unlikely(ret != 0))
2015 		return ret;
2016 
2017 	if (!dev_priv->has_mob)
2018 		return 0;
2019 
2020 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2021 		/*
2022 		 * This is the compat shader path - Per device guest-backed
2023 		 * shaders, but user-space thinks it's per context host-
2024 		 * backed shaders.
2025 		 */
2026 		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2027 					cmd->body.shid, cmd->body.type);
2028 		if (!IS_ERR(res)) {
2029 			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2030 							    VMW_RES_DIRTY_NONE);
2031 			if (unlikely(ret != 0))
2032 				return ret;
2033 
2034 			ret = vmw_resource_relocation_add
2035 				(sw_context, res,
2036 				 vmw_ptr_diff(sw_context->buf_start,
2037 					      &cmd->body.shid),
2038 				 vmw_res_rel_normal);
2039 			if (unlikely(ret != 0))
2040 				return ret;
2041 		}
2042 	}
2043 
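	/* Fall back to a regular shader resource lookup if no compat shader
	 * with this id exists for the context. */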
2044 	if (IS_ERR_OR_NULL(res)) {
2045 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2046 					VMW_RES_DIRTY_NONE,
2047 					user_shader_converter, &cmd->body.shid,
2048 					&res);
2049 		if (unlikely(ret != 0))
2050 			return ret;
2051 	}
2052 
2053 	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2054 	if (!ctx_info)
2055 		return -EINVAL;
2056 
2057 	binding.bi.ctx = ctx;
2058 	binding.bi.res = res;
2059 	binding.bi.bt = vmw_ctx_binding_shader;
2060 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2061 	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2062 
2063 	return 0;
2064 }
2065 
2066 /**
2067  * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2068  *
2069  * @dev_priv: Pointer to a device private struct.
2070  * @sw_context: The software context being used for this batch.
2071  * @header: Pointer to the command header in the command stream.
2072  */
2073 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2074 				    struct vmw_sw_context *sw_context,
2075 				    SVGA3dCmdHeader *header)
2076 {
2077 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2078 	int ret;
2079 
2080 	cmd = container_of(header, typeof(*cmd), header);
2081 
2082 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2083 				VMW_RES_DIRTY_SET, user_context_converter,
2084 				&cmd->body.cid, NULL);
2085 	if (unlikely(ret != 0))
2086 		return ret;
2087 
2088 	if (dev_priv->has_mob)
2089 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2090 
2091 	return 0;
2092 }
2093 
2094 /**
2095  * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2096  *
2097  * @dev_priv: Pointer to a device private struct.
2098  * @sw_context: The software context being used for this batch.
2099  * @header: Pointer to the command header in the command stream.
2100  */
2101 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2102 				  struct vmw_sw_context *sw_context,
2103 				  SVGA3dCmdHeader *header)
2104 {
2105 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2106 		container_of(header, typeof(*cmd), header);
2107 
2108 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2109 				     user_shader_converter, &cmd->body.shid,
2110 				     &cmd->body.mobid, cmd->body.offsetInBytes);
2111 }
2112 
2113 /**
2114  * vmw_cmd_dx_set_single_constant_buffer - Validate
2115  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2116  *
2117  * @dev_priv: Pointer to a device private struct.
2118  * @sw_context: The software context being used for this batch.
2119  * @header: Pointer to the command header in the command stream.
2120  */
2121 static int
2122 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2123 				      struct vmw_sw_context *sw_context,
2124 				      SVGA3dCmdHeader *header)
2125 {
2126 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2127 	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
2128 		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
2129 
2130 	struct vmw_resource *res = NULL;
2131 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2132 	struct vmw_ctx_bindinfo_cb binding;
2133 	int ret;
2134 
2135 	if (!ctx_node)
2136 		return -EINVAL;
2137 
2138 	cmd = container_of(header, typeof(*cmd), header);
2139 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2140 				VMW_RES_DIRTY_NONE, user_surface_converter,
2141 				&cmd->body.sid, &res);
2142 	if (unlikely(ret != 0))
2143 		return ret;
2144 
2145 	binding.bi.ctx = ctx_node->ctx;
2146 	binding.bi.res = res;
2147 	binding.bi.bt = vmw_ctx_binding_cb;
2148 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2149 	binding.offset = cmd->body.offsetInBytes;
2150 	binding.size = cmd->body.sizeInBytes;
2151 	binding.slot = cmd->body.slot;
2152 
2153 	if (binding.shader_slot >= max_shader_num ||
2154 	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2155 		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2156 			       (unsigned int) cmd->body.type,
2157 			       (unsigned int) binding.slot);
2158 		return -EINVAL;
2159 	}
2160 
2161 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2162 			binding.slot);
2163 
2164 	return 0;
2165 }
2166 
2167 /**
2168  * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2169  * command
2170  *
2171  * @dev_priv: Pointer to a device private struct.
2172  * @sw_context: The software context being used for this batch.
2173  * @header: Pointer to the command header in the command stream.
2174  */
2175 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2176 				     struct vmw_sw_context *sw_context,
2177 				     SVGA3dCmdHeader *header)
2178 {
2179 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2180 		container_of(header, typeof(*cmd), header);
2181 	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2182 		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2183 
2184 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2185 		sizeof(SVGA3dShaderResourceViewId);
2186 
2187 	if ((u64) cmd->body.startView + (u64) num_sr_view >
2188 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2189 	    cmd->body.type >= max_allowed) {
2190 		VMW_DEBUG_USER("Invalid shader binding.\n");
2191 		return -EINVAL;
2192 	}
2193 
2194 	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2195 				     vmw_ctx_binding_sr,
2196 				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2197 				     (void *) &cmd[1], num_sr_view,
2198 				     cmd->body.startView);
2199 }
2200 
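/*
 * A minimal sketch of why the (u64) casts in the range check above (and in
 * the similar checks that follow) matter: both operands are 32-bit values
 * taken from the command stream, so
 *
 *	u32 start = 0xffffffffu, num = 2;
 *	start + num == 1;                        // wraps in 32-bit arithmetic
 *	(u64)start + (u64)num == 0x100000001ULL; // exceeds any view limit
 *
 * Promoting to u64 before the addition keeps the comparison against
 * SVGA3D_DX_MAX_SRVIEWS immune to wraparound.
 */
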
2201 /**
2202  * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2203  *
2204  * @dev_priv: Pointer to a device private struct.
2205  * @sw_context: The software context being used for this batch.
2206  * @header: Pointer to the command header in the command stream.
2207  */
2208 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2209 				 struct vmw_sw_context *sw_context,
2210 				 SVGA3dCmdHeader *header)
2211 {
2212 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2213 	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2214 		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2215 	struct vmw_resource *res = NULL;
2216 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2217 	struct vmw_ctx_bindinfo_shader binding;
2218 	int ret = 0;
2219 
2220 	if (!ctx_node)
2221 		return -EINVAL;
2222 
2223 	cmd = container_of(header, typeof(*cmd), header);
2224 
2225 	if (cmd->body.type >= max_allowed ||
2226 	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2227 		VMW_DEBUG_USER("Illegal shader type %u.\n",
2228 			       (unsigned int) cmd->body.type);
2229 		return -EINVAL;
2230 	}
2231 
2232 	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2233 		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2234 		if (IS_ERR(res)) {
2235 			VMW_DEBUG_USER("Could not find shader for binding.\n");
2236 			return PTR_ERR(res);
2237 		}
2238 
2239 		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2240 						    VMW_RES_DIRTY_NONE);
2241 		if (ret)
2242 			return ret;
2243 	}
2244 
2245 	binding.bi.ctx = ctx_node->ctx;
2246 	binding.bi.res = res;
2247 	binding.bi.bt = vmw_ctx_binding_dx_shader;
2248 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2249 
2250 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2251 
2252 	return 0;
2253 }
2254 
2255 /**
2256  * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2257  * command
2258  *
2259  * @dev_priv: Pointer to a device private struct.
2260  * @sw_context: The software context being used for this batch.
2261  * @header: Pointer to the command header in the command stream.
2262  */
2263 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2264 					 struct vmw_sw_context *sw_context,
2265 					 SVGA3dCmdHeader *header)
2266 {
2267 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2268 	struct vmw_ctx_bindinfo_vb binding;
2269 	struct vmw_resource *res;
2270 	struct {
2271 		SVGA3dCmdHeader header;
2272 		SVGA3dCmdDXSetVertexBuffers body;
2273 		SVGA3dVertexBuffer buf[];
2274 	} *cmd;
2275 	int i, ret, num;
2276 
2277 	if (!ctx_node)
2278 		return -EINVAL;
2279 
2280 	cmd = container_of(header, typeof(*cmd), header);
2281 	num = (cmd->header.size - sizeof(cmd->body)) /
2282 		sizeof(SVGA3dVertexBuffer);
2283 	if ((u64)num + (u64)cmd->body.startBuffer >
2284 	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2285 		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2286 		return -EINVAL;
2287 	}
2288 
2289 	for (i = 0; i < num; i++) {
2290 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2291 					VMW_RES_DIRTY_NONE,
2292 					user_surface_converter,
2293 					&cmd->buf[i].sid, &res);
2294 		if (unlikely(ret != 0))
2295 			return ret;
2296 
2297 		binding.bi.ctx = ctx_node->ctx;
2298 		binding.bi.bt = vmw_ctx_binding_vb;
2299 		binding.bi.res = res;
2300 		binding.offset = cmd->buf[i].offset;
2301 		binding.stride = cmd->buf[i].stride;
2302 		binding.slot = i + cmd->body.startBuffer;
2303 
2304 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2305 	}
2306 
2307 	return 0;
2308 }
2309 
2310 /**
2311  * vmw_cmd_dx_set_index_buffer - Validate
2312  * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2313  *
2314  * @dev_priv: Pointer to a device private struct.
2315  * @sw_context: The software context being used for this batch.
2316  * @header: Pointer to the command header in the command stream.
2317  */
2318 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2319 				       struct vmw_sw_context *sw_context,
2320 				       SVGA3dCmdHeader *header)
2321 {
2322 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2323 	struct vmw_ctx_bindinfo_ib binding;
2324 	struct vmw_resource *res;
2325 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2326 	int ret;
2327 
2328 	if (!ctx_node)
2329 		return -EINVAL;
2330 
2331 	cmd = container_of(header, typeof(*cmd), header);
2332 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2333 				VMW_RES_DIRTY_NONE, user_surface_converter,
2334 				&cmd->body.sid, &res);
2335 	if (unlikely(ret != 0))
2336 		return ret;
2337 
2338 	binding.bi.ctx = ctx_node->ctx;
2339 	binding.bi.res = res;
2340 	binding.bi.bt = vmw_ctx_binding_ib;
2341 	binding.offset = cmd->body.offset;
2342 	binding.format = cmd->body.format;
2343 
2344 	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2345 
2346 	return 0;
2347 }
2348 
2349 /**
2350  * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2351  * command
2352  *
2353  * @dev_priv: Pointer to a device private struct.
2354  * @sw_context: The software context being used for this batch.
2355  * @header: Pointer to the command header in the command stream.
2356  */
2357 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2358 					struct vmw_sw_context *sw_context,
2359 					SVGA3dCmdHeader *header)
2360 {
2361 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2362 		container_of(header, typeof(*cmd), header);
2363 	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2364 		sizeof(SVGA3dRenderTargetViewId);
2365 	int ret;
2366 
2367 	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2368 		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2369 		return -EINVAL;
2370 	}
2371 
2372 	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2373 				    0, &cmd->body.depthStencilViewId, 1, 0);
2374 	if (ret)
2375 		return ret;
2376 
2377 	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2378 				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2379 				     num_rt_view, 0);
2380 }
2381 
2382 /**
2383  * vmw_cmd_dx_clear_rendertarget_view - Validate
2384  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2385  *
2386  * @dev_priv: Pointer to a device private struct.
2387  * @sw_context: The software context being used for this batch.
2388  * @header: Pointer to the command header in the command stream.
2389  */
2390 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2391 					      struct vmw_sw_context *sw_context,
2392 					      SVGA3dCmdHeader *header)
2393 {
2394 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2395 		container_of(header, typeof(*cmd), header);
2396 	struct vmw_resource *ret;
2397 
2398 	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2399 				  cmd->body.renderTargetViewId);
2400 
2401 	return PTR_ERR_OR_ZERO(ret);
2402 }
2403 
2404 /**
2405  * vmw_cmd_dx_clear_depthstencil_view - Validate
2406  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2407  *
2408  * @dev_priv: Pointer to a device private struct.
2409  * @sw_context: The software context being used for this batch.
2410  * @header: Pointer to the command header in the command stream.
2411  */
2412 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2413 					      struct vmw_sw_context *sw_context,
2414 					      SVGA3dCmdHeader *header)
2415 {
2416 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2417 		container_of(header, typeof(*cmd), header);
2418 	struct vmw_resource *ret;
2419 
2420 	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2421 				  cmd->body.depthStencilViewId);
2422 
2423 	return PTR_ERR_OR_ZERO(ret);
2424 }
2425 
2426 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2427 				  struct vmw_sw_context *sw_context,
2428 				  SVGA3dCmdHeader *header)
2429 {
2430 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2431 	struct vmw_resource *srf;
2432 	struct vmw_resource *res;
2433 	enum vmw_view_type view_type;
2434 	int ret;
2435 	/*
2436 	 * This is based on the fact that all affected define commands have the
2437 	 * same initial command body layout.
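	 * (E.g. the SVGA3dCmdDXDefineShaderResourceView,
	 * SVGA3dCmdDXDefineRenderTargetView and
	 * SVGA3dCmdDXDefineDepthStencilView bodies each begin with the view
	 * id followed by the surface id.)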
2438 	 */
2439 	struct {
2440 		SVGA3dCmdHeader header;
2441 		uint32 defined_id;
2442 		uint32 sid;
2443 	} *cmd;
2444 
2445 	if (!ctx_node)
2446 		return -EINVAL;
2447 
2448 	view_type = vmw_view_cmd_to_type(header->id);
2449 	if (view_type == vmw_view_max)
2450 		return -EINVAL;
2451 
2452 	cmd = container_of(header, typeof(*cmd), header);
2453 	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2454 		VMW_DEBUG_USER("Invalid surface id.\n");
2455 		return -EINVAL;
2456 	}
2457 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2458 				VMW_RES_DIRTY_NONE, user_surface_converter,
2459 				&cmd->sid, &srf);
2460 	if (unlikely(ret != 0))
2461 		return ret;
2462 
2463 	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2464 	ret = vmw_cotable_notify(res, cmd->defined_id);
2465 	if (unlikely(ret != 0))
2466 		return ret;
2467 
2468 	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2469 			    cmd->defined_id, header,
2470 			    header->size + sizeof(*header),
2471 			    &sw_context->staged_cmd_res);
2472 }
2473 
2474 /**
2475  * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2476  *
2477  * @dev_priv: Pointer to a device private struct.
2478  * @sw_context: The software context being used for this batch.
2479  * @header: Pointer to the command header in the command stream.
2480  */
2481 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2482 				     struct vmw_sw_context *sw_context,
2483 				     SVGA3dCmdHeader *header)
2484 {
2485 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2486 	struct vmw_ctx_bindinfo_so_target binding;
2487 	struct vmw_resource *res;
2488 	struct {
2489 		SVGA3dCmdHeader header;
2490 		SVGA3dCmdDXSetSOTargets body;
2491 		SVGA3dSoTarget targets[];
2492 	} *cmd;
2493 	int i, ret, num;
2494 
2495 	if (!ctx_node)
2496 		return -EINVAL;
2497 
2498 	cmd = container_of(header, typeof(*cmd), header);
2499 	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2500 
2501 	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2502 		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2503 		return -EINVAL;
2504 	}
2505 
2506 	for (i = 0; i < num; i++) {
2507 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2508 					VMW_RES_DIRTY_SET,
2509 					user_surface_converter,
2510 					&cmd->targets[i].sid, &res);
2511 		if (unlikely(ret != 0))
2512 			return ret;
2513 
2514 		binding.bi.ctx = ctx_node->ctx;
2515 		binding.bi.res = res;
2516 		binding.bi.bt = vmw_ctx_binding_so_target;
2517 		binding.offset = cmd->targets[i].offset;
2518 		binding.size = cmd->targets[i].sizeInBytes;
2519 		binding.slot = i;
2520 
2521 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2522 	}
2523 
2524 	return 0;
2525 }
2526 
2527 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2528 				struct vmw_sw_context *sw_context,
2529 				SVGA3dCmdHeader *header)
2530 {
2531 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2532 	struct vmw_resource *res;
2533 	/*
2534 	 * This is based on the fact that all affected define commands have
2535 	 * the same initial command body layout.
2536 	 */
2537 	struct {
2538 		SVGA3dCmdHeader header;
2539 		uint32 defined_id;
2540 	} *cmd;
2541 	enum vmw_so_type so_type;
2542 	int ret;
2543 
2544 	if (!ctx_node)
2545 		return -EINVAL;
2546 
2547 	so_type = vmw_so_cmd_to_type(header->id);
2548 	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2549 	cmd = container_of(header, typeof(*cmd), header);
2550 	ret = vmw_cotable_notify(res, cmd->defined_id);
2551 
2552 	return ret;
2553 }
2554 
2555 /**
2556  * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2557  * command
2558  *
2559  * @dev_priv: Pointer to a device private struct.
2560  * @sw_context: The software context being used for this batch.
2561  * @header: Pointer to the command header in the command stream.
2562  */
2563 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2564 					struct vmw_sw_context *sw_context,
2565 					SVGA3dCmdHeader *header)
2566 {
2567 	struct {
2568 		SVGA3dCmdHeader header;
2569 		union {
2570 			SVGA3dCmdDXReadbackSubResource r_body;
2571 			SVGA3dCmdDXInvalidateSubResource i_body;
2572 			SVGA3dCmdDXUpdateSubResource u_body;
2573 			SVGA3dSurfaceId sid;
2574 		};
2575 	} *cmd;
2576 
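	/*
	 * All three command bodies start with the surface id; the
	 * BUILD_BUG_ONs below verify that the bare sid union member aliases
	 * each body's sid field, so one resource check covers all three
	 * commands.
	 */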
2577 	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2578 		     offsetof(typeof(*cmd), sid));
2579 	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2580 		     offsetof(typeof(*cmd), sid));
2581 	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2582 		     offsetof(typeof(*cmd), sid));
2583 
2584 	cmd = container_of(header, typeof(*cmd), header);
2585 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2586 				 VMW_RES_DIRTY_NONE, user_surface_converter,
2587 				 &cmd->sid, NULL);
2588 }
2589 
2590 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2591 				struct vmw_sw_context *sw_context,
2592 				SVGA3dCmdHeader *header)
2593 {
2594 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2595 
2596 	if (!ctx_node)
2597 		return -EINVAL;
2598 
2599 	return 0;
2600 }
2601 
2602 /**
2603  * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the view
2604  * resource for removal.
2605  *
2606  * @dev_priv: Pointer to a device private struct.
2607  * @sw_context: The software context being used for this batch.
2608  * @header: Pointer to the command header in the command stream.
2609  *
2610  * Check that the view exists, and if it was not created using this command
2611  * batch, conditionally make this command a NOP.
2612  */
2613 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2614 				  struct vmw_sw_context *sw_context,
2615 				  SVGA3dCmdHeader *header)
2616 {
2617 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2618 	struct {
2619 		SVGA3dCmdHeader header;
2620 		union vmw_view_destroy body;
2621 	} *cmd = container_of(header, typeof(*cmd), header);
2622 	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2623 	struct vmw_resource *view;
2624 	int ret;
2625 
2626 	if (!ctx_node)
2627 		return -EINVAL;
2628 
2629 	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2630 			      &sw_context->staged_cmd_res, &view);
2631 	if (ret || !view)
2632 		return ret;
2633 
2634 	/*
2635 	 * If the view wasn't created during this command batch, it might
2636 	 * have been removed due to a context swapout, so add a
2637 	 * relocation to conditionally make this command a NOP to avoid
2638 	 * device errors.
2639 	 */
2640 	return vmw_resource_relocation_add(sw_context, view,
2641 					   vmw_ptr_diff(sw_context->buf_start,
2642 							&cmd->header.id),
2643 					   vmw_res_rel_cond_nop);
2644 }
2645 
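/*
 * A minimal sketch of the conditional-NOP mechanism used above, assuming
 * rel->offset holds the byte distance that vmw_ptr_diff() computed from the
 * start of the batch to the command id:
 *
 *	u32 *id = (u32 *)((u8 *)batch + rel->offset);
 *	if (view_was_destroyed)		// hypothetical predicate
 *		*id = SVGA_3D_CMD_NOP;	// neutralize the destroy command
 */
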
2646 /**
2647  * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2648  *
2649  * @dev_priv: Pointer to a device private struct.
2650  * @sw_context: The software context being used for this batch.
2651  * @header: Pointer to the command header in the command stream.
2652  */
2653 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2654 				    struct vmw_sw_context *sw_context,
2655 				    SVGA3dCmdHeader *header)
2656 {
2657 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2658 	struct vmw_resource *res;
2659 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2660 		container_of(header, typeof(*cmd), header);
2661 	int ret;
2662 
2663 	if (!ctx_node)
2664 		return -EINVAL;
2665 
2666 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2667 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2668 	if (ret)
2669 		return ret;
2670 
2671 	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2672 				 cmd->body.shaderId, cmd->body.type,
2673 				 &sw_context->staged_cmd_res);
2674 }
2675 
2676 /**
2677  * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2678  *
2679  * @dev_priv: Pointer to a device private struct.
2680  * @sw_context: The software context being used for this batch.
2681  * @header: Pointer to the command header in the command stream.
2682  */
2683 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2684 				     struct vmw_sw_context *sw_context,
2685 				     SVGA3dCmdHeader *header)
2686 {
2687 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2688 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2689 		container_of(header, typeof(*cmd), header);
2690 	int ret;
2691 
2692 	if (!ctx_node)
2693 		return -EINVAL;
2694 
2695 	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2696 				&sw_context->staged_cmd_res);
2697 
2698 	return ret;
2699 }
2700 
2701 /**
2702  * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2703  *
2704  * @dev_priv: Pointer to a device private struct.
2705  * @sw_context: The software context being used for this batch.
2706  * @header: Pointer to the command header in the command stream.
2707  */
2708 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2709 				  struct vmw_sw_context *sw_context,
2710 				  SVGA3dCmdHeader *header)
2711 {
2712 	struct vmw_resource *ctx;
2713 	struct vmw_resource *res;
2714 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2715 		container_of(header, typeof(*cmd), header);
2716 	int ret;
2717 
2718 	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2719 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2720 					VMW_RES_DIRTY_SET,
2721 					user_context_converter, &cmd->body.cid,
2722 					&ctx);
2723 		if (ret)
2724 			return ret;
2725 	} else {
2726 		struct vmw_ctx_validation_info *ctx_node =
2727 			VMW_GET_CTX_NODE(sw_context);
2728 
2729 		if (!ctx_node)
2730 			return -EINVAL;
2731 
2732 		ctx = ctx_node->ctx;
2733 	}
2734 
2735 	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2736 	if (IS_ERR(res)) {
2737 		VMW_DEBUG_USER("Could not find shader to bind.\n");
2738 		return PTR_ERR(res);
2739 	}
2740 
2741 	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2742 					    VMW_RES_DIRTY_NONE);
2743 	if (ret) {
2744 		VMW_DEBUG_USER("Error creating resource validation node.\n");
2745 		return ret;
2746 	}
2747 
2748 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2749 					 &cmd->body.mobid,
2750 					 cmd->body.offsetInBytes);
2751 }
2752 
2753 /**
2754  * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2755  *
2756  * @dev_priv: Pointer to a device private struct.
2757  * @sw_context: The software context being used for this batch.
2758  * @header: Pointer to the command header in the command stream.
2759  */
2760 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2761 			      struct vmw_sw_context *sw_context,
2762 			      SVGA3dCmdHeader *header)
2763 {
2764 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2765 		container_of(header, typeof(*cmd), header);
2766 	struct vmw_resource *view;
2767 	struct vmw_res_cache_entry *rcache;
2768 
2769 	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2770 				   cmd->body.shaderResourceViewId);
2771 	if (IS_ERR(view))
2772 		return PTR_ERR(view);
2773 
2774 	/*
2775 	 * Normally the shader-resource view is not gpu-dirtying, but for
2776 	 * this particular command it is...
2777 	 * So mark the last looked-up surface, which is the surface
2778 	 * the view points to, gpu-dirty.
2779 	 */
2780 	rcache = &sw_context->res_cache[vmw_res_surface];
2781 	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2782 				     VMW_RES_DIRTY_SET);
2783 	return 0;
2784 }
2785 
2786 /**
2787  * vmw_cmd_dx_transfer_from_buffer - Validate
2788  * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2789  *
2790  * @dev_priv: Pointer to a device private struct.
2791  * @sw_context: The software context being used for this batch.
2792  * @header: Pointer to the command header in the command stream.
2793  */
2794 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2795 					   struct vmw_sw_context *sw_context,
2796 					   SVGA3dCmdHeader *header)
2797 {
2798 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2799 		container_of(header, typeof(*cmd), header);
2800 	int ret;
2801 
2802 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2803 				VMW_RES_DIRTY_NONE, user_surface_converter,
2804 				&cmd->body.srcSid, NULL);
2805 	if (ret != 0)
2806 		return ret;
2807 
2808 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2809 				 VMW_RES_DIRTY_SET, user_surface_converter,
2810 				 &cmd->body.destSid, NULL);
2811 }
2812 
2813 /**
2814  * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2815  *
2816  * @dev_priv: Pointer to a device private struct.
2817  * @sw_context: The software context being used for this batch.
2818  * @header: Pointer to the command header in the command stream.
2819  */
2820 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2821 					   struct vmw_sw_context *sw_context,
2822 					   SVGA3dCmdHeader *header)
2823 {
2824 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2825 		container_of(header, typeof(*cmd), header);
2826 
2827 	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2828 		return -EINVAL;
2829 
2830 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2831 				 VMW_RES_DIRTY_SET, user_surface_converter,
2832 				 &cmd->body.surface.sid, NULL);
2833 }
2834 
2835 static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2836 		       struct vmw_sw_context *sw_context,
2837 		       SVGA3dCmdHeader *header)
2838 {
2839 	if (!has_sm5_context(dev_priv))
2840 		return -EINVAL;
2841 
2842 	return 0;
2843 }
2844 
2845 static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2846 				   struct vmw_sw_context *sw_context,
2847 				   SVGA3dCmdHeader *header)
2848 {
2849 	if (!has_sm5_context(dev_priv))
2850 		return -EINVAL;
2851 
2852 	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2853 }
2854 
2855 static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2856 				   struct vmw_sw_context *sw_context,
2857 				   SVGA3dCmdHeader *header)
2858 {
2859 	if (!has_sm5_context(dev_priv))
2860 		return -EINVAL;
2861 
2862 	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2863 }
2864 
2865 static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2866 				  struct vmw_sw_context *sw_context,
2867 				  SVGA3dCmdHeader *header)
2868 {
2869 	struct {
2870 		SVGA3dCmdHeader header;
2871 		SVGA3dCmdDXClearUAViewUint body;
2872 	} *cmd = container_of(header, typeof(*cmd), header);
2873 	struct vmw_resource *ret;
2874 
2875 	if (!has_sm5_context(dev_priv))
2876 		return -EINVAL;
2877 
2878 	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2879 				  cmd->body.uaViewId);
2880 
2881 	return PTR_ERR_OR_ZERO(ret);
2882 }
2883 
2884 static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2885 				   struct vmw_sw_context *sw_context,
2886 				   SVGA3dCmdHeader *header)
2887 {
2888 	struct {
2889 		SVGA3dCmdHeader header;
2890 		SVGA3dCmdDXClearUAViewFloat body;
2891 	} *cmd = container_of(header, typeof(*cmd), header);
2892 	struct vmw_resource *ret;
2893 
2894 	if (!has_sm5_context(dev_priv))
2895 		return -EINVAL;
2896 
2897 	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2898 				  cmd->body.uaViewId);
2899 
2900 	return PTR_ERR_OR_ZERO(ret);
2901 }
2902 
2903 static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2904 			   struct vmw_sw_context *sw_context,
2905 			   SVGA3dCmdHeader *header)
2906 {
2907 	struct {
2908 		SVGA3dCmdHeader header;
2909 		SVGA3dCmdDXSetUAViews body;
2910 	} *cmd = container_of(header, typeof(*cmd), header);
2911 	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2912 		sizeof(SVGA3dUAViewId);
2913 	int ret;
2914 
2915 	if (!has_sm5_context(dev_priv))
2916 		return -EINVAL;
2917 
2918 	if (num_uav > SVGA3D_MAX_UAVIEWS) {
2919 		VMW_DEBUG_USER("Invalid UAV binding.\n");
2920 		return -EINVAL;
2921 	}
2922 
2923 	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2924 				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2925 				    num_uav, 0);
2926 	if (ret)
2927 		return ret;
2928 
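	/* UAV set 0 tracks the graphics pipeline; the compute-shader variant
	 * below uses set 1. */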
2929 	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2930 				  cmd->body.uavSpliceIndex);
2931 
2932 	return ret;
2933 }
2934 
2935 static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2936 			      struct vmw_sw_context *sw_context,
2937 			      SVGA3dCmdHeader *header)
2938 {
2939 	struct {
2940 		SVGA3dCmdHeader header;
2941 		SVGA3dCmdDXSetCSUAViews body;
2942 	} *cmd = container_of(header, typeof(*cmd), header);
2943 	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2944 		sizeof(SVGA3dUAViewId);
2945 	int ret;
2946 
2947 	if (!has_sm5_context(dev_priv))
2948 		return -EINVAL;
2949 
2950 	if (num_uav > SVGA3D_MAX_UAVIEWS) {
2951 		VMW_DEBUG_USER("Invalid UAV binding.\n");
2952 		return -EINVAL;
2953 	}
2954 
2955 	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2956 				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2957 				    num_uav, 0);
2958 	if (ret)
2959 		return ret;
2960 
2961 	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2962 				  cmd->body.startIndex);
2963 
2964 	return ret;
2965 }
2966 
2967 static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2968 					  struct vmw_sw_context *sw_context,
2969 					  SVGA3dCmdHeader *header)
2970 {
2971 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2972 	struct vmw_resource *res;
2973 	struct {
2974 		SVGA3dCmdHeader header;
2975 		SVGA3dCmdDXDefineStreamOutputWithMob body;
2976 	} *cmd = container_of(header, typeof(*cmd), header);
2977 	int ret;
2978 
2979 	if (!has_sm5_context(dev_priv))
2980 		return -EINVAL;
2981 
2982 	if (!ctx_node) {
2983 		DRM_ERROR("DX Context not set.\n");
2984 		return -EINVAL;
2985 	}
2986 
2987 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
2988 	ret = vmw_cotable_notify(res, cmd->body.soid);
2989 	if (ret)
2990 		return ret;
2991 
2992 	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
2993 				       cmd->body.soid,
2994 				       &sw_context->staged_cmd_res);
2995 }
2996 
2997 static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
2998 					   struct vmw_sw_context *sw_context,
2999 					   SVGA3dCmdHeader *header)
3000 {
3001 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3002 	struct vmw_resource *res;
3003 	struct {
3004 		SVGA3dCmdHeader header;
3005 		SVGA3dCmdDXDestroyStreamOutput body;
3006 	} *cmd = container_of(header, typeof(*cmd), header);
3007 
3008 	if (!ctx_node) {
3009 		DRM_ERROR("DX Context not set.\n");
3010 		return -EINVAL;
3011 	}
3012 
3013 	/*
3014 	 * When the device does not support SM5, the streamoutput-with-mob
3015 	 * commands are not available to user-space. Simply return in this case.
3016 	 */
3017 	if (!has_sm5_context(dev_priv))
3018 		return 0;
3019 
3020 	/*
3021 	 * On an SM5-capable device, if the lookup fails then user-space probably
3022 	 * used the old streamoutput define command. Return without an error.
3023 	 */
3024 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3025 					 cmd->body.soid);
3026 	if (IS_ERR(res))
3027 		return 0;
3028 
3029 	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3030 					  &sw_context->staged_cmd_res);
3031 }
3032 
3033 static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3034 					struct vmw_sw_context *sw_context,
3035 					SVGA3dCmdHeader *header)
3036 {
3037 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3038 	struct vmw_resource *res;
3039 	struct {
3040 		SVGA3dCmdHeader header;
3041 		SVGA3dCmdDXBindStreamOutput body;
3042 	} *cmd = container_of(header, typeof(*cmd), header);
3043 	int ret;
3044 
3045 	if (!has_sm5_context(dev_priv))
3046 		return -EINVAL;
3047 
3048 	if (!ctx_node) {
3049 		DRM_ERROR("DX Context not set.\n");
3050 		return -EINVAL;
3051 	}
3052 
3053 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3054 					 cmd->body.soid);
3055 	if (IS_ERR(res)) {
3056 		DRM_ERROR("Could not find streamoutput to bind.\n");
3057 		return PTR_ERR(res);
3058 	}
3059 
3060 	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3061 
3062 	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3063 					    VMW_RES_DIRTY_NONE);
3064 	if (ret) {
3065 		DRM_ERROR("Error creating resource validation node.\n");
3066 		return ret;
3067 	}
3068 
3069 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3070 					 &cmd->body.mobid,
3071 					 cmd->body.offsetInBytes);
3072 }
3073 
3074 static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3075 				       struct vmw_sw_context *sw_context,
3076 				       SVGA3dCmdHeader *header)
3077 {
3078 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3079 	struct vmw_resource *res;
3080 	struct vmw_ctx_bindinfo_so binding;
3081 	struct {
3082 		SVGA3dCmdHeader header;
3083 		SVGA3dCmdDXSetStreamOutput body;
3084 	} *cmd = container_of(header, typeof(*cmd), header);
3085 	int ret;
3086 
3087 	if (!ctx_node) {
3088 		DRM_ERROR("DX Context not set.\n");
3089 		return -EINVAL;
3090 	}
3091 
3092 	if (cmd->body.soid == SVGA3D_INVALID_ID)
3093 		return 0;
3094 
3095 	/*
3096 	 * When the device does not support SM5, the streamoutput-with-mob
3097 	 * commands are not available to user-space. Simply return in this case.
3098 	 */
3099 	if (!has_sm5_context(dev_priv))
3100 		return 0;
3101 
3102 	/*
3103 	 * On an SM5-capable device, if the lookup fails then user-space probably
3104 	 * used the old streamoutput define command. Return without an error.
3105 	 */
3106 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3107 					 cmd->body.soid);
3108 	if (IS_ERR(res))
3109 		return 0;
3111 
3112 	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3113 					    VMW_RES_DIRTY_NONE);
3114 	if (ret) {
3115 		DRM_ERROR("Error creating resource validation node.\n");
3116 		return ret;
3117 	}
3118 
3119 	binding.bi.ctx = ctx_node->ctx;
3120 	binding.bi.res = res;
3121 	binding.bi.bt = vmw_ctx_binding_so;
3122 	binding.slot = 0; /* Only one SO is set on a context at a time. */
3123 
3124 	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3125 			binding.slot);
3126 
3127 	return ret;
3128 }
3129 
3130 static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3131 					      struct vmw_sw_context *sw_context,
3132 					      SVGA3dCmdHeader *header)
3133 {
3134 	struct vmw_draw_indexed_instanced_indirect_cmd {
3135 		SVGA3dCmdHeader header;
3136 		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3137 	} *cmd = container_of(header, typeof(*cmd), header);
3138 
3139 	if (!has_sm5_context(dev_priv))
3140 		return -EINVAL;
3141 
3142 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3143 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3144 				 &cmd->body.argsBufferSid, NULL);
3145 }
3146 
3147 static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3148 				      struct vmw_sw_context *sw_context,
3149 				      SVGA3dCmdHeader *header)
3150 {
3151 	struct vmw_draw_instanced_indirect_cmd {
3152 		SVGA3dCmdHeader header;
3153 		SVGA3dCmdDXDrawInstancedIndirect body;
3154 	} *cmd = container_of(header, typeof(*cmd), header);
3155 
3156 	if (!has_sm5_context(dev_priv))
3157 		return -EINVAL;
3158 
3159 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3160 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3161 				 &cmd->body.argsBufferSid, NULL);
3162 }
3163 
3164 static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3165 				     struct vmw_sw_context *sw_context,
3166 				     SVGA3dCmdHeader *header)
3167 {
3168 	struct vmw_dispatch_indirect_cmd {
3169 		SVGA3dCmdHeader header;
3170 		SVGA3dCmdDXDispatchIndirect body;
3171 	} *cmd = container_of(header, typeof(*cmd), header);
3172 
3173 	if (!has_sm5_context(dev_priv))
3174 		return -EINVAL;
3175 
3176 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3177 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3178 				 &cmd->body.argsBufferSid, NULL);
3179 }
3180 
3181 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3182 				struct vmw_sw_context *sw_context,
3183 				void *buf, uint32_t *size)
3184 {
3185 	uint32_t size_remaining = *size;
3186 	uint32_t cmd_id;
3187 
3188 	cmd_id = ((uint32_t *)buf)[0];
3189 	switch (cmd_id) {
3190 	case SVGA_CMD_UPDATE:
3191 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3192 		break;
3193 	case SVGA_CMD_DEFINE_GMRFB:
3194 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3195 		break;
3196 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3197 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3198 		break;
3199 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3200 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3201 		break;
3202 	default:
3203 		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3204 		return -EINVAL;
3205 	}
3206 
3207 	if (*size > size_remaining) {
3208 		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3209 			       cmd_id);
3210 		return -EINVAL;
3211 	}
3212 
3213 	if (unlikely(!sw_context->kernel)) {
3214 		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3215 		return -EPERM;
3216 	}
3217 
3218 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3219 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3220 
3221 	return 0;
3222 }
3223 
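/*
 * A minimal sketch of the layout vmw_cmd_check_not_3d() assumes: legacy
 * (non-3D) FIFO commands are a bare 32-bit command id immediately followed
 * by a fixed-size body, e.g. for SVGA_CMD_UPDATE:
 *
 *	struct {
 *		uint32_t cmd_id;	// SVGA_CMD_UPDATE
 *		SVGAFifoCmdUpdate body;	// x, y, width, height
 *	};
 *
 * which is why every case above sets *size to sizeof(uint32_t) plus the
 * body size before checking it against the bytes remaining in the batch.
 */
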
3224 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3225 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3226 		    false, false, false),
3227 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3228 		    false, false, false),
3229 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3230 		    true, false, false),
3231 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3232 		    true, false, false),
3233 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3234 		    true, false, false),
3235 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3236 		    false, false, false),
3237 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3238 		    false, false, false),
3239 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3240 		    true, false, false),
3241 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3242 		    true, false, false),
3243 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3244 		    true, false, false),
3245 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3246 		    &vmw_cmd_set_render_target_check, true, false, false),
3247 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3248 		    true, false, false),
3249 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3250 		    true, false, false),
3251 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3252 		    true, false, false),
3253 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3254 		    true, false, false),
3255 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3256 		    true, false, false),
3257 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3258 		    true, false, false),
3259 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3260 		    true, false, false),
3261 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3262 		    false, false, false),
3263 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3264 		    true, false, false),
3265 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3266 		    true, false, false),
3267 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3268 		    true, false, false),
3269 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3270 		    true, false, false),
3271 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3272 		    true, false, false),
3273 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3274 		    true, false, false),
3275 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3276 		    true, false, false),
3277 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3278 		    true, false, false),
3279 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3280 		    true, false, false),
3281 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3282 		    true, false, false),
3283 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3284 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3285 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3286 		    false, false, false),
3287 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3288 		    false, false, false),
3289 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3290 		    false, false, false),
3291 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3292 		    false, false, false),
3293 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3294 		    false, false, false),
3295 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3296 		    false, false, false),
3297 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3298 		    false, false, false),
3299 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3300 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3301 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3302 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3303 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3304 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3305 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3306 		    false, false, true),
3307 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3308 		    false, false, true),
3309 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3310 		    false, false, true),
3311 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3312 		    false, false, true),
3313 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3314 		    false, false, true),
3315 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3316 		    false, false, true),
3317 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3318 		    false, false, true),
3319 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3320 		    false, false, true),
3321 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3322 		    true, false, true),
3323 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3324 		    false, false, true),
3325 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3326 		    true, false, true),
3327 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3328 		    &vmw_cmd_update_gb_surface, true, false, true),
3329 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3330 		    &vmw_cmd_readback_gb_image, true, false, true),
3331 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3332 		    &vmw_cmd_readback_gb_surface, true, false, true),
3333 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3334 		    &vmw_cmd_invalidate_gb_image, true, false, true),
3335 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3336 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3337 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3338 		    false, false, true),
3339 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3340 		    false, false, true),
3341 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3342 		    false, false, true),
3343 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3344 		    false, false, true),
3345 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3346 		    false, false, true),
3347 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3348 		    false, false, true),
3349 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3350 		    true, false, true),
3351 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3352 		    false, false, true),
3353 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3354 		    false, false, false),
3355 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3356 		    true, false, true),
3357 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3358 		    true, false, true),
3359 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3360 		    true, false, true),
3361 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3362 		    true, false, true),
3363 	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3364 		    true, false, true),
3365 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3366 		    false, false, true),
3367 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3368 		    false, false, true),
3369 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3370 		    false, false, true),
3371 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3372 		    false, false, true),
3373 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3374 		    false, false, true),
3375 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3376 		    false, false, true),
3377 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3378 		    false, false, true),
3379 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3380 		    false, false, true),
3381 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3382 		    false, false, true),
3383 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3384 		    false, false, true),
3385 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3386 		    true, false, true),
3387 	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3388 		    false, false, true),
3389 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3390 		    false, false, true),
3391 	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3392 		    false, false, true),
3393 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3394 		    false, false, true),
3395 
3396 	/* SM commands */
3397 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3398 		    false, false, true),
3399 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3400 		    false, false, true),
3401 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3402 		    false, false, true),
3403 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3404 		    false, false, true),
3405 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3406 		    false, false, true),
3407 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3408 		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3409 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3410 		    &vmw_cmd_dx_set_shader_res, true, false, true),
3411 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3412 		    true, false, true),
3413 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3414 		    true, false, true),
3415 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3416 		    true, false, true),
3417 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3418 		    true, false, true),
3419 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3420 		    true, false, true),
3421 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3422 		    &vmw_cmd_dx_cid_check, true, false, true),
3423 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3424 		    true, false, true),
3425 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3426 		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3427 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3428 		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3429 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3430 		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3431 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3432 		    true, false, true),
3433 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3434 		    &vmw_cmd_dx_cid_check, true, false, true),
3435 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3436 		    &vmw_cmd_dx_cid_check, true, false, true),
3437 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3438 		    true, false, true),
3439 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3440 		    true, false, true),
3441 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3442 		    true, false, true),
3443 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3444 		    &vmw_cmd_dx_cid_check, true, false, true),
3445 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3446 		    true, false, true),
3447 	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3448 		    true, false, true),
3449 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3450 		    true, false, true),
3451 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3452 		    true, false, true),
3453 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3454 		    true, false, true),
3455 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3456 		    true, false, true),
3457 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3458 		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3459 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3460 		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3461 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3462 		    true, false, true),
3463 	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3464 		    true, false, true),
3465 	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3466 		    &vmw_cmd_dx_check_subresource, true, false, true),
3467 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3468 		    &vmw_cmd_dx_check_subresource, true, false, true),
3469 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3470 		    &vmw_cmd_dx_check_subresource, true, false, true),
3471 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3472 		    &vmw_cmd_dx_view_define, true, false, true),
3473 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3474 		    &vmw_cmd_dx_view_remove, true, false, true),
3475 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3476 		    &vmw_cmd_dx_view_define, true, false, true),
3477 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3478 		    &vmw_cmd_dx_view_remove, true, false, true),
3479 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3480 		    &vmw_cmd_dx_view_define, true, false, true),
3481 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3482 		    &vmw_cmd_dx_view_remove, true, false, true),
3483 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3484 		    &vmw_cmd_dx_so_define, true, false, true),
3485 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3486 		    &vmw_cmd_dx_cid_check, true, false, true),
3487 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3488 		    &vmw_cmd_dx_so_define, true, false, true),
3489 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3490 		    &vmw_cmd_dx_cid_check, true, false, true),
3491 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3492 		    &vmw_cmd_dx_so_define, true, false, true),
3493 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3494 		    &vmw_cmd_dx_cid_check, true, false, true),
3495 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3496 		    &vmw_cmd_dx_so_define, true, false, true),
3497 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3498 		    &vmw_cmd_dx_cid_check, true, false, true),
3499 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3500 		    &vmw_cmd_dx_so_define, true, false, true),
3501 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3502 		    &vmw_cmd_dx_cid_check, true, false, true),
3503 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3504 		    &vmw_cmd_dx_define_shader, true, false, true),
3505 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3506 		    &vmw_cmd_dx_destroy_shader, true, false, true),
3507 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3508 		    &vmw_cmd_dx_bind_shader, true, false, true),
3509 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3510 		    &vmw_cmd_dx_so_define, true, false, true),
3511 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3512 		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3513 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3514 		    &vmw_cmd_dx_set_streamoutput, true, false, true),
3515 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3516 		    &vmw_cmd_dx_set_so_targets, true, false, true),
3517 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3518 		    &vmw_cmd_dx_cid_check, true, false, true),
3519 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3520 		    &vmw_cmd_dx_cid_check, true, false, true),
3521 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3522 		    &vmw_cmd_buffer_copy_check, true, false, true),
3523 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3524 		    &vmw_cmd_pred_copy_check, true, false, true),
3525 	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3526 		    &vmw_cmd_dx_transfer_from_buffer,
3527 		    true, false, true),
3528 	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3529 		    true, false, true),
3530 
3531 	/*
3532 	 * SM5 commands
3533 	 */
3534 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3535 		    true, false, true),
3536 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3537 		    true, false, true),
3538 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3539 		    true, false, true),
3540 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3541 		    &vmw_cmd_clear_uav_float, true, false, true),
3542 	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3543 		    false, true),
3544 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3545 		    true),
3546 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3547 		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
3548 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3549 		    &vmw_cmd_instanced_indirect, true, false, true),
3550 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3551 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3552 		    &vmw_cmd_dispatch_indirect, true, false, true),
3553 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3554 		    false, true),
3555 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3556 		    &vmw_cmd_sm5_view_define, true, false, true),
3557 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3558 		    &vmw_cmd_dx_define_streamoutput, true, false, true),
3559 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3560 		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
3561 };
3562 
3563 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3564 {
3565 	u32 cmd_id = ((u32 *) buf)[0];
3566 
3567 	if (cmd_id >= SVGA_CMD_MAX) {
3568 		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3569 		const struct vmw_cmd_entry *entry;
3570 
3571 		*size = header->size + sizeof(SVGA3dCmdHeader);
3572 		cmd_id = header->id;
3573 		if (cmd_id < SVGA_3D_CMD_BASE || cmd_id >= SVGA_3D_CMD_MAX)
3574 			return false;
3575 
3576 		cmd_id -= SVGA_3D_CMD_BASE;
3577 		entry = &vmw_cmd_entries[cmd_id];
3578 		*cmd = entry->cmd_name;
3579 		return true;
3580 	}
3581 
3582 	switch (cmd_id) {
3583 	case SVGA_CMD_UPDATE:
3584 		*cmd = "SVGA_CMD_UPDATE";
3585 		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3586 		break;
3587 	case SVGA_CMD_DEFINE_GMRFB:
3588 		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3589 		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3590 		break;
3591 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3592 		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3593 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3594 		break;
3595 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3596 		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3597 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3598 		break;
3599 	default:
3600 		*cmd = "UNKNOWN";
3601 		*size = 0;
3602 		return false;
3603 	}
3604 
3605 	return true;
3606 }
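/*
 * Illustrative sketch only (the helper below is hypothetical, not part of
 * the driver): vmw_cmd_describe() lends itself to walking a raw command
 * stream for debug logging, advancing by the size it reports:
 *
 *	static void vmw_dump_commands(const void *buf, u32 len)
 *	{
 *		const char *name;
 *		u32 size;
 *
 *		while (len >= sizeof(u32) &&
 *		       vmw_cmd_describe(buf, &size, &name) &&
 *		       size > 0 && size <= len) {
 *			pr_info("cmd: %s, size: %u\n", name, size);
 *			buf += size;
 *			len -= size;
 *		}
 *	}
 */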
3607 
3608 static int vmw_cmd_check(struct vmw_private *dev_priv,
3609 			 struct vmw_sw_context *sw_context, void *buf,
3610 			 uint32_t *size)
3611 {
3612 	uint32_t cmd_id;
3613 	uint32_t size_remaining = *size;
3614 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3615 	int ret;
3616 	const struct vmw_cmd_entry *entry;
3617 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3618 
3619 	cmd_id = ((uint32_t *)buf)[0];
3620 	/* Handle any non-3D commands */
3621 	if (unlikely(cmd_id < SVGA_CMD_MAX))
3622 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3623 
3624 
3625 	cmd_id = header->id;
3626 	*size = header->size + sizeof(SVGA3dCmdHeader);
3627 
3628 	cmd_id -= SVGA_3D_CMD_BASE;
3629 	if (unlikely(*size > size_remaining))
3630 		goto out_invalid;
3631 
3632 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3633 		goto out_invalid;
3634 
3635 	entry = &vmw_cmd_entries[cmd_id];
3636 	if (unlikely(!entry->func))
3637 		goto out_invalid;
3638 
3639 	if (unlikely(!entry->user_allow && !sw_context->kernel))
3640 		goto out_privileged;
3641 
3642 	if (unlikely(entry->gb_disable && gb))
3643 		goto out_old;
3644 
3645 	if (unlikely(entry->gb_enable && !gb))
3646 		goto out_new;
3647 
3648 	ret = entry->func(dev_priv, sw_context, header);
3649 	if (unlikely(ret != 0)) {
3650 		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3651 			       cmd_id + SVGA_3D_CMD_BASE, ret);
3652 		return ret;
3653 	}
3654 
3655 	return 0;
3656 out_invalid:
3657 	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3658 		       cmd_id + SVGA_3D_CMD_BASE);
3659 	return -EINVAL;
3660 out_privileged:
3661 	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3662 		       cmd_id + SVGA_3D_CMD_BASE);
3663 	return -EPERM;
3664 out_old:
3665 	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3666 		       cmd_id + SVGA_3D_CMD_BASE);
3667 	return -EINVAL;
3668 out_new:
3669 	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3670 		       cmd_id + SVGA_3D_CMD_BASE);
3671 	return -EINVAL;
3672 }
3673 
3674 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3675 			     struct vmw_sw_context *sw_context, void *buf,
3676 			     uint32_t size)
3677 {
3678 	int32_t cur_size = size;
3679 	int ret;
3680 
3681 	sw_context->buf_start = buf;
3682 
3683 	while (cur_size > 0) {
3684 		size = cur_size;
3685 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3686 		if (unlikely(ret != 0))
3687 			return ret;
3688 		buf = (void *)((unsigned long) buf + size);
3689 		cur_size -= size;
3690 	}
3691 
3692 	if (unlikely(cur_size != 0)) {
3693 		VMW_DEBUG_USER("Command verifier out of sync.\n");
3694 		return -EINVAL;
3695 	}
3696 
3697 	return 0;
3698 }
3699 
3700 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3701 {
3702 	/* Memory is validation context memory, so no need to free it */
3703 	INIT_LIST_HEAD(&sw_context->bo_relocations);
3704 }
3705 
3706 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3707 {
3708 	struct vmw_relocation *reloc;
3709 	struct ttm_buffer_object *bo;
3710 
3711 	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3712 		bo = &reloc->vbo->base;
3713 		switch (bo->mem.mem_type) {
3714 		case TTM_PL_VRAM:
3715 			reloc->location->offset += bo->mem.start << PAGE_SHIFT;
3716 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3717 			break;
3718 		case VMW_PL_GMR:
3719 			reloc->location->gmrId = bo->mem.start;
3720 			break;
3721 		case VMW_PL_MOB:
3722 			*reloc->mob_loc = bo->mem.start;
3723 			break;
3724 		default:
3725 			BUG();
3726 		}
3727 	}
3728 	vmw_free_relocations(sw_context);
3729 }
3730 
3731 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3732 				 uint32_t size)
3733 {
3734 	if (likely(sw_context->cmd_bounce_size >= size))
3735 		return 0;
3736 
3737 	if (sw_context->cmd_bounce_size == 0)
3738 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3739 
3740 	while (sw_context->cmd_bounce_size < size) {
3741 		sw_context->cmd_bounce_size =
3742 			PAGE_ALIGN(sw_context->cmd_bounce_size +
3743 				   (sw_context->cmd_bounce_size >> 1));
3744 	}
3745 
3746 	vfree(sw_context->cmd_bounce);
3747 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3748 
3749 	if (sw_context->cmd_bounce == NULL) {
3750 		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3751 		sw_context->cmd_bounce_size = 0;
3752 		return -ENOMEM;
3753 	}
3754 
3755 	return 0;
3756 }
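/*
 * Note on growth: each round above multiplies the current bounce size by
 * roughly 1.5 and page-aligns the result. Assuming, for example, an
 * initial size of 32 KiB and 4 KiB pages, successive sizes would be
 * 32K -> 48K -> 72K -> 108K until the requested size is covered.
 */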
3757 
3758 /*
3759  * vmw_execbuf_fence_commands - create and submit a command stream fence
3760  *
3761  * Creates a fence object and submits a command stream marker.
3762  * If this fails for some reason, we sync the fifo and set *@p_fence to
3763  * NULL. It is then safe to fence buffers with a NULL pointer.
3764  *
3765  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3766  * userspace handle is created for the fence; otherwise no handle is made.
3767  */
3768 
3769 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3770 			       struct vmw_private *dev_priv,
3771 			       struct vmw_fence_obj **p_fence,
3772 			       uint32_t *p_handle)
3773 {
3774 	uint32_t sequence;
3775 	int ret;
3776 	bool synced = false;
3777 
3778 	/* p_handle implies file_priv. */
3779 	BUG_ON(p_handle != NULL && file_priv == NULL);
3780 
3781 	ret = vmw_cmd_send_fence(dev_priv, &sequence);
3782 	if (unlikely(ret != 0)) {
3783 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3784 		synced = true;
3785 	}
3786 
3787 	if (p_handle != NULL)
3788 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3789 					    sequence, p_fence, p_handle);
3790 	else
3791 		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3792 
3793 	if (unlikely(ret != 0 && !synced)) {
3794 		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3795 					 false, VMW_FENCE_WAIT_TIMEOUT);
3796 		*p_fence = NULL;
3797 	}
3798 
3799 	return ret;
3800 }
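/*
 * Typical call patterns, as used later in this file: kernel-internal
 * callers fence without a user-space handle,
 *
 *	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *
 * while the execbuf path passes @file_priv and a handle pointer so that
 * user-space can look the fence up afterwards.
 */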
3801 
3802 /**
3803  * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3804  *
3805  * @dev_priv: Pointer to a vmw_private struct.
3806  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3807  * @ret: Return value from fence object creation.
3808  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3809  * the information should be copied.
3810  * @fence: Pointer to the fence object.
3811  * @fence_handle: User-space fence handle.
3812  * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3813  * @sync_file:  Only used to clean up in case of an error in this function.
3814  *
3815  * This function copies fence information to user-space. If copying fails,
3816  * the user-space struct drm_vmw_fence_rep::error member is left untouched,
3817  * and if user-space has preloaded it with -EFAULT, the failure can then be
3818  * detected there.
3819  *
3820  * Also if copying fails, user-space will be unable to signal the fence object
3821  * so we wait for it immediately, and then unreference the user-space reference.
3822  */
3823 void
3824 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3825 			    struct vmw_fpriv *vmw_fp, int ret,
3826 			    struct drm_vmw_fence_rep __user *user_fence_rep,
3827 			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3828 			    int32_t out_fence_fd, struct sync_file *sync_file)
3829 {
3830 	struct drm_vmw_fence_rep fence_rep;
3831 
3832 	if (user_fence_rep == NULL)
3833 		return;
3834 
3835 	memset(&fence_rep, 0, sizeof(fence_rep));
3836 
3837 	fence_rep.error = ret;
3838 	fence_rep.fd = out_fence_fd;
3839 	if (ret == 0) {
3840 		BUG_ON(fence == NULL);
3841 
3842 		fence_rep.handle = fence_handle;
3843 		fence_rep.seqno = fence->base.seqno;
3844 		vmw_update_seqno(dev_priv);
3845 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3846 	}
3847 
3848 	/*
3849 	 * copy_to_user errors will be detected by user space not seeing
3850 	 * fence_rep::error filled in. Typically user-space would have pre-set
3851 	 * that member to -EFAULT.
3852 	 */
3853 	ret = copy_to_user(user_fence_rep, &fence_rep,
3854 			   sizeof(fence_rep));
3855 
3856 	/*
3857 	 * User-space lost the fence object. We need to sync and unreference the
3858 	 * handle.
3859 	 */
3860 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3861 		if (sync_file)
3862 			fput(sync_file->file);
3863 
3864 		if (fence_rep.fd != -1) {
3865 			put_unused_fd(fence_rep.fd);
3866 			fence_rep.fd = -1;
3867 		}
3868 
3869 		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3870 					  TTM_REF_USAGE);
3871 		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3872 		(void) vmw_fence_obj_wait(fence, false, false,
3873 					  VMW_FENCE_WAIT_TIMEOUT);
3874 	}
3875 }
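/*
 * Illustrative user-space counterpart (a sketch, assuming the uapi in
 * vmwgfx_drm.h; the variable names are hypothetical): pre-seeding
 * fence_rep.error makes a lost copy_to_user() above detectable:
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (unsigned long)&rep;
 *	... submit the execbuf ioctl ...
 *	if (rep.error != 0) {
 *		... the kernel never filled in rep; treat as failure ...
 *	}
 */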
3876 
3877 /**
3878  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3879  *
3880  * @dev_priv: Pointer to a device private structure.
3881  * @kernel_commands: Pointer to the unpatched command batch.
3882  * @command_size: Size of the unpatched command batch.
3883  * @sw_context: Structure holding the relocation lists.
3884  *
3885  * Side effects: If this function returns 0, then the command batch pointed to
3886  * by @kernel_commands will have been modified.
3887  */
3888 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3889 				   void *kernel_commands, u32 command_size,
3890 				   struct vmw_sw_context *sw_context)
3891 {
3892 	void *cmd;
3893 
3894 	if (sw_context->dx_ctx_node)
3895 		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3896 					  sw_context->dx_ctx_node->ctx->id);
3897 	else
3898 		cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3899 
3900 	if (!cmd)
3901 		return -ENOMEM;
3902 
3903 	vmw_apply_relocations(sw_context);
3904 	memcpy(cmd, kernel_commands, command_size);
3905 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3906 	vmw_resource_relocations_free(&sw_context->res_relocations);
3907 	vmw_cmd_commit(dev_priv, command_size);
3908 
3909 	return 0;
3910 }
3911 
3912 /**
3913  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3914  * command buffer manager.
3915  *
3916  * @dev_priv: Pointer to a device private structure.
3917  * @header: Opaque handle to the command buffer allocation.
3918  * @command_size: Size of the unpatched command batch.
3919  * @sw_context: Structure holding the relocation lists.
3920  *
3921  * Side effects: If this function returns 0, then the command buffer represented
3922  * by @header will have been modified.
3923  */
3924 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3925 				     struct vmw_cmdbuf_header *header,
3926 				     u32 command_size,
3927 				     struct vmw_sw_context *sw_context)
3928 {
3929 	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3930 		  SVGA3D_INVALID_ID);
3931 	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3932 				       header);
3933 
3934 	vmw_apply_relocations(sw_context);
3935 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3936 	vmw_resource_relocations_free(&sw_context->res_relocations);
3937 	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3938 
3939 	return 0;
3940 }
3941 
3942 /**
3943  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3944  * submission using a command buffer.
3945  *
3946  * @dev_priv: Pointer to a device private structure.
3947  * @user_commands: User-space pointer to the commands to be submitted.
3948  * @command_size: Size of the unpatched command batch.
3949  * @header: Out parameter returning the opaque pointer to the command buffer.
3950  *
3951  * This function checks whether we can use the command buffer manager for
3952  * submission and if so, creates a command buffer of suitable size and copies
3953  * the user data into that buffer.
3954  *
3955  * On successful return, the function returns a pointer to the data in the
3956  * command buffer and *@header is set to non-NULL.
3957  *
3958  * @kernel_commands: If command buffers could not be used, the function will
3959  * return the value @kernel_commands had on entry. That value may be
3960  * NULL. In that case, the value of *@header will be set to NULL.
3961  *
3962  * If an error is encountered, the function will return a pointer error value.
3963  * If the function is interrupted by a signal while sleeping, it will return
3964  * -ERESTARTSYS cast to a pointer error value.
3965  */
3966 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3967 				void __user *user_commands,
3968 				void *kernel_commands, u32 command_size,
3969 				struct vmw_cmdbuf_header **header)
3970 {
3971 	size_t cmdbuf_size;
3972 	int ret;
3973 
3974 	*header = NULL;
3975 	if (command_size > SVGA_CB_MAX_SIZE) {
3976 		VMW_DEBUG_USER("Command buffer is too large.\n");
3977 		return ERR_PTR(-EINVAL);
3978 	}
3979 
3980 	if (!dev_priv->cman || kernel_commands)
3981 		return kernel_commands;
3982 
3983 	/* If possible, add a little space for fencing. */
3984 	cmdbuf_size = command_size + 512;
3985 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3986 	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3987 					   header);
3988 	if (IS_ERR(kernel_commands))
3989 		return kernel_commands;
3990 
3991 	ret = copy_from_user(kernel_commands, user_commands, command_size);
3992 	if (ret) {
3993 		VMW_DEBUG_USER("Failed copying commands.\n");
3994 		vmw_cmdbuf_header_free(*header);
3995 		*header = NULL;
3996 		return ERR_PTR(-EFAULT);
3997 	}
3998 
3999 	return kernel_commands;
4000 }
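/*
 * Callers must check the return value with IS_ERR(). A non-error return
 * with *header == NULL means command buffers could not be used and the
 * batch will instead go out through the fifo path.
 */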
4001 
4002 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4003 				   struct vmw_sw_context *sw_context,
4004 				   uint32_t handle)
4005 {
4006 	struct vmw_resource *res;
4007 	int ret;
4008 	unsigned int size;
4009 
4010 	if (handle == SVGA3D_INVALID_ID)
4011 		return 0;
4012 
4013 	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4014 	ret = vmw_validation_preload_res(sw_context->ctx, size);
4015 	if (ret)
4016 		return ret;
4017 
4018 	res = vmw_user_resource_noref_lookup_handle
4019 		(dev_priv, sw_context->fp->tfile, handle,
4020 		 user_context_converter);
4021 	if (IS_ERR(res)) {
4022 		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
4023 			       (unsigned int) handle);
4024 		return PTR_ERR(res);
4025 	}
4026 
4027 	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
4028 	if (unlikely(ret != 0))
4029 		return ret;
4030 
4031 	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4032 	sw_context->man = vmw_context_res_man(res);
4033 
4034 	return 0;
4035 }
4036 
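/*
 * vmw_execbuf_process - Validate and submit a user-space command batch.
 *
 * In outline: obtain the batch (via vmw_execbuf_cmdbuf() or the bounce
 * buffer), optionally tie a DX context, verify every command with
 * vmw_cmd_check_all(), reserve and validate buffers and resources, patch
 * and submit through the fifo or the command buffer manager, fence the
 * submission, and copy fence information back to user-space.
 */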
4037 int vmw_execbuf_process(struct drm_file *file_priv,
4038 			struct vmw_private *dev_priv,
4039 			void __user *user_commands, void *kernel_commands,
4040 			uint32_t command_size, uint64_t throttle_us,
4041 			uint32_t dx_context_handle,
4042 			struct drm_vmw_fence_rep __user *user_fence_rep,
4043 			struct vmw_fence_obj **out_fence, uint32_t flags)
4044 {
4045 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
4046 	struct vmw_fence_obj *fence = NULL;
4047 	struct vmw_cmdbuf_header *header;
4048 	uint32_t handle = 0;
4049 	int ret;
4050 	int32_t out_fence_fd = -1;
4051 	struct sync_file *sync_file = NULL;
4052 	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
4053 
4054 	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
4055 
4056 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4057 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4058 		if (out_fence_fd < 0) {
4059 			VMW_DEBUG_USER("Failed to get a fence fd.\n");
4060 			return out_fence_fd;
4061 		}
4062 	}
4063 
4064 	if (throttle_us) {
4065 		VMW_DEBUG_USER("Throttling is no longer supported.\n");
4066 	}
4067 
4068 	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4069 					     kernel_commands, command_size,
4070 					     &header);
4071 	if (IS_ERR(kernel_commands)) {
4072 		ret = PTR_ERR(kernel_commands);
4073 		goto out_free_fence_fd;
4074 	}
4075 
4076 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4077 	if (ret) {
4078 		ret = -ERESTARTSYS;
4079 		goto out_free_header;
4080 	}
4081 
4082 	sw_context->kernel = false;
4083 	if (kernel_commands == NULL) {
4084 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
4085 		if (unlikely(ret != 0))
4086 			goto out_unlock;
4087 
4088 		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4089 				     command_size);
4090 		if (unlikely(ret != 0)) {
4091 			ret = -EFAULT;
4092 			VMW_DEBUG_USER("Failed copying commands.\n");
4093 			goto out_unlock;
4094 		}
4095 
4096 		kernel_commands = sw_context->cmd_bounce;
4097 	} else if (!header) {
4098 		sw_context->kernel = true;
4099 	}
4100 
4101 	sw_context->fp = vmw_fpriv(file_priv);
4102 	INIT_LIST_HEAD(&sw_context->ctx_list);
4103 	sw_context->cur_query_bo = dev_priv->pinned_bo;
4104 	sw_context->last_query_ctx = NULL;
4105 	sw_context->needs_post_query_barrier = false;
4106 	sw_context->dx_ctx_node = NULL;
4107 	sw_context->dx_query_mob = NULL;
4108 	sw_context->dx_query_ctx = NULL;
4109 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4110 	INIT_LIST_HEAD(&sw_context->res_relocations);
4111 	INIT_LIST_HEAD(&sw_context->bo_relocations);
4112 
4113 	if (sw_context->staged_bindings)
4114 		vmw_binding_state_reset(sw_context->staged_bindings);
4115 
4116 	if (!sw_context->res_ht_initialized) {
4117 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4118 		if (unlikely(ret != 0))
4119 			goto out_unlock;
4120 
4121 		sw_context->res_ht_initialized = true;
4122 	}
4123 
4124 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4125 	sw_context->ctx = &val_ctx;
4126 	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4127 	if (unlikely(ret != 0))
4128 		goto out_err_nores;
4129 
4130 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4131 				command_size);
4132 	if (unlikely(ret != 0))
4133 		goto out_err_nores;
4134 
4135 	ret = vmw_resources_reserve(sw_context);
4136 	if (unlikely(ret != 0))
4137 		goto out_err_nores;
4138 
4139 	ret = vmw_validation_bo_reserve(&val_ctx, true);
4140 	if (unlikely(ret != 0))
4141 		goto out_err_nores;
4142 
4143 	ret = vmw_validation_bo_validate(&val_ctx, true);
4144 	if (unlikely(ret != 0))
4145 		goto out_err;
4146 
4147 	ret = vmw_validation_res_validate(&val_ctx, true);
4148 	if (unlikely(ret != 0))
4149 		goto out_err;
4150 
4151 	vmw_validation_drop_ht(&val_ctx);
4152 
4153 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4154 	if (unlikely(ret != 0)) {
4155 		ret = -ERESTARTSYS;
4156 		goto out_err;
4157 	}
4158 
4159 	if (dev_priv->has_mob) {
4160 		ret = vmw_rebind_contexts(sw_context);
4161 		if (unlikely(ret != 0))
4162 			goto out_unlock_binding;
4163 	}
4164 
4165 	if (!header) {
4166 		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4167 					      command_size, sw_context);
4168 	} else {
4169 		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4170 						sw_context);
4171 		header = NULL;
4172 	}
4173 	mutex_unlock(&dev_priv->binding_mutex);
4174 	if (ret)
4175 		goto out_err;
4176 
4177 	vmw_query_bo_switch_commit(dev_priv, sw_context);
4178 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4179 					 (user_fence_rep) ? &handle : NULL);
4180 	/*
4181 	 * This error is harmless, because if fence submission fails,
4182 	 * vmw_execbuf_fence_commands() will sync. The error will be propagated
4183 	 * to user-space in @user_fence_rep.
4184 	 */
4185 	if (ret != 0)
4186 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4187 
4188 	vmw_execbuf_bindings_commit(sw_context, false);
4189 	vmw_bind_dx_query_mob(sw_context);
4190 	vmw_validation_res_unreserve(&val_ctx, false);
4191 
4192 	vmw_validation_bo_fence(sw_context->ctx, fence);
4193 
4194 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4195 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4196 
4197 	/*
4198 	 * If anything fails here, give up trying to export the fence and do a
4199 	 * sync since the user mode will not be able to sync the fence itself.
4200 	 * This ensures we are still functionally correct.
4201 	 */
4202 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4203 
4204 		sync_file = sync_file_create(&fence->base);
4205 		if (!sync_file) {
4206 			VMW_DEBUG_USER("Sync file create failed for fence\n");
4207 			put_unused_fd(out_fence_fd);
4208 			out_fence_fd = -1;
4209 
4210 			(void) vmw_fence_obj_wait(fence, false, false,
4211 						  VMW_FENCE_WAIT_TIMEOUT);
4212 		} else {
4213 			/* Link the fence with the FD created earlier */
4214 			fd_install(out_fence_fd, sync_file->file);
4215 		}
4216 	}
4217 
4218 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4219 				    user_fence_rep, fence, handle, out_fence_fd,
4220 				    sync_file);
4221 
4222 	/* Don't unreference when handing fence out */
4223 	if (unlikely(out_fence != NULL)) {
4224 		*out_fence = fence;
4225 		fence = NULL;
4226 	} else if (likely(fence != NULL)) {
4227 		vmw_fence_obj_unreference(&fence);
4228 	}
4229 
4230 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4231 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4232 
4233 	/*
4234 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4235 	 * in resource destruction paths.
4236 	 */
4237 	vmw_validation_unref_lists(&val_ctx);
4238 
4239 	return 0;
4240 
4241 out_unlock_binding:
4242 	mutex_unlock(&dev_priv->binding_mutex);
4243 out_err:
4244 	vmw_validation_bo_backoff(&val_ctx);
4245 out_err_nores:
4246 	vmw_execbuf_bindings_commit(sw_context, true);
4247 	vmw_validation_res_unreserve(&val_ctx, true);
4248 	vmw_resource_relocations_free(&sw_context->res_relocations);
4249 	vmw_free_relocations(sw_context);
4250 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4251 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4252 out_unlock:
4253 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4254 	vmw_validation_drop_ht(&val_ctx);
4255 	WARN_ON(!list_empty(&sw_context->ctx_list));
4256 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4257 
4258 	/*
4259 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4260 	 * in resource destruction paths.
4261 	 */
4262 	vmw_validation_unref_lists(&val_ctx);
4263 out_free_header:
4264 	if (header)
4265 		vmw_cmdbuf_header_free(header);
4266 out_free_fence_fd:
4267 	if (out_fence_fd >= 0)
4268 		put_unused_fd(out_fence_fd);
4269 
4270 	return ret;
4271 }
4272 
4273 /**
4274  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4275  *
4276  * @dev_priv: The device private structure.
4277  *
4278  * This function is called to idle the fifo and unpin the query buffer if the
4279  * normal way to do this hits an error, which should typically be extremely
4280  * rare.
4281  */
4282 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4283 {
4284 	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4285 
4286 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4287 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4288 	if (dev_priv->dummy_query_bo_pinned) {
4289 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4290 		dev_priv->dummy_query_bo_pinned = false;
4291 	}
4292 }
4293 
4294 
4295 /**
4296  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4297  * bo.
4298  *
4299  * @dev_priv: The device private structure.
4300  * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4301  * query barrier that flushes all queries touching the current buffer pointed to
4302  * by @dev_priv->pinned_bo
4303  *
4304  * This function should be used to unpin the pinned query bo, or as a query
4305  * barrier when we need to make sure that all queries have finished before the
4306  * next fifo command. (For example on hardware context destructions where the
4307  * hardware may otherwise leak unfinished queries).
4308  *
4309  * This function does not return any failure codes, but makes attempts to
4310  * unpin safely in case of errors.
4311  *
4312  * The function will synchronize on the previous query barrier, and will thus
4313  * not finish until that barrier has executed.
4314  *
4315  * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before
4316  * calling this function.
4317  */
4318 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4319 				     struct vmw_fence_obj *fence)
4320 {
4321 	int ret = 0;
4322 	struct vmw_fence_obj *lfence = NULL;
4323 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4324 
4325 	if (dev_priv->pinned_bo == NULL)
4326 		goto out_unlock;
4327 
4328 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
4329 				    false);
4330 	if (ret)
4331 		goto out_no_reserve;
4332 
4333 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
4334 				    false);
4335 	if (ret)
4336 		goto out_no_reserve;
4337 
4338 	ret = vmw_validation_bo_reserve(&val_ctx, false);
4339 	if (ret)
4340 		goto out_no_reserve;
4341 
4342 	if (dev_priv->query_cid_valid) {
4343 		BUG_ON(fence != NULL);
4344 		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4345 		if (ret)
4346 			goto out_no_emit;
4347 		dev_priv->query_cid_valid = false;
4348 	}
4349 
4350 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4351 	if (dev_priv->dummy_query_bo_pinned) {
4352 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4353 		dev_priv->dummy_query_bo_pinned = false;
4354 	}
4355 	if (fence == NULL) {
4356 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4357 						  NULL);
4358 		fence = lfence;
4359 	}
4360 	vmw_validation_bo_fence(&val_ctx, fence);
4361 	if (lfence != NULL)
4362 		vmw_fence_obj_unreference(&lfence);
4363 
4364 	vmw_validation_unref_lists(&val_ctx);
4365 	vmw_bo_unreference(&dev_priv->pinned_bo);
4366 
4367 out_unlock:
4368 	return;
4369 out_no_emit:
4370 	vmw_validation_bo_backoff(&val_ctx);
4371 out_no_reserve:
4372 	vmw_validation_unref_lists(&val_ctx);
4373 	vmw_execbuf_unpin_panic(dev_priv);
4374 	vmw_bo_unreference(&dev_priv->pinned_bo);
4375 }
4376 
4377 /**
4378  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4379  *
4380  * @dev_priv: The device private structure.
4381  *
4382  * This function should be used to unpin the pinned query bo, or as a query
4383  * barrier when we need to make sure that all queries have finished before the
4384  * next fifo command. (For example on hardware context destructions where the
4385  * hardware may otherwise leak unfinished queries).
4386  *
4387  * This function does not return any failure codes, but makes attempts to
4388  * unpin safely in case of errors.
4389  *
4390  * The function will synchronize on the previous query barrier, and will thus
4391  * not finish until that barrier has executed.
4392  */
4393 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4394 {
4395 	mutex_lock(&dev_priv->cmdbuf_mutex);
4396 	if (dev_priv->query_cid_valid)
4397 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4398 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4399 }
4400 
4401 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4402 		      struct drm_file *file_priv)
4403 {
4404 	struct vmw_private *dev_priv = vmw_priv(dev);
4405 	struct drm_vmw_execbuf_arg *arg = data;
4406 	int ret;
4407 	struct dma_fence *in_fence = NULL;
4408 
4409 	/*
4410 	 * Extend the ioctl argument while maintaining backwards compatibility:
4411 	 * We take different code paths depending on the value of arg->version.
4412 	 *
4413 	 * Note: The ioctl argument is extended and zeropadded by core DRM.
4414 	 */
4415 	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4416 		     arg->version == 0)) {
4417 		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4418 		return -EINVAL;
4419 	}
4420 
4421 	switch (arg->version) {
4422 	case 1:
4423 		/* For v1, core DRM has extended + zeropadded the data */
4424 		arg->context_handle = (uint32_t) -1;
4425 		break;
4426 	case 2:
4427 	default:
4428 		/* For v2 and later, core DRM has correctly copied it */
4429 		break;
4430 	}
4431 
4432 	/* If a fence FD was imported from elsewhere, wait on it */
4433 	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4434 		in_fence = sync_file_get_fence(arg->imported_fence_fd);
4435 
4436 		if (!in_fence) {
4437 			VMW_DEBUG_USER("Cannot get imported fence\n");
4438 			return -EINVAL;
4439 		}
4440 
4441 		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4442 		if (ret)
4443 			goto out;
4444 	}
4445 
4446 	ret = vmw_execbuf_process(file_priv, dev_priv,
4447 				  (void __user *)(unsigned long)arg->commands,
4448 				  NULL, arg->command_size, arg->throttle_us,
4449 				  arg->context_handle,
4450 				  (void __user *)(unsigned long)arg->fence_rep,
4451 				  NULL, arg->flags);
4452 
4453 	if (unlikely(ret != 0))
4454 		goto out;
4455 
4456 	vmw_kms_cursor_post_execbuf(dev_priv);
4457 
4458 out:
4459 	if (in_fence)
4460 		dma_fence_put(in_fence);
4461 	return ret;
4462 }
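/*
 * Illustrative user-space sketch (hypothetical snippet, assuming the uapi
 * in vmwgfx_drm.h and libdrm's drmCommandWrite()): a minimal v2 submission
 * without a DX context could look like
 *
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (unsigned long)cmds,
 *		.command_size = cmd_size,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.context_handle = SVGA3D_INVALID_ID,
 *		.fence_rep = (unsigned long)&rep,
 *	};
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */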
4463