// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/

#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_so.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/sync_file.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)                                      \
({                                                                          \
        __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({         \
                VMW_DEBUG_USER("SM context is not set at %s\n", __func__);  \
                __sw_context->dx_ctx_node;                                  \
        });                                                                 \
})

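/*
 * Helper macro to declare a variable consisting of an SVGA3dCmdHeader
 * followed by a command body of the given type.
 */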
#define VMW_DECLARE_CMD_VAR(__var, __type)                                  \
        struct {                                                            \
                SVGA3dCmdHeader header;                                     \
                __type body;                                                \
        } __var

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
        struct list_head head;
        struct vmw_bo *vbo;
        union {
                SVGAMobId *mob_loc;
                SVGAGuestPtr *location;
        };
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
        vmw_res_rel_normal,
        vmw_res_rel_nop,
        vmw_res_rel_cond_nop,
        vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        u32 offset:29;
        enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
        struct list_head head;
        struct vmw_resource *ctx;
        struct vmw_ctx_binding_state *cur;
        struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
        const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)      \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),              \
                                       (_gb_disable), (_gb_enable), #_cmd}

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_bo **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
        return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
                                        bool backoff)
{
        struct vmw_ctx_validation_info *entry;

        list_for_each_entry(entry, &sw_context->ctx_list, head) {
                if (!backoff)
                        vmw_binding_state_commit(entry->cur, entry->staged);

                if (entry->staged != sw_context->staged_bindings)
                        vmw_binding_state_free(entry->staged);
                else
                        sw_context->staged_bindings_inuse = false;
        }

        /* List entries are freed with the validation context */
        INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
        if (sw_context->dx_query_mob)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource *res,
                                   struct vmw_ctx_validation_info *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged)) {
                        ret = PTR_ERR(node->staged);
                        node->staged = NULL;
                        goto out_err;
                }
        } else {
                node->staged = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        node->ctx = res;
        node->cur = vmw_context_binding_state(res);
        list_add_tail(&node->head, &sw_context->ctx_list);

        return 0;

out_err:
        return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
                                         enum vmw_res_type res_type)
{
        return (res_type == vmw_res_dx_context ||
                (res_type == vmw_res_context && dev_priv->has_mob)) ?
                sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
                                      struct vmw_resource *res,
                                      void *private)
{
        rcache->res = res;
        rcache->private = private;
        rcache->valid = 1;
        rcache->valid_handle = 0;
}

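/**
 * enum vmw_val_add_flags - Flags for vmw_execbuf_res_val_add()
 *
 * @vmw_val_add_flag_none: No special handling.
 * @vmw_val_add_flag_noctx: Skip the extra first-usage context setup normally
 * performed when a context resource is added to the validation list.
 */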
enum vmw_val_add_flags {
        vmw_val_add_flag_none  = 0,
        vmw_val_add_flag_noctx = 1 << 0,
};

/**
 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 * @flags: Validation flags; see enum vmw_val_add_flags.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
                                   struct vmw_resource *res,
                                   u32 dirty,
                                   u32 flags)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        enum vmw_res_type res_type = vmw_res_type(res);
        struct vmw_res_cache_entry *rcache;
        struct vmw_ctx_validation_info *ctx_info;
        bool first_usage;
        unsigned int priv_size;

        rcache = &sw_context->res_cache[res_type];
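        /* Fast path: the resource is already on the validation list. */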
        if (likely(rcache->valid && rcache->res == res)) {
                if (dirty)
                        vmw_validation_res_set_dirty(sw_context->ctx,
                                                     rcache->private, dirty);
                return 0;
        }

        if ((flags & vmw_val_add_flag_noctx) != 0) {
                ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
                                                  (void **)&ctx_info, NULL);
                if (ret)
                        return ret;

        } else {
                priv_size = vmw_execbuf_res_size(dev_priv, res_type);
                ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
                                                  dirty, (void **)&ctx_info,
                                                  &first_usage);
                if (ret)
                        return ret;

                if (priv_size && first_usage) {
                        ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
                                                      ctx_info);
                        if (ret) {
                                VMW_DEBUG_USER("Failed first usage context setup.\n");
                                return ret;
                        }
                }
        }

        vmw_execbuf_rcache_update(rcache, res, ctx_info);
        return 0;
}

/**
 * vmw_view_res_val_add - Add a view, and the surface it points to, to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise it may be
         * swapped out when the view is validated.
         */
        ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
                                      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
        if (ret)
                return ret;

        return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
                                       vmw_val_add_flag_noctx);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it points
 * to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                    enum vmw_view_type view_type, u32 id)
{
        struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node)
                return ERR_PTR(-EINVAL);

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return view;

        ret = vmw_view_res_val_add(sw_context, view);
        if (ret)
                return ERR_PTR(ret);

        return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;
        u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
                SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

        /* Add all cotables to the validation list. */
        if (has_sm4_context(dev_priv) &&
            vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < cotable_max; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR_OR_NULL(res))
                                continue;

                        ret = vmw_execbuf_res_val_add(sw_context, res,
                                                      VMW_RES_DIRTY_SET,
                                                      vmw_val_add_flag_noctx);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        /* Add all resources bound to the context to the validation list */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_execbuf_res_val_add(sw_context, entry->res,
                                                      vmw_binding_dirtying(entry->bt),
                                                      vmw_val_add_flag_noctx);
                if (unlikely(ret != 0))
                        break;
        }

        if (has_sm4_context(dev_priv) &&
            vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_bo *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob) {
                        vmw_bo_placement_set(dx_query_mob,
                                             VMW_BO_DOMAIN_MOB,
                                             VMW_BO_DOMAIN_MOB);
                        ret = vmw_validation_add_bo(sw_context->ctx,
                                                    dx_query_mob);
                }
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
                                       const struct vmw_resource *res,
                                       unsigned long offset,
                                       enum vmw_resource_relocation_type
                                       rel_type)
{
        struct vmw_resource_relocation *rel;

        rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
        if (unlikely(!rel)) {
                VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        rel->rel_type = rel_type;
        list_add_tail(&rel->head, &sw_context->res_relocations);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        /* Memory is validation context memory, so no need to free it */
        INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        /* Validate the struct vmw_resource_relocation member size */
        BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
        BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

        list_for_each_entry(rel, list, head) {
                u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
                switch (rel->rel_type) {
                case vmw_res_rel_normal:
                        *addr = rel->res->id;
                        break;
                case vmw_res_rel_nop:
                        *addr = SVGA_3D_CMD_NOP;
                        break;
                default:
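                        /*
                         * vmw_res_rel_cond_nop: Replace the command with a NOP
                         * only if the resource never got a valid device id.
                         */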
                        if (rel->res->id == -1)
                                *addr = SVGA_3D_CMD_NOP;
                        break;
                }
        }
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        int ret;

        ret = vmw_validation_res_reserve(sw_context->ctx, true);
        if (ret)
                return ret;

        if (sw_context->dx_query_mob) {
                struct vmw_bo *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to a location that, on exit, will be populated with a
 * pointer to the resource.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  u32 dirty,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource **p_res)
{
        struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        int ret = 0;
        bool needs_unref = false;

        if (p_res)
                *p_res = NULL;

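        /* An invalid id is legal for all resource types except contexts. */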
        if (*id_loc == SVGA3D_INVALID_ID) {
                if (res_type == vmw_res_context) {
                        VMW_DEBUG_USER("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
                res = rcache->res;
                if (dirty)
                        vmw_validation_res_set_dirty(sw_context->ctx,
                                                     rcache->private, dirty);
        } else {
                unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

                ret = vmw_validation_preload_res(sw_context->ctx, size);
                if (ret)
                        return ret;

                ret = vmw_user_resource_lookup_handle
                        (dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
                if (ret != 0) {
                        VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
                                       (unsigned int) *id_loc);
                        return ret;
                }
                needs_unref = true;

                ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
                if (unlikely(ret != 0))
                        goto res_check_done;

                if (rcache->valid && rcache->res == res) {
                        rcache->valid_handle = true;
                        rcache->handle = *id_loc;
                }
        }

        ret = vmw_resource_relocation_add(sw_context, res,
                                          vmw_ptr_diff(sw_context->buf_start,
                                                       id_loc),
                                          vmw_res_rel_normal);
        if (p_res)
                *p_res = res;

res_check_done:
        if (needs_unref)
                vmw_resource_unreference(&res);

        return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_bo *dx_query_mob;
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

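        /* Nothing to do if there is no query MOB, or it is already bound. */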
        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
        if (cmd == NULL)
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
        cmd->body.mobid = dx_query_mob->tbo.resource->start;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_ctx_validation_info *val;
        int ret;

        list_for_each_entry(val, &sw_context->ctx_list, head) {
                ret = vmw_binding_rebind_all(val->cur);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                VMW_DEBUG_USER("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->ctx);
                if (ret != 0) {
                        VMW_DEBUG_USER("Failed to rebind queries.\n");
                        return ret;
                }
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
        u32 i;

        if (!ctx_node)
                return -EINVAL;

        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_id_val_add(sw_context, view_type,
                                                   view_ids[i]);
                        if (IS_ERR(view)) {
                                VMW_DEBUG_USER("View not found.\n");
                                return PTR_ERR(view);
                        }
                }
                binding.bi.ctx = ctx_node->ctx;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged, &binding.bi,
                                shader_slot, binding.slot);
        }

        return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
                container_of(header, typeof(*cmd), header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 VMW_RES_DIRTY_SET, user_context_converter,
                                 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
                          struct vmw_resource *res)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[vmw_res_type(res)];

        if (rcache->valid && rcache->res == res)
                return rcache->private;

        WARN_ON_ONCE(true);
        return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
        struct vmw_resource *ctx;
        struct vmw_resource *res;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                VMW_DEBUG_USER("Illegal render target type %u.\n",
                               (unsigned int) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                VMW_RES_DIRTY_SET, user_context_converter,
                                &cmd->body.cid, &ctx);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_SET, user_surface_converter,
                                &cmd->body.target.sid, &res);
        if (unlikely(ret))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_ctx_validation_info *node;

                node = vmw_execbuf_info_from_res(sw_context, ctx);
                if (!node)
                        return -EINVAL;

                binding.bi.ctx = ctx;
                binding.bi.res = res;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
        }

        return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
                container_of(header, typeof(*cmd), header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_NONE, user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
                container_of(header, typeof(*cmd), header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_NONE, user_surface_converter,
                                 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_bo *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

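                /* Query result buffers are limited to four pages. */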
                if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
                        VMW_DEBUG_USER("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
                        ret = vmw_validation_add_bo(sw_context->ctx,
                                                    sw_context->cur_query_bo);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
                ret = vmw_validation_add_bo(sw_context->ctx,
                                            dev_priv->dummy_query_bo);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */
        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
                        vmw_bo_unreference(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

                        /*
                         * We pin also the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting dummy queries
                         * in context destroy paths.
                         */
                        if (!dev_priv->dummy_query_bo_pinned) {
                                vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                                    true);
                                dev_priv->dummy_query_bo_pinned = true;
                        }

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                vmw_bo_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_bo **vmw_bo_p)
{
        struct vmw_bo *vmw_bo, *tmp_bo;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        vmw_validation_preload_bo(sw_context->ctx);
        ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
        if (ret != 0) {
                drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
                return ret;
        }
        vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
        tmp_bo = vmw_bo;
        vmw_user_bo_unref(&tmp_bo);
        if (unlikely(ret != 0))
                return ret;

        reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
        if (!reloc)
                return -ENOMEM;

        reloc->mob_loc = id;
        reloc->vbo = vmw_bo;

        *vmw_bo_p = vmw_bo;
        list_add_tail(&reloc->head, &sw_context->bo_relocations);

        return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_bo **vmw_bo_p)
{
        struct vmw_bo *vmw_bo, *tmp_bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        vmw_validation_preload_bo(sw_context->ctx);
        ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
        if (ret != 0) {
                drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
                return ret;
        }
        vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
                             VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
        tmp_bo = vmw_bo;
        vmw_user_bo_unref(&tmp_bo);
        if (unlikely(ret != 0))
                return ret;

        reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
        if (!reloc)
                return -ENOMEM;

        reloc->location = ptr;
        reloc->vbo = vmw_bo;
        *vmw_bo_p = vmw_bo;
        list_add_tail(&reloc->head, &sw_context->bo_relocations);

        return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
        struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
        struct vmw_resource *cotable_res;
        int ret;

        if (!ctx_node)
                return -EINVAL;

        cmd = container_of(header, typeof(*cmd), header);

        if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
            cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
                return -EINVAL;

        cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
        if (IS_ERR_OR_NULL(cotable_res))
                return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
        ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

        return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
        struct vmw_bo *vmw_bo;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);

        /*
         * Look up the buffer pointed to by q.mobid, put it on the relocation
         * list so its kernel mode MOB ID can be filled in later
         */
        ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
                                    &vmw_bo);

        if (ret != 0)
                return ret;

        sw_context->dx_query_mob = vmw_bo;
        sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
        return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
                container_of(header, typeof(*cmd), header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 VMW_RES_DIRTY_SET, user_context_converter,
                                 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
                               struct vmw_sw_context *sw_context,
                               SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
                container_of(header, typeof(*cmd), header);

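        /*
         * On guest-backed devices, rewrite the legacy query command in place
         * to its guest-backed equivalent and revalidate it.
         */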
        if (unlikely(dev_priv->has_mob)) {
                VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.body.cid = cmd->body.cid;
                gb_cmd.body.type = cmd->body.type;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 VMW_RES_DIRTY_SET, user_context_converter,
                                 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
{
        struct vmw_bo *vmw_bo;
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

        return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_bo *vmw_bo;
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        if (dev_priv->has_mob) {
                VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.body.cid = cmd->body.cid;
                gb_cmd.body.type = cmd->body.type;
                gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
                gb_cmd.body.offset = cmd->body.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->body.guestResult, &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

        return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_bo *vmw_bo;
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_bo *vmw_bo;
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        if (dev_priv->has_mob) {
                VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.body.cid = cmd->body.cid;
                gb_cmd.body.type = cmd->body.type;
                gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
                gb_cmd.body.offset = cmd->body.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->body.guestResult, &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
{
        struct vmw_bo *vmw_bo = NULL;
        struct vmw_resource *res;
        struct vmw_surface *srf = NULL;
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
        int ret;
        SVGA3dCmdSurfaceDMASuffix *suffix;
        uint32_t bo_size;
        bool dirty;

        cmd = container_of(header, typeof(*cmd), header);
        suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
                                               header->size - sizeof(*suffix));

        /* Make sure device and verifier stay in sync. */
        if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
                VMW_DEBUG_USER("Invalid DMA suffix size.\n");
                return -EINVAL;
        }

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->body.guest.ptr, &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        /* Make sure DMA doesn't cross BO boundaries. */
        bo_size = vmw_bo->tbo.base.size;
        if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
                VMW_DEBUG_USER("Invalid DMA offset.\n");
                return -EINVAL;
        }

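        /* Clamp the DMA transfer so it stays within the buffer object. */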
        bo_size -= cmd->body.guest.ptr.offset;
        if (unlikely(suffix->maximumOffset > bo_size))
                suffix->maximumOffset = bo_size;

        dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
                VMW_RES_DIRTY_SET : 0;
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, dirty,
                                user_surface_converter, &cmd->body.host.sid,
                                NULL);
        if (unlikely(ret != 0)) {
                if (unlikely(ret != -ERESTARTSYS))
                        VMW_DEBUG_USER("could not find surface for DMA.\n");
                return ret;
        }

        res = sw_context->res_cache[vmw_res_surface].res;
        if (!res) {
                VMW_DEBUG_USER("Invalid DMA surface.\n");
                return -EINVAL;
        }

        srf = vmw_res_to_srf(res);
        vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo,
                             header);

        return 0;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
                        struct vmw_sw_context *sw_context,
                        SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
        SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
                (unsigned long)header + sizeof(*cmd));
        SVGA3dPrimitiveRange *range;
        uint32_t i;
        uint32_t maxnum;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, typeof(*cmd), header);
        maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

        if (unlikely(cmd->body.numVertexDecls > maxnum)) {
                VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
                return -EINVAL;
        }

        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        VMW_RES_DIRTY_NONE,
                                        user_surface_converter,
                                        &decl->array.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        maxnum = (header->size - sizeof(cmd->body) -
                  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
        if (unlikely(cmd->body.numRanges > maxnum)) {
                VMW_DEBUG_USER("Illegal number of index ranges.\n");
                return -EINVAL;
        }

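        /* The primitive ranges immediately follow the vertex declarations. */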
        range = (SVGA3dPrimitiveRange *) decl;
        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        VMW_RES_DIRTY_NONE,
                                        user_surface_converter,
                                        &range->indexArray.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
                ((unsigned long) header + header->size + sizeof(*header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(*cmd));
        struct vmw_resource *ctx;
        struct vmw_resource *res;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                VMW_RES_DIRTY_SET, user_context_converter,
                                &cmd->body.cid, &ctx);
        if (unlikely(ret != 0))
                return ret;

        for (; cur_state < last_state; ++cur_state) {
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;

                if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
                        VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
                                       (unsigned int) cur_state->stage);
                        return -EINVAL;
                }

                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        VMW_RES_DIRTY_NONE,
                                        user_surface_converter,
                                        &cur_state->value, &res);
                if (unlikely(ret != 0))
                        return ret;

                if (dev_priv->has_mob) {
                        struct vmw_ctx_bindinfo_tex binding;
                        struct vmw_ctx_validation_info *node;

                        node = vmw_execbuf_info_from_res(sw_context, ctx);
                        if (!node)
                                return -EINVAL;

                        binding.bi.ctx = ctx;
                        binding.bi.res = res;
                        binding.bi.bt = vmw_ctx_binding_tex;
                        binding.texture_stage = cur_state->stage;
                        vmw_binding_add(node->staged, &binding.bi, 0,
                                        binding.texture_stage);
                }
        }

        return 0;
}
1667
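/**
 * vmw_cmd_check_define_gmrfb - Validate SVGA_CMD_DEFINE_GMRFB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 */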
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_bo *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_bo *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - Per device guest-backed
		 * shaders, but user-space thinks it's per context host-
		 * backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_NONE,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);

	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
	    cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) cmd->body.slot);
		return -EINVAL;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_constant_buffer_offset - Validate
 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);

	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 shader_slot;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
			       (unsigned int) cmd->body.slot);
		return -EINVAL;
	}

	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
				     cmd->body.slot, cmd->body.offsetInBytes);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);

	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_val_add(sw_context, res,
					      VMW_RES_DIRTY_NONE,
					      vmw_val_add_flag_noctx);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate
 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}

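/**
 * vmw_cmd_dx_view_define - Validate SVGA_3D_CMD_DX_DEFINE_*_VIEW commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */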
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so_target binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so_target;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

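/**
 * vmw_cmd_dx_so_define - Validate DX state-object define commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */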
static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}

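/**
 * vmw_cmd_dx_cid_check - Validate a command that requires only a DX context
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */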
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *view;
	struct vmw_res_cache_entry *rcache;

	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
	if (IS_ERR(view))
		return PTR_ERR(view);

	/*
	 * Normally the shader-resource view is not gpu-dirtying, but for
	 * this particular command it is...
	 * So mark the last looked-up surface, which is the surface
	 * the view points to, gpu-dirty.
	 */
	rcache = &sw_context->res_cache[vmw_res_surface];
	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
				     VMW_RES_DIRTY_SET);
	return 0;
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

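/**
 * vmw_cmd_sm5 - Validate a command that requires an SM5 context
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */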
static int vmw_cmd_sm5(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return 0;
}

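/**
 * vmw_cmd_sm5_view_define - Validate a view define command that requires an
 * SM5 context
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */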
static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
}

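/**
 * vmw_cmd_sm5_view_remove - Validate a view remove command that requires an
 * SM5 context
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */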
static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
}

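/**
 * vmw_cmd_clear_uav_uint - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */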
static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewUint body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

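/**
 * vmw_cmd_clear_uav_float - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */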
static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewFloat body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

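/**
 * vmw_cmd_set_uav - Validate SVGA_3D_CMD_DX_SET_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */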
static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > vmw_max_num_uavs(dev_priv)) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
				  cmd->body.uavSpliceIndex);

	return ret;
}

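/**
 * vmw_cmd_set_cs_uav - Validate SVGA_3D_CMD_DX_SET_CS_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */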
static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCSUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > vmw_max_num_uavs(dev_priv)) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
				  cmd->body.startIndex);

	return ret;
}

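/**
 * vmw_cmd_dx_define_streamoutput - Validate
 * SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */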
static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
					  struct vmw_sw_context *sw_context,
					  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineStreamOutputWithMob body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
	ret = vmw_cotable_notify(res, cmd->body.soid);
	if (ret)
		return ret;

	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
				       cmd->body.soid,
				       &sw_context->staged_cmd_res);
}

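/**
 * vmw_cmd_dx_destroy_streamoutput - Validate
 * SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */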
static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in this
	 * case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * On an SM5-capable device, if the lookup fails then user-space
	 * probably used the old streamoutput define command. Return without
	 * an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
					  &sw_context->staged_cmd_res);
}

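/**
 * vmw_cmd_dx_bind_streamoutput - Validate SVGA_3D_CMD_DX_BIND_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */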
static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find streamoutput to bind.\n");
		return PTR_ERR(res);
	}

	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

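/**
 * vmw_cmd_dx_set_streamoutput - Validate SVGA_3D_CMD_DX_SET_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */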
static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct vmw_ctx_bindinfo_so binding;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	if (cmd->body.soid == SVGA3D_INVALID_ID)
		return 0;

	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in this
	 * case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * On an SM5-capable device, if the lookup fails then user-space
	 * probably used the old streamoutput define command. Return without
	 * an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_so;
	binding.slot = 0; /* Only one SO set to context at a time. */

	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
			binding.slot);

	return ret;
}

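/**
 * vmw_cmd_indexed_instanced_indirect - Validate
 * SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */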
static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct vmw_draw_indexed_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

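/**
 * vmw_cmd_instanced_indirect - Validate
 * SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */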
static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_draw_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

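/**
 * vmw_cmd_dispatch_indirect - Validate SVGA_3D_CMD_DX_DISPATCH_INDIRECT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */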
static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_dispatch_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDispatchIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

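/**
 * vmw_cmd_check_not_3d - Validate a non-3D SVGA FIFO command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: Remaining size in the command stream. Out: Size of the command.
 */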
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
		    &vmw_cmd_dx_set_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),

	/*
	 * SM5 commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
		    &vmw_cmd_clear_uav_float, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
		    true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
		    &vmw_cmd_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
		    &vmw_cmd_dispatch_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
		    &vmw_cmd_sm5_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
		    &vmw_cmd_dx_define_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V4,
		    &vmw_cmd_invalid, false, false, true),
};

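/**
 * vmw_cmd_describe - Look up a human-readable name and the size of a command
 *
 * @buf: Pointer to the command in the command stream.
 * @size: Outputs the size of the command including any header.
 * @cmd: Outputs a pointer to a static string naming the command.
 *
 * Return: true if the command was recognized, false otherwise.
 */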
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

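/**
 * vmw_cmd_check - Verify a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes remaining in the command stream;
 * on output, the size of the verified command including its header.
 *
 * Dispatches non-3D commands to vmw_cmd_check_not_3d() and looks up 3D
 * commands in vmw_cmd_entries, enforcing size, privilege and
 * guest-backed-object constraints before calling the per-command verifier.
 */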
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

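/**
 * vmw_cmd_check_all - Verify all commands in a submitted command batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 *
 * Walks the batch, calling vmw_cmd_check() on each command until the batch
 * is exhausted. Returns -EINVAL if the verifier gets out of sync with the
 * batch.
 */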
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

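/**
 * vmw_apply_relocations - Apply buffer object relocations to a command batch
 *
 * @sw_context: The software context holding the relocation list.
 *
 * For each relocation, patches the command batch with the final guest
 * address or GMR/MOB id of the buffer object, depending on where the buffer
 * was validated to reside, and then drops the relocation list.
 */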
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->tbo;
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->resource->start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->resource->start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

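/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough
 *
 * @sw_context: The software context owning the bounce buffer.
 * @size: Minimum required size in bytes.
 *
 * Grows the bounce buffer size by roughly 1.5x, page-aligned, until it can
 * hold @size bytes, then reallocates the buffer. The old contents are not
 * preserved. For example, assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is 32 KiB, a
 * 100 KiB batch would grow the buffer 32 KiB -> 48 KiB -> 72 KiB -> 108 KiB.
 */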
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/*
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * user-space handle is created for the fence; otherwise no handle is
 * created.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_cmd_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also if copying fails, user-space will be unable to signal the fence object
 * so we wait for it immediately, and then unreference the user-space reference.
 */
int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return 0;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman);
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference the
	 * handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}

	return ret ? -EFAULT : 0;
}

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_CMD_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmd_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer represented
 * by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: If command buffers could not be used, the function will
 * return the value of @kernel_commands on function call. That value may be
 * NULL. In that case, the value of *@header will be set to NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

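/**
 * vmw_execbuf_tie_context - Bind a DX context to the command submission
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID if none.
 *
 * Looks up the context resource, adds it to the validation list marked
 * dirty, and caches its validation node and command buffer resource manager
 * in @sw_context for use by the command verifiers.
 */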
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	ret = vmw_user_resource_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter, &res);
	if (ret != 0) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return ret;
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
				      vmw_val_add_flag_none);
	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		return ret;
	}

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	vmw_resource_unreference(&res);
	return 0;
}

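/**
 * vmw_execbuf_process - Execute a user-space or kernel command batch
 *
 * @file_priv: DRM file-private of the caller, or NULL for kernel submissions.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command batch, or NULL.
 * @kernel_commands: Kernel pointer to the command batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: Formerly a throttle value; throttling is no longer supported.
 * @dx_context_handle: Handle of the DX context to bind, or SVGA3D_INVALID_ID.
 * @user_fence_rep: Optional user-space address to receive fence information.
 * @out_fence: Optional out parameter receiving a reference to the fence.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags.
 *
 * Copies the batch into the kernel if needed, verifies and patches all
 * commands, validates all referenced resources and buffer objects, submits
 * the batch through the command buffer manager or the fifo, and fences the
 * submission.
 */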
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us)
		VMW_DEBUG_USER("Throttling is no longer supported.\n");

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}

	sw_context->filp = file_priv;
	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_execbuf_fence_commands() will sync. The error will be
	 * propagated to user-space in @user_fence_rep.
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
					  user_fence_rep, fence, handle,
					  out_fence_fd);

	if (sync_file) {
		if (ret) {
			/* usercopy of fence failed, put the file object */
			fput(sync_file->file);
			put_unused_fd(out_fence_fd);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return ret;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed to
 * by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	vmw_bo_placement_set(dev_priv->pinned_bo,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
	if (ret)
		goto out_no_reserve;

	vmw_bo_placement_set(dev_priv->dummy_query_bo,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

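/**
 * vmw_execbuf_ioctl - Implementation of the DRM_VMW_EXECBUF ioctl
 *
 * @dev: The DRM device.
 * @data: Pointer to a struct drm_vmw_execbuf_arg.
 * @file_priv: DRM file-private of the caller.
 *
 * Validates the ioctl argument version, optionally waits on an imported
 * fence fd, and hands the user-space command batch to vmw_execbuf_process().
 */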
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		ret = -EINVAL;
		goto mksstats_out;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended + zeropadded the data. */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM has correctly copied it. */
		break;
	}

	/* If we imported a fence FD from elsewhere, wait on it. */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			ret = -EINVAL;
			goto mksstats_out;
		}

		ret = dma_fence_wait(in_fence, true);
		if (ret)
			goto out;
	}

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

out:
	if (in_fence)
		dma_fence_put(in_fence);

mksstats_out:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
	return ret;
}