// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmw_surface_cache.h"
#include "device_include/svga3d_surfacedefs.h"

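/*
 * User-space passes surface flags as two 32-bit halves, while the driver
 * stores them internally as a single 64-bit SVGA3dSurfaceAllFlags value.
 * These helpers combine and split the two representations.
 */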
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
	(svga3d_flags & ((uint64_t)U32_MAX))

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object handling user-space visibility.
 * @srf:            The surface metadata.
 * @master:         Master of the creating client. Used for security check.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	struct drm_master *master;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

/**
 * struct vmw_surface_dirty - Surface dirty-tracker
 * @cache: Cached layout information of the surface.
 * @num_subres: Number of subresources.
 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
 */
struct vmw_surface_dirty {
	struct vmw_surface_cache cache;
	u32 num_subres;
	SVGA3dBox boxes[];
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv);
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv);

static void vmw_surface_dirty_free(struct vmw_resource *res);
static int vmw_surface_dirty_alloc(struct vmw_resource *res);
static int vmw_surface_dirty_sync(struct vmw_resource *res);
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end);
static int vmw_surface_clean(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 1,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 2,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind,
	.dirty_alloc = vmw_surface_dirty_alloc,
	.dirty_free = vmw_surface_dirty_free,
	.dirty_sync = vmw_surface_dirty_sync,
	.dirty_range_add = vmw_surface_dirty_range_add,
	.clean = vmw_surface_clean,
};

/*
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/*
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/*
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
		sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	/*
	 * Downcast of surfaceFlags: the flags were upcast when received
	 * from user-space, since the driver internally stores them as
	 * 64 bit. The legacy surface define command only supports 32-bit
	 * flags.
	 */
	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
	cmd->body.format = srf->metadata.format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->metadata.sizes;

	for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(srf->metadata.format);

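	/*
	 * Emit one DMA command per face/mip image. Each command transfers
	 * the full image between its backing-store offset and the host
	 * surface, in the direction selected by @to_surface.
	 */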
	for (i = 0; i < srf->metadata.num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			vmw_surface_get_image_buffer_size(desc, cur_size,
							  body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
		if (unlikely(!cmd))
			return;

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: Make used_memory_size atomic, or use a separate
		 * lock, to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id yet, allocate one and encode a
 * surface define command for the device.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode the surface define commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_cmd_commit(dev_priv, submit_size);
	vmw_fifo_resource_inc(dev_priv);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(!val_buf->bo);
	submit_size = vmw_surface_dma_size(srf);
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_cmd_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @readback:       Whether to copy the surface contents back to the
 *                  backup buffer. Only true if the surface is dirty.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_cmd_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(!res_free);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	res->hw_destroy = vmw_hw_surface_destroy;
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	WARN_ON_ONCE(res->dirty);
	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->metadata.sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	if (base->shareable && res && res->backup)
		drm_gem_object_put(&res->backup->base.base);

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 *                             the user surface destroy functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                            the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_surface_metadata *metadata;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	const SVGA3dSurfaceDesc *desc;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
			return -EINVAL;
		num_sizes += req->mip_levels[i];
	}

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
	    num_sizes == 0)
		return -EINVAL;

	desc = vmw_surface_get_desc(req->format);
	if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
		VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
			       req->format);
		return -EINVAL;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	srf = &user_srf->srf;
	metadata = &srf->metadata;
	res = &srf->res;

	/* Driver internally stores as 64-bit flags */
	metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
	metadata->format = req->format;
	metadata->scanout = req->scanout;

	memcpy(metadata->mip_levels, req->mip_levels,
	       sizeof(metadata->mip_levels));
	metadata->num_sizes = num_sizes;
	metadata->sizes =
		memdup_user((struct drm_vmw_size __user *)(unsigned long)
			    req->size_addr,
			    sizeof(*metadata->sizes) * metadata->num_sizes);
	if (IS_ERR(metadata->sizes)) {
		ret = PTR_ERR(metadata->sizes);
		goto out_no_sizes;
	}
	srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
				     GFP_KERNEL);
	if (unlikely(!srf->offsets)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	metadata->base_size = *srf->metadata.sizes;
	metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	metadata->multisample_count = 0;
	metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	metadata->quality_level = SVGA3D_MS_QUALITY_NONE;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = metadata->sizes;

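	/*
	 * Walk all faces and mip levels, recording each image's offset
	 * into the backing store and accumulating the total backup size.
	 */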
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < metadata->mip_levels[i]; ++j) {
			uint32_t stride = vmw_surface_calculate_pitch(
						  desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += vmw_surface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
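	/*
	 * Cursor-sized scanout surfaces get a snooper image so that
	 * cursor contents DMA'd by user-space can be snooped for the
	 * hardware cursor.
	 */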
	if (metadata->scanout &&
	    metadata->num_sizes == 1 &&
	    metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
	    metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
	    metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
		const struct SVGA3dSurfaceDesc *desc =
			vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
		const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
					      VMW_CURSOR_SNOOP_HEIGHT *
					      desc->pitchBytesPerBlock;
		srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_file_get_master(file_priv);

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a shared surface will
	 * expect a backup buffer to be present.
	 */
	if (dev_priv->has_mob && req->shareable) {
		uint32_t backup_handle;

		ret = vmw_gem_object_create_with_handle(dev_priv,
							file_priv,
							res->backup_size,
							&backup_handle,
							&res->backup);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
		vmw_bo_reference(res->backup);
		drm_gem_object_get(&res->backup->base.base);
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.handle;
	vmw_resource_unreference(&res);

	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(metadata->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_unlock:
	return ret;
}

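/**
 * vmw_surface_handle_reference - Look up and reference a user surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @file_priv: Pointer to a drm file private structure.
 * @u_handle: User handle or prime fd identifying the surface.
 * @handle_type: Whether @u_handle is a TTM handle or a prime fd.
 * @base_p: On success, assigned the referenced TTM base object.
 *
 * Looks up the surface, performs permission checks and, for non-prime
 * handles, adds a reference for the calling file. Returns 0 on success,
 * negative error code on failure.
 */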
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (unlikely(ret != 0))
			return ret;
	} else {
		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(!base)) {
		VMW_DEBUG_USER("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		VMW_DEBUG_USER("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}
	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		bool require_exist = false;

		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/* Error out if we are unauthenticated primary */
		if (drm_is_primary_client(file_priv) &&
		    !file_priv->authenticated) {
			ret = -EACCES;
			goto out_bad_resource;
		}

		/*
		 * Make sure the surface creator has the same
		 * authenticating master, or is already registered with us.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master)
			require_exist = true;

		if (unlikely(drm_is_render_client(file_priv)))
			require_exist = true;

		ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	/* Downcast of flags when sending back to user space */
	rep->flags = (uint32_t)srf->metadata.flags;
	rep->format = srf->metadata.format;
	memcpy(rep->mip_levels, srf->metadata.mip_levels,
	       sizeof(srf->metadata.mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->metadata.base_size,
				   sizeof(srf->metadata.base_size));
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
			       srf->metadata.num_sizes);
		ttm_ref_object_base_unref(tfile, base->handle);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Create a guest-backed device surface as part
 * of the resource validation process.
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_surface_metadata *metadata = &srf->metadata;
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v3 body;
	} *cmd3;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v4 body;
	} *cmd4;

	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

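	/*
	 * Pick the most capable surface define command this device and
	 * surface combination supports: v4 requires SM5, v3 requires
	 * SM4.1, v2 handles array surfaces on SM4, and the base command
	 * covers everything else.
	 */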
	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
		cmd_len = sizeof(cmd4->body);
		submit_len = sizeof(*cmd4);
	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
		cmd_len = sizeof(cmd3->body);
		submit_len = sizeof(*cmd3);
	} else if (metadata->array_size > 0) {
		/* VMW_SM_4 support verified at creation time. */
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

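	/*
	 * All four command variants share the same header layout, so
	 * reserve the submit space once and alias the remaining command
	 * pointers onto the same reservation.
	 */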
	cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
	cmd2 = (typeof(cmd2))cmd;
	cmd3 = (typeof(cmd3))cmd;
	cmd4 = (typeof(cmd4))cmd;
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
		cmd4->header.id = cmd_id;
		cmd4->header.size = cmd_len;
		cmd4->body.sid = srf->res.id;
		cmd4->body.surfaceFlags = metadata->flags;
		cmd4->body.format = metadata->format;
		cmd4->body.numMipLevels = metadata->mip_levels[0];
		cmd4->body.multisampleCount = metadata->multisample_count;
		cmd4->body.multisamplePattern = metadata->multisample_pattern;
		cmd4->body.qualityLevel = metadata->quality_level;
		cmd4->body.autogenFilter = metadata->autogen_filter;
		cmd4->body.size.width = metadata->base_size.width;
		cmd4->body.size.height = metadata->base_size.height;
		cmd4->body.size.depth = metadata->base_size.depth;
		cmd4->body.arraySize = metadata->array_size;
		cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
		cmd3->header.id = cmd_id;
		cmd3->header.size = cmd_len;
		cmd3->body.sid = srf->res.id;
		cmd3->body.surfaceFlags = metadata->flags;
		cmd3->body.format = metadata->format;
		cmd3->body.numMipLevels = metadata->mip_levels[0];
		cmd3->body.multisampleCount = metadata->multisample_count;
		cmd3->body.multisamplePattern = metadata->multisample_pattern;
		cmd3->body.qualityLevel = metadata->quality_level;
		cmd3->body.autogenFilter = metadata->autogen_filter;
		cmd3->body.size.width = metadata->base_size.width;
		cmd3->body.size.height = metadata->base_size.height;
		cmd3->body.size.depth = metadata->base_size.depth;
		cmd3->body.arraySize = metadata->array_size;
	} else if (metadata->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = metadata->flags;
		cmd2->body.format = metadata->format;
		cmd2->body.numMipLevels = metadata->mip_levels[0];
		cmd2->body.multisampleCount = metadata->multisample_count;
		cmd2->body.autogenFilter = metadata->autogen_filter;
		cmd2->body.size.width = metadata->base_size.width;
		cmd2->body.size.height = metadata->base_size.height;
		cmd2->body.size.depth = metadata->base_size.depth;
		cmd2->body.arraySize = metadata->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = metadata->flags;
		cmd->body.format = metadata->format;
		cmd->body.numMipLevels = metadata->mip_levels[0];
		cmd->body.multisampleCount = metadata->multisample_count;
		cmd->body.autogenFilter = metadata->autogen_filter;
		cmd->body.size.width = metadata->base_size.width;
		cmd->body.size.height = metadata->base_size.height;
		cmd->body.size.depth = metadata->base_size.depth;
	}

	vmw_cmd_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}

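/**
 * vmw_gb_surface_bind - Bind a guest-backed surface to its backup MOB.
 *
 * @res:     Pointer to a struct vmw_resource embedded in a struct
 *           vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 *           information about the backup buffer.
 *
 * Issues a bind command and, if the backup contents are newer than the
 * surface, an update command so the device re-reads the backing MOB.
 */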
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd1))
		return -ENOMEM;

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->resource->start;
	if (res->backup_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
	}
	vmw_cmd_commit(dev_priv, submit_size);

	if (res->backup->dirty && res->backup_dirty) {
		/* We've just made a full upload. Clear dirty regions. */
		vmw_bo_dirty_clear_res(res);
	}

	res->backup_dirty = false;

	return 0;
}

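/**
 * vmw_gb_surface_unbind - Unbind a guest-backed surface from its backup MOB.
 *
 * @res:      Pointer to a struct vmw_resource embedded in a struct
 *            vmw_surface.
 * @readback: Whether to read the surface contents back to the backup
 *            buffer before unbinding.
 * @val_buf:  Pointer to a struct ttm_validate_buffer containing
 *            information about the backup buffer.
 *
 * Issues a readback or invalidate command followed by a bind to
 * SVGA3D_INVALID_ID, then fences the backup buffer.
 */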
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

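/**
 * vmw_gb_surface_destroy - Destroy a guest-backed surface on the device.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 *       vmw_surface.
 *
 * Scrubs views and bindings referencing the surface, encodes a destroy
 * command and releases the surface id.
 */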
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(!cmd)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_create_ext_req req_ext;

	req_ext.base = arg->req;
	req_ext.version = drm_vmw_gb_surface_v1;
	req_ext.svga3d_flags_upper_32_bits = 0;
	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
	req_ext.buffer_byte_stride = 0;
	req_ext.must_be_zero = 0;

	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
	int ret;

	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);

	if (unlikely(ret != 0))
		return ret;

	rep->creq = rep_ext.creq.base;
	rep->crep = rep_ext.crep;

	return ret;
}

/**
 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_ext_arg *arg =
	    (union drm_vmw_gb_surface_create_ext_arg *)data;
	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;

	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_ext_arg *arg =
	    (union drm_vmw_gb_surface_reference_ext_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;

	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
}

/**
 * vmw_gb_surface_define_internal - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @req: Request argument from user-space.
 * @rep: Response argument to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface_metadata metadata = {0};
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	int ret = 0;
	uint32_t backup_handle = 0;
	SVGA3dSurfaceAllFlags svga3d_flags_64 =
		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
				req->base.svga3d_flags);

	/* array_size must be zero for non-GL3 hosts. */
	if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
		VMW_DEBUG_USER("SM4 surface not supported.\n");
		return -EINVAL;
	}

	if (!has_sm4_1_context(dev_priv)) {
		if (req->svga3d_flags_upper_32_bits != 0)
			ret = -EINVAL;

		if (req->base.multisample_count != 0)
			ret = -EINVAL;

		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
			ret = -EINVAL;

		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
			ret = -EINVAL;

		if (ret) {
			VMW_DEBUG_USER("SM4.1 surface not supported.\n");
			return ret;
		}
	}

	if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
		VMW_DEBUG_USER("SM5 surface not supported.\n");
		return -EINVAL;
	}

	if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
	    req->base.multisample_count == 0) {
		VMW_DEBUG_USER("Invalid sample count.\n");
		return -EINVAL;
	}

	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
		VMW_DEBUG_USER("Invalid mip level.\n");
		return -EINVAL;
	}

	metadata.flags = svga3d_flags_64;
	metadata.format = req->base.format;
	metadata.mip_levels[0] = req->base.mip_levels;
	metadata.multisample_count = req->base.multisample_count;
	metadata.multisample_pattern = req->multisample_pattern;
	metadata.quality_level = req->quality_level;
	metadata.array_size = req->base.array_size;
	metadata.buffer_byte_stride = req->buffer_byte_stride;
	metadata.num_sizes = 1;
	metadata.base_size = req->base.base_size;
	metadata.scanout = req->base.drm_surface_flags &
		drm_vmw_surface_flag_scanout;

	/* Define a surface based on the parameters. */
	ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
	if (ret != 0) {
		VMW_DEBUG_USER("Failed to define surface.\n");
		return ret;
	}

	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_file_get_master(file_priv);

	res = &user_srf->srf.res;

	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
					 &res->backup);
		if (ret == 0) {
			if (res->backup->base.base.size < res->backup_size) {
				VMW_DEBUG_USER("Surface backup buffer too small.\n");
				vmw_bo_unreference(&res->backup);
				ret = -EINVAL;
				goto out_unlock;
			} else {
				backup_handle = req->base.buffer_handle;
			}
		}
	} else if (req->base.drm_surface_flags &
		   (drm_vmw_surface_flag_create_buffer |
		    drm_vmw_surface_flag_coherent)) {
		ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
							res->backup_size,
							&backup_handle,
							&res->backup);
		if (ret == 0)
			vmw_bo_reference(res->backup);
	}

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

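	/*
	 * For coherent surfaces, start dirty tracking on the backup
	 * buffer and allocate the surface dirty tracker, so that
	 * user-space writes are propagated to the device.
	 */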
	if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
		struct vmw_buffer_object *backup = res->backup;

		ttm_bo_reserve(&backup->base, false, false, NULL);
		if (!res->func->dirty_alloc)
			ret = -EINVAL;
		if (!ret)
			ret = vmw_bo_dirty_add(backup);
		if (!ret) {
			res->coherent = true;
			ret = res->func->dirty_alloc(res);
		}
		ttm_bo_unreserve(&backup->base);
		if (ret) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}

	}

	tmp = vmw_resource_reference(res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->base.drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle      = user_srf->prime.base.handle;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
		rep->buffer_size = res->backup->base.base.size;
		rep->buffer_handle = backup_handle;
		if (user_srf->prime.base.shareable)
			drm_gem_object_get(&res->backup->base.base);
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}
	vmw_resource_unreference(&res);

out_unlock:
	return ret;
}

/**
 * vmw_gb_surface_reference_internal - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @req: Pointer to user-space request surface arg.
 * @rep: Pointer to response to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct vmw_surface_metadata *metadata;
	struct ttm_base_object *base;
	u32 backup_handle;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (!srf->res.backup) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		ret = -EINVAL;
		goto out_bad_resource;
	}
	metadata = &srf->metadata;

	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
	ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base,
				    &backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	if (ret != 0) {
		drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n",
			req->sid);
		goto out_bad_resource;
	}

	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
	rep->creq.base.format = metadata->format;
	rep->creq.base.mip_levels = metadata->mip_levels[0];
	rep->creq.base.drm_surface_flags = 0;
	rep->creq.base.multisample_count = metadata->multisample_count;
	rep->creq.base.autogen_filter = metadata->autogen_filter;
	rep->creq.base.array_size = metadata->array_size;
	rep->creq.base.buffer_handle = backup_handle;
	rep->creq.base.base_size = metadata->base_size;
	rep->crep.handle = user_srf->prime.base.handle;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.base.size;

	rep->creq.version = drm_vmw_gb_surface_v1;
	rep->creq.svga3d_flags_upper_32_bits =
		SVGA3D_FLAGS_UPPER_32(metadata->flags);
	rep->creq.multisample_pattern = metadata->multisample_pattern;
	rep->creq.quality_level = metadata->quality_level;
	rep->creq.must_be_zero = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
1658 
1659 /**
1660  * vmw_subres_dirty_add - Add a dirty region to a subresource
1661  * @dirty: The surfaces's dirty tracker.
1662  * @loc_start: The location corresponding to the start of the region.
1663  * @loc_end: The location corresponding to the end of the region.
1664  *
1665  * As we are assuming that @loc_start and @loc_end represent a sequential
1666  * range of backing store memory, if the region spans multiple lines then
1667  * regardless of the x coordinate, the full lines are dirtied.
1668  * Correspondingly if the region spans multiple z slices, then full rather
1669  * than partial z slices are dirtied.
1670  */
1671 static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
1672 				 const struct vmw_surface_loc *loc_start,
1673 				 const struct vmw_surface_loc *loc_end)
1674 {
1675 	const struct vmw_surface_cache *cache = &dirty->cache;
1676 	SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
1677 	u32 mip = loc_start->sub_resource % cache->num_mip_levels;
1678 	const struct drm_vmw_size *size = &cache->mip[mip].size;
1679 	u32 box_c2;
1680 
1681 	if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
1682 		return;
1683 
     	/* Read the current box only after the bounds check above. */
     	box_c2 = box->z + box->d;
1684 	if (box->d == 0 || box->z > loc_start->z)
1685 		box->z = loc_start->z;
     	/* Extend the far edge, but never shrink it when the near edge moves. */
1686 	if (box_c2 < loc_end->z)
1687 		box->d = loc_end->z - box->z;
     	else
     		box->d = box_c2 - box->z;
1688 
1689 	if (loc_start->z + 1 == loc_end->z) {
1690 		box_c2 = box->y + box->h;
1691 		if (box->h == 0 || box->y > loc_start->y)
1692 			box->y = loc_start->y;
1693 		if (box_c2 < loc_end->y)
1694 			box->h = loc_end->y - box->y;
     		else
     			box->h = box_c2 - box->y;
1695 
1696 		if (loc_start->y + 1 == loc_end->y) {
1697 			box_c2 = box->x + box->w;
1698 			if (box->w == 0 || box->x > loc_start->x)
1699 				box->x = loc_start->x;
1700 			if (box_c2 < loc_end->x)
1701 				box->w = loc_end->x - box->x;
     			else
     				box->w = box_c2 - box->x;
1702 		} else {
1703 			box->x = 0;
1704 			box->w = size->width;
1705 		}
1706 	} else {
1707 		box->y = 0;
1708 		box->h = size->height;
1709 		box->x = 0;
1710 		box->w = size->width;
1711 	}
1712 }
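
     /*
      * Editorial example: merging a range running from loc_start = (x 10,
      * y 3, z 0) to the exclusive loc_end = (x 6, y 7, z 1) into an empty
      * box of a 64-wide mip level spans multiple lines of a single z slice,
      * so vmw_subres_dirty_add() yields the full-line box
      * x = 0, w = 64, y = 3, h = 4, z = 0, d = 1.
      */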
1713 
1714 /**
1715  * vmw_subres_dirty_full - Mark a full subresource as dirty
1716  * @dirty: The surface's dirty tracker.
1717  * @subres: The subresource
1718  */
1719 static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
1720 {
1721 	const struct vmw_surface_cache *cache = &dirty->cache;
1722 	u32 mip = subres % cache->num_mip_levels;
1723 	const struct drm_vmw_size *size = &cache->mip[mip].size;
1724 	SVGA3dBox *box = &dirty->boxes[subres];
1725 
1726 	box->x = 0;
1727 	box->y = 0;
1728 	box->z = 0;
1729 	box->w = size->width;
1730 	box->h = size->height;
1731 	box->d = size->depth;
1732 }
1733 
1734 /*
1735  * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
1736  * surfaces.
1737  */
1738 static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
1739 					    size_t start, size_t end)
1740 {
1741 	struct vmw_surface_dirty *dirty =
1742 		(struct vmw_surface_dirty *) res->dirty;
1743 	size_t backup_end = res->backup_offset + res->backup_size;
1744 	struct vmw_surface_loc loc1, loc2;
1745 	const struct vmw_surface_cache *cache;
1746 
1747 	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
1748 	end = min(end, backup_end) - res->backup_offset;
1749 	cache = &dirty->cache;
1750 	vmw_surface_get_loc(cache, &loc1, start);
1751 	vmw_surface_get_loc(cache, &loc2, end - 1);
1752 	vmw_surface_inc_loc(cache, &loc2);
1753 
1754 	if (loc1.sheet != loc2.sheet) {
1755 		u32 sub_res;
1756 
1757 		/*
1758 		 * The range spans multiple multisample sheets. An optimized
1759 		 * implementation would compute the dirty region for each
1760 		 * sheet and take their union, but since this is not a common
1761 		 * case, just dirty the whole surface.
1762 		 */
1763 		for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
1764 			vmw_subres_dirty_full(dirty, sub_res);
1765 		return;
1766 	}
1767 	if (loc1.sub_resource + 1 == loc2.sub_resource) {
1768 		/* Dirty range covers a single sub-resource */
1769 		vmw_subres_dirty_add(dirty, &loc1, &loc2);
1770 	} else {
1771 		/* Dirty range covers multiple sub-resources */
1772 		struct vmw_surface_loc loc_min, loc_max;
1773 		u32 sub_res;
1774 
1775 		vmw_surface_max_loc(cache, loc1.sub_resource, &loc_max);
1776 		vmw_subres_dirty_add(dirty, &loc1, &loc_max);
1777 		vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
1778 		vmw_subres_dirty_add(dirty, &loc_min, &loc2);
1779 		for (sub_res = loc1.sub_resource + 1;
1780 		     sub_res < loc2.sub_resource - 1; ++sub_res)
1781 			vmw_subres_dirty_full(dirty, sub_res);
1782 	}
1783 }
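
     /*
      * Editorial example: if the clamped range starts inside sub-resource 2
      * and ends inside sub-resource 5 (so loc2.sub_resource == 6 after
      * vmw_surface_inc_loc()), the code above dirties the tail of
      * sub-resource 2, the head of sub-resource 5, and the whole of
      * sub-resources 3 and 4.
      */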
1784 
1785 /*
1786  * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
1787  * surfaces.
1788  */
1789 static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
1790 					    size_t start, size_t end)
1791 {
1792 	struct vmw_surface_dirty *dirty =
1793 		(struct vmw_surface_dirty *) res->dirty;
1794 	const struct vmw_surface_cache *cache = &dirty->cache;
1795 	size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
1796 	SVGA3dBox *box = &dirty->boxes[0];
1797 	u32 box_c2;
1798 
1799 	box->h = box->d = 1;
1800 	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
1801 	end = min(end, backup_end) - res->backup_offset;
1802 	box_c2 = box->x + box->w;
1803 	if (box->w == 0 || box->x > start)
1804 		box->x = start;
     	/* Extend the far edge, but never shrink it when the near edge moves. */
1805 	if (box_c2 < end)
1806 		box->w = end - box->x;
     	else
     		box->w = box_c2 - box->x;
1807 }
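
     /*
      * Editorial example: merging first [16, 32) and then [8, 12) into an
      * empty box leaves box->x == 8 and box->w == 24, i.e. the union
      * span [8, 32).
      */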
1808 
1809 /*
1810  * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
1811  */
1812 static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
1813 					size_t end)
1814 {
1815 	struct vmw_surface *srf = vmw_res_to_srf(res);
1816 
1817 	if (WARN_ON(end <= res->backup_offset ||
1818 		    start >= res->backup_offset + res->backup_size))
1819 		return;
1820 
1821 	if (srf->metadata.format == SVGA3D_BUFFER)
1822 		vmw_surface_buf_dirty_range_add(res, start, end);
1823 	else
1824 		vmw_surface_tex_dirty_range_add(res, start, end);
1825 }
1826 
1827 /*
1828  * vmw_surface_dirty_sync - The surface's dirty_sync callback.
1829  */
1830 static int vmw_surface_dirty_sync(struct vmw_resource *res)
1831 {
1832 	struct vmw_private *dev_priv = res->dev_priv;
1833 	u32 i, num_dirty;
1834 	struct vmw_surface_dirty *dirty =
1835 		(struct vmw_surface_dirty *) res->dirty;
1836 	size_t alloc_size;
1837 	const struct vmw_surface_cache *cache = &dirty->cache;
1838 	struct {
1839 		SVGA3dCmdHeader header;
1840 		SVGA3dCmdDXUpdateSubResource body;
1841 	} *cmd1;
1842 	struct {
1843 		SVGA3dCmdHeader header;
1844 		SVGA3dCmdUpdateGBImage body;
1845 	} *cmd2;
1846 	void *cmd;
1847 
1848 	num_dirty = 0;
1849 	for (i = 0; i < dirty->num_subres; ++i) {
1850 		const SVGA3dBox *box = &dirty->boxes[i];
1851 
1852 		if (box->d)
1853 			num_dirty++;
1854 	}
1855 
1856 	if (!num_dirty)
1857 		goto out;
1858 
1859 	alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2));
1860 	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
1861 	if (!cmd)
1862 		return -ENOMEM;
1863 
1864 	cmd1 = cmd;
1865 	cmd2 = cmd;
1866 
1867 	for (i = 0; i < dirty->num_subres; ++i) {
1868 		const SVGA3dBox *box = &dirty->boxes[i];
1869 
1870 		if (!box->d)
1871 			continue;
1872 
1873 		/*
1874 		 * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
1875 		 * UPDATE_GB_IMAGE is not.
1876 		 */
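     		/*
     		 * Editorial example: for a cube map with three mip levels
     		 * (num_mip_levels == 3), sub-resource i == 7 decomposes to
     		 * face 2, mipmap 1 in the UPDATE_GB_IMAGE path below.
     		 */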
1877 		if (has_sm4_context(dev_priv)) {
1878 			cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
1879 			cmd1->header.size = sizeof(cmd1->body);
1880 			cmd1->body.sid = res->id;
1881 			cmd1->body.subResource = i;
1882 			cmd1->body.box = *box;
1883 			cmd1++;
1884 		} else {
1885 			cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
1886 			cmd2->header.size = sizeof(cmd2->body);
1887 			cmd2->body.image.sid = res->id;
1888 			cmd2->body.image.face = i / cache->num_mip_levels;
1889 			cmd2->body.image.mipmap = i -
1890 				(cache->num_mip_levels * cmd2->body.image.face);
1891 			cmd2->body.box = *box;
1892 			cmd2++;
1893 		}
1894 
1895 	}
1896 	vmw_cmd_commit(dev_priv, alloc_size);
1897  out:
1898 	memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
1899 	       dirty->num_subres);
1900 
1901 	return 0;
1902 }
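
     /*
      * Editorial note: sizeof(*cmd1) and sizeof(*cmd2) differ, so the
      * reservation in vmw_surface_dirty_sync() is sized for whichever
      * command type the device actually takes; cmd1 and cmd2 alias the
      * same reservation and only the matching pointer is ever advanced.
      */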
1903 
1904 /*
1905  * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
1906  */
1907 static int vmw_surface_dirty_alloc(struct vmw_resource *res)
1908 {
1909 	struct vmw_surface *srf = vmw_res_to_srf(res);
1910 	const struct vmw_surface_metadata *metadata = &srf->metadata;
1911 	struct vmw_surface_dirty *dirty;
1912 	u32 num_layers = 1;
1913 	u32 num_mip;
1914 	u32 num_subres;
1915 	u32 num_samples;
1916 	size_t dirty_size;
1917 	int ret;
1918 
1919 	if (metadata->array_size)
1920 		num_layers = metadata->array_size;
1921 	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
1922 		num_layers *= SVGA3D_MAX_SURFACE_FACES;
1923 
1924 	num_mip = metadata->mip_levels[0];
1925 	if (!num_mip)
1926 		num_mip = 1;
1927 
1928 	num_subres = num_layers * num_mip;
1929 	dirty_size = struct_size(dirty, boxes, num_subres);
1930 
1931 	dirty = kvzalloc(dirty_size, GFP_KERNEL);
1932 	if (!dirty) {
1933 		ret = -ENOMEM;
1934 		goto out_no_dirty;
1935 	}
1936 
1937 	num_samples = max_t(u32, 1, metadata->multisample_count);
1938 	ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
1939 				      num_mip, num_layers, num_samples,
1940 				      &dirty->cache);
1941 	if (ret)
1942 		goto out_no_cache;
1943 
1944 	dirty->num_subres = num_subres;
1945 	res->dirty = (struct vmw_resource_dirty *) dirty;
1946 
1947 	return 0;
1948 
1949 out_no_cache:
1950 	kvfree(dirty);
1951 out_no_dirty:
1952 	return ret;
1953 }
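
     /*
      * Editorial example: a cube-map surface without an explicit array size
      * and with four mip levels gets num_layers == SVGA3D_MAX_SURFACE_FACES
      * (i.e. 6) in vmw_surface_dirty_alloc(), and therefore
      * num_subres == 6 * 4 == 24 dirty-tracking boxes.
      */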
1954 
1955 /*
1956  * vmw_surface_dirty_free - The surface's dirty_free callback
1957  */
1958 static void vmw_surface_dirty_free(struct vmw_resource *res)
1959 {
1960 	struct vmw_surface_dirty *dirty =
1961 		(struct vmw_surface_dirty *) res->dirty;
1962 
1963 	kvfree(dirty);
1964 	res->dirty = NULL;
1965 }
1966 
1967 /*
1968  * vmw_surface_clean - The surface's clean callback
1969  */
1970 static int vmw_surface_clean(struct vmw_resource *res)
1971 {
1972 	struct vmw_private *dev_priv = res->dev_priv;
1973 	size_t alloc_size;
1974 	struct {
1975 		SVGA3dCmdHeader header;
1976 		SVGA3dCmdReadbackGBSurface body;
1977 	} *cmd;
1978 
1979 	alloc_size = sizeof(*cmd);
1980 	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
1981 	if (!cmd)
1982 		return -ENOMEM;
1983 
1984 	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
1985 	cmd->header.size = sizeof(cmd->body);
1986 	cmd->body.sid = res->id;
1987 	vmw_cmd_commit(dev_priv, alloc_size);
1988 
1989 	return 0;
1990 }
1991 
1992 /**
1993  * vmw_gb_surface_define - Define a private GB surface
1994  *
1995  * @dev_priv: Pointer to a device private.
1996  * @req: Metadata representing the surface to create.
1997  * @srf_out: Pointer to the newly allocated surface. Set to NULL on failure.
1998  *
1999  * GB surfaces allocated by this function will not have a user mode handle,
2000  * and thus will only be visible to vmwgfx. As an optimization, another
2001  * function may later give the surface a user mode handle to make it
2002  * available to user mode drivers.
2003  */
2004 int vmw_gb_surface_define(struct vmw_private *dev_priv,
2005 			  const struct vmw_surface_metadata *req,
2006 			  struct vmw_surface **srf_out)
2007 {
2008 	struct vmw_surface_metadata *metadata;
2009 	struct vmw_user_surface *user_srf;
2010 	struct vmw_surface *srf;
2011 	u32 sample_count = 1;
2012 	u32 num_layers = 1;
2013 	int ret;
2014 
2015 	*srf_out = NULL;
2016 
2017 	if (req->scanout) {
2018 		if (!vmw_surface_is_screen_target_format(req->format)) {
2019 			VMW_DEBUG_USER("Invalid Screen Target surface format.");
2020 			return -EINVAL;
2021 		}
2022 
2023 		if (req->base_size.width > dev_priv->texture_max_width ||
2024 		    req->base_size.height > dev_priv->texture_max_height) {
2025 			VMW_DEBUG_USER("%ux%u\n, exceed max surface size %ux%u",
2026 				       req->base_size.width,
2027 				       req->base_size.height,
2028 				       dev_priv->texture_max_width,
2029 				       dev_priv->texture_max_height);
2030 			return -EINVAL;
2031 		}
2032 	} else {
2033 		const SVGA3dSurfaceDesc *desc =
2034 			vmw_surface_get_desc(req->format);
2035 
2036 		if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) {
2037 			VMW_DEBUG_USER("Invalid surface format.\n");
2038 			return -EINVAL;
2039 		}
2040 	}
2041 
2042 	if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
2043 		return -EINVAL;
2044 
2045 	if (req->num_sizes != 1)
2046 		return -EINVAL;
2047 
2048 	if (req->sizes != NULL)
2049 		return -EINVAL;
2050 
2051 	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
2052 	if (unlikely(!user_srf))
2053 		return -ENOMEM;
2056 
2057 	*srf_out  = &user_srf->srf;
2058 	*srf_out = &user_srf->srf;
2059 	user_srf->prime.base.tfile = NULL;
2060 
2061 	srf = &user_srf->srf;
2062 	srf->metadata = *req;
2063 	srf->offsets = NULL;
2064 
2065 	metadata = &srf->metadata;
2066 
2067 	if (metadata->array_size)
2068 		num_layers = req->array_size;
2069 	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
2070 		num_layers = SVGA3D_MAX_SURFACE_FACES;
2071 
2072 	if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
2073 		sample_count = metadata->multisample_count;
2074 
2075 	srf->res.backup_size =
2076 		vmw_surface_get_serialized_size_extended(
2077 				metadata->format,
2078 				metadata->base_size,
2079 				metadata->mip_levels[0],
2080 				num_layers,
2081 				sample_count);
2082 
2083 	if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
2084 		srf->res.backup_size += sizeof(SVGA3dDXSOState);
2085 
2086 	/*
2087 	 * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
2088 	 * size greater than STDU max width/height. This is really a workaround
2089 	 * to support creation of big framebuffer requested by some user-space
2090 	 * for whole topology. That big framebuffer won't really be used for
2091 	 * binding with screen target as during prepare_fb a separate surface is
2092 	 * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
2093 	 */
2094 	if (dev_priv->active_display_unit == vmw_du_screen_target &&
2095 	    metadata->scanout &&
2096 	    metadata->base_size.width <= dev_priv->stdu_max_width &&
2097 	    metadata->base_size.height <= dev_priv->stdu_max_height)
2098 		metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;
2099 
2100 	/*
2101 	 * From this point, the generic resource management functions
2102 	 * destroy the object on failure.
2103 	 */
2104 	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
2105 
2106 	return ret;
2110 }
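
     /*
      * Editorial usage sketch (hypothetical in-kernel caller, not part of
      * this file), assuming a format that passes the checks above:
      *
      *	struct vmw_surface_metadata metadata = {
      *		.format = SVGA3D_B8G8R8X8_UNORM,
      *		.base_size = { .width = 640, .height = 480, .depth = 1 },
      *		.mip_levels = { 1 },
      *		.num_sizes = 1,
      *		.array_size = 1,
      *	};
      *	struct vmw_surface *srf;
      *	int ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
      *
      *	if (ret)
      *		return ret;
      */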
2111