1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  **************************************************************************/
8 
9 #ifndef _VMWGFX_DRV_H_
10 #define _VMWGFX_DRV_H_
11 
12 #include <linux/suspend.h>
13 #include <linux/sync_file.h>
14 #include <linux/hashtable.h>
15 
16 #include <drm/drm_auth.h>
17 #include <drm/drm_device.h>
18 #include <drm/drm_file.h>
19 #include <drm/drm_print.h>
20 #include <drm/drm_rect.h>
21 
22 #include <drm/ttm/ttm_execbuf_util.h>
23 #include <drm/ttm/ttm_tt.h>
24 #include <drm/ttm/ttm_placement.h>
25 #include <drm/ttm/ttm_bo.h>
26 
27 #include "ttm_object.h"
28 
29 #include "vmwgfx_fence.h"
30 #include "vmwgfx_reg.h"
31 #include "vmwgfx_validation.h"
32 
33 /*
34  * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
35  * uapi headers should not depend on header files outside uapi/.
36  */
37 #include <drm/vmwgfx_drm.h>
38 
39 
40 #define VMWGFX_DRIVER_NAME "vmwgfx"
41 #define VMWGFX_DRIVER_MAJOR 2
42 #define VMWGFX_DRIVER_MINOR 21
43 #define VMWGFX_DRIVER_PATCHLEVEL 0
44 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
45 #define VMWGFX_NUM_DISPLAY_UNITS 8
46 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
47 
48 #define VMWGFX_MIN_INITIAL_WIDTH 1280
49 #define VMWGFX_MIN_INITIAL_HEIGHT 800
50 
51 #define VMWGFX_PCI_ID_SVGA2              0x0405
52 #define VMWGFX_PCI_ID_SVGA3              0x0406
53 
54 /*
55  * This has to match get_count_order(SVGA_IRQFLAG_MAX)
56  */
57 #define VMWGFX_MAX_NUM_IRQS 6
58 
59 /*
60  * Perhaps we should have sysfs entries for these.
61  */
62 #define VMWGFX_NUM_GB_CONTEXT 256
63 #define VMWGFX_NUM_GB_SHADER 20000
64 #define VMWGFX_NUM_GB_SURFACE 32768
65 #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS
66 #define VMWGFX_NUM_DXCONTEXT 256
67 #define VMWGFX_NUM_DXQUERY 512
68 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
69 			VMWGFX_NUM_GB_SHADER +\
70 			VMWGFX_NUM_GB_SURFACE +\
71 			VMWGFX_NUM_GB_SCREEN_TARGET)
72 
73 #define VMW_PL_GMR      (TTM_PL_PRIV + 0)
74 #define VMW_PL_MOB      (TTM_PL_PRIV + 1)
75 #define VMW_PL_SYSTEM   (TTM_PL_PRIV + 2)
76 
77 #define VMW_RES_CONTEXT ttm_driver_type0
78 #define VMW_RES_SURFACE ttm_driver_type1
79 #define VMW_RES_STREAM ttm_driver_type2
80 #define VMW_RES_FENCE ttm_driver_type3
81 #define VMW_RES_SHADER ttm_driver_type4
82 #define VMW_RES_HT_ORDER 12
83 
84 #define MKSSTAT_CAPACITY_LOG2 5U
85 #define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
86 
87 struct vmw_fpriv {
88 	struct ttm_object_file *tfile;
89 	bool gb_aware; /* user-space is guest-backed aware */
90 };
91 
92 struct vmwgfx_hash_item {
93 	struct hlist_node head;
94 	unsigned long key;
95 };
96 
97 struct vmw_res_func;
98 
99 /**
100  * struct vmw_resource - base class for hardware resources
101  *
102  * @kref: For refcounting.
103  * @dev_priv: Pointer to the device private for this resource. Immutable.
104  * @id: Device id. Protected by @dev_priv::resource_lock.
105  * @guest_memory_size: Guest memory buffer size. Immutable.
106  * @res_dirty: Resource contains data not yet in the guest memory buffer.
107  * Protected by resource reserved.
108  * @guest_memory_dirty: Guest memory buffer contains data not yet in the HW
109  * resource. Protected by resource reserved.
110  * @coherent: Emulate coherency by tracking vm accesses.
111  * @guest_memory_bo: The guest memory buffer if any. Protected by resource
112  * reserved.
113  * @guest_memory_offset: Offset into the guest memory buffer if any. Protected
114  * by resource reserved. Note that only a few resource types can have a
115  * @guest_memory_offset different from zero.
116  * @pin_count: The pin count for this resource. A pinned resource has a
117  * pin-count greater than zero. It is not on the resource LRU lists and its
118  * guest memory buffer is pinned. Hence it can't be evicted.
119  * @func: Method vtable for this resource. Immutable.
120  * @mob_node: Node for the MOB guest memory rbtree. Protected by
121  * @guest_memory_bo reserved.
122  * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
123  * @binding_head: List head for the context binding list. Protected by
124  * the @dev_priv::binding_mutex
125  * @res_free: The resource destructor.
126  * @hw_destroy: Callback to destroy the resource on the device, as part of
127  * resource destruction.
128  */
129 struct vmw_bo;
131 struct vmw_resource_dirty;
132 struct vmw_resource {
133 	struct kref kref;
134 	struct vmw_private *dev_priv;
135 	int id;
136 	u32 used_prio;
137 	unsigned long guest_memory_size;
138 	u32 res_dirty : 1;
139 	u32 guest_memory_dirty : 1;
140 	u32 coherent : 1;
141 	struct vmw_bo *guest_memory_bo;
142 	unsigned long guest_memory_offset;
143 	unsigned long pin_count;
144 	const struct vmw_res_func *func;
145 	struct rb_node mob_node;
146 	struct list_head lru_head;
147 	struct list_head binding_head;
148 	struct vmw_resource_dirty *dirty;
149 	void (*res_free) (struct vmw_resource *res);
150 	void (*hw_destroy) (struct vmw_resource *res);
151 };
152 
153 
154 /*
155  * Resources that are managed using ioctls.
156  */
157 enum vmw_res_type {
158 	vmw_res_context,
159 	vmw_res_surface,
160 	vmw_res_stream,
161 	vmw_res_shader,
162 	vmw_res_dx_context,
163 	vmw_res_cotable,
164 	vmw_res_view,
165 	vmw_res_streamoutput,
166 	vmw_res_max
167 };
168 
169 /*
170  * Resources that are managed using command streams.
171  */
172 enum vmw_cmdbuf_res_type {
173 	vmw_cmdbuf_res_shader,
174 	vmw_cmdbuf_res_view,
175 	vmw_cmdbuf_res_streamoutput
176 };
177 
178 struct vmw_cmdbuf_res_manager;
179 
180 struct vmw_cursor_snooper {
181 	size_t id;
182 	uint32_t *image;
183 };
184 
185 struct vmw_framebuffer;
186 struct vmw_surface_offset;
187 
188 /**
189  * struct vmw_surface_metadata - Metadata describing a surface.
190  *
191  * @flags: Device flags.
192  * @format: Surface SVGA3D_x format.
193  * @mip_levels: Mip level for each face. For GB surfaces only the first index is used.
194  * @multisample_count: Sample count.
195  * @multisample_pattern: Sample patterns.
196  * @quality_level: Quality level.
197  * @autogen_filter: Filter for automatically generated mipmaps.
198  * @array_size: Number of array elements for a 1D/2D texture. For cubemap
199  *              textures this is the number of faces * array_size. This
200  *              should be 0 for pre-SM4 devices.
201  * @buffer_byte_stride: Buffer byte stride.
202  * @num_sizes: Size of @sizes. For GB surface this should always be 1.
203  * @base_size: Surface dimension.
204  * @sizes: Array representing mip sizes. Legacy only.
205  * @scanout: Whether this surface will be used for scanout.
206  *
207  * This tracks metadata for both legacy and guest-backed surfaces.
208  */
209 struct vmw_surface_metadata {
210 	u64 flags;
211 	u32 format;
212 	u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
213 	u32 multisample_count;
214 	u32 multisample_pattern;
215 	u32 quality_level;
216 	u32 autogen_filter;
217 	u32 array_size;
218 	u32 num_sizes;
219 	u32 buffer_byte_stride;
220 	struct drm_vmw_size base_size;
221 	struct drm_vmw_size *sizes;
222 	bool scanout;
223 };
224 
225 /**
226  * struct vmw_surface: Resource structure for a surface.
227  *
228  * @res: The base resource for this surface.
229  * @metadata: Metadata for this surface resource.
230  * @snooper: Cursor data. Legacy surface only.
231  * @offsets: Legacy surface only.
232  * @view_list: List of views bound to this surface.
233  */
234 struct vmw_surface {
235 	struct vmw_resource res;
236 	struct vmw_surface_metadata metadata;
237 	struct vmw_cursor_snooper snooper;
238 	struct vmw_surface_offset *offsets;
239 	struct list_head view_list;
240 };
241 
242 struct vmw_fifo_state {
243 	unsigned long reserved_size;
244 	u32 *dynamic_buffer;
245 	u32 *static_buffer;
246 	unsigned long static_buffer_size;
247 	bool using_bounce_buffer;
248 	uint32_t capabilities;
249 	struct mutex fifo_mutex;
250 	struct rw_semaphore rwsem;
251 };
252 
253 /**
254  * struct vmw_res_cache_entry - resource information cache entry
255  * @handle: User-space handle of a resource.
256  * @res: Non-ref-counted pointer to the resource.
257  * @valid_handle: Whether the @handle member is valid.
258  * @valid: Whether the entry is valid, which also implies that the execbuf
259  * code holds a reference to the resource, and it's placed on the
260  * validation list.
261  *
262  * Used to avoid frequent repeated user-space handle lookups of the
263  * same resource.
264  */
265 struct vmw_res_cache_entry {
266 	uint32_t handle;
267 	struct vmw_resource *res;
268 	void *private;
269 	unsigned short valid_handle;
270 	unsigned short valid;
271 };
272 
273 /**
274  * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
275  */
276 enum vmw_dma_map_mode {
277 	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
278 	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
279 	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
280 	vmw_dma_map_max
281 };
282 
283 /**
284  * struct vmw_sg_table - Scatter/gather table for binding, with additional
285  * device-specific information.
286  *
287  * @sgt: Pointer to a struct sg_table with binding information
288  * @num_pages: Number of pages in the backing page and DMA address arrays
289  */
290 struct vmw_sg_table {
291 	enum vmw_dma_map_mode mode;
292 	struct page **pages;
293 	const dma_addr_t *addrs;
294 	struct sg_table *sgt;
295 	unsigned long num_pages;
296 };
297 
298 /**
299  * struct vmw_piter - Page iterator that iterates over a list of pages
300  * and DMA addresses that could be either a scatter-gather list or
301  * arrays
302  *
303  * @pages: Array of page pointers to the pages.
304  * @addrs: DMA addresses to the pages if coherent pages are used.
305  * @iter: Scatter-gather page iterator. Current position in SG list.
306  * @i: Current position in arrays.
307  * @num_pages: Number of pages total.
308  * @next: Function to advance the iterator. Returns false if past the list
309  * of pages, true otherwise.
310  * @dma_address: Function to return the DMA address of the current page.
311  */
312 struct vmw_piter {
313 	struct page **pages;
314 	const dma_addr_t *addrs;
315 	struct sg_dma_page_iter iter;
316 	unsigned long i;
317 	unsigned long num_pages;
318 	bool (*next)(struct vmw_piter *);
319 	dma_addr_t (*dma_address)(struct vmw_piter *);
320 };
321 
322 
323 struct vmw_ttm_tt {
324 	struct ttm_tt dma_ttm;
325 	struct vmw_private *dev_priv;
326 	int gmr_id;
327 	struct vmw_mob *mob;
328 	int mem_type;
329 	struct sg_table sgt;
330 	struct vmw_sg_table vsgt;
331 	bool mapped;
332 	bool bound;
333 };
334 
335 /*
336  * enum vmw_display_unit_type - Describes the display unit
337  */
338 enum vmw_display_unit_type {
339 	vmw_du_invalid = 0,
340 	vmw_du_legacy,
341 	vmw_du_screen_object,
342 	vmw_du_screen_target,
343 	vmw_du_max
344 };
345 
346 struct vmw_validation_context;
347 struct vmw_ctx_validation_info;
348 
349 /**
350  * struct vmw_sw_context - Command submission context
351  * @res_ht: Pointer hash table used to find validation duplicates
352  * @kernel: Whether the command buffer originates from kernel code rather
353  * than from user-space
354  * @fp: If @kernel is false, points to the file of the client. Otherwise
355  * NULL
356  * @cmd_bounce: Command bounce buffer used for command validation before
357  * copying to fifo space
358  * @cmd_bounce_size: Current command bounce buffer size
359  * @cur_query_bo: Current buffer object used as query result buffer
360  * @bo_relocations: List of buffer object relocations
361  * @res_relocations: List of resource relocations
362  * @buf_start: Pointer to start of memory where command validation takes
363  * place
364  * @res_cache: Cache of recently looked up resources
365  * @last_query_ctx: Last context that submitted a query
366  * @needs_post_query_barrier: Whether a query barrier is needed after
367  * command submission
368  * @staged_bindings: Cached per-context binding tracker
369  * @staged_bindings_inuse: Whether the cached per-context binding tracker
370  * is in use
371  * @staged_cmd_res: List of staged command buffer managed resources in this
372  * command buffer
373  * @ctx_list: List of context resources referenced in this command buffer
374  * @dx_ctx_node: Validation metadata of the current DX context
375  * @dx_query_mob: The MOB used for DX queries
376  * @dx_query_ctx: The DX context used for the last DX query
377  * @man: Pointer to the command buffer managed resource manager
378  * @ctx: The validation context
379  */
380 struct vmw_sw_context{
381 	DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER);
382 	bool kernel;
383 	struct vmw_fpriv *fp;
384 	struct drm_file *filp;
385 	uint32_t *cmd_bounce;
386 	uint32_t cmd_bounce_size;
387 	struct vmw_bo *cur_query_bo;
388 	struct list_head bo_relocations;
389 	struct list_head res_relocations;
390 	uint32_t *buf_start;
391 	struct vmw_res_cache_entry res_cache[vmw_res_max];
392 	struct vmw_resource *last_query_ctx;
393 	bool needs_post_query_barrier;
394 	struct vmw_ctx_binding_state *staged_bindings;
395 	bool staged_bindings_inuse;
396 	struct list_head staged_cmd_res;
397 	struct list_head ctx_list;
398 	struct vmw_ctx_validation_info *dx_ctx_node;
399 	struct vmw_bo *dx_query_mob;
400 	struct vmw_resource *dx_query_ctx;
401 	struct vmw_cmdbuf_res_manager *man;
402 	struct vmw_validation_context *ctx;
403 };
404 
405 struct vmw_legacy_display;
406 struct vmw_overlay;
407 
408 /*
409  * struct vmw_otable - Guest Memory OBject table metadata
410  *
411  * @size:           Size of the table (page-aligned).
412  * @page_table:     Pointer to a struct vmw_mob holding the page table.
413  */
414 struct vmw_otable {
415 	unsigned long size;
416 	struct vmw_mob *page_table;
417 	bool enabled;
418 };
419 
420 struct vmw_otable_batch {
421 	unsigned num_otables;
422 	struct vmw_otable *otables;
423 	struct vmw_resource *context;
424 	struct vmw_bo *otable_bo;
425 };
426 
427 enum {
428 	VMW_IRQTHREAD_FENCE,
429 	VMW_IRQTHREAD_CMDBUF,
430 	VMW_IRQTHREAD_MAX
431 };
432 
433 /**
434  * enum vmw_sm_type - Graphics context capability supported by device.
435  * @VMW_SM_LEGACY: Pre DX context.
436  * @VMW_SM_4: Context support up to SM4.
437  * @VMW_SM_4_1: Context support up to SM4_1.
438  * @VMW_SM_5: Context support up to SM5.
439  * @VMW_SM_5_1X: Adds support for sm5_1 and gl43 extensions.
440  * @VMW_SM_MAX: Should be the last.
441  */
442 enum vmw_sm_type {
443 	VMW_SM_LEGACY = 0,
444 	VMW_SM_4,
445 	VMW_SM_4_1,
446 	VMW_SM_5,
447 	VMW_SM_5_1X,
448 	VMW_SM_MAX
449 };
450 
451 struct vmw_private {
452 	struct drm_device drm;
453 	struct ttm_device bdev;
454 
455 	u32 pci_id;
456 	resource_size_t io_start;
457 	resource_size_t vram_start;
458 	resource_size_t vram_size;
459 	resource_size_t max_primary_mem;
460 	u32 __iomem *rmmio;
461 	u32 *fifo_mem;
462 	resource_size_t fifo_mem_size;
463 	uint32_t fb_max_width;
464 	uint32_t fb_max_height;
465 	uint32_t texture_max_width;
466 	uint32_t texture_max_height;
467 	uint32_t stdu_max_width;
468 	uint32_t stdu_max_height;
469 	uint32_t initial_width;
470 	uint32_t initial_height;
471 	uint32_t capabilities;
472 	uint32_t capabilities2;
473 	uint32_t max_gmr_ids;
474 	uint32_t max_gmr_pages;
475 	uint32_t max_mob_pages;
476 	uint32_t max_mob_size;
477 	uint32_t memory_size;
478 	bool has_gmr;
479 	bool has_mob;
480 	spinlock_t hw_lock;
481 	bool assume_16bpp;
482 	u32 irqs[VMWGFX_MAX_NUM_IRQS];
483 	u32 num_irq_vectors;
484 
485 	enum vmw_sm_type sm_type;
486 
487 	/*
488 	 * Framebuffer info.
489 	 */
490 
491 	enum vmw_display_unit_type active_display_unit;
492 	struct vmw_legacy_display *ldu_priv;
493 	struct vmw_overlay *overlay_priv;
494 	struct drm_property *hotplug_mode_update_property;
495 	struct drm_property *implicit_placement_property;
496 	spinlock_t cursor_lock;
497 	struct drm_atomic_state *suspend_state;
498 
499 	/*
500 	 * Context and surface management.
501 	 */
502 
503 	spinlock_t resource_lock;
504 	struct idr res_idr[vmw_res_max];
505 
506 	/*
507 	 * A resource manager for kernel-only surfaces and
508 	 * contexts.
509 	 */
510 
511 	struct ttm_object_device *tdev;
512 
513 	/*
514 	 * Fencing and IRQs.
515 	 */
516 
517 	atomic_t marker_seq;
518 	wait_queue_head_t fence_queue;
519 	wait_queue_head_t fifo_queue;
520 	spinlock_t waiter_lock;
521 	int fence_queue_waiters; /* Protected by waiter_lock */
522 	int goal_queue_waiters; /* Protected by waiter_lock */
523 	int cmdbuf_waiters; /* Protected by waiter_lock */
524 	int error_waiters; /* Protected by waiter_lock */
525 	int fifo_queue_waiters; /* Protected by waiter_lock */
526 	atomic_t last_read_seqno;
527 	struct vmw_fence_manager *fman;
528 	uint32_t irq_mask; /* Updates protected by waiter_lock */
529 
530 	/*
531 	 * Device state
532 	 */
533 
534 	uint32_t traces_state;
535 	uint32_t enable_state;
536 	uint32_t config_done_state;
537 
538 	/**
539 	 * Execbuf
540 	 */
541 	/**
542 	 * Protected by the cmdbuf mutex.
543 	 */
544 
545 	struct vmw_sw_context ctx;
546 	struct mutex cmdbuf_mutex;
547 	struct mutex binding_mutex;
548 
549 	/**
550 	 * PM management.
551 	 */
552 	struct notifier_block pm_nb;
553 	bool refuse_hibernation;
554 	bool suspend_locked;
555 
556 	atomic_t num_fifo_resources;
557 
558 	/*
559 	 * Query processing. These members
560 	 * are protected by the cmdbuf mutex.
561 	 */
562 
563 	struct vmw_bo *dummy_query_bo;
564 	struct vmw_bo *pinned_bo;
565 	uint32_t query_cid;
566 	uint32_t query_cid_valid;
567 	bool dummy_query_bo_pinned;
568 
569 	/*
570 	 * Surface swapping. The "surface_lru" list is protected by the
571 	 * resource lock in order to be able to destroy a surface and take
572 	 * it off the lru atomically. "used_memory_size" is currently
573 	 * protected by the cmdbuf mutex for simplicity.
574 	 */
575 
576 	struct list_head res_lru[vmw_res_max];
577 	uint32_t used_memory_size;
578 
579 	/*
580 	 * DMA mapping stuff.
581 	 */
582 	enum vmw_dma_map_mode map_mode;
583 
584 	/*
585 	 * Guest Backed stuff
586 	 */
587 	struct vmw_otable_batch otable_batch;
588 
589 	struct vmw_fifo_state *fifo;
590 	struct vmw_cmdbuf_man *cman;
591 	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
592 
593 	uint32 *devcaps;
594 
595 	bool vkms_enabled;
596 	struct workqueue_struct *crc_workq;
597 
598 	/*
599 	 * mksGuestStat instance-descriptor and pid arrays
600 	 */
601 	struct page *mksstat_user_pages[MKSSTAT_CAPACITY];
602 	atomic_t mksstat_user_pids[MKSSTAT_CAPACITY];
603 
604 #if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
605 	struct page *mksstat_kern_pages[MKSSTAT_CAPACITY];
606 	u8 mksstat_kern_top_timer[MKSSTAT_CAPACITY];
607 	atomic_t mksstat_kern_pids[MKSSTAT_CAPACITY];
608 #endif
609 };
610 
611 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
612 {
613 	return container_of(res, struct vmw_surface, res);
614 }
615 
616 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
617 {
618 	return container_of(dev, struct vmw_private, drm);
619 }
620 
621 static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
622 {
623 	return container_of(bdev, struct vmw_private, bdev);
624 }
625 
626 static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
627 {
628 	return (struct vmw_fpriv *)file_priv->driver_priv;
629 }
630 
631 /*
632  * SVGA v3 has mmio register access and lacks fifo cmds
633  */
634 static inline bool vmw_is_svga_v3(const struct vmw_private *dev)
635 {
636 	return dev->pci_id == VMWGFX_PCI_ID_SVGA3;
637 }
638 
639 /*
640  * The locking here is fine-grained, so that it is performed once
641  * for every read- and write operation. This is of course costly, but we
642  * don't perform much register access in the timing critical paths anyway.
643  * Instead we have the extra benefit of being sure that we don't forget
644  * the hw lock around register accesses.
645  */
646 static inline void vmw_write(struct vmw_private *dev_priv,
647 			     unsigned int offset, uint32_t value)
648 {
649 	if (vmw_is_svga_v3(dev_priv)) {
650 		iowrite32(value, dev_priv->rmmio + offset);
651 	} else {
652 		spin_lock(&dev_priv->hw_lock);
653 		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
654 		outl(value, dev_priv->io_start + SVGA_VALUE_PORT);
655 		spin_unlock(&dev_priv->hw_lock);
656 	}
657 }
658 
659 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
660 				unsigned int offset)
661 {
662 	u32 val;
663 
664 	if (vmw_is_svga_v3(dev_priv)) {
665 		val = ioread32(dev_priv->rmmio + offset);
666 	} else {
667 		spin_lock(&dev_priv->hw_lock);
668 		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
669 		val = inl(dev_priv->io_start + SVGA_VALUE_PORT);
670 		spin_unlock(&dev_priv->hw_lock);
671 	}
672 
673 	return val;
674 }
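
/*
 * A minimal usage sketch: callers read, modify and write SVGA registers
 * through the accessors above; the index/value port locking (or the MMIO
 * path on SVGA v3) is handled internally. SVGA_REG_TRACES is used purely
 * as an illustrative register here:
 *
 *	u32 traces = vmw_read(dev_priv, SVGA_REG_TRACES);
 *
 *	vmw_write(dev_priv, SVGA_REG_TRACES, traces | 1);
 */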
675 
676 /**
677  * has_sm4_context - Does the device support SM4 contexts?
678  * @dev_priv: Device private.
679  *
680  * Return: True if the device supports SM4 contexts, false otherwise.
681  */
682 static inline bool has_sm4_context(const struct vmw_private *dev_priv)
683 {
684 	return (dev_priv->sm_type >= VMW_SM_4);
685 }
686 
687 /**
688  * has_sm4_1_context - Does the device support SM4_1 contexts?
689  * @dev_priv: Device private.
690  *
691  * Return: True if the device supports SM4_1 contexts, false otherwise.
692  */
693 static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
694 {
695 	return (dev_priv->sm_type >= VMW_SM_4_1);
696 }
697 
698 /**
699  * has_sm5_context - Does the device support SM5 contexts?
700  * @dev_priv: Device private.
701  *
702  * Return: True if the device supports SM5 contexts, false otherwise.
703  */
704 static inline bool has_sm5_context(const struct vmw_private *dev_priv)
705 {
706 	return (dev_priv->sm_type >= VMW_SM_5);
707 }
708 
709 /**
710  * has_gl43_context - Does the device support GL43 contexts?
711  * @dev_priv: Device private.
712  *
713  * Return: True if the device supports GL43 contexts, false otherwise.
714  */
715 static inline bool has_gl43_context(const struct vmw_private *dev_priv)
716 {
717 	return (dev_priv->sm_type >= VMW_SM_5_1X);
718 }
719 
720 
721 static inline u32 vmw_max_num_uavs(struct vmw_private *dev_priv)
722 {
723 	return (has_gl43_context(dev_priv) ?
724 			SVGA3D_DX11_1_MAX_UAVIEWS : SVGA3D_MAX_UAVIEWS);
725 }
726 
727 extern void vmw_svga_enable(struct vmw_private *dev_priv);
728 extern void vmw_svga_disable(struct vmw_private *dev_priv);
729 bool vmwgfx_supported(struct vmw_private *vmw);
730 
731 
732 /**
733  * GMR utilities - vmwgfx_gmr.c
734  */
735 
736 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
737 			const struct vmw_sg_table *vsgt,
738 			unsigned long num_pages,
739 			int gmr_id);
740 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
741 
742 /**
743  * User handles
744  */
745 struct vmw_user_object {
746 	struct vmw_surface *surface;
747 	struct vmw_bo *buffer;
748 };
749 
750 int vmw_user_object_lookup(struct vmw_private *dev_priv, struct drm_file *filp,
751 			   u32 handle, struct vmw_user_object *uo);
752 struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo);
753 void vmw_user_object_unref(struct vmw_user_object *uo);
754 bool vmw_user_object_is_null(struct vmw_user_object *uo);
755 struct vmw_surface *vmw_user_object_surface(struct vmw_user_object *uo);
756 struct vmw_bo *vmw_user_object_buffer(struct vmw_user_object *uo);
757 void *vmw_user_object_map(struct vmw_user_object *uo);
758 void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size);
759 void vmw_user_object_unmap(struct vmw_user_object *uo);
760 bool vmw_user_object_is_mapped(struct vmw_user_object *uo);
761 
762 /**
763  * Resource utilities - vmwgfx_resource.c
764  */
765 struct vmw_user_resource_conv;
766 
767 extern void vmw_resource_unreference(struct vmw_resource **p_res);
768 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
769 extern struct vmw_resource *
770 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
771 extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
772 				 bool dirtying);
773 extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
774 				bool no_backup);
775 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
776 extern int vmw_user_resource_lookup_handle(
777 	struct vmw_private *dev_priv,
778 	struct ttm_object_file *tfile,
779 	uint32_t handle,
780 	const struct vmw_user_resource_conv *converter,
781 	struct vmw_resource **p_res);
782 
783 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
784 				  struct drm_file *file_priv);
785 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
786 				  struct drm_file *file_priv);
787 extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
788 				  struct ttm_object_file *tfile,
789 				  uint32_t *inout_id,
790 				  struct vmw_resource **out);
791 extern void vmw_resource_unreserve(struct vmw_resource *res,
792 				   bool dirty_set,
793 				   bool dirty,
794 				   bool switch_guest_memory,
795 				   struct vmw_bo *new_guest_memory,
796 				   unsigned long new_guest_memory_offset);
797 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
798 				  struct ttm_resource *old_mem,
799 				  struct ttm_resource *new_mem);
800 int vmw_query_readback_all(struct vmw_bo *dx_query_mob);
801 void vmw_resource_evict_all(struct vmw_private *dev_priv);
802 void vmw_resource_unbind_list(struct vmw_bo *vbo);
803 void vmw_resource_mob_attach(struct vmw_resource *res);
804 void vmw_resource_mob_detach(struct vmw_resource *res);
805 void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
806 			       pgoff_t end);
807 int vmw_resource_clean(struct vmw_resource *res);
808 int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
809 			pgoff_t end, pgoff_t *num_prefault);
810 
811 /**
812  * vmw_resource_mob_attached - Whether a resource currently has a mob attached
813  * @res: The resource
814  *
815  * Return: true if the resource has a mob attached, false otherwise.
816  */
817 static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
818 {
819 	return !RB_EMPTY_NODE(&res->mob_node);
820 }
821 
822 /**
823  * GEM related functionality - vmwgfx_gem.c
824  */
825 struct vmw_bo_params;
826 extern const struct drm_gem_object_funcs vmw_gem_object_funcs;
827 extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
828 					     struct drm_file *filp,
829 					     uint32_t size,
830 					     uint32_t *handle,
831 					     struct vmw_bo **p_vbo);
832 extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
833 				       struct drm_file *filp);
834 extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
835 
836 /**
837  * Misc Ioctl functionality - vmwgfx_ioctl.c
838  */
839 
840 extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
841 			      struct drm_file *file_priv);
842 extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
843 				struct drm_file *file_priv);
844 extern int vmw_present_ioctl(struct drm_device *dev, void *data,
845 			     struct drm_file *file_priv);
846 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
847 				      struct drm_file *file_priv);
848 
849 /**
850  * Fifo utilities - vmwgfx_fifo.c
851  */
852 
853 extern struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv);
854 extern void vmw_fifo_destroy(struct vmw_private *dev_priv);
855 extern bool vmw_cmd_supported(struct vmw_private *vmw);
856 extern void *
857 vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
858 extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
859 extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
860 extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
861 extern bool vmw_supports_3d(struct vmw_private *dev_priv);
862 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
863 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
864 extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
865 				    uint32_t cid);
866 extern int vmw_cmd_flush(struct vmw_private *dev_priv,
867 			 bool interruptible);
868 
869 #define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id)                        \
870 ({                                                                            \
871 	vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({                 \
872 		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",         \
873 			  __func__, (unsigned int) __bytes);                  \
874 		NULL;                                                         \
875 	});                                                                   \
876 })
877 
878 #define VMW_CMD_RESERVE(__priv, __bytes)                                     \
879 	VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
880 
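/*
 * A minimal sketch of the reserve/fill/commit pattern these macros support,
 * loosely modeled on the driver's context-destroy path (the exact command
 * struct is illustrative):
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdDestroyContext body;
 *	} *cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 *
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
 *	cmd->header.size = sizeof(cmd->body);
 *	cmd->body.cid = res->id;
 *	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 */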
881 
882 /**
883  * vmw_fifo_caps - Returns the capabilities of the FIFO command
884  * queue or 0 if fifo memory isn't present.
885  * @dev_priv: The device private context
886  */
887 static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
888 {
889 	if (!dev_priv->fifo_mem || !dev_priv->fifo)
890 		return 0;
891 	return dev_priv->fifo->capabilities;
892 }
893 
894 
895 /**
896  * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3
897  * is enabled in the FIFO.
898  * @dev_priv: The device private context
899  */
900 static inline bool
901 vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
902 {
903 	return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
904 }
905 
906 /**
907  * TTM buffer object driver - vmwgfx_ttm_buffer.c
908  */
909 
910 extern const size_t vmw_tt_size;
911 extern struct ttm_placement vmw_vram_placement;
912 extern struct ttm_placement vmw_sys_placement;
913 extern struct ttm_device_funcs vmw_bo_driver;
914 extern const struct vmw_sg_table *
915 vmw_bo_sg_table(struct ttm_buffer_object *bo);
916 int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
917 			       size_t bo_size,
918 			       u32 domain,
919 			       struct vmw_bo **bo_p);
920 
921 extern void vmw_piter_start(struct vmw_piter *viter,
922 			    const struct vmw_sg_table *vsgt,
923 			    unsigned long p_offs);
924 
925 /**
926  * vmw_piter_next - Advance the iterator one page.
927  *
928  * @viter: Pointer to the iterator to advance.
929  *
930  * Returns false if past the list of pages, true otherwise.
931  */
932 static inline bool vmw_piter_next(struct vmw_piter *viter)
933 {
934 	return viter->next(viter);
935 }
936 
937 /**
938  * vmw_piter_dma_addr - Return the DMA address of the current page.
939  *
940  * @viter: Pointer to the iterator
941  *
942  * Returns the DMA address of the page pointed to by @viter.
943  */
944 static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
945 {
946 	return viter->dma_address(viter);
947 }
948 
949 /**
950  * vmw_piter_page - Return a pointer to the current page.
951  *
952  * @viter: Pointer to the iterator
953  *
954  * Returns a pointer to the page currently pointed to by @viter.
955  */
956 static inline struct page *vmw_piter_page(struct vmw_piter *viter)
957 {
958 	return viter->pages[viter->i];
959 }
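
/*
 * A minimal sketch of walking a buffer's backing pages with the iterator,
 * assuming @vsgt was obtained from vmw_bo_sg_table(). The first
 * vmw_piter_next() call advances to the first page:
 *
 *	struct vmw_piter iter;
 *
 *	vmw_piter_start(&iter, vsgt, 0);
 *	while (vmw_piter_next(&iter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		// Hypothetical: program addr into a GMR / MOB page table.
 *	}
 */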
960 
961 /**
962  * Command submission - vmwgfx_execbuf.c
963  */
964 
965 extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
966 			     struct drm_file *file_priv);
967 extern int vmw_execbuf_process(struct drm_file *file_priv,
968 			       struct vmw_private *dev_priv,
969 			       void __user *user_commands,
970 			       void *kernel_commands,
971 			       uint32_t command_size,
972 			       uint64_t throttle_us,
973 			       uint32_t dx_context_handle,
974 			       struct drm_vmw_fence_rep __user
975 			       *user_fence_rep,
976 			       struct vmw_fence_obj **out_fence,
977 			       uint32_t flags);
978 extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
979 					    struct vmw_fence_obj *fence);
980 extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
981 
982 extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
983 				      struct vmw_private *dev_priv,
984 				      struct vmw_fence_obj **p_fence,
985 				      uint32_t *p_handle);
986 extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
987 					struct vmw_fpriv *vmw_fp,
988 					int ret,
989 					struct drm_vmw_fence_rep __user
990 					*user_fence_rep,
991 					struct vmw_fence_obj *fence,
992 					uint32_t fence_handle,
993 					int32_t out_fence_fd);
994 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
995 
996 /**
997  * IRQs and waiting - vmwgfx_irq.c
998  */
999 
1000 extern int vmw_irq_install(struct vmw_private *dev_priv);
1001 extern void vmw_irq_uninstall(struct drm_device *dev);
1002 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
1003 				uint32_t seqno);
1004 extern int vmw_fallback_wait(struct vmw_private *dev_priv,
1005 			     bool lazy,
1006 			     bool fifo_idle,
1007 			     uint32_t seqno,
1008 			     bool interruptible,
1009 			     unsigned long timeout);
1010 bool vmw_seqno_waiter_add(struct vmw_private *dev_priv);
1011 bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
1012 bool vmw_goal_waiter_add(struct vmw_private *dev_priv);
1013 bool vmw_goal_waiter_remove(struct vmw_private *dev_priv);
1014 bool vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
1015 			    int *waiter_count);
1016 bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
1017 			       u32 flag, int *waiter_count);
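
/*
 * A minimal sketch of how the generic waiter helpers are typically paired,
 * using the command-buffer waiter count as an example:
 *
 *	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 *			       &dev_priv->cmdbuf_waiters);
 *	// ... wait for the command-buffer IRQ ...
 *	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 *				  &dev_priv->cmdbuf_waiters);
 */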
1018 
1019 /**
1020  * Kernel modesetting - vmwgfx_kms.c
1021  */
1022 
1023 int vmw_kms_init(struct vmw_private *dev_priv);
1024 int vmw_kms_close(struct vmw_private *dev_priv);
1025 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1026 				struct drm_file *file_priv);
1027 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
1028 			  struct ttm_object_file *tfile,
1029 			  struct ttm_buffer_object *bo,
1030 			  SVGA3dCmdHeader *header);
1031 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
1032 		       unsigned width, unsigned height, unsigned pitch,
1033 		       unsigned bpp, unsigned depth);
1034 int vmw_kms_present(struct vmw_private *dev_priv,
1035 		    struct drm_file *file_priv,
1036 		    struct vmw_framebuffer *vfb,
1037 		    struct vmw_surface *surface,
1038 		    uint32_t sid, int32_t destX, int32_t destY,
1039 		    struct drm_vmw_rect *clips,
1040 		    uint32_t num_clips);
1041 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1042 				struct drm_file *file_priv);
1043 int vmw_kms_suspend(struct drm_device *dev);
1044 int vmw_kms_resume(struct drm_device *dev);
1045 void vmw_kms_lost_device(struct drm_device *dev);
1046 
1047 extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
1048 extern void vmw_resource_unpin(struct vmw_resource *res);
1049 extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
1050 
1051 /**
1052  * Overlay control - vmwgfx_overlay.c
1053  */
1054 
1055 int vmw_overlay_init(struct vmw_private *dev_priv);
1056 int vmw_overlay_close(struct vmw_private *dev_priv);
1057 int vmw_overlay_ioctl(struct drm_device *dev, void *data,
1058 		      struct drm_file *file_priv);
1059 int vmw_overlay_resume_all(struct vmw_private *dev_priv);
1060 int vmw_overlay_pause_all(struct vmw_private *dev_priv);
1061 int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
1062 int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
1063 int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
1064 int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
1065 
1066 /**
1067  * GMR Id manager
1068  */
1069 
1070 int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
1071 void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
1072 
1073 /**
1074  * System memory manager
1075  */
1076 int vmw_sys_man_init(struct vmw_private *dev_priv);
1077 void vmw_sys_man_fini(struct vmw_private *dev_priv);
1078 
1079 /**
1080  * Prime - vmwgfx_prime.c
1081  */
1082 
1083 extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
1084 extern int vmw_prime_fd_to_handle(struct drm_device *dev,
1085 				  struct drm_file *file_priv,
1086 				  int fd, u32 *handle);
1087 extern int vmw_prime_handle_to_fd(struct drm_device *dev,
1088 				  struct drm_file *file_priv,
1089 				  uint32_t handle, uint32_t flags,
1090 				  int *prime_fd);
1091 struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
1092 						 struct dma_buf_attachment *attach,
1093 						 struct sg_table *table);
1094 
1095 /*
1096  * Memory OBject (MOB) management - vmwgfx_mob.c
1097  */
1098 struct vmw_mob;
1099 extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
1100 			const struct vmw_sg_table *vsgt,
1101 			unsigned long num_data_pages, int32_t mob_id);
1102 extern void vmw_mob_unbind(struct vmw_private *dev_priv,
1103 			   struct vmw_mob *mob);
1104 extern void vmw_mob_destroy(struct vmw_mob *mob);
1105 extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
1106 extern int vmw_otables_setup(struct vmw_private *dev_priv);
1107 extern void vmw_otables_takedown(struct vmw_private *dev_priv);
1108 
1109 /*
1110  * Context management - vmwgfx_context.c
1111  */
1112 
1113 extern const struct vmw_user_resource_conv *user_context_converter;
1114 
1115 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
1116 				    struct drm_file *file_priv);
1117 extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
1118 					     struct drm_file *file_priv);
1119 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
1120 				     struct drm_file *file_priv);
1121 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
1122 extern struct vmw_cmdbuf_res_manager *
1123 vmw_context_res_man(struct vmw_resource *ctx);
1124 extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
1125 						SVGACOTableType cotable_type);
1126 struct vmw_ctx_binding_state;
1127 extern struct vmw_ctx_binding_state *
1128 vmw_context_binding_state(struct vmw_resource *ctx);
1129 extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
1130 					  bool readback);
1131 extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
1132 				     struct vmw_bo *mob);
1133 extern struct vmw_bo *
1134 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
1135 
1136 
1137 /*
1138  * Surface management - vmwgfx_surface.c
1139  */
1140 
1141 extern const struct vmw_user_resource_conv *user_surface_converter;
1142 
1143 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
1144 				     struct drm_file *file_priv);
1145 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1146 				    struct drm_file *file_priv);
1147 extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
1148 				       struct drm_file *file_priv);
1149 extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1150 				       struct drm_file *file_priv);
1151 extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
1152 					  struct drm_file *file_priv);
1153 extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
1154 					   void *data,
1155 					   struct drm_file *file_priv);
1156 extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
1157 					      void *data,
1158 					      struct drm_file *file_priv);
1159 
1160 int vmw_gb_surface_define(struct vmw_private *dev_priv,
1161 			  const struct vmw_surface_metadata *req,
1162 			  struct vmw_surface **srf_out);
1163 struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
1164 						  struct vmw_bo *bo,
1165 						  u32 handle);
1166 u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
1167 					 struct vmw_bo *bo,
1168 					 u32 handle);
1169 int vmw_dumb_create(struct drm_file *file_priv,
1170 		    struct drm_device *dev,
1171 		    struct drm_mode_create_dumb *args);
1172 
1173 /*
1174  * Shader management - vmwgfx_shader.c
1175  */
1176 
1177 extern const struct vmw_user_resource_conv *user_shader_converter;
1178 
1179 extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
1180 				   struct drm_file *file_priv);
1181 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
1182 				    struct drm_file *file_priv);
1183 extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
1184 				 struct vmw_cmdbuf_res_manager *man,
1185 				 u32 user_key, const void *bytecode,
1186 				 SVGA3dShaderType shader_type,
1187 				 size_t size,
1188 				 struct list_head *list);
1189 extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
1190 			     u32 user_key, SVGA3dShaderType shader_type,
1191 			     struct list_head *list);
1192 extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
1193 			     struct vmw_resource *ctx,
1194 			     u32 user_key,
1195 			     SVGA3dShaderType shader_type,
1196 			     struct list_head *list);
1197 extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
1198 					     struct list_head *list,
1199 					     bool readback);
1200 
1201 extern struct vmw_resource *
1202 vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
1203 		  u32 user_key, SVGA3dShaderType shader_type);
1204 
1205 /*
1206  * Streamoutput management
1207  */
1208 struct vmw_resource *
1209 vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
1210 			   u32 user_key);
1211 int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
1212 			    struct vmw_resource *ctx,
1213 			    SVGA3dStreamOutputId user_key,
1214 			    struct list_head *list);
1215 void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size);
1216 int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
1217 			       SVGA3dStreamOutputId user_key,
1218 			       struct list_head *list);
1219 void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
1220 					    struct list_head *list,
1221 					    bool readback);
1222 
1223 /*
1224  * Command buffer managed resources - vmwgfx_cmdbuf_res.c
1225  */
1226 
1227 extern struct vmw_cmdbuf_res_manager *
1228 vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
1229 extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
1230 extern struct vmw_resource *
1231 vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
1232 		      enum vmw_cmdbuf_res_type res_type,
1233 		      u32 user_key);
1234 extern void vmw_cmdbuf_res_revert(struct list_head *list);
1235 extern void vmw_cmdbuf_res_commit(struct list_head *list);
1236 extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
1237 			      enum vmw_cmdbuf_res_type res_type,
1238 			      u32 user_key,
1239 			      struct vmw_resource *res,
1240 			      struct list_head *list);
1241 extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
1242 				 enum vmw_cmdbuf_res_type res_type,
1243 				 u32 user_key,
1244 				 struct list_head *list,
1245 				 struct vmw_resource **res);
1246 
1247 /*
1248  * COTable management - vmwgfx_cotable.c
1249  */
1250 extern const SVGACOTableType vmw_cotable_scrub_order[];
1251 extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
1252 					      struct vmw_resource *ctx,
1253 					      u32 type);
1254 extern int vmw_cotable_notify(struct vmw_resource *res, int id);
1255 extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
1256 extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
1257 				     struct list_head *head);
1258 
1259 /*
1260  * Command buffer management - vmwgfx_cmdbuf.c
1261  */
1262 struct vmw_cmdbuf_man;
1263 struct vmw_cmdbuf_header;
1264 
1265 extern struct vmw_cmdbuf_man *
1266 vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
1267 extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
1268 extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
1269 extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
1270 extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
1271 			   unsigned long timeout);
1272 extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1273 				int ctx_id, bool interruptible,
1274 				struct vmw_cmdbuf_header *header);
1275 extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1276 			      struct vmw_cmdbuf_header *header,
1277 			      bool flush);
1278 extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
1279 			      size_t size, bool interruptible,
1280 			      struct vmw_cmdbuf_header **p_header);
1281 extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
1282 extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
1283 				bool interruptible);
1284 extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
1285 
1286 /* CPU blit utilities - vmwgfx_blit.c */
1287 
1288 /**
1289  * struct vmw_diff_cpy - CPU blit information structure
1290  *
1291  * @rect: The output bounding box rectangle.
1292  * @line: The current line of the blit.
1293  * @line_offset: Offset of the current line segment.
1294  * @cpp: Bytes per pixel (granularity information).
1295  * @do_cpy: Which copy function to use.
1296  */
1297 struct vmw_diff_cpy {
1298 	struct drm_rect rect;
1299 	size_t line;
1300 	size_t line_offset;
1301 	int cpp;
1302 	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
1303 		       size_t n);
1304 };
1305 
1306 #define VMW_CPU_BLIT_INITIALIZER {	\
1307 	.do_cpy = vmw_memcpy,		\
1308 }
1309 
1310 #define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {	  \
1311 	.line = 0,				  \
1312 	.line_offset = 0,			  \
1313 	.rect = { .x1 = INT_MAX/2,		  \
1314 		  .y1 = INT_MAX/2,		  \
1315 		  .x2 = INT_MIN/2,		  \
1316 		  .y2 = INT_MIN/2		  \
1317 	},					  \
1318 	.cpp = _cpp,				  \
1319 	.do_cpy = vmw_diff_memcpy,		  \
1320 }
1321 
1322 void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
1323 		     size_t n);
1324 
1325 void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
1326 
1327 int vmw_bo_cpu_blit(struct vmw_bo *dst,
1328 		    u32 dst_offset, u32 dst_stride,
1329 		    struct vmw_bo *src,
1330 		    u32 src_offset, u32 src_stride,
1331 		    u32 w, u32 h,
1332 		    struct vmw_diff_cpy *diff);
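
/*
 * A minimal usage sketch, assuming 32bpp buffers (cpp == 4) that are already
 * reserved; on return, diff.rect holds the bounding box of changed pixels:
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *	int ret;
 *
 *	ret = vmw_bo_cpu_blit(dst, dst_offset, dst_stride,
 *			      src, src_offset, src_stride,
 *			      width, height, &diff);
 */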
1333 
1334 /* Host messaging - vmwgfx_msg.c */
1335 void vmw_disable_backdoor(void);
1336 int vmw_host_get_guestinfo(const char *guest_info_param,
1337 			   char *buffer, size_t *length);
1338 __printf(1, 2) int vmw_host_printf(const char *fmt, ...);
1339 int vmw_msg_ioctl(struct drm_device *dev, void *data,
1340 		  struct drm_file *file_priv);
1341 
1342 /* Host mksGuestStats - vmwgfx_msg.c */
1343 int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv);
1344 
1345 int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
1346 		      struct drm_file *file_priv);
1347 int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
1348 		      struct drm_file *file_priv);
1349 int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
1350 		      struct drm_file *file_priv);
1351 int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
1352 
1353 /* VMW logging */
1354 
1355 /**
1356  * VMW_DEBUG_USER - Debug output for user-space debugging.
1357  *
1358  * @fmt: printf() like format string.
1359  *
1360  * This macro is for logging user-space error and debugging messages for e.g.
1361  * command buffer execution errors due to malformed commands, invalid context,
1362  * etc.
1363  */
1364 #define VMW_DEBUG_USER(fmt, ...)                                              \
1365 	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
1366 
1367 /* Resource dirtying - vmwgfx_page_dirty.c */
1368 bool vmw_bo_is_dirty(struct vmw_bo *vbo);
1369 void vmw_bo_dirty_scan(struct vmw_bo *vbo);
1370 int vmw_bo_dirty_add(struct vmw_bo *vbo);
1371 void vmw_bo_dirty_clear(struct vmw_bo *vbo);
1372 void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
1373 void vmw_bo_dirty_clear_res(struct vmw_resource *res);
1374 void vmw_bo_dirty_release(struct vmw_bo *vbo);
1375 void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
1376 			pgoff_t start, pgoff_t end);
1377 vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
1378 vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
1379 
1380 
1381 /**
1382  * VMW_DEBUG_KMS - Debug output for kernel mode-setting
1383  *
1384  * This macro is for debugging vmwgfx mode-setting code.
1385  */
1386 #define VMW_DEBUG_KMS(fmt, ...)                                               \
1387 	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
1388 
1389 /**
1390  * Inline helper functions
1391  */
1392 
1393 static inline void vmw_surface_unreference(struct vmw_surface **srf)
1394 {
1395 	struct vmw_surface *tmp_srf = *srf;
1396 	struct vmw_resource *res = &tmp_srf->res;
1397 	*srf = NULL;
1398 
1399 	vmw_resource_unreference(&res);
1400 }
1401 
1402 static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
1403 {
1404 	(void) vmw_resource_reference(&srf->res);
1405 	return srf;
1406 }
1407 
1408 static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
1409 {
1410 	atomic_inc(&dev_priv->num_fifo_resources);
1411 }
1412 
1413 static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
1414 {
1415 	atomic_dec(&dev_priv->num_fifo_resources);
1416 }
1417 
1418 /**
1419  * vmw_fifo_mem_read - Perform an MMIO read from the fifo memory
1420  *
1421  * @fifo_reg: The fifo register to read from
1422  *
1423  * This function is intended to be equivalent to ioread32() on
1424  * memremap'd memory, but without byteswapping.
1425  */
1426 static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
1427 {
1428 	BUG_ON(vmw_is_svga_v3(vmw));
1429 	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
1430 }
1431 
1432 /**
1433  * vmw_fifo_mem_write - Perform an MMIO write to the fifo memory
1434  *
1435  * @fifo_reg: The fifo register to write to
1436  *
1437  * This function is intended to be equivalent to iowrite32 on
1438  * memremap'd memory, but without byteswapping.
1439  */
1440 static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
1441 				      u32 value)
1442 {
1443 	BUG_ON(vmw_is_svga_v3(vmw));
1444 	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
1445 }
1446 
1447 static inline u32 vmw_fence_read(struct vmw_private *dev_priv)
1448 {
1449 	u32 fence;
1450 	if (vmw_is_svga_v3(dev_priv))
1451 		fence = vmw_read(dev_priv, SVGA_REG_FENCE);
1452 	else
1453 		fence = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
1454 	return fence;
1455 }
1456 
1457 static inline void vmw_fence_write(struct vmw_private *dev_priv,
1458 				  u32 fence)
1459 {
1460 	BUG_ON(vmw_is_svga_v3(dev_priv));
1461 	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, fence);
1462 }
1463 
1464 static inline u32 vmw_irq_status_read(struct vmw_private *vmw)
1465 {
1466 	u32 status;
1467 	if (vmw_is_svga_v3(vmw))
1468 		status = vmw_read(vmw, SVGA_REG_IRQ_STATUS);
1469 	else
1470 		status = inl(vmw->io_start + SVGA_IRQSTATUS_PORT);
1471 	return status;
1472 }
1473 
1474 static inline void vmw_irq_status_write(struct vmw_private *vmw,
1475 					uint32 status)
1476 {
1477 	if (vmw_is_svga_v3(vmw))
1478 		vmw_write(vmw, SVGA_REG_IRQ_STATUS, status);
1479 	else
1480 		outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
1481 }
1482 
1483 static inline bool vmw_has_fences(struct vmw_private *vmw)
1484 {
1485 	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
1486 				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
1487 		return true;
1488 	return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
1489 }
1490 
1491 static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
1492 					   u32 shader_type)
1493 {
1494 	SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
1495 
1496 	if (shader_model >= VMW_SM_5)
1497 		max_allowed = SVGA3D_SHADERTYPE_MAX;
1498 	else if (shader_model >= VMW_SM_4)
1499 		max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
1500 	return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
1501 }
1502 
1503 #endif
1504