/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include <linux/suspend.h>
#include <linux/sync_file.h>
#include <linux/hashtable.h>

#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_object.h"

#include "vmwgfx_fence.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"

/*
 * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
 * uapi headers should not depend on header files outside uapi/.
 */
#include <drm/vmwgfx_drm.h>


#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 21
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_NUM_DISPLAY_UNITS 8
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768

#define VMWGFX_MIN_INITIAL_WIDTH 1280
#define VMWGFX_MIN_INITIAL_HEIGHT 800

#define VMWGFX_PCI_ID_SVGA2 0x0405
#define VMWGFX_PCI_ID_SVGA3 0x0406

/*
 * This has to match get_count_order(SVGA_IRQFLAG_MAX)
 */
#define VMWGFX_MAX_NUM_IRQS 6

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
#define VMW_RES_HT_ORDER 12

#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)

struct vmw_fpriv {
	struct ttm_object_file *tfile;
	bool gb_aware; /* user-space is guest-backed aware */
};

struct vmwgfx_hash_item {
	struct hlist_node head;
	unsigned long key;
};

struct vmw_res_func;

struct vmw_bo;
struct vmw_resource_dirty;
/**
 * struct vmw_resource - base class for hardware resources
 *
 * @kref: For refcounting.
 * @dev_priv: Pointer to the device private for this resource. Immutable.
 * @id: Device id. Protected by @dev_priv::resource_lock.
 * @used_prio: Priority for this resource.
 * @guest_memory_size: Guest memory buffer size. Immutable.
 * @res_dirty: Resource contains data not yet in the guest memory buffer.
 * Protected by resource reserved.
 * @guest_memory_dirty: Guest memory buffer contains data not yet in the HW
 * resource. Protected by resource reserved.
 * @coherent: Emulate coherency by tracking vm accesses.
 * @guest_memory_bo: The guest memory buffer if any. Protected by resource
 * reserved.
 * @guest_memory_offset: Offset into the guest memory buffer if any. Protected
 * by resource reserved. Note that only a few resource types can have a
 * @guest_memory_offset different from zero.
 * @pin_count: The pin count for this resource. A pinned resource has a
 * pin-count greater than zero. It is not on the resource LRU lists and its
 * guest memory buffer is pinned. Hence it can't be evicted.
 * @func: Method vtable for this resource. Immutable.
 * @mob_node: Node for the MOB guest memory rbtree. Protected by
 * @guest_memory_bo reserved.
 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
 * @binding_head: List head for the context binding list. Protected by
 * the @dev_priv::binding_mutex
 * @dirty: resource's dirty tracker
 * @res_free: The resource destructor.
 * @hw_destroy: Callback to destroy the resource on the device, as part of
 * resource destruction.
 */
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	u32 used_prio;
	unsigned long guest_memory_size;
	u32 res_dirty : 1;
	u32 guest_memory_dirty : 1;
	u32 coherent : 1;
	struct vmw_bo *guest_memory_bo;
	unsigned long guest_memory_offset;
	unsigned long pin_count;
	const struct vmw_res_func *func;
	struct rb_node mob_node;
	struct list_head lru_head;
	struct list_head binding_head;
	struct vmw_resource_dirty *dirty;
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_streamoutput,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view,
	vmw_cmdbuf_res_streamoutput
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	size_t id;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

/**
 * struct vmw_surface_metadata - Metadata describing a surface.
 *
 * @flags: Device flags.
 * @format: Surface SVGA3D_x format.
 * @mip_levels: Mip level for each face. For GB only the first index is used.
 * @multisample_count: Sample count.
 * @multisample_pattern: Sample patterns.
 * @quality_level: Quality level.
 * @autogen_filter: Filter for automatically generated mipmaps.
 * @array_size: Number of array elements for a 1D/2D texture. For a cubemap
 *              texture, number of faces * array_size. This should be 0 for a
 *              pre-SM4 device.
 * @num_sizes: Size of @sizes. For a GB surface this should always be 1.
 * @buffer_byte_stride: Buffer byte stride.
 * @base_size: Surface dimension.
 * @sizes: Array representing mip sizes. Legacy only.
 * @scanout: Whether this surface will be used for scanout.
 *
 * This tracks metadata for both legacy and guest backed surfaces.
 */
struct vmw_surface_metadata {
	u64 flags;
	u32 format;
	u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	u32 multisample_count;
	u32 multisample_pattern;
	u32 quality_level;
	u32 autogen_filter;
	u32 array_size;
	u32 num_sizes;
	u32 buffer_byte_stride;
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	bool scanout;
};

/**
 * struct vmw_surface: Resource structure for a surface.
 *
 * @res: The base resource for this surface.
 * @metadata: Metadata for this surface resource.
 * @snooper: Cursor data. Legacy surface only.
 * @offsets: Legacy surface only.
 * @view_list: List of views bound to this surface.
 */
struct vmw_surface {
	struct vmw_resource res;
	struct vmw_surface_metadata metadata;
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	struct list_head view_list;
};
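/*
 * Example (illustrative sketch, not part of the driver): filling in
 * struct vmw_surface_metadata for a simple 2D guest-backed surface before
 * handing it to vmw_gb_surface_define(), declared later in this header.
 * Field choices follow the constraints documented above (@num_sizes == 1
 * and only the first @mip_levels entry used for GB surfaces); the format
 * and dimension values are assumptions.
 *
 *	struct vmw_surface_metadata metadata = {
 *		.format = SVGA3D_X8R8G8B8,
 *		.mip_levels[0] = 1,
 *		.num_sizes = 1,
 *		.base_size = { .width = 1024, .height = 768, .depth = 1 },
 *		.scanout = true,
 *	};
 *	struct vmw_surface *srf;
 *
 *	int ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
 */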
struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @valid_handle: Whether the @handle member is valid.
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	uint32_t handle;
	struct vmw_resource *res;
	/* private: */
	void *private;
	/* public: */
	unsigned short valid_handle;
	unsigned short valid;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 * @vmw_dma_alloc_coherent: Use TTM coherent pages
 * @vmw_dma_map_populate: Unmap from DMA just after unpopulate
 * @vmw_dma_map_bind: Unmap from DMA just before unbind
 */
enum vmw_dma_map_mode {
	vmw_dma_alloc_coherent,
	vmw_dma_map_populate,
	vmw_dma_map_bind,
	/* private: */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: which page mapping mode to use
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_pages: Number of @pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_pages;
};
/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_dma_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
};


struct vmw_ttm_tt {
	struct ttm_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	bool mapped;
	bool bound;
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target,
	vmw_du_max
};

struct vmw_validation_context;
struct vmw_ctx_validation_info;

/**
 * struct vmw_sw_context - Command submission context
 * @res_ht: Pointer hash table used to find validation duplicates
 * @kernel: Whether the command buffer originates from kernel code rather
 * than from user-space
 * @fp: If @kernel is false, points to the file of the client. Otherwise
 * NULL
 * @filp: DRM state for this file
 * @cmd_bounce: Command bounce buffer used for command validation before
 * copying to fifo space
 * @cmd_bounce_size: Current command bounce buffer size
 * @cur_query_bo: Current buffer object used as query result buffer
 * @bo_relocations: List of buffer object relocations
 * @res_relocations: List of resource relocations
 * @buf_start: Pointer to start of memory where command validation takes
 * place
 * @res_cache: Cache of recently looked up resources
 * @last_query_ctx: Last context that submitted a query
 * @needs_post_query_barrier: Whether a query barrier is needed after
 * command submission
 * @staged_bindings: Cached per-context binding tracker
 * @staged_bindings_inuse: Whether the cached per-context binding tracker
 * is in use
 * @staged_cmd_res: List of staged command buffer managed resources in this
 * command buffer
 * @ctx_list: List of context resources referenced in this command buffer
 * @dx_ctx_node: Validation metadata of the current DX context
 * @dx_query_mob: The MOB used for DX queries
 * @dx_query_ctx: The DX context used for the last DX query
 * @man: Pointer to the command buffer managed resource manager
 * @ctx: The validation context
 */
struct vmw_sw_context {
	DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER);
	bool kernel;
	struct vmw_fpriv *fp;
	struct drm_file *filp;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct vmw_bo *cur_query_bo;
	struct list_head bo_relocations;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct list_head ctx_list;
	struct vmw_ctx_validation_info *dx_ctx_node;
	struct vmw_bo *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_validation_context *ctx;
};

struct vmw_legacy_display;
struct vmw_overlay;
/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 * @enabled: Whether the otable is enabled on the device.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct vmw_bo *otable_bo;
};

enum {
	VMW_IRQTHREAD_FENCE,
	VMW_IRQTHREAD_CMDBUF,
	VMW_IRQTHREAD_MAX
};

/**
 * enum vmw_sm_type - Graphics context capability supported by device.
 * @VMW_SM_LEGACY: Pre DX context.
 * @VMW_SM_4: Context support up to SM4.
 * @VMW_SM_4_1: Context support up to SM4_1.
 * @VMW_SM_5: Context support up to SM5.
 * @VMW_SM_5_1X: Adds support for sm5_1 and gl43 extensions.
 * @VMW_SM_MAX: Should be the last.
 */
enum vmw_sm_type {
	VMW_SM_LEGACY = 0,
	VMW_SM_4,
	VMW_SM_4_1,
	VMW_SM_5,
	VMW_SM_5_1X,
	VMW_SM_MAX
};

struct vmw_private {
	struct drm_device drm;
	struct ttm_device bdev;

	u32 pci_id;
	resource_size_t io_start;
	resource_size_t vram_start;
	resource_size_t vram_size;
	resource_size_t max_primary_mem;
	u32 __iomem *rmmio;
	u32 *fifo_mem;
	resource_size_t fifo_mem_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	uint32_t capabilities;
	uint32_t capabilities2;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	bool assume_16bpp;
	u32 irqs[VMWGFX_MAX_NUM_IRQS];
	u32 num_irq_vectors;

	enum vmw_sm_type sm_type;

	/*
	 * Framebuffer info.
	 */

	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	spinlock_t cursor_lock;
	struct drm_atomic_state *suspend_state;

	/*
	 * Context and surface management.
	 */

	spinlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	atomic_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/*
	 * Execbuf. Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/*
	 * PM management.
	 */
	struct notifier_block pm_nb;
	bool refuse_hibernation;
	bool suspend_locked;

	atomic_t num_fifo_resources;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */
	struct vmw_bo *dummy_query_bo;
	struct vmw_bo *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_fifo_state *fifo;
	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);

	uint32 *devcaps;

	bool vkms_enabled;
	struct workqueue_struct *crc_workq;

	/*
	 * mksGuestStat instance-descriptor and pid arrays
	 */
	struct page *mksstat_user_pages[MKSSTAT_CAPACITY];
	atomic_t mksstat_user_pids[MKSSTAT_CAPACITY];

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
	struct page *mksstat_kern_pages[MKSSTAT_CAPACITY];
	u8 mksstat_kern_top_timer[MKSSTAT_CAPACITY];
	atomic_t mksstat_kern_pids[MKSSTAT_CAPACITY];
#endif
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return container_of(dev, struct vmw_private, drm);
}

static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
{
	return container_of(bdev, struct vmw_private, bdev);
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

/*
 * SVGA v3 has mmio register access and lacks fifo cmds
 */
static inline bool vmw_is_svga_v3(const struct vmw_private *dev)
{
	return dev->pci_id == VMWGFX_PCI_ID_SVGA3;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing-critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	if (vmw_is_svga_v3(dev_priv)) {
		iowrite32(value, dev_priv->rmmio + offset);
	} else {
		spin_lock(&dev_priv->hw_lock);
		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
		outl(value, dev_priv->io_start + SVGA_VALUE_PORT);
		spin_unlock(&dev_priv->hw_lock);
	}
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	u32 val;

	if (vmw_is_svga_v3(dev_priv)) {
		val = ioread32(dev_priv->rmmio + offset);
	} else {
		spin_lock(&dev_priv->hw_lock);
		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
		val = inl(dev_priv->io_start + SVGA_VALUE_PORT);
		spin_unlock(&dev_priv->hw_lock);
	}

	return val;
}
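/*
 * Example (illustrative sketch, not driver code): a read-modify-write of an
 * SVGA register using the helpers above. Each call takes and drops the hw
 * lock by itself on SVGA v2, so a sequence like this is not atomic as a
 * whole; the particular register is just an example.
 *
 *	u32 enable = vmw_read(dev_priv, SVGA_REG_ENABLE);
 *
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, enable | SVGA_REG_ENABLE_ENABLE);
 */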
/**
 * has_sm4_context - Does the device support SM4 context.
 * @dev_priv: Device private.
 *
 * Return: True if the device supports SM4 context, false otherwise.
 */
static inline bool has_sm4_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4);
}

/**
 * has_sm4_1_context - Does the device support SM4_1 context.
 * @dev_priv: Device private.
 *
 * Return: True if the device supports SM4_1 context, false otherwise.
 */
static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4_1);
}

/**
 * has_sm5_context - Does the device support SM5 context.
 * @dev_priv: Device private.
 *
 * Return: True if the device supports SM5 context, false otherwise.
 */
static inline bool has_sm5_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_5);
}

/**
 * has_gl43_context - Does the device support GL43 context.
 * @dev_priv: Device private.
 *
 * Return: True if the device supports GL43 context, false otherwise.
 */
static inline bool has_gl43_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_5_1X);
}


static inline u32 vmw_max_num_uavs(struct vmw_private *dev_priv)
{
	return (has_gl43_context(dev_priv) ?
			SVGA3D_DX11_1_MAX_UAVIEWS : SVGA3D_MAX_UAVIEWS);
}

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);
bool vmwgfx_supported(struct vmw_private *vmw);


/*
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/*
 * User handles
 */
struct vmw_user_object {
	struct vmw_surface *surface;
	struct vmw_bo *buffer;
};

int vmw_user_object_lookup(struct vmw_private *dev_priv, struct drm_file *filp,
			   u32 handle, struct vmw_user_object *uo);
struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo);
void vmw_user_object_unref(struct vmw_user_object *uo);
bool vmw_user_object_is_null(struct vmw_user_object *uo);
struct vmw_surface *vmw_user_object_surface(struct vmw_user_object *uo);
struct vmw_bo *vmw_user_object_buffer(struct vmw_user_object *uo);
void *vmw_user_object_map(struct vmw_user_object *uo);
void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size);
void vmw_user_object_unmap(struct vmw_user_object *uo);
bool vmw_user_object_is_mapped(struct vmw_user_object *uo);

/*
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
				 bool dirtying);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
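/*
 * Example (illustrative sketch, not driver code): the typical lifetime
 * pattern for the resource utilities above. A looked-up resource comes
 * back referenced and must be unreferenced by the caller; the converter
 * and error handling used here are assumptions.
 *
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (ret)
 *		return ret;
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (!ret) {
 *		ret = vmw_resource_validate(res, true, false);
 *		vmw_resource_unreserve(res, false, false, false, NULL, 0);
 *	}
 *	vmw_resource_unreference(&res);
 */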
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool dirty_set,
				   bool dirty,
				   bool switch_guest_memory,
				   struct vmw_bo *new_guest_memory,
				   unsigned long new_guest_memory_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_resource *old_mem,
				  struct ttm_resource *new_mem);
int vmw_query_readback_all(struct vmw_bo *dx_query_mob);
void vmw_resource_evict_all(struct vmw_private *dev_priv);
void vmw_resource_unbind_list(struct vmw_bo *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end);
int vmw_resource_clean(struct vmw_resource *res);
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault);

/**
 * vmw_resource_mob_attached - Whether a resource currently has a mob attached
 * @res: The resource
 *
 * Return: true if the resource has a mob attached, false otherwise.
 */
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
	return !RB_EMPTY_NODE(&res->mob_node);
}

/*
 * GEM related functionality - vmwgfx_gem.c
 */
struct vmw_bo_params;
extern const struct drm_gem_object_funcs vmw_gem_object_funcs;
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
					     struct drm_file *filp,
					     uint32_t size,
					     uint32_t *handle,
					     struct vmw_bo **p_vbo);
extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *filp);
extern void vmw_debugfs_gem_init(struct vmw_private *vdev);

/*
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/*
 * Fifo utilities - vmwgfx_fifo.c
 */

extern struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv);
extern void vmw_fifo_destroy(struct vmw_private *dev_priv);
extern bool vmw_cmd_supported(struct vmw_private *vmw);
extern void *
vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
extern bool vmw_supports_3d(struct vmw_private *dev_priv);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
				    uint32_t cid);
extern int vmw_cmd_flush(struct vmw_private *dev_priv,
			 bool interruptible);

#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id)			\
({									\
	vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({		\
		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",	\
			  __func__, (unsigned int) __bytes);		\
		NULL;							\
	});								\
})

#define VMW_CMD_RESERVE(__priv, __bytes)				\
	VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
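/*
 * Example (illustrative sketch, not driver code): the usual
 * reserve/fill/commit pattern for device commands. The command id and
 * body type are placeholders; VMW_CMD_RESERVE() already logs on failure,
 * so the caller only needs to bail out.
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdUpdateGBSurface body;
 *	} *cmd;
 *
 *	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	cmd->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
 *	cmd->header.size = sizeof(cmd->body);
 *	cmd->body.sid = res->id;
 *	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 */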
/**
 * vmw_fifo_caps - Get the capabilities of the FIFO command
 * queue or 0 if fifo memory isn't present.
 * @dev_priv: The device private context
 *
 * Returns: capabilities of the FIFO command or %0 if fifo memory not present
 */
static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
{
	if (!dev_priv->fifo_mem || !dev_priv->fifo)
		return 0;
	return dev_priv->fifo->capabilities;
}


/**
 * vmw_is_cursor_bypass3_enabled - check Cursor Bypass 3 enabled setting
 * in the FIFO.
 * @dev_priv: The device private context
 *
 * Returns: %true iff Cursor Bypass 3 is enabled in the FIFO
 */
static inline bool
vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
{
	return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
}

/*
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       size_t bo_size,
			       u32 domain,
			       struct vmw_bo **bo_p);

extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns: false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns: the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns: a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}
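/*
 * Example (illustrative sketch, not driver code): walking all pages of a
 * bound buffer object. vmw_piter_start() positions the iterator before the
 * first page, so vmw_piter_next() must be called before each access,
 * including the first one; this mirrors how the MOB and GMR binding code
 * consumes page tables. "bo" is a placeholder ttm_buffer_object.
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *		// hand addr to the device page table here
 *	}
 */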
/*
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence,
			       uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
				       struct vmw_fpriv *vmw_fp,
				       int ret,
				       struct drm_vmw_fence_rep __user
				       *user_fence_rep,
				       struct vmw_fence_obj *fence,
				       uint32_t fence_handle,
				       int32_t out_fence_fd);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);

/*
 * IRQs and waiting - vmwgfx_irq.c
 */

extern int vmw_irq_install(struct vmw_private *dev_priv);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
bool vmw_seqno_waiter_add(struct vmw_private *dev_priv);
bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
bool vmw_goal_waiter_add(struct vmw_private *dev_priv);
bool vmw_goal_waiter_remove(struct vmw_private *dev_priv);
bool vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
			    int *waiter_count);
bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count);

/*
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);

extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
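/*
 * Example (illustrative sketch, not driver code): pinning a resource
 * across an operation that must not see it evicted, per the @pin_count
 * semantics documented for struct vmw_resource above.
 *
 *	int ret = vmw_resource_pin(res, true);
 *
 *	if (ret)
 *		return ret;
 *	// the resource and its guest memory buffer stay resident here
 *	vmw_resource_unpin(res);
 */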
/*
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/*
 * GMR Id manager
 */

int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);

/*
 * System memory manager
 */
int vmw_sys_man_init(struct vmw_private *dev_priv);
void vmw_sys_man_fini(struct vmw_private *dev_priv);

/*
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);
struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *table);

/*
 * MemoryOBject management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_bo *mob);
extern struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
					   void *data,
					   struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
					      void *data,
					      struct drm_file *file_priv);

int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out);
struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
						  struct vmw_bo *bo,
						  u32 handle);
u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
					 struct vmw_bo *bo,
					 u32 handle);
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Streamoutput management
 */
struct vmw_resource *
vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
			   u32 user_key);
int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
			    struct vmw_resource *ctx,
			    SVGA3dStreamOutputId user_key,
			    struct list_head *list);
void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size);
int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
			       SVGA3dStreamOutputId user_key,
			       struct list_head *list);
void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
					    struct list_head *list,
					    bool readback);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);
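/*
 * Example (illustrative sketch, not driver code): command buffer managed
 * resources are staged on a local list and only become visible once the
 * command buffer is known to have been submitted successfully; on error
 * the additions are reverted instead. The submission check is a
 * placeholder.
 *
 *	LIST_HEAD(staged);
 *	int ret;
 *
 *	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, user_key,
 *				 res, &staged);
 *	if (ret)
 *		return ret;
 *	if (submit_succeeded)
 *		vmw_cmdbuf_res_commit(&staged);
 *	else
 *		vmw_cmdbuf_res_revert(&staged);
 */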
/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);

/* CPU blit utilities - vmwgfx_blit.c */

/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: The output bounding box rectangle.
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
 * @do_cpy: Which memcpy function to use.
 */
struct vmw_diff_cpy {
	struct drm_rect rect;
	size_t line;
	size_t line_offset;
	int cpp;
	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		       size_t n);
};

#define VMW_CPU_BLIT_INITIALIZER {	\
	.do_cpy = vmw_memcpy,		\
}

#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {	\
	.line = 0,				\
	.line_offset = 0,			\
	.rect = { .x1 = INT_MAX/2,		\
		  .y1 = INT_MAX/2,		\
		  .x2 = INT_MIN/2,		\
		  .y2 = INT_MIN/2 },		\
	.cpp = _cpp,				\
	.do_cpy = vmw_diff_memcpy,		\
}

void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n);

void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

int vmw_bo_cpu_blit(struct vmw_bo *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct vmw_bo *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);
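/*
 * Example (illustrative sketch, not driver code): a difference-tracking CPU
 * blit. The diff initializer starts with an inverted bounding box, and
 * vmw_diff_memcpy() shrink-wraps @rect around the bytes that actually
 * changed, so the caller can restrict the device update afterwards. The
 * buffer objects and strides are placeholders.
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *	int ret;
 *
 *	ret = vmw_bo_cpu_blit(dst_bo, 0, dst_stride,
 *			      src_bo, 0, src_stride,
 *			      width, height, &diff);
 *	if (!ret && drm_rect_visible(&diff.rect)) {
 *		// flush only diff.rect to the device
 *	}
 */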
/* Host messaging - vmwgfx_msg.c: */
void vmw_disable_backdoor(void);
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
__printf(1, 2) int vmw_host_printf(const char *fmt, ...);
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv);

/* Host mksGuestStats - vmwgfx_msg.c: */
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv);

int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vmw_mksstat_remove_all(struct vmw_private *dev_priv);

/* VMW logging */

/**
 * VMW_DEBUG_USER - Debug output for user-space debugging.
 *
 * @fmt: printf() like format string.
 *
 * This macro is for logging user-space error and debugging messages, e.g.
 * command buffer execution errors due to malformed commands, invalid
 * contexts, etc.
 */
#define VMW_DEBUG_USER(fmt, ...)					\
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

/* Resource dirtying - vmwgfx_page_dirty.c */
bool vmw_bo_is_dirty(struct vmw_bo *vbo);
void vmw_bo_dirty_scan(struct vmw_bo *vbo);
int vmw_bo_dirty_add(struct vmw_bo *vbo);
void vmw_bo_dirty_clear(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_bo *vbo);
void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
			pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);


/**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
 * @fmt: format string for the args
 *
 * This macro is for debugging vmwgfx mode-setting code.
 */
#define VMW_DEBUG_KMS(fmt, ...)						\
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/*
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_fifo_mem_read - Perform an MMIO read from the fifo memory
 * @vmw: The device private structure
 * @fifo_reg: The fifo register to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 *
 * Returns: the value read
 */
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
	BUG_ON(vmw_is_svga_v3(vmw));
	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}

/**
 * vmw_fifo_mem_write - Perform an MMIO write to volatile memory
 * @vmw: The device private structure
 * @fifo_reg: The fifo register to write to
 * @value: The value to write
 *
 * This function is intended to be equivalent to iowrite32() on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
				      u32 value)
{
	BUG_ON(vmw_is_svga_v3(vmw));
	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}

static inline u32 vmw_fence_read(struct vmw_private *dev_priv)
{
	u32 fence;

	if (vmw_is_svga_v3(dev_priv))
		fence = vmw_read(dev_priv, SVGA_REG_FENCE);
	else
		fence = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
	return fence;
}

static inline void vmw_fence_write(struct vmw_private *dev_priv,
				   u32 fence)
{
	BUG_ON(vmw_is_svga_v3(dev_priv));
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, fence);
}

static inline u32 vmw_irq_status_read(struct vmw_private *vmw)
{
	u32 status;

	if (vmw_is_svga_v3(vmw))
		status = vmw_read(vmw, SVGA_REG_IRQ_STATUS);
	else
		status = inl(vmw->io_start + SVGA_IRQSTATUS_PORT);
	return status;
}

static inline void vmw_irq_status_write(struct vmw_private *vmw,
					uint32 status)
{
	if (vmw_is_svga_v3(vmw))
		vmw_write(vmw, SVGA_REG_IRQ_STATUS, status);
	else
		outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
}

static inline bool vmw_has_fences(struct vmw_private *vmw)
{
	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
		return true;
	return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
}

static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
					   u32 shader_type)
{
	SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;

	if (shader_model >= VMW_SM_5)
		max_allowed = SVGA3D_SHADERTYPE_MAX;
	else if (shader_model >= VMW_SM_4)
		max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
	return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
}
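/*
 * Example (illustrative sketch, not driver code): validating the shader
 * type carried in a submitted command against the device's shader model
 * before acting on it, in the style of the command verifier; "cmd" is a
 * placeholder command structure.
 *
 *	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
 *		VMW_DEBUG_USER("Illegal shader type %u.\n",
 *			       (unsigned int) cmd->body.type);
 *		return -EINVAL;
 *	}
 */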
#endif