/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20120209"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768

/* Map the driver's GMR memory type onto TTM's first private placement. */
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0

/* TTM user-object types used for vmwgfx resources. */
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3

/*
 * Per-open-file driver-private data, reached through
 * drm_file::driver_priv (see vmw_fpriv() below).
 */
struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
};

/*
 * A TTM buffer object plus a list of the resources currently
 * backed by it (res_list links vmw_resource::mob_head entries).
 */
struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure contains also driver private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
};

struct vmw_res_func;

/*
 * Base class for refcounted device resources (contexts, surfaces,
 * streams). Type-specific behavior is supplied through @func and the
 * @res_free / @hw_destroy callbacks.
 */
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};

/*
 * Resource types; also used to index per-type arrays such as
 * vmw_private::res_idr and vmw_private::res_lru.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_max
};

/* State used to snoop cursor image updates on a CRTC. */
struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

/* A 3D surface resource; embeds struct vmw_resource as its base. */
struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
};

/*
 * Queue of throttling markers (see vmwgfx_marker.c declarations below);
 * @lag / @lag_time track observed command-stream lag.
 */
struct vmw_marker_queue {
	struct list_head head;
	struct timespec lag;
	struct timespec lag_time;
	spinlock_t lock;
};

/* Software state for the device command FIFO. */
struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
};

/*
 * A guest-pointer relocation recorded during command-stream parsing;
 * @index identifies the associated validation-list entry.
 */
struct vmw_relocation {
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @node: Validation-list node for the resource. (NOTE(review): not
 * documented in the original; semantics live in the execbuf code —
 * confirm there.)
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/* Per-submission software context used by the execbuf code. */
struct vmw_sw_context{
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< whether the call was made from inside the kernel */
	struct ttm_object_file *tfile;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	uint32_t fence_flags;
	struct ttm_buffer_object *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
};

struct vmw_legacy_display;
struct vmw_overlay;

/* Per-master state, reached through drm_master::driver_priv. */
struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;
	struct list_head fb_surf;
};

/* Saved per-display VGA topology, used for VGA save/restore. */
struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

/*
 * Main per-device private structure, reached through
 * drm_device::dev_private (see vmw_priv() below).
 */
struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_descriptors;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t memory_size;
	bool has_gmr;
	struct mutex hw_mutex;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	int fence_queue_waiters; /* Protected by hw_mutex */
	int goal_queue_waiters; /* Protected by hw_mutex */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool is_opened;
	bool enable_fb;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;

	struct mutex release_mutex;
	uint32_t num_3d_resources;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct ttm_buffer_object *dummy_query_bo;
	struct ttm_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid; /* used as a flag; TODO confirm why not bool */
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;
};

/* Downcast an embedded resource back to its containing surface. */
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

/* Fetch the device-private structure from a drm_device. */
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

/* Fetch the per-open-file private structure from a drm_file. */
static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

/* Fetch the per-master private structure from a drm_master. */
static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * Write a device register: select it via the index port, then write the
 * value port. NOTE(review): no locking here — callers presumably hold
 * hw_mutex; confirm at call sites.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}

/* Read a device register via the same index/value port pair. */
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	uint32_t val;

	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	return val;
}

int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);

/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			struct page *pages[],
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;
extern const struct vmw_user_resource_conv *user_surface_converter;
extern const struct vmw_user_resource_conv *user_context_converter;

extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
/* NOTE(review): "interuptable" is a misspelling of "interruptible" kept
 * here to match the function definition elsewhere in the driver. */
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interuptable,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
				   struct vmw_dma_buffer *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
			      struct vmw_dma_buffer *buf,
			      bool pin, bool interruptible);
extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     bool pin, bool interruptible);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       bool pin, bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
int vmw_enable_vblank(struct drm_device *dev, int crtc);
void vmw_disable_vblank(struct drm_device *dev, int crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *clips,
		     uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Inline helper functions
 */

/*
 * Drop a surface reference and clear the caller's pointer.
 * NOTE: *srf must be non-NULL — it is dereferenced unconditionally.
 */
static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

/* Take a surface reference via the embedded resource; returns @srf. */
static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

/*
 * Drop a dma-buffer reference and clear the caller's pointer.
 * Safe to call with *buf == NULL (no-op apart from the clear).
 */
static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

/*
 * Take a dma-buffer reference via the embedded TTM buffer object.
 * Returns @buf on success, NULL if ttm_bo_reference() returned NULL.
 */
static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

/* Fetch the TTM global memory accounting object for this device. */
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
#endif