/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_ALLOC_BO 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM 4
#define DRM_VMW_CLAIM_STREAM 5
#define DRM_VMW_UNREF_STREAM 6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT 7
#define DRM_VMW_UNREF_CONTEXT 8
#define DRM_VMW_CREATE_SURFACE 9
#define DRM_VMW_UNREF_SURFACE 10
#define DRM_VMW_REF_SURFACE 11
#define DRM_VMW_EXECBUF 12
#define DRM_VMW_GET_3D_CAP 13
#define DRM_VMW_FENCE_WAIT 14
#define DRM_VMW_FENCE_SIGNALED 15
#define DRM_VMW_FENCE_UNREF 16
#define DRM_VMW_FENCE_EVENT 17
#define DRM_VMW_PRESENT 18
#define DRM_VMW_PRESENT_READBACK 19
#define DRM_VMW_UPDATE_LAYOUT 20
#define DRM_VMW_CREATE_SHADER 21
#define DRM_VMW_UNREF_SHADER 22
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
#define DRM_VMW_MSG 29

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Does the driver support the overlay ioctl.
 *
 * DRM_VMW_PARAM_SM4_1:
 * SM4_1 support is enabled.
 *
 * DRM_VMW_PARAM_SM5:
 * SM5 support is enabled.
 */

#define DRM_VMW_PARAM_NUM_STREAMS 0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D 2
#define DRM_VMW_PARAM_HW_CAPS 3
#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_DX 12
#define DRM_VMW_PARAM_HW_CAPS2 13
#define DRM_VMW_PARAM_SM4_1 14
#define DRM_VMW_PARAM_SM5 15
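/*
 * Example (informative sketch, not part of the ABI): querying whether 3D is
 * enabled, assuming an open vmwgfx DRM file descriptor fd and libdrm's
 * drmCommandWriteRead() helper.
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	int has_3d = 0;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)) == 0)
 *		has_3d = (arg.value == 1);
 */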
/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 *
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file
 * descriptors) may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width: mip level width
 * @height: mip level height
 * @depth: mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
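/*
 * Example (informative sketch): creating a one-face, single-mip-level legacy
 * surface. The format value is assumed to be understood by the host; unused
 * faces keep their mip_levels entries at 0. drmCommandWriteRead() comes from
 * libdrm.
 *
 *	struct drm_vmw_size size = { .width = 640, .height = 480, .depth = 1 };
 *	union drm_vmw_surface_create_arg arg = { 0 };
 *
 *	arg.req.format = format;
 *	arg.req.mip_levels[0] = 1;
 *	arg.req.size_addr = (__u64)(unsigned long)&size;
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE,
 *				&arg, sizeof(arg)) == 0)
 *		sid = arg.rep.sid;
 */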
/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @imported_fence_fd: FD for a fence imported from another device.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__s32 imported_fence_fd;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported.
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__s32 fd;
	__s32 error;
};
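/*
 * Example (informative sketch): submitting a filled SVGA command buffer and
 * collecting the fence handle. cmds and cmd_size are assumed to describe the
 * command buffer; drmCommandWrite() comes from libdrm.
 *
 *	struct drm_vmw_fence_rep fence_rep = { 0 };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (__u64)(unsigned long)cmds,
 *		.command_size = cmd_size,
 *		.fence_rep = (__u64)(unsigned long)&fence_rep,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.imported_fence_fd = -1,
 *	};
 *
 *	if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) == 0 &&
 *	    fence_rep.error == 0)
 *		fence_handle = fence_rep.handle;
 */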
/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from the
 * interface before it is fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
	__u32 size;
	__u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
	struct drm_vmw_alloc_bo_req req;
	struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
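/*
 * Example (informative sketch): allocating a 64 KiB buffer object and
 * mapping it into user space, assuming libdrm's drmCommandWriteRead() and
 * POSIX mmap().
 *
 *	union drm_vmw_alloc_bo_arg arg = { .req = { .size = 65536 } };
 *	void *ptr = MAP_FAILED;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO, &arg, sizeof(arg)) == 0)
 *		ptr = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, arg.rep.map_handle);
 */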
/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control.
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};
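/*
 * Example (informative sketch): telling the driver that user-space handles
 * the cursor on all crtcs at the given position, using libdrm's
 * drmCommandWrite().
 *
 *	struct drm_vmw_cursor_bypass_arg arg = {
 *		.flags = DRM_VMW_CURSOR_BYPASS_ALL,
 *		.xpos = 100,
 *		.ypos = 100,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_CURSOR_BYPASS, &arg, sizeof(arg));
 */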
/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO.
 *
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64.
 * @max_size: Max size to copy.
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the wait. The wait may time out,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is, restarted without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32 cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};
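/*
 * Example (informative sketch): waiting up to one second for a fence to
 * signal EXEC and unreferencing it in the same call. The handle is assumed
 * to come from a struct drm_vmw_fence_rep; drmCommandWriteRead() comes from
 * libdrm.
 *
 *	struct drm_vmw_fence_wait_arg arg = {
 *		.handle = handle,
 *		.timeout_us = 1000000,
 *		.lazy = 1,
 *		.flags = DRM_VMW_FENCE_FLAG_EXEC,
 *		.wait_options = DRM_VMW_WAIT_OPTION_UNREF,
 *	};
 *
 *	int ret = drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT,
 *				      &arg, sizeof(arg));
 */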
/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to the DRM_VMW_FENCE_SIGNALED ioctl.
 * @signaled: Out: Nonzero if the fence object is signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED ioctl.
 */

struct drm_vmw_fence_signaled_arg {
	__u32 handle;
	__u32 flags;
	__s32 signaled;
	__u32 passed_seqno;
	__u32 signaled_flags;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 *
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	__u32 handle;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};
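/*
 * Example (informative sketch): queueing a fence event and reading it back
 * from the drm fd once delivered, using POSIX read(). handle is a fence
 * object handle; error handling is omitted and handle_signaled() is a
 * placeholder for application code.
 *
 *	struct drm_vmw_fence_event_arg arg = {
 *		.handle = handle,
 *		.user_data = my_cookie,
 *		.flags = DRM_VMW_FE_FLAG_REQ_TIME,
 *	};
 *	char buf[128];
 *	struct drm_vmw_event_fence *ev = (struct drm_vmw_event_fence *)buf;
 *
 *	drmCommandWriteRead(fd, DRM_VMW_FENCE_EVENT, &arg, sizeof(arg));
 *	read(fd, buf, sizeof(buf));
 *	if (ev->base.type == DRM_VMW_EVENT_FENCE_SIGNALED)
 *		handle_signaled(ev->user_data, ev->tv_sec, ev->tv_usec);
 */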
/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 *
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 *
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is 0, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
	__u32 fb_id;
	__u32 num_clips;
	__u64 clips_ptr;
	__u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code.
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};
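/*
 * Example (informative sketch): creating a vertex shader from byte-code
 * previously copied into a buffer object. bo_handle and bytecode_size are
 * assumed to come from earlier DRM_VMW_ALLOC_BO / synccpu steps.
 *
 *	struct drm_vmw_shader_create_arg arg = {
 *		.shader_type = drm_vmw_shader_type_vs,
 *		.size = bytecode_size,
 *		.buffer_handle = bo_handle,
 *		.offset = 0,
 *	};
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER,
 *				&arg, sizeof(arg)) == 0)
 *		shader_handle = arg.shader_handle;
 */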
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable.
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2),
	drm_vmw_surface_flag_coherent = (1 << 3),
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 * @array_size: Must be zero for non-DX hardware, and if non-zero
 * svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;
	__u32 format;
	__u32 mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	__u32 multisample_count;
	__u32 autogen_filter;
	__u32 buffer_handle;
	__u32 array_size;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle.
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;
	__u32 backup_size;
	__u32 buffer_handle;
	__u32 buffer_size;
	__u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
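/*
 * Example (informative sketch): creating a guest-backed surface and asking
 * the kernel to allocate the backup buffer. svga3d_flags and format are
 * device-specific values assumed known to the caller; SVGA3D_INVALID_ID
 * comes from the SVGA device headers.
 *
 *	union drm_vmw_gb_surface_create_arg arg = { 0 };
 *
 *	arg.req.svga3d_flags = svga3d_flags;
 *	arg.req.format = format;
 *	arg.req.mip_levels = 1;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *	arg.req.buffer_handle = SVGA3D_INVALID_ID;
 *	arg.req.base_size.width = 256;
 *	arg.req.base_size.height = 256;
 *	arg.req.base_size.depth = 1;
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
 *				&arg, sizeof(arg)) == 0)
 *		handle = arg.rep.handle;
 */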
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations.
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	__u32 handle;
	__u32 pad64;
};
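/*
 * Example (informative sketch): grabbing a buffer for CPU writes around a
 * fill of its user-space mapping, then releasing the sync. bo_handle
 * identifies a buffer created with DRM_VMW_ALLOC_BO.
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.op = drm_vmw_synccpu_grab,
 *		.flags = drm_vmw_synccpu_write,
 *		.handle = bo_handle,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	(CPU writes through the mmap()ed pointer go here)
 *	arg.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */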
/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy,
	drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;
	struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
	__u32 handle;
	__u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 *
 * This command extends DRM_VMW_GB_SURFACE_CREATE by adding a version
 * parameter and 64-bit svga3d flags.
 */

/**
 * enum drm_vmw_surface_version
 *
 * @drm_vmw_gb_surface_v1: Corresponds to the current gb surface format with
 * svga3d surface flags split into 2, upper half and lower half.
 */
enum drm_vmw_surface_version {
	drm_vmw_gb_surface_v1,
};

/**
 * struct drm_vmw_gb_surface_create_ext_req
 *
 * @base: Surface create parameters.
 * @version: Version of surface create ioctl.
 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
 * @multisample_pattern: Multisampling pattern when msaa is supported.
 * @quality_level: Precision settings for each sample.
 * @buffer_byte_stride: Buffer byte stride.
 * @must_be_zero: Reserved for future usage.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
struct drm_vmw_gb_surface_create_ext_req {
	struct drm_vmw_gb_surface_create_req base;
	enum drm_vmw_surface_version version;
	__u32 svga3d_flags_upper_32_bits;
	__u32 multisample_pattern;
	__u32 quality_level;
	__u32 buffer_byte_stride;
	__u32 must_be_zero;
};

/**
 * union drm_vmw_gb_surface_create_ext_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */
union drm_vmw_gb_surface_create_ext_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_ext_req req;
};
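/*
 * Example (informative sketch): the extended create takes the legacy
 * parameters in @base and splits the 64-bit svga3d flags into the two
 * 32-bit halves. base_req is assumed to be a filled
 * struct drm_vmw_gb_surface_create_req.
 *
 *	union drm_vmw_gb_surface_create_ext_arg arg = { 0 };
 *
 *	arg.req.base = base_req;
 *	arg.req.version = drm_vmw_gb_surface_v1;
 *	arg.req.svga3d_flags_upper_32_bits = (__u32)(svga3d_flags_64 >> 32);
 *	arg.req.base.svga3d_flags = (__u32)svga3d_flags_64;
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE_EXT,
 *				&arg, sizeof(arg)) == 0)
 *		handle = arg.rep.handle;
 */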
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_ext_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_ext_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
 */
struct drm_vmw_gb_surface_ref_ext_rep {
	struct drm_vmw_gb_surface_create_ext_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_ext_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at
 * "struct drm_vmw_gb_surface_ref_ext_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
union drm_vmw_gb_surface_reference_ext_arg {
	struct drm_vmw_gb_surface_ref_ext_rep rep;
	struct drm_vmw_surface_arg req;
};

/**
 * struct drm_vmw_msg_arg
 *
 * @send: Pointer to user-space msg string (null terminated).
 * @receive: Pointer to user-space receive buffer.
 * @send_only: Boolean whether this is only sending or also receiving.
 * @receive_len: Number of bytes copied into the receive buffer.
 *
 * Argument to the DRM_VMW_MSG ioctl.
 */
struct drm_vmw_msg_arg {
	__u64 send;
	__u64 receive;
	__s32 send_only;
	__u32 receive_len;
};

#if defined(__cplusplus)
}
#endif

#endif