/*
 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved.
 * The term “Broadcom” refers to Broadcom Inc.
 * and/or its subsidiaries.
 * SPDX-License-Identifier: MIT
 */

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES      6
#define DRM_VMW_MAX_MIP_LEVELS         24


#define DRM_VMW_GET_PARAM              0
#define DRM_VMW_ALLOC_DMABUF           1
#define DRM_VMW_ALLOC_BO               1
#define DRM_VMW_UNREF_DMABUF           2
#define DRM_VMW_HANDLE_CLOSE           2
#define DRM_VMW_CURSOR_BYPASS          3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM         4
#define DRM_VMW_CLAIM_STREAM           5
#define DRM_VMW_UNREF_STREAM           6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT         7
#define DRM_VMW_UNREF_CONTEXT          8
#define DRM_VMW_CREATE_SURFACE         9
#define DRM_VMW_UNREF_SURFACE          10
#define DRM_VMW_REF_SURFACE            11
#define DRM_VMW_EXECBUF                12
#define DRM_VMW_GET_3D_CAP             13
#define DRM_VMW_FENCE_WAIT             14
#define DRM_VMW_FENCE_SIGNALED         15
#define DRM_VMW_FENCE_UNREF            16
#define DRM_VMW_FENCE_EVENT            17
#define DRM_VMW_PRESENT                18
#define DRM_VMW_PRESENT_READBACK       19
#define DRM_VMW_UPDATE_LAYOUT          20
#define DRM_VMW_CREATE_SHADER          21
#define DRM_VMW_UNREF_SHADER           22
#define DRM_VMW_GB_SURFACE_CREATE      23
#define DRM_VMW_GB_SURFACE_REF         24
#define DRM_VMW_SYNCCPU                25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT  27
#define DRM_VMW_GB_SURFACE_REF_EXT     28
#define DRM_VMW_MSG                    29
#define DRM_VMW_MKSSTAT_RESET          30
#define DRM_VMW_MKSSTAT_ADD            31
#define DRM_VMW_MKSSTAT_REMOVE         32

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Whether the driver supports the overlay ioctl.
 *
 * DRM_VMW_PARAM_SM4_1:
 * SM4_1 support is enabled.
 *
 * DRM_VMW_PARAM_SM5:
 * SM5 support is enabled.
 *
 * DRM_VMW_PARAM_GL43:
 * SM5.1+GL4.3 support is enabled.
 *
 * DRM_VMW_PARAM_DEVICE_ID:
 * PCI ID of the underlying SVGA device.
 */

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS       4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12
#define DRM_VMW_PARAM_HW_CAPS2         13
#define DRM_VMW_PARAM_SM4_1            14
#define DRM_VMW_PARAM_SM5              15
#define DRM_VMW_PARAM_GL43             16
#define DRM_VMW_PARAM_DEVICE_ID        17

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};
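
/*
 * Example (illustrative sketch only, not part of the UAPI): querying a
 * device parameter with libdrm's drmCommandWriteRead() helper, which wraps
 * the DRM_VMW_GET_PARAM ioctl. "fd" is assumed to be an open vmwgfx
 * device file descriptor.
 *
 *	#include <xf86drm.h>
 *
 *	static int vmw_has_3d(int fd)
 *	{
 *		struct drm_vmw_getparam_arg arg = {
 *			.param = DRM_VMW_PARAM_3D,
 *		};
 *
 *		if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *					&arg, sizeof(arg)) != 0)
 *			return 0;
 *		return arg.value != 0;
 *	}
 */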

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width: mip level width
 * @height: mip level height
 * @depth: mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
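
/*
 * Example (illustrative sketch only, not part of the UAPI): creating a
 * 64x64 single mip-level legacy surface. The union is written as a request
 * and read back as a reply, so the reply overwrites the request. The
 * format value is a placeholder; real values come from the SVGA3D device
 * headers. "fd" is assumed as before.
 *
 *	struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
 *	union drm_vmw_surface_create_arg arg = { 0 };
 *
 *	arg.req.format = 2;		// e.g. SVGA3D_A8R8G8B8, assumed value
 *	arg.req.mip_levels[0] = 1;	// one level on face 0, other faces unused
 *	arg.req.size_addr = (__u64)(uintptr_t)&size;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE,
 *				&arg, sizeof(arg)) == 0)
 *		printf("surface id %d\n", arg.rep.sid);
 */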

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that, when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @context_handle: Context handle used for command submission, for version 2
 * and later.
 * @imported_fence_fd: FD for a fence imported from another device.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__s32 imported_fence_fd;
};
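
/*
 * Example (illustrative sketch only, not part of the UAPI): submitting a
 * pre-built SVGA command buffer and requesting a fence back. DRM_VMW_EXECBUF
 * is a write-only ioctl; the fence is returned through the user-space
 * pointer supplied in @fence_rep. "cmd", "cmd_len" and "cid" (a context
 * handle, assumed to come from one of the create-context ioctls) are
 * placeholders.
 *
 *	struct drm_vmw_fence_rep fence = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (__u64)(uintptr_t)cmd,
 *		.command_size = cmd_len,
 *		.fence_rep = (__u64)(uintptr_t)&fence,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.context_handle = cid,
 *		.imported_fence_fd = -1,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	// On success with fence.error == 0, fence.handle can be passed
 *	// to DRM_VMW_FENCE_WAIT / DRM_VMW_FENCE_UNREF below.
 */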

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32 bits.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported.
 * @error: This member should be set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__s32 fd;
	__s32 error;
};

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is
 * identified by a handle and an offset, which are private to the guest, but
 * usable in the command stream. The guest kernel may translate these
 * and patch up the command stream accordingly. In the future, the offset may
 * be zero at all times, or it may disappear from the interface before it is
 * fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
	__u32 size;
	__u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
	struct drm_vmw_alloc_bo_req req;
	struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
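
/*
 * Example (illustrative sketch only, not part of the UAPI): allocating a
 * 64 KiB buffer object and mapping it into the process. @map_handle is a
 * fake offset to be used with mmap() on the drm fd, as noted above.
 *
 *	union drm_vmw_alloc_bo_arg arg = { .req = { .size = 65536 } };
 *	void *ptr;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO,
 *				&arg, sizeof(arg)) != 0)
 *		return -1;
 *
 *	ptr = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.rep.map_handle);
 *	// arg.rep.cur_gmr_id / cur_gmr_offset identify the buffer in the
 *	// command stream; arg.rep.handle is used for DRM_VMW_HANDLE_CLOSE.
 */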

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * @x: X offset.
 * @y: Y offset.
 * @w: Width.
 * @h: Height.
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangles.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control.
 * @enabled: If false all following arguments are ignored.
 * @flags: Stream flags as understood by the host.
 * @color_key: Color key for the stream.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS  (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO.
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64.
 * @max_size: Max size to copy.
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};
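
/*
 * Example (illustrative sketch only, not part of the UAPI): reading the
 * 3D capability block. The required size is queried first with
 * DRM_VMW_PARAM_3D_CAPS_SIZE; DRM_VMW_GET_3D_CAP is write-only and fills
 * the user-supplied buffer.
 *
 *	struct drm_vmw_getparam_arg gp = {
 *		.param = DRM_VMW_PARAM_3D_CAPS_SIZE,
 *	};
 *	struct drm_vmw_get_3d_cap_arg cap = { 0 };
 *	void *buf;
 *
 *	drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &gp, sizeof(gp));
 *	buf = calloc(1, gp.value);
 *	cap.buffer = (__u64)(uintptr_t)buf;
 *	cap.max_size = gp.value;
 *	drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &cap, sizeof(cap));
 */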

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32 cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};
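
/*
 * Example (illustrative sketch only, not part of the UAPI): waiting up to
 * one second for a fence to signal execution, dropping the fence reference
 * on success via DRM_VMW_WAIT_OPTION_UNREF. "handle" is assumed to come
 * from a struct drm_vmw_fence_rep.
 *
 *	struct drm_vmw_fence_wait_arg wait = {
 *		.handle = handle,
 *		.cookie_valid = 0,		// must be 0 on the first call
 *		.timeout_us = 1000000,
 *		.lazy = 1,			// timing is not critical
 *		.flags = DRM_VMW_FENCE_FLAG_EXEC,
 *		.wait_options = DRM_VMW_WAIT_OPTION_UNREF,
 *	};
 *
 *	int ret = drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT,
 *				      &wait, sizeof(wait));
 *	// ret == 0: signaled (and unreferenced); ret == -EBUSY: timed out
 */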

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to the DRM_VMW_FENCE_SIGNALED ioctl.
 * @signaled: Out: Whether the fence object has signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED ioctl.
 */

struct drm_vmw_fence_signaled_arg {
	__u32 handle;
	__u32 flags;
	__s32 signaled;
	__u32 passed_seqno;
	__u32 signaled_flags;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	__u32 handle;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: User data to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is 0, then the ioctl will not return a fence.
 *
 * Input argument to the DRM_VMW_PRESENT_READBACK ioctl.
 */

struct drm_vmw_present_readback_arg {
	__u32 fb_id;
	__u32 num_clips;
	__u64 clips_ptr;
	__u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};
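
/*
 * Example (illustrative sketch only, not part of the UAPI; this path is
 * typically restricted to the DRM master): advertising a single preferred
 * 1024x768 output at the origin.
 *
 *	struct drm_vmw_rect rect = { .x = 0, .y = 0, .w = 1024, .h = 768 };
 *	struct drm_vmw_update_layout_arg layout = {
 *		.num_outputs = 1,
 *		.rects = (__u64)(uintptr_t)&rect,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &layout, sizeof(layout));
 */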

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code.
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};
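
/*
 * Example (illustrative sketch only, not part of the UAPI): creating a
 * pixel shader from byte-code previously written into a buffer object.
 * "bo_handle" is assumed to come from DRM_VMW_ALLOC_BO, and "bytecode_len"
 * to be the size of the shader byte-code placed at offset 0.
 *
 *	struct drm_vmw_shader_create_arg shader = {
 *		.shader_type = drm_vmw_shader_type_ps,
 *		.size = bytecode_len,
 *		.buffer_handle = bo_handle,
 *		.offset = 0,
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER,
 *			    &shader, sizeof(shader));
 *	// on success, shader.shader_handle identifies the new shader
 */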

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, destroying the shader
 * itself if this was the last reference.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable.
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2),
	drm_vmw_surface_flag_coherent = (1 << 3),
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 * @array_size: Must be zero for non-DX hardware, and if non-zero
 * svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;
	__u32 format;
	__u32 mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	__u32 multisample_count;
	__u32 autogen_filter;
	__u32 buffer_handle;
	__u32 array_size;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle.
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;
	__u32 backup_size;
	__u32 buffer_handle;
	__u32 buffer_size;
	__u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
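
/*
 * Example (illustrative sketch only, not part of the UAPI): creating a
 * 256x256 guest-backed surface and letting the kernel allocate the backup
 * buffer. The format value is a placeholder; real format values and
 * SVGA3D_INVALID_ID come from the SVGA3D device headers.
 *
 *	union drm_vmw_gb_surface_create_arg arg = { 0 };
 *
 *	arg.req.format = 2;			// assumed SVGA3D format value
 *	arg.req.mip_levels = 1;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *	arg.req.buffer_handle = 0xffffffff;	// SVGA3D_INVALID_ID: none given
 *	arg.req.base_size.width = 256;
 *	arg.req.base_size.height = 256;
 *	arg.req.base_size.depth = 1;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
 *				&arg, sizeof(arg)) == 0)
 *		printf("sid %u, backup bo %u\n",
 *		       arg.rep.handle, arg.rep.buffer_handle);
 */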

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only access.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations.
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	__u32 handle;
	__u32 pad64;
};
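
/*
 * Example (illustrative sketch only, not part of the UAPI): taking a CPU
 * write sync around a direct write to a mapped buffer, then releasing it.
 * Grab and release are assumed to use the same flags; "bo_handle" and
 * "ptr" come from the earlier DRM_VMW_ALLOC_BO / mmap() sketch.
 *
 *	struct drm_vmw_synccpu_arg sync = {
 *		.op = drm_vmw_synccpu_grab,
 *		.flags = drm_vmw_synccpu_write,
 *		.handle = bo_handle,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync, sizeof(sync));
 *	memset(ptr, 0, 65536);
 *	sync.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync, sizeof(sync));
 */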

/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy,
	drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;
	struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
	__u32 handle;
	__u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 *
 * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding a version
 * parameter and 64-bit svga flags.
 */

/**
 * enum drm_vmw_surface_version
 *
 * @drm_vmw_gb_surface_v1: Corresponds to the current gb surface format with
 * svga3d surface flags split into two halves, upper and lower.
 */
enum drm_vmw_surface_version {
	drm_vmw_gb_surface_v1,
};

/**
 * struct drm_vmw_gb_surface_create_ext_req
 *
 * @base: Surface create parameters.
 * @version: Version of surface create ioctl.
 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
 * @multisample_pattern: Multisampling pattern when msaa is supported.
 * @quality_level: Precision settings for each sample.
 * @buffer_byte_stride: Buffer byte stride.
 * @must_be_zero: Reserved for future usage.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
struct drm_vmw_gb_surface_create_ext_req {
	struct drm_vmw_gb_surface_create_req base;
	enum drm_vmw_surface_version version;
	__u32 svga3d_flags_upper_32_bits;
	__u32 multisample_pattern;
	__u32 quality_level;
	__u32 buffer_byte_stride;
	__u32 must_be_zero;
};

/**
 * union drm_vmw_gb_surface_create_ext_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */
union drm_vmw_gb_surface_create_ext_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_ext_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_ext_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_ext_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
 */
struct drm_vmw_gb_surface_ref_ext_rep {
	struct drm_vmw_gb_surface_create_ext_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_ext_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at
 * "struct drm_vmw_gb_surface_ref_ext_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
union drm_vmw_gb_surface_reference_ext_arg {
	struct drm_vmw_gb_surface_ref_ext_rep rep;
	struct drm_vmw_surface_arg req;
};

/**
 * struct drm_vmw_msg_arg
 *
 * @send: Pointer to user-space msg string (null terminated).
 * @receive: Pointer to user-space receive buffer.
 * @send_only: Boolean whether this is only sending or receiving too.
 *
 * Argument to the DRM_VMW_MSG ioctl.
 */
struct drm_vmw_msg_arg {
	__u64 send;
	__u64 receive;
	__s32 send_only;
	__u32 receive_len;
};
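
/*
 * Example (illustrative sketch only, not part of the UAPI): sending a
 * message string to the host without expecting a reply.
 *
 *	const char *msg = "log some guest message";
 *	struct drm_vmw_msg_arg arg = {
 *		.send = (__u64)(uintptr_t)msg,
 *		.send_only = 1,
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_VMW_MSG, &arg, sizeof(arg));
 */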

/**
 * struct drm_vmw_mksstat_add_arg
 *
 * @stat: Pointer to user-space stat-counters array, page-aligned.
 * @info: Pointer to user-space counter-infos array, page-aligned.
 * @strs: Pointer to user-space stat strings, page-aligned.
 * @stat_len: Length in bytes of stat-counters array.
 * @info_len: Length in bytes of counter-infos array.
 * @strs_len: Length in bytes of the stat strings, terminators included.
 * @description: Pointer to instance descriptor string; will be truncated
 * to MKS_GUEST_STAT_INSTANCE_DESC_LENGTH chars.
 * @id: Output identifier of the produced record; -1 if error.
 *
 * Argument to the DRM_VMW_MKSSTAT_ADD ioctl.
 */
struct drm_vmw_mksstat_add_arg {
	__u64 stat;
	__u64 info;
	__u64 strs;
	__u64 stat_len;
	__u64 info_len;
	__u64 strs_len;
	__u64 description;
	__u64 id;
};

/**
 * struct drm_vmw_mksstat_remove_arg
 *
 * @id: Identifier of the record being disposed, originally obtained through
 * DRM_VMW_MKSSTAT_ADD ioctl.
 *
 * Argument to the DRM_VMW_MKSSTAT_REMOVE ioctl.
 */
struct drm_vmw_mksstat_remove_arg {
	__u64 id;
};

#if defined(__cplusplus)
}
#endif

#endif