// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_devcaps.h"

#include <drm/ttm/ttm_placement.h>

#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

bool vmw_supports_3d(struct vmw_private *dev_priv)
{
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	BUG_ON(vmw_is_svga_v3(dev_priv));

	fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = vmw_fifo_mem_read(dev_priv,
				      ((fifo->capabilities &
					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				       SVGA_FIFO_3D_HWVERSION_REVISED :
				       SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}
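/*
 * Illustrative sketch, not part of the driver: a hypothetical caller would
 * use vmw_supports_3d() as the gate before touching any 3D state, e.g.:
 *
 *	if (!vmw_supports_3d(dev_priv)) {
 *		drm_info(&dev_priv->drm, "3D support unavailable.\n");
 *		return 0;
 *	}
 *	// safe to set up 3D resources here
 */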
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
{
	struct vmw_fifo_state *fifo;
	uint32_t max;
	uint32_t min;

	if (!dev_priv->fifo_mem)
		return NULL;

	fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return ERR_PTR(-ENOMEM);
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL)) {
		kfree(fifo);
		return ERR_PTR(-ENOMEM);
	}

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);
	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
	wmb();
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);

	drm_info(&dev_priv->drm,
		 "Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	if (unlikely(min >= max)) {
		drm_warn(&dev_priv->drm,
			 "FIFO memory is not usable. Driver failed to initialize.\n");
		vfree(fifo->static_buffer);
		kfree(fifo);
		return ERR_PTR(-ENXIO);
	}

	return fifo;
}
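/*
 * Illustrative sketch, not part of the driver: vmw_fifo_create() mixes two
 * failure conventions, returning NULL when there simply is no FIFO memory
 * and ERR_PTR() on real errors, so a hypothetical caller must distinguish
 * the two:
 *
 *	struct vmw_fifo_state *fifo = vmw_fifo_create(dev_priv);
 *
 *	if (IS_ERR(fifo))
 *		return PTR_ERR(fifo);	// -ENOMEM or -ENXIO
 *	dev_priv->fifo = fifo;		// may legitimately be NULL
 */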
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->fifo_mem;

	if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}

void vmw_fifo_destroy(struct vmw_private *dev_priv)
{
	struct vmw_fifo_state *fifo = dev_priv->fifo;

	if (!fifo)
		return;

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
	kfree(fifo);
	dev_priv->fifo = NULL;
}

static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
			(dev_priv->fifo_queue,
			 !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
			(dev_priv->fifo_queue,
			 !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}
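/*
 * Worked example for vmw_fifo_is_full() above, with made-up numbers:
 * min = 0x1000, max = 0x10000, next_cmd = 0xf000, stop = 0x2000.
 * Free space is (max - next_cmd) + (stop - min) = 0x1000 + 0x1000 = 0x2000
 * bytes, so a 0x2000-byte request still counts as "full": the comparison
 * is <=, which keeps next_cmd from ever catching up with stop and making
 * a full ring indistinguishable from an empty one.
 */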
/*
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 * If it times out waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 * Pointer to the fifo, or NULL on error (possible hardware hang). On
 * success, fifo_mutex is held until vmw_local_fifo_commit() releases it.
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	u32 *fifo_mem = dev_priv->fifo_mem;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_fifo_mem_write(dev_priv,
							   SVGA_FIFO_RESERVED,
							   bytes);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}

void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *ret;

	if (dev_priv->cman)
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 ctx_id, false, NULL);
	else if (ctx_id == SVGA3D_INVALID_ID)
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	else {
		WARN(1, "Command buffer has not been allocated.\n");
		ret = NULL;
	}
	if (IS_ERR_OR_NULL(ret))
		return NULL;

	return ret;
}

static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      struct vmw_private *vmw,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	u32 *fifo_mem = vmw->fifo_mem;
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}
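/*
 * Worked example for the wraparound in vmw_fifo_res_copy() above, with
 * made-up numbers: min = 0x1000, max = 0x10000, next_cmd = 0xfff8 and
 * bytes = 0x10. chunk_size is clamped to max - next_cmd = 8, so the first
 * memcpy() fills the ring up to max and the remaining 8 bytes land at min,
 * i.e. the command wraps around the ring boundary.
 */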
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       struct vmw_private *vmw,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, dev_priv,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, dev_priv,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}
	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
	}

	if (reserveable)
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}
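/*
 * Illustrative sketch, not part of the driver: reserve and commit are used
 * bracket-style. A hypothetical caller emitting a command of size bytes
 * would follow the same pattern as vmw_cmd_send_fence() below:
 *
 *	void *cmd = VMW_CMD_RESERVE(dev_priv, bytes);
 *
 *	if (!cmd)
 *		return -ENOMEM;
 *	// ... fill in the command payload at cmd ...
 *	vmw_cmd_commit(dev_priv, bytes);
 */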
/**
 * vmw_cmd_flush - Flush any buffered commands and make sure command processing
 * starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptibly if the function needs to sleep.
 */
int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
{
	might_sleep();

	if (dev_priv->cman)
		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
	else
		return 0;
}

int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = VMW_CMD_RESERVE(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3 * HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!vmw_has_fences(dev_priv)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_cmd_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_cmd_commit_flush(dev_priv, bytes);
	vmw_update_seqno(dev_priv);

out_err:
	return ret;
}
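/*
 * Illustrative sketch, not part of the driver: a fence emitted with
 * vmw_cmd_send_fence() is typically paired with a wait on the returned
 * seqno, e.g. (hypothetical caller, reusing the fallback wait seen above):
 *
 *	uint32_t seqno;
 *	int ret = vmw_cmd_send_fence(dev_priv, &seqno);
 *
 *	if (ret == 0)
 *		ret = vmw_fallback_wait(dev_priv, false, true, seqno,
 *					false, 3 * HZ);
 */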
/**
 * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_cmd_emit_dummy_query documentation.
 */
static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					   uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->resource->mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
	} else {
		cmd->body.guestResult.gmrId = bo->resource->start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_cmd_emit_dummy_query documentation.
 */
static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
				       uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->resource->start;
	cmd->body.offset = 0;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure must have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
			     uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);

	return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
}

/**
 * vmw_cmd_supported - returns true if the given device supports
 * command queues.
 *
 * @vmw: The device private structure.
 *
 * Returns true if we can issue commands.
 */
bool vmw_cmd_supported(struct vmw_private *vmw)
{
	bool has_cmdbufs =
		(vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
				      SVGA_CAP_CMD_BUFFERS_2)) != 0;

	if (vmw_is_svga_v3(vmw))
		return (has_cmdbufs &&
			(vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);

	/*
	 * We have FIFO commands.
	 */
	return has_cmdbufs || vmw->fifo_mem != NULL;
}
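/*
 * Illustrative sketch, not part of the driver: probe code would consult
 * vmw_cmd_supported() before committing to a device, along the lines of:
 *
 *	if (!vmw_cmd_supported(vmw)) {
 *		drm_err(&vmw->drm, "Device has no command queues.\n");
 *		return -ENOSYS;	// hypothetical error choice
 *	}
 */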