1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /************************************************************************** 3 * 4 * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 6 * 7 **************************************************************************/ 8 9 #include "vmwgfx_bo.h" 10 #include "vmwgfx_cursor_plane.h" 11 #include "vmwgfx_drv.h" 12 #include "vmwgfx_resource_priv.h" 13 #include "vmwgfx_so.h" 14 #include "vmwgfx_binding.h" 15 #include "vmw_surface_cache.h" 16 #include "device_include/svga3d_surfacedefs.h" 17 18 #include <drm/ttm/ttm_placement.h> 19 20 #define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32) 21 22 /** 23 * struct vmw_user_surface - User-space visible surface resource 24 * 25 * @prime: The TTM prime object. 26 * @srf: The surface metadata. 27 * @master: Master of the creating client. Used for security check. 28 */ 29 struct vmw_user_surface { 30 struct ttm_prime_object prime; 31 struct vmw_surface srf; 32 struct drm_master *master; 33 }; 34 35 /** 36 * struct vmw_surface_offset - Backing store mip level offset info 37 * 38 * @face: Surface face. 39 * @mip: Mip level. 40 * @bo_offset: Offset into backing store of this mip level. 41 * 42 */ 43 struct vmw_surface_offset { 44 uint32_t face; 45 uint32_t mip; 46 uint32_t bo_offset; 47 }; 48 49 /** 50 * struct vmw_surface_dirty - Surface dirty-tracker 51 * @cache: Cached layout information of the surface. 52 * @num_subres: Number of subresources. 53 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource. 54 */ 55 struct vmw_surface_dirty { 56 struct vmw_surface_cache cache; 57 u32 num_subres; 58 SVGA3dBox boxes[] __counted_by(num_subres); 59 }; 60 61 static void vmw_user_surface_free(struct vmw_resource *res); 62 static struct vmw_resource * 63 vmw_user_surface_base_to_res(struct ttm_base_object *base); 64 static int vmw_legacy_srf_bind(struct vmw_resource *res, 65 struct ttm_validate_buffer *val_buf); 66 static int vmw_legacy_srf_unbind(struct vmw_resource *res, 67 bool readback, 68 struct ttm_validate_buffer *val_buf); 69 static int vmw_legacy_srf_create(struct vmw_resource *res); 70 static int vmw_legacy_srf_destroy(struct vmw_resource *res); 71 static int vmw_gb_surface_create(struct vmw_resource *res); 72 static int vmw_gb_surface_bind(struct vmw_resource *res, 73 struct ttm_validate_buffer *val_buf); 74 static int vmw_gb_surface_unbind(struct vmw_resource *res, 75 bool readback, 76 struct ttm_validate_buffer *val_buf); 77 static int vmw_gb_surface_destroy(struct vmw_resource *res); 78 static int 79 vmw_gb_surface_define_internal(struct drm_device *dev, 80 struct drm_vmw_gb_surface_create_ext_req *req, 81 struct drm_vmw_gb_surface_create_rep *rep, 82 struct drm_file *file_priv); 83 static int 84 vmw_gb_surface_reference_internal(struct drm_device *dev, 85 struct drm_vmw_surface_arg *req, 86 struct drm_vmw_gb_surface_ref_ext_rep *rep, 87 struct drm_file *file_priv); 88 89 static void vmw_surface_dirty_free(struct vmw_resource *res); 90 static int vmw_surface_dirty_alloc(struct vmw_resource *res); 91 static int vmw_surface_dirty_sync(struct vmw_resource *res); 92 static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start, 93 size_t end); 94 static int vmw_surface_clean(struct vmw_resource *res); 95 96 static const struct vmw_user_resource_conv user_surface_conv = { 97 .object_type = VMW_RES_SURFACE, 98 .base_obj_to_res = vmw_user_surface_base_to_res, 99 .res_free = 
vmw_user_surface_free 100 }; 101 102 const struct vmw_user_resource_conv *user_surface_converter = 103 &user_surface_conv; 104 105 static const struct vmw_res_func vmw_legacy_surface_func = { 106 .res_type = vmw_res_surface, 107 .needs_guest_memory = false, 108 .may_evict = true, 109 .prio = 1, 110 .dirty_prio = 1, 111 .type_name = "legacy surfaces", 112 .domain = VMW_BO_DOMAIN_GMR, 113 .busy_domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, 114 .create = &vmw_legacy_srf_create, 115 .destroy = &vmw_legacy_srf_destroy, 116 .bind = &vmw_legacy_srf_bind, 117 .unbind = &vmw_legacy_srf_unbind 118 }; 119 120 static const struct vmw_res_func vmw_gb_surface_func = { 121 .res_type = vmw_res_surface, 122 .needs_guest_memory = true, 123 .may_evict = true, 124 .prio = 1, 125 .dirty_prio = 2, 126 .type_name = "guest backed surfaces", 127 .domain = VMW_BO_DOMAIN_MOB, 128 .busy_domain = VMW_BO_DOMAIN_MOB, 129 .create = vmw_gb_surface_create, 130 .destroy = vmw_gb_surface_destroy, 131 .bind = vmw_gb_surface_bind, 132 .unbind = vmw_gb_surface_unbind, 133 .dirty_alloc = vmw_surface_dirty_alloc, 134 .dirty_free = vmw_surface_dirty_free, 135 .dirty_sync = vmw_surface_dirty_sync, 136 .dirty_range_add = vmw_surface_dirty_range_add, 137 .clean = vmw_surface_clean, 138 }; 139 140 /* 141 * struct vmw_surface_dma - SVGA3D DMA command 142 */ 143 struct vmw_surface_dma { 144 SVGA3dCmdHeader header; 145 SVGA3dCmdSurfaceDMA body; 146 SVGA3dCopyBox cb; 147 SVGA3dCmdSurfaceDMASuffix suffix; 148 }; 149 150 /* 151 * struct vmw_surface_define - SVGA3D Surface Define command 152 */ 153 struct vmw_surface_define { 154 SVGA3dCmdHeader header; 155 SVGA3dCmdDefineSurface body; 156 }; 157 158 /* 159 * struct vmw_surface_destroy - SVGA3D Surface Destroy command 160 */ 161 struct vmw_surface_destroy { 162 SVGA3dCmdHeader header; 163 SVGA3dCmdDestroySurface body; 164 }; 165 166 167 /** 168 * vmw_surface_dma_size - Compute fifo size for a dma command. 169 * 170 * @srf: Pointer to a struct vmw_surface 171 * 172 * Computes the required size for a surface dma command for backup or 173 * restoration of the surface represented by @srf. 174 */ 175 static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) 176 { 177 return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma); 178 } 179 180 181 /** 182 * vmw_surface_define_size - Compute fifo size for a surface define command. 183 * 184 * @srf: Pointer to a struct vmw_surface 185 * 186 * Computes the required size for a surface define command for the definition 187 * of the surface represented by @srf. 188 */ 189 static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) 190 { 191 return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes * 192 sizeof(SVGA3dSize); 193 } 194 195 196 /** 197 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. 198 * 199 * Computes the required size for a surface destroy command for the destruction 200 * of a hw surface. 201 */ 202 static inline uint32_t vmw_surface_destroy_size(void) 203 { 204 return sizeof(struct vmw_surface_destroy); 205 } 206 207 /** 208 * vmw_surface_destroy_encode - Encode a surface_destroy command. 209 * 210 * @id: The surface id 211 * @cmd_space: Pointer to memory area in which the commands should be encoded. 
212 */ 213 static void vmw_surface_destroy_encode(uint32_t id, 214 void *cmd_space) 215 { 216 struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) 217 cmd_space; 218 219 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; 220 cmd->header.size = sizeof(cmd->body); 221 cmd->body.sid = id; 222 } 223 224 /** 225 * vmw_surface_define_encode - Encode a surface_define command. 226 * 227 * @srf: Pointer to a struct vmw_surface object. 228 * @cmd_space: Pointer to memory area in which the commands should be encoded. 229 */ 230 static void vmw_surface_define_encode(const struct vmw_surface *srf, 231 void *cmd_space) 232 { 233 struct vmw_surface_define *cmd = (struct vmw_surface_define *) 234 cmd_space; 235 struct drm_vmw_size *src_size; 236 SVGA3dSize *cmd_size; 237 uint32_t cmd_len; 238 int i; 239 240 cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes * 241 sizeof(SVGA3dSize); 242 243 cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; 244 cmd->header.size = cmd_len; 245 cmd->body.sid = srf->res.id; 246 /* 247 * Downcast of surfaceFlags, was upcasted when received from user-space, 248 * since driver internally stores as 64 bit. 249 * For legacy surface define only 32 bit flag is supported. 250 */ 251 cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags; 252 cmd->body.format = srf->metadata.format; 253 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 254 cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i]; 255 256 cmd += 1; 257 cmd_size = (SVGA3dSize *) cmd; 258 src_size = srf->metadata.sizes; 259 260 for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) { 261 cmd_size->width = src_size->width; 262 cmd_size->height = src_size->height; 263 cmd_size->depth = src_size->depth; 264 } 265 } 266 267 /** 268 * vmw_surface_dma_encode - Encode a surface_dma command. 269 * 270 * @srf: Pointer to a struct vmw_surface object. 271 * @cmd_space: Pointer to memory area in which the commands should be encoded. 272 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents 273 * should be placed or read from. 274 * @to_surface: Boolean whether to DMA to the surface or from the surface. 275 */ 276 static void vmw_surface_dma_encode(struct vmw_surface *srf, 277 void *cmd_space, 278 const SVGAGuestPtr *ptr, 279 bool to_surface) 280 { 281 uint32_t i; 282 struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; 283 const struct SVGA3dSurfaceDesc *desc = 284 vmw_surface_get_desc(srf->metadata.format); 285 286 for (i = 0; i < srf->metadata.num_sizes; ++i) { 287 SVGA3dCmdHeader *header = &cmd->header; 288 SVGA3dCmdSurfaceDMA *body = &cmd->body; 289 SVGA3dCopyBox *cb = &cmd->cb; 290 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; 291 const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; 292 const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i]; 293 294 header->id = SVGA_3D_CMD_SURFACE_DMA; 295 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); 296 297 body->guest.ptr = *ptr; 298 body->guest.ptr.offset += cur_offset->bo_offset; 299 body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size); 300 body->host.sid = srf->res.id; 301 body->host.face = cur_offset->face; 302 body->host.mipmap = cur_offset->mip; 303 body->transfer = ((to_surface) ? 
SVGA3D_WRITE_HOST_VRAM : 304 SVGA3D_READ_HOST_VRAM); 305 cb->x = 0; 306 cb->y = 0; 307 cb->z = 0; 308 cb->srcx = 0; 309 cb->srcy = 0; 310 cb->srcz = 0; 311 cb->w = cur_size->width; 312 cb->h = cur_size->height; 313 cb->d = cur_size->depth; 314 315 suffix->suffixSize = sizeof(*suffix); 316 suffix->maximumOffset = 317 vmw_surface_get_image_buffer_size(desc, cur_size, 318 body->guest.pitch); 319 suffix->flags.discard = 0; 320 suffix->flags.unsynchronized = 0; 321 suffix->flags.reserved = 0; 322 ++cmd; 323 } 324 }; 325 326 327 /** 328 * vmw_hw_surface_destroy - destroy a Device surface 329 * 330 * @res: Pointer to a struct vmw_resource embedded in a struct 331 * vmw_surface. 332 * 333 * Destroys a the device surface associated with a struct vmw_surface if 334 * any, and adjusts resource count accordingly. 335 */ 336 static void vmw_hw_surface_destroy(struct vmw_resource *res) 337 { 338 339 struct vmw_private *dev_priv = res->dev_priv; 340 void *cmd; 341 342 if (res->func->destroy == vmw_gb_surface_destroy) { 343 (void) vmw_gb_surface_destroy(res); 344 return; 345 } 346 347 if (res->id != -1) { 348 349 cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size()); 350 if (unlikely(!cmd)) 351 return; 352 353 vmw_surface_destroy_encode(res->id, cmd); 354 vmw_cmd_commit(dev_priv, vmw_surface_destroy_size()); 355 356 /* 357 * used_memory_size_atomic, or separate lock 358 * to avoid taking dev_priv::cmdbuf_mutex in 359 * the destroy path. 360 */ 361 362 mutex_lock(&dev_priv->cmdbuf_mutex); 363 dev_priv->used_memory_size -= res->guest_memory_size; 364 mutex_unlock(&dev_priv->cmdbuf_mutex); 365 } 366 } 367 368 /** 369 * vmw_legacy_srf_create - Create a device surface as part of the 370 * resource validation process. 371 * 372 * @res: Pointer to a struct vmw_surface. 373 * 374 * If the surface doesn't have a hw id. 375 * 376 * Returns -EBUSY if there wasn't sufficient device resources to 377 * complete the validation. Retry after freeing up resources. 378 * 379 * May return other errors if the kernel is out of guest resources. 380 */ 381 static int vmw_legacy_srf_create(struct vmw_resource *res) 382 { 383 struct vmw_private *dev_priv = res->dev_priv; 384 struct vmw_surface *srf; 385 uint32_t submit_size; 386 uint8_t *cmd; 387 int ret; 388 389 if (likely(res->id != -1)) 390 return 0; 391 392 srf = vmw_res_to_srf(res); 393 if (unlikely(dev_priv->used_memory_size + res->guest_memory_size >= 394 dev_priv->memory_size)) 395 return -EBUSY; 396 397 /* 398 * Alloc id for the resource. 399 */ 400 401 ret = vmw_resource_alloc_id(res); 402 if (unlikely(ret != 0)) { 403 DRM_ERROR("Failed to allocate a surface id.\n"); 404 goto out_no_id; 405 } 406 407 if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) { 408 ret = -EBUSY; 409 goto out_no_fifo; 410 } 411 412 /* 413 * Encode surface define- commands. 414 */ 415 416 submit_size = vmw_surface_define_size(srf); 417 cmd = VMW_CMD_RESERVE(dev_priv, submit_size); 418 if (unlikely(!cmd)) { 419 ret = -ENOMEM; 420 goto out_no_fifo; 421 } 422 423 vmw_surface_define_encode(srf, cmd); 424 vmw_cmd_commit(dev_priv, submit_size); 425 vmw_fifo_resource_inc(dev_priv); 426 427 /* 428 * Surface memory usage accounting. 429 */ 430 431 dev_priv->used_memory_size += res->guest_memory_size; 432 return 0; 433 434 out_no_fifo: 435 vmw_resource_release_id(res); 436 out_no_id: 437 return ret; 438 } 439 440 /** 441 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface. 442 * 443 * @res: Pointer to a struct vmw_res embedded in a struct 444 * vmw_surface. 
445 * @val_buf: Pointer to a struct ttm_validate_buffer containing 446 * information about the backup buffer. 447 * @bind: Boolean wether to DMA to the surface. 448 * 449 * Transfer backup data to or from a legacy surface as part of the 450 * validation process. 451 * May return other errors if the kernel is out of guest resources. 452 * The backup buffer will be fenced or idle upon successful completion, 453 * and if the surface needs persistent backup storage, the backup buffer 454 * will also be returned reserved iff @bind is true. 455 */ 456 static int vmw_legacy_srf_dma(struct vmw_resource *res, 457 struct ttm_validate_buffer *val_buf, 458 bool bind) 459 { 460 SVGAGuestPtr ptr; 461 struct vmw_fence_obj *fence; 462 uint32_t submit_size; 463 struct vmw_surface *srf = vmw_res_to_srf(res); 464 uint8_t *cmd; 465 struct vmw_private *dev_priv = res->dev_priv; 466 467 BUG_ON(!val_buf->bo); 468 submit_size = vmw_surface_dma_size(srf); 469 cmd = VMW_CMD_RESERVE(dev_priv, submit_size); 470 if (unlikely(!cmd)) 471 return -ENOMEM; 472 473 vmw_bo_get_guest_ptr(val_buf->bo, &ptr); 474 vmw_surface_dma_encode(srf, cmd, &ptr, bind); 475 476 vmw_cmd_commit(dev_priv, submit_size); 477 478 /* 479 * Create a fence object and fence the backup buffer. 480 */ 481 482 (void) vmw_execbuf_fence_commands(NULL, dev_priv, 483 &fence, NULL); 484 485 vmw_bo_fence_single(val_buf->bo, fence); 486 487 if (likely(fence != NULL)) 488 vmw_fence_obj_unreference(&fence); 489 490 return 0; 491 } 492 493 /** 494 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the 495 * surface validation process. 496 * 497 * @res: Pointer to a struct vmw_res embedded in a struct 498 * vmw_surface. 499 * @val_buf: Pointer to a struct ttm_validate_buffer containing 500 * information about the backup buffer. 501 * 502 * This function will copy backup data to the surface if the 503 * backup buffer is dirty. 504 */ 505 static int vmw_legacy_srf_bind(struct vmw_resource *res, 506 struct ttm_validate_buffer *val_buf) 507 { 508 if (!res->guest_memory_dirty) 509 return 0; 510 511 return vmw_legacy_srf_dma(res, val_buf, true); 512 } 513 514 515 /** 516 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the 517 * surface eviction process. 518 * 519 * @res: Pointer to a struct vmw_res embedded in a struct 520 * vmw_surface. 521 * @readback: Readback - only true if dirty 522 * @val_buf: Pointer to a struct ttm_validate_buffer containing 523 * information about the backup buffer. 524 * 525 * This function will copy backup data from the surface. 526 */ 527 static int vmw_legacy_srf_unbind(struct vmw_resource *res, 528 bool readback, 529 struct ttm_validate_buffer *val_buf) 530 { 531 if (unlikely(readback)) 532 return vmw_legacy_srf_dma(res, val_buf, false); 533 return 0; 534 } 535 536 /** 537 * vmw_legacy_srf_destroy - Destroy a device surface as part of a 538 * resource eviction process. 539 * 540 * @res: Pointer to a struct vmw_res embedded in a struct 541 * vmw_surface. 542 */ 543 static int vmw_legacy_srf_destroy(struct vmw_resource *res) 544 { 545 struct vmw_private *dev_priv = res->dev_priv; 546 uint32_t submit_size; 547 uint8_t *cmd; 548 549 BUG_ON(res->id == -1); 550 551 /* 552 * Encode the dma- and surface destroy commands. 553 */ 554 555 submit_size = vmw_surface_destroy_size(); 556 cmd = VMW_CMD_RESERVE(dev_priv, submit_size); 557 if (unlikely(!cmd)) 558 return -ENOMEM; 559 560 vmw_surface_destroy_encode(res->id, cmd); 561 vmw_cmd_commit(dev_priv, submit_size); 562 563 /* 564 * Surface memory usage accounting. 
565 */ 566 567 dev_priv->used_memory_size -= res->guest_memory_size; 568 569 /* 570 * Release the surface ID. 571 */ 572 573 vmw_resource_release_id(res); 574 vmw_fifo_resource_dec(dev_priv); 575 576 return 0; 577 } 578 579 580 /** 581 * vmw_surface_init - initialize a struct vmw_surface 582 * 583 * @dev_priv: Pointer to a device private struct. 584 * @srf: Pointer to the struct vmw_surface to initialize. 585 * @res_free: Pointer to a resource destructor used to free 586 * the object. 587 */ 588 static int vmw_surface_init(struct vmw_private *dev_priv, 589 struct vmw_surface *srf, 590 void (*res_free) (struct vmw_resource *res)) 591 { 592 int ret; 593 struct vmw_resource *res = &srf->res; 594 595 BUG_ON(!res_free); 596 ret = vmw_resource_init(dev_priv, res, true, res_free, 597 (dev_priv->has_mob) ? &vmw_gb_surface_func : 598 &vmw_legacy_surface_func); 599 600 if (unlikely(ret != 0)) { 601 res_free(res); 602 return ret; 603 } 604 605 /* 606 * The surface won't be visible to hardware until a 607 * surface validate. 608 */ 609 610 INIT_LIST_HEAD(&srf->view_list); 611 res->hw_destroy = vmw_hw_surface_destroy; 612 return ret; 613 } 614 615 /** 616 * vmw_user_surface_base_to_res - TTM base object to resource converter for 617 * user visible surfaces 618 * 619 * @base: Pointer to a TTM base object 620 * 621 * Returns the struct vmw_resource embedded in a struct vmw_surface 622 * for the user-visible object identified by the TTM base object @base. 623 */ 624 static struct vmw_resource * 625 vmw_user_surface_base_to_res(struct ttm_base_object *base) 626 { 627 return &(container_of(base, struct vmw_user_surface, 628 prime.base)->srf.res); 629 } 630 631 /** 632 * vmw_user_surface_free - User visible surface resource destructor 633 * 634 * @res: A struct vmw_resource embedded in a struct vmw_surface. 635 */ 636 static void vmw_user_surface_free(struct vmw_resource *res) 637 { 638 struct vmw_surface *srf = vmw_res_to_srf(res); 639 struct vmw_user_surface *user_srf = 640 container_of(srf, struct vmw_user_surface, srf); 641 642 WARN_ON_ONCE(res->dirty); 643 if (user_srf->master) 644 drm_master_put(&user_srf->master); 645 kfree(srf->offsets); 646 kfree(srf->metadata.sizes); 647 kfree(srf->snooper.image); 648 ttm_prime_object_kfree(user_srf, prime); 649 } 650 651 /** 652 * vmw_user_surface_base_release - User visible surface TTM base object destructor 653 * 654 * @p_base: Pointer to a pointer to a TTM base object 655 * embedded in a struct vmw_user_surface. 656 * 657 * Drops the base object's reference on its resource, and the 658 * pointer pointed to by *p_base is set to NULL. 659 */ 660 static void vmw_user_surface_base_release(struct ttm_base_object **p_base) 661 { 662 struct ttm_base_object *base = *p_base; 663 struct vmw_user_surface *user_srf = 664 container_of(base, struct vmw_user_surface, prime.base); 665 struct vmw_resource *res = &user_srf->srf.res; 666 667 *p_base = NULL; 668 669 /* 670 * Dumb buffers own the resource and they'll unref the 671 * resource themselves 672 */ 673 if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb) 674 return; 675 676 vmw_resource_unreference(&res); 677 } 678 679 /** 680 * vmw_surface_destroy_ioctl - Ioctl function implementing 681 * the user surface destroy functionality. 682 * 683 * @dev: Pointer to a struct drm_device. 684 * @data: Pointer to data copied from / to user-space. 685 * @file_priv: Pointer to a drm file private structure. 
686 */ 687 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, 688 struct drm_file *file_priv) 689 { 690 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; 691 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 692 693 return ttm_ref_object_base_unref(tfile, arg->sid); 694 } 695 696 /** 697 * vmw_surface_define_ioctl - Ioctl function implementing 698 * the user surface define functionality. 699 * 700 * @dev: Pointer to a struct drm_device. 701 * @data: Pointer to data copied from / to user-space. 702 * @file_priv: Pointer to a drm file private structure. 703 */ 704 int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 705 struct drm_file *file_priv) 706 { 707 struct vmw_private *dev_priv = vmw_priv(dev); 708 struct vmw_user_surface *user_srf; 709 struct vmw_surface *srf; 710 struct vmw_surface_metadata *metadata; 711 struct vmw_resource *res; 712 struct vmw_resource *tmp; 713 union drm_vmw_surface_create_arg *arg = 714 (union drm_vmw_surface_create_arg *)data; 715 struct drm_vmw_surface_create_req *req = &arg->req; 716 struct drm_vmw_surface_arg *rep = &arg->rep; 717 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 718 int ret; 719 int i, j; 720 uint32_t cur_bo_offset; 721 struct drm_vmw_size *cur_size; 722 struct vmw_surface_offset *cur_offset; 723 uint32_t num_sizes; 724 const SVGA3dSurfaceDesc *desc; 725 726 num_sizes = 0; 727 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { 728 if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) 729 return -EINVAL; 730 num_sizes += req->mip_levels[i]; 731 } 732 733 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS || 734 num_sizes == 0) 735 return -EINVAL; 736 737 desc = vmw_surface_get_desc(req->format); 738 if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) { 739 VMW_DEBUG_USER("Invalid format %d for surface creation.\n", 740 req->format); 741 return -EINVAL; 742 } 743 744 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); 745 if (unlikely(!user_srf)) { 746 ret = -ENOMEM; 747 goto out_unlock; 748 } 749 750 srf = &user_srf->srf; 751 metadata = &srf->metadata; 752 res = &srf->res; 753 754 /* Driver internally stores as 64-bit flags */ 755 metadata->flags = (SVGA3dSurfaceAllFlags)req->flags; 756 metadata->format = req->format; 757 metadata->scanout = req->scanout; 758 759 memcpy(metadata->mip_levels, req->mip_levels, 760 sizeof(metadata->mip_levels)); 761 metadata->num_sizes = num_sizes; 762 metadata->sizes = 763 memdup_array_user((struct drm_vmw_size __user *)(unsigned long) 764 req->size_addr, 765 metadata->num_sizes, sizeof(*metadata->sizes)); 766 if (IS_ERR(metadata->sizes)) { 767 ret = PTR_ERR(metadata->sizes); 768 goto out_no_sizes; 769 } 770 srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets), 771 GFP_KERNEL); 772 if (unlikely(!srf->offsets)) { 773 ret = -ENOMEM; 774 goto out_no_offsets; 775 } 776 777 metadata->base_size = *srf->metadata.sizes; 778 metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE; 779 metadata->multisample_count = 0; 780 metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE; 781 metadata->quality_level = SVGA3D_MS_QUALITY_NONE; 782 783 cur_bo_offset = 0; 784 cur_offset = srf->offsets; 785 cur_size = metadata->sizes; 786 787 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { 788 for (j = 0; j < metadata->mip_levels[i]; ++j) { 789 uint32_t stride = vmw_surface_calculate_pitch( 790 desc, cur_size); 791 792 cur_offset->face = i; 793 cur_offset->mip = j; 794 cur_offset->bo_offset = cur_bo_offset; 795 cur_bo_offset += 
vmw_surface_get_image_buffer_size 796 (desc, cur_size, stride); 797 ++cur_offset; 798 ++cur_size; 799 } 800 } 801 res->guest_memory_size = cur_bo_offset; 802 803 srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata); 804 if (IS_ERR(srf->snooper.image)) { 805 ret = PTR_ERR(srf->snooper.image); 806 goto out_no_copy; 807 } 808 809 if (drm_is_primary_client(file_priv)) 810 user_srf->master = drm_file_get_master(file_priv); 811 812 /** 813 * From this point, the generic resource management functions 814 * destroy the object on failure. 815 */ 816 817 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); 818 if (unlikely(ret != 0)) 819 goto out_unlock; 820 821 /* 822 * A gb-aware client referencing a surface will expect a backup 823 * buffer to be present. 824 */ 825 if (dev_priv->has_mob) { 826 struct vmw_bo_params params = { 827 .domain = VMW_BO_DOMAIN_SYS, 828 .busy_domain = VMW_BO_DOMAIN_SYS, 829 .bo_type = ttm_bo_type_device, 830 .size = res->guest_memory_size, 831 .pin = false 832 }; 833 834 ret = vmw_gem_object_create(dev_priv, 835 ¶ms, 836 &res->guest_memory_bo); 837 if (unlikely(ret != 0)) { 838 vmw_resource_unreference(&res); 839 goto out_unlock; 840 } 841 842 ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res); 843 if (unlikely(ret != 0)) { 844 vmw_resource_unreference(&res); 845 goto out_unlock; 846 } 847 } 848 849 tmp = vmw_resource_reference(&srf->res); 850 ret = ttm_prime_object_init(tfile, res->guest_memory_size, 851 &user_srf->prime, 852 VMW_RES_SURFACE, 853 &vmw_user_surface_base_release); 854 855 if (unlikely(ret != 0)) { 856 vmw_resource_unreference(&tmp); 857 vmw_resource_unreference(&res); 858 goto out_unlock; 859 } 860 861 rep->sid = user_srf->prime.base.handle; 862 vmw_resource_unreference(&res); 863 864 return 0; 865 out_no_copy: 866 kfree(srf->offsets); 867 out_no_offsets: 868 kfree(metadata->sizes); 869 out_no_sizes: 870 ttm_prime_object_kfree(user_srf, prime); 871 out_unlock: 872 return ret; 873 } 874 875 static struct vmw_user_surface * 876 vmw_lookup_user_surface_for_buffer(struct vmw_private *vmw, struct vmw_bo *bo, 877 u32 handle) 878 { 879 struct vmw_user_surface *user_srf = NULL; 880 struct vmw_surface *surf; 881 struct ttm_base_object *base; 882 883 surf = vmw_bo_surface(bo); 884 if (surf) { 885 rcu_read_lock(); 886 user_srf = container_of(surf, struct vmw_user_surface, srf); 887 base = &user_srf->prime.base; 888 if (base && !kref_get_unless_zero(&base->refcount)) { 889 drm_dbg_driver(&vmw->drm, 890 "%s: referencing a stale surface handle %d\n", 891 __func__, handle); 892 base = NULL; 893 user_srf = NULL; 894 } 895 rcu_read_unlock(); 896 } 897 898 return user_srf; 899 } 900 901 struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw, 902 struct vmw_bo *bo, 903 u32 handle) 904 { 905 struct vmw_user_surface *user_srf = 906 vmw_lookup_user_surface_for_buffer(vmw, bo, handle); 907 struct vmw_surface *surf = NULL; 908 struct ttm_base_object *base; 909 910 if (user_srf) { 911 surf = vmw_surface_reference(&user_srf->srf); 912 base = &user_srf->prime.base; 913 ttm_base_object_unref(&base); 914 } 915 return surf; 916 } 917 918 u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw, 919 struct vmw_bo *bo, 920 u32 handle) 921 { 922 struct vmw_user_surface *user_srf = 923 vmw_lookup_user_surface_for_buffer(vmw, bo, handle); 924 int surf_handle = 0; 925 struct ttm_base_object *base; 926 927 if (user_srf) { 928 base = &user_srf->prime.base; 929 surf_handle = (u32)base->handle; 930 ttm_base_object_unref(&base); 931 } 
932 return surf_handle; 933 } 934 935 static int vmw_buffer_prime_to_surface_base(struct vmw_private *dev_priv, 936 struct drm_file *file_priv, 937 u32 fd, u32 *handle, 938 struct ttm_base_object **base_p) 939 { 940 struct ttm_base_object *base; 941 struct vmw_bo *bo; 942 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 943 struct vmw_user_surface *user_srf; 944 int ret; 945 946 ret = drm_gem_prime_fd_to_handle(&dev_priv->drm, file_priv, fd, handle); 947 if (ret) { 948 drm_warn(&dev_priv->drm, 949 "Wasn't able to find user buffer for fd = %u.\n", fd); 950 return ret; 951 } 952 953 ret = vmw_user_bo_lookup(file_priv, *handle, &bo); 954 if (ret) { 955 drm_warn(&dev_priv->drm, 956 "Wasn't able to lookup user buffer for handle = %u.\n", *handle); 957 return ret; 958 } 959 960 user_srf = vmw_lookup_user_surface_for_buffer(dev_priv, bo, *handle); 961 if (WARN_ON(!user_srf)) { 962 drm_warn(&dev_priv->drm, 963 "User surface fd %d (handle %d) is null.\n", fd, *handle); 964 ret = -EINVAL; 965 goto out; 966 } 967 968 base = &user_srf->prime.base; 969 ret = ttm_ref_object_add(tfile, base, NULL, false); 970 if (ret) { 971 drm_warn(&dev_priv->drm, 972 "Couldn't add an object ref for the buffer (%d).\n", *handle); 973 goto out; 974 } 975 976 *base_p = base; 977 out: 978 vmw_user_bo_unref(&bo); 979 980 return ret; 981 } 982 983 static int 984 vmw_surface_handle_reference(struct vmw_private *dev_priv, 985 struct drm_file *file_priv, 986 uint32_t u_handle, 987 enum drm_vmw_handle_type handle_type, 988 struct ttm_base_object **base_p) 989 { 990 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 991 struct vmw_user_surface *user_srf = NULL; 992 uint32_t handle; 993 struct ttm_base_object *base; 994 int ret; 995 996 if (handle_type == DRM_VMW_HANDLE_PRIME) { 997 ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); 998 if (ret) 999 return vmw_buffer_prime_to_surface_base(dev_priv, 1000 file_priv, 1001 u_handle, 1002 &handle, 1003 base_p); 1004 } else { 1005 handle = u_handle; 1006 } 1007 1008 ret = -EINVAL; 1009 base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); 1010 if (unlikely(!base)) { 1011 VMW_DEBUG_USER("Could not find surface to reference.\n"); 1012 goto out_no_lookup; 1013 } 1014 1015 if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) { 1016 VMW_DEBUG_USER("Referenced object is not a surface.\n"); 1017 goto out_bad_resource; 1018 } 1019 if (handle_type != DRM_VMW_HANDLE_PRIME) { 1020 bool require_exist = false; 1021 1022 user_srf = container_of(base, struct vmw_user_surface, 1023 prime.base); 1024 1025 /* Error out if we are unauthenticated primary */ 1026 if (drm_is_primary_client(file_priv) && 1027 !file_priv->authenticated) { 1028 ret = -EACCES; 1029 goto out_bad_resource; 1030 } 1031 1032 /* 1033 * Make sure the surface creator has the same 1034 * authenticating master, or is already registered with us. 
1035 */ 1036 if (drm_is_primary_client(file_priv) && 1037 user_srf->master != file_priv->master) 1038 require_exist = true; 1039 1040 if (unlikely(drm_is_render_client(file_priv))) 1041 require_exist = true; 1042 1043 ret = ttm_ref_object_add(tfile, base, NULL, require_exist); 1044 if (unlikely(ret != 0)) { 1045 DRM_ERROR("Could not add a reference to a surface.\n"); 1046 goto out_bad_resource; 1047 } 1048 } 1049 1050 *base_p = base; 1051 return 0; 1052 1053 out_bad_resource: 1054 ttm_base_object_unref(&base); 1055 out_no_lookup: 1056 if (handle_type == DRM_VMW_HANDLE_PRIME) 1057 (void) ttm_ref_object_base_unref(tfile, handle); 1058 1059 return ret; 1060 } 1061 1062 /** 1063 * vmw_surface_reference_ioctl - Ioctl function implementing 1064 * the user surface reference functionality. 1065 * 1066 * @dev: Pointer to a struct drm_device. 1067 * @data: Pointer to data copied from / to user-space. 1068 * @file_priv: Pointer to a drm file private structure. 1069 */ 1070 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, 1071 struct drm_file *file_priv) 1072 { 1073 struct vmw_private *dev_priv = vmw_priv(dev); 1074 union drm_vmw_surface_reference_arg *arg = 1075 (union drm_vmw_surface_reference_arg *)data; 1076 struct drm_vmw_surface_arg *req = &arg->req; 1077 struct drm_vmw_surface_create_req *rep = &arg->rep; 1078 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1079 struct vmw_surface *srf; 1080 struct vmw_user_surface *user_srf; 1081 struct drm_vmw_size __user *user_sizes; 1082 struct ttm_base_object *base; 1083 int ret; 1084 1085 ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, 1086 req->handle_type, &base); 1087 if (unlikely(ret != 0)) 1088 return ret; 1089 1090 user_srf = container_of(base, struct vmw_user_surface, prime.base); 1091 srf = &user_srf->srf; 1092 1093 /* Downcast of flags when sending back to user space */ 1094 rep->flags = (uint32_t)srf->metadata.flags; 1095 rep->format = srf->metadata.format; 1096 memcpy(rep->mip_levels, srf->metadata.mip_levels, 1097 sizeof(srf->metadata.mip_levels)); 1098 user_sizes = (struct drm_vmw_size __user *)(unsigned long) 1099 rep->size_addr; 1100 1101 if (user_sizes) 1102 ret = copy_to_user(user_sizes, &srf->metadata.base_size, 1103 sizeof(srf->metadata.base_size)); 1104 if (unlikely(ret != 0)) { 1105 VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes, 1106 srf->metadata.num_sizes); 1107 ttm_ref_object_base_unref(tfile, base->handle); 1108 ret = -EFAULT; 1109 } 1110 1111 ttm_base_object_unref(&base); 1112 1113 return ret; 1114 } 1115 1116 /** 1117 * vmw_gb_surface_create - Encode a surface_define command. 1118 * 1119 * @res: Pointer to a struct vmw_resource embedded in a struct 1120 * vmw_surface. 
1121 */ 1122 static int vmw_gb_surface_create(struct vmw_resource *res) 1123 { 1124 struct vmw_private *dev_priv = res->dev_priv; 1125 struct vmw_surface *srf = vmw_res_to_srf(res); 1126 struct vmw_surface_metadata *metadata = &srf->metadata; 1127 uint32_t cmd_len, cmd_id, submit_len; 1128 int ret; 1129 struct { 1130 SVGA3dCmdHeader header; 1131 SVGA3dCmdDefineGBSurface body; 1132 } *cmd; 1133 struct { 1134 SVGA3dCmdHeader header; 1135 SVGA3dCmdDefineGBSurface_v2 body; 1136 } *cmd2; 1137 struct { 1138 SVGA3dCmdHeader header; 1139 SVGA3dCmdDefineGBSurface_v3 body; 1140 } *cmd3; 1141 struct { 1142 SVGA3dCmdHeader header; 1143 SVGA3dCmdDefineGBSurface_v4 body; 1144 } *cmd4; 1145 1146 if (likely(res->id != -1)) 1147 return 0; 1148 1149 vmw_fifo_resource_inc(dev_priv); 1150 ret = vmw_resource_alloc_id(res); 1151 if (unlikely(ret != 0)) { 1152 DRM_ERROR("Failed to allocate a surface id.\n"); 1153 goto out_no_id; 1154 } 1155 1156 if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) { 1157 ret = -EBUSY; 1158 goto out_no_fifo; 1159 } 1160 1161 if (has_sm5_context(dev_priv) && metadata->array_size > 0) { 1162 cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4; 1163 cmd_len = sizeof(cmd4->body); 1164 submit_len = sizeof(*cmd4); 1165 } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) { 1166 cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3; 1167 cmd_len = sizeof(cmd3->body); 1168 submit_len = sizeof(*cmd3); 1169 } else if (metadata->array_size > 0) { 1170 /* VMW_SM_4 support verified at creation time. */ 1171 cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2; 1172 cmd_len = sizeof(cmd2->body); 1173 submit_len = sizeof(*cmd2); 1174 } else { 1175 cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE; 1176 cmd_len = sizeof(cmd->body); 1177 submit_len = sizeof(*cmd); 1178 } 1179 1180 cmd = VMW_CMD_RESERVE(dev_priv, submit_len); 1181 cmd2 = (typeof(cmd2))cmd; 1182 cmd3 = (typeof(cmd3))cmd; 1183 cmd4 = (typeof(cmd4))cmd; 1184 if (unlikely(!cmd)) { 1185 ret = -ENOMEM; 1186 goto out_no_fifo; 1187 } 1188 1189 if (has_sm5_context(dev_priv) && metadata->array_size > 0) { 1190 cmd4->header.id = cmd_id; 1191 cmd4->header.size = cmd_len; 1192 cmd4->body.sid = srf->res.id; 1193 cmd4->body.surfaceFlags = metadata->flags; 1194 cmd4->body.format = metadata->format; 1195 cmd4->body.numMipLevels = metadata->mip_levels[0]; 1196 cmd4->body.multisampleCount = metadata->multisample_count; 1197 cmd4->body.multisamplePattern = metadata->multisample_pattern; 1198 cmd4->body.qualityLevel = metadata->quality_level; 1199 cmd4->body.autogenFilter = metadata->autogen_filter; 1200 cmd4->body.size.width = metadata->base_size.width; 1201 cmd4->body.size.height = metadata->base_size.height; 1202 cmd4->body.size.depth = metadata->base_size.depth; 1203 cmd4->body.arraySize = metadata->array_size; 1204 cmd4->body.bufferByteStride = metadata->buffer_byte_stride; 1205 } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) { 1206 cmd3->header.id = cmd_id; 1207 cmd3->header.size = cmd_len; 1208 cmd3->body.sid = srf->res.id; 1209 cmd3->body.surfaceFlags = metadata->flags; 1210 cmd3->body.format = metadata->format; 1211 cmd3->body.numMipLevels = metadata->mip_levels[0]; 1212 cmd3->body.multisampleCount = metadata->multisample_count; 1213 cmd3->body.multisamplePattern = metadata->multisample_pattern; 1214 cmd3->body.qualityLevel = metadata->quality_level; 1215 cmd3->body.autogenFilter = metadata->autogen_filter; 1216 cmd3->body.size.width = metadata->base_size.width; 1217 cmd3->body.size.height = metadata->base_size.height; 1218 cmd3->body.size.depth = 
metadata->base_size.depth; 1219 cmd3->body.arraySize = metadata->array_size; 1220 } else if (metadata->array_size > 0) { 1221 cmd2->header.id = cmd_id; 1222 cmd2->header.size = cmd_len; 1223 cmd2->body.sid = srf->res.id; 1224 cmd2->body.surfaceFlags = metadata->flags; 1225 cmd2->body.format = metadata->format; 1226 cmd2->body.numMipLevels = metadata->mip_levels[0]; 1227 cmd2->body.multisampleCount = metadata->multisample_count; 1228 cmd2->body.autogenFilter = metadata->autogen_filter; 1229 cmd2->body.size.width = metadata->base_size.width; 1230 cmd2->body.size.height = metadata->base_size.height; 1231 cmd2->body.size.depth = metadata->base_size.depth; 1232 cmd2->body.arraySize = metadata->array_size; 1233 } else { 1234 cmd->header.id = cmd_id; 1235 cmd->header.size = cmd_len; 1236 cmd->body.sid = srf->res.id; 1237 cmd->body.surfaceFlags = metadata->flags; 1238 cmd->body.format = metadata->format; 1239 cmd->body.numMipLevels = metadata->mip_levels[0]; 1240 cmd->body.multisampleCount = metadata->multisample_count; 1241 cmd->body.autogenFilter = metadata->autogen_filter; 1242 cmd->body.size.width = metadata->base_size.width; 1243 cmd->body.size.height = metadata->base_size.height; 1244 cmd->body.size.depth = metadata->base_size.depth; 1245 } 1246 1247 vmw_cmd_commit(dev_priv, submit_len); 1248 1249 return 0; 1250 1251 out_no_fifo: 1252 vmw_resource_release_id(res); 1253 out_no_id: 1254 vmw_fifo_resource_dec(dev_priv); 1255 return ret; 1256 } 1257 1258 1259 static int vmw_gb_surface_bind(struct vmw_resource *res, 1260 struct ttm_validate_buffer *val_buf) 1261 { 1262 struct vmw_private *dev_priv = res->dev_priv; 1263 struct { 1264 SVGA3dCmdHeader header; 1265 SVGA3dCmdBindGBSurface body; 1266 } *cmd1; 1267 struct { 1268 SVGA3dCmdHeader header; 1269 SVGA3dCmdUpdateGBSurface body; 1270 } *cmd2; 1271 uint32_t submit_size; 1272 struct ttm_buffer_object *bo = val_buf->bo; 1273 1274 BUG_ON(bo->resource->mem_type != VMW_PL_MOB); 1275 1276 submit_size = sizeof(*cmd1) + (res->guest_memory_dirty ? sizeof(*cmd2) : 0); 1277 1278 cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size); 1279 if (unlikely(!cmd1)) 1280 return -ENOMEM; 1281 1282 cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; 1283 cmd1->header.size = sizeof(cmd1->body); 1284 cmd1->body.sid = res->id; 1285 cmd1->body.mobid = bo->resource->start; 1286 if (res->guest_memory_dirty) { 1287 cmd2 = (void *) &cmd1[1]; 1288 cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; 1289 cmd2->header.size = sizeof(cmd2->body); 1290 cmd2->body.sid = res->id; 1291 } 1292 vmw_cmd_commit(dev_priv, submit_size); 1293 1294 if (res->guest_memory_bo->dirty && res->guest_memory_dirty) { 1295 /* We've just made a full upload. Cear dirty regions. */ 1296 vmw_bo_dirty_clear_res(res); 1297 } 1298 1299 res->guest_memory_dirty = false; 1300 1301 return 0; 1302 } 1303 1304 static int vmw_gb_surface_unbind(struct vmw_resource *res, 1305 bool readback, 1306 struct ttm_validate_buffer *val_buf) 1307 { 1308 struct vmw_private *dev_priv = res->dev_priv; 1309 struct ttm_buffer_object *bo = val_buf->bo; 1310 struct vmw_fence_obj *fence; 1311 1312 struct { 1313 SVGA3dCmdHeader header; 1314 SVGA3dCmdReadbackGBSurface body; 1315 } *cmd1; 1316 struct { 1317 SVGA3dCmdHeader header; 1318 SVGA3dCmdInvalidateGBSurface body; 1319 } *cmd2; 1320 struct { 1321 SVGA3dCmdHeader header; 1322 SVGA3dCmdBindGBSurface body; 1323 } *cmd3; 1324 uint32_t submit_size; 1325 uint8_t *cmd; 1326 1327 1328 BUG_ON(bo->resource->mem_type != VMW_PL_MOB); 1329 1330 submit_size = sizeof(*cmd3) + (readback ? 
sizeof(*cmd1) : sizeof(*cmd2)); 1331 cmd = VMW_CMD_RESERVE(dev_priv, submit_size); 1332 if (unlikely(!cmd)) 1333 return -ENOMEM; 1334 1335 if (readback) { 1336 cmd1 = (void *) cmd; 1337 cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; 1338 cmd1->header.size = sizeof(cmd1->body); 1339 cmd1->body.sid = res->id; 1340 cmd3 = (void *) &cmd1[1]; 1341 } else { 1342 cmd2 = (void *) cmd; 1343 cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE; 1344 cmd2->header.size = sizeof(cmd2->body); 1345 cmd2->body.sid = res->id; 1346 cmd3 = (void *) &cmd2[1]; 1347 } 1348 1349 cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; 1350 cmd3->header.size = sizeof(cmd3->body); 1351 cmd3->body.sid = res->id; 1352 cmd3->body.mobid = SVGA3D_INVALID_ID; 1353 1354 vmw_cmd_commit(dev_priv, submit_size); 1355 1356 /* 1357 * Create a fence object and fence the backup buffer. 1358 */ 1359 1360 (void) vmw_execbuf_fence_commands(NULL, dev_priv, 1361 &fence, NULL); 1362 1363 vmw_bo_fence_single(val_buf->bo, fence); 1364 1365 if (likely(fence != NULL)) 1366 vmw_fence_obj_unreference(&fence); 1367 1368 return 0; 1369 } 1370 1371 static int vmw_gb_surface_destroy(struct vmw_resource *res) 1372 { 1373 struct vmw_private *dev_priv = res->dev_priv; 1374 struct vmw_surface *srf = vmw_res_to_srf(res); 1375 struct { 1376 SVGA3dCmdHeader header; 1377 SVGA3dCmdDestroyGBSurface body; 1378 } *cmd; 1379 1380 if (likely(res->id == -1)) 1381 return 0; 1382 1383 mutex_lock(&dev_priv->binding_mutex); 1384 vmw_view_surface_list_destroy(dev_priv, &srf->view_list); 1385 vmw_binding_res_list_scrub(&res->binding_head); 1386 1387 cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); 1388 if (unlikely(!cmd)) { 1389 mutex_unlock(&dev_priv->binding_mutex); 1390 return -ENOMEM; 1391 } 1392 1393 cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE; 1394 cmd->header.size = sizeof(cmd->body); 1395 cmd->body.sid = res->id; 1396 vmw_cmd_commit(dev_priv, sizeof(*cmd)); 1397 mutex_unlock(&dev_priv->binding_mutex); 1398 vmw_resource_release_id(res); 1399 vmw_fifo_resource_dec(dev_priv); 1400 1401 return 0; 1402 } 1403 1404 /** 1405 * vmw_gb_surface_define_ioctl - Ioctl function implementing 1406 * the user surface define functionality. 1407 * 1408 * @dev: Pointer to a struct drm_device. 1409 * @data: Pointer to data copied from / to user-space. 1410 * @file_priv: Pointer to a drm file private structure. 1411 */ 1412 int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, 1413 struct drm_file *file_priv) 1414 { 1415 union drm_vmw_gb_surface_create_arg *arg = 1416 (union drm_vmw_gb_surface_create_arg *)data; 1417 struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; 1418 struct drm_vmw_gb_surface_create_ext_req req_ext; 1419 1420 req_ext.base = arg->req; 1421 req_ext.version = drm_vmw_gb_surface_v1; 1422 req_ext.svga3d_flags_upper_32_bits = 0; 1423 req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE; 1424 req_ext.quality_level = SVGA3D_MS_QUALITY_NONE; 1425 req_ext.buffer_byte_stride = 0; 1426 req_ext.must_be_zero = 0; 1427 1428 return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv); 1429 } 1430 1431 /** 1432 * vmw_gb_surface_reference_ioctl - Ioctl function implementing 1433 * the user surface reference functionality. 1434 * 1435 * @dev: Pointer to a struct drm_device. 1436 * @data: Pointer to data copied from / to user-space. 1437 * @file_priv: Pointer to a drm file private structure. 
1438 */ 1439 int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, 1440 struct drm_file *file_priv) 1441 { 1442 union drm_vmw_gb_surface_reference_arg *arg = 1443 (union drm_vmw_gb_surface_reference_arg *)data; 1444 struct drm_vmw_surface_arg *req = &arg->req; 1445 struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep; 1446 struct drm_vmw_gb_surface_ref_ext_rep rep_ext; 1447 int ret; 1448 1449 ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv); 1450 1451 if (unlikely(ret != 0)) 1452 return ret; 1453 1454 rep->creq = rep_ext.creq.base; 1455 rep->crep = rep_ext.crep; 1456 1457 return ret; 1458 } 1459 1460 /** 1461 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing 1462 * the user surface define functionality. 1463 * 1464 * @dev: Pointer to a struct drm_device. 1465 * @data: Pointer to data copied from / to user-space. 1466 * @file_priv: Pointer to a drm file private structure. 1467 */ 1468 int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data, 1469 struct drm_file *file_priv) 1470 { 1471 union drm_vmw_gb_surface_create_ext_arg *arg = 1472 (union drm_vmw_gb_surface_create_ext_arg *)data; 1473 struct drm_vmw_gb_surface_create_ext_req *req = &arg->req; 1474 struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; 1475 1476 return vmw_gb_surface_define_internal(dev, req, rep, file_priv); 1477 } 1478 1479 /** 1480 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing 1481 * the user surface reference functionality. 1482 * 1483 * @dev: Pointer to a struct drm_device. 1484 * @data: Pointer to data copied from / to user-space. 1485 * @file_priv: Pointer to a drm file private structure. 1486 */ 1487 int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data, 1488 struct drm_file *file_priv) 1489 { 1490 union drm_vmw_gb_surface_reference_ext_arg *arg = 1491 (union drm_vmw_gb_surface_reference_ext_arg *)data; 1492 struct drm_vmw_surface_arg *req = &arg->req; 1493 struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep; 1494 1495 return vmw_gb_surface_reference_internal(dev, req, rep, file_priv); 1496 } 1497 1498 /** 1499 * vmw_gb_surface_define_internal - Ioctl function implementing 1500 * the user surface define functionality. 1501 * 1502 * @dev: Pointer to a struct drm_device. 1503 * @req: Request argument from user-space. 1504 * @rep: Response argument to user-space. 1505 * @file_priv: Pointer to a drm file private structure. 1506 */ 1507 static int 1508 vmw_gb_surface_define_internal(struct drm_device *dev, 1509 struct drm_vmw_gb_surface_create_ext_req *req, 1510 struct drm_vmw_gb_surface_create_rep *rep, 1511 struct drm_file *file_priv) 1512 { 1513 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1514 struct vmw_private *dev_priv = vmw_priv(dev); 1515 struct vmw_user_surface *user_srf; 1516 struct vmw_surface_metadata metadata = {0}; 1517 struct vmw_surface *srf; 1518 struct vmw_resource *res; 1519 struct vmw_resource *tmp; 1520 int ret = 0; 1521 uint32_t backup_handle = 0; 1522 SVGA3dSurfaceAllFlags svga3d_flags_64 = 1523 SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits, 1524 req->base.svga3d_flags); 1525 1526 /* array_size must be null for non-GL3 host. 
*/ 1527 if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) { 1528 VMW_DEBUG_USER("SM4 surface not supported.\n"); 1529 return -EINVAL; 1530 } 1531 1532 if (!has_sm4_1_context(dev_priv)) { 1533 if (req->svga3d_flags_upper_32_bits != 0) 1534 ret = -EINVAL; 1535 1536 if (req->base.multisample_count != 0) 1537 ret = -EINVAL; 1538 1539 if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE) 1540 ret = -EINVAL; 1541 1542 if (req->quality_level != SVGA3D_MS_QUALITY_NONE) 1543 ret = -EINVAL; 1544 1545 if (ret) { 1546 VMW_DEBUG_USER("SM4.1 surface not supported.\n"); 1547 return ret; 1548 } 1549 } 1550 1551 if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) { 1552 VMW_DEBUG_USER("SM5 surface not supported.\n"); 1553 return -EINVAL; 1554 } 1555 1556 if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) && 1557 req->base.multisample_count == 0) { 1558 VMW_DEBUG_USER("Invalid sample count.\n"); 1559 return -EINVAL; 1560 } 1561 1562 if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) { 1563 VMW_DEBUG_USER("Invalid mip level.\n"); 1564 return -EINVAL; 1565 } 1566 1567 metadata.flags = svga3d_flags_64; 1568 metadata.format = req->base.format; 1569 metadata.mip_levels[0] = req->base.mip_levels; 1570 metadata.multisample_count = req->base.multisample_count; 1571 metadata.multisample_pattern = req->multisample_pattern; 1572 metadata.quality_level = req->quality_level; 1573 metadata.array_size = req->base.array_size; 1574 metadata.buffer_byte_stride = req->buffer_byte_stride; 1575 metadata.num_sizes = 1; 1576 metadata.base_size = req->base.base_size; 1577 metadata.scanout = req->base.drm_surface_flags & 1578 drm_vmw_surface_flag_scanout; 1579 1580 /* Define a surface based on the parameters. */ 1581 ret = vmw_gb_surface_define(dev_priv, &metadata, &srf); 1582 if (ret != 0) { 1583 VMW_DEBUG_USER("Failed to define surface.\n"); 1584 return ret; 1585 } 1586 1587 user_srf = container_of(srf, struct vmw_user_surface, srf); 1588 if (drm_is_primary_client(file_priv)) 1589 user_srf->master = drm_file_get_master(file_priv); 1590 1591 res = &user_srf->srf.res; 1592 1593 if (req->base.buffer_handle != SVGA3D_INVALID_ID) { 1594 ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle, 1595 &res->guest_memory_bo); 1596 if (ret == 0) { 1597 if (res->guest_memory_bo->is_dumb) { 1598 VMW_DEBUG_USER("Can't backup surface with a dumb buffer.\n"); 1599 vmw_user_bo_unref(&res->guest_memory_bo); 1600 ret = -EINVAL; 1601 goto out_unlock; 1602 } else if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) { 1603 VMW_DEBUG_USER("Surface backup buffer too small.\n"); 1604 vmw_user_bo_unref(&res->guest_memory_bo); 1605 ret = -EINVAL; 1606 goto out_unlock; 1607 } else { 1608 backup_handle = req->base.buffer_handle; 1609 } 1610 } 1611 } else if (req->base.drm_surface_flags & 1612 (drm_vmw_surface_flag_create_buffer | 1613 drm_vmw_surface_flag_coherent)) { 1614 ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, 1615 res->guest_memory_size, 1616 &backup_handle, 1617 &res->guest_memory_bo); 1618 } 1619 1620 if (unlikely(ret != 0)) { 1621 vmw_resource_unreference(&res); 1622 goto out_unlock; 1623 } 1624 1625 if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) { 1626 struct vmw_bo *backup = res->guest_memory_bo; 1627 1628 ttm_bo_reserve(&backup->tbo, false, false, NULL); 1629 if (!res->func->dirty_alloc) 1630 ret = -EINVAL; 1631 if (!ret) 1632 ret = vmw_bo_dirty_add(backup); 1633 if (!ret) { 1634 res->coherent = true; 1635 ret = res->func->dirty_alloc(res); 1636 } 1637 
ttm_bo_unreserve(&backup->tbo); 1638 if (ret) { 1639 vmw_resource_unreference(&res); 1640 goto out_unlock; 1641 } 1642 1643 } 1644 1645 if (res->guest_memory_bo) { 1646 ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res); 1647 if (unlikely(ret != 0)) { 1648 vmw_resource_unreference(&res); 1649 goto out_unlock; 1650 } 1651 } 1652 1653 tmp = vmw_resource_reference(res); 1654 ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime, 1655 VMW_RES_SURFACE, 1656 &vmw_user_surface_base_release); 1657 1658 if (unlikely(ret != 0)) { 1659 vmw_resource_unreference(&tmp); 1660 vmw_resource_unreference(&res); 1661 goto out_unlock; 1662 } 1663 1664 rep->handle = user_srf->prime.base.handle; 1665 rep->backup_size = res->guest_memory_size; 1666 if (res->guest_memory_bo) { 1667 rep->buffer_map_handle = 1668 drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node); 1669 rep->buffer_size = res->guest_memory_bo->tbo.base.size; 1670 rep->buffer_handle = backup_handle; 1671 } else { 1672 rep->buffer_map_handle = 0; 1673 rep->buffer_size = 0; 1674 rep->buffer_handle = SVGA3D_INVALID_ID; 1675 } 1676 vmw_resource_unreference(&res); 1677 1678 out_unlock: 1679 return ret; 1680 } 1681 1682 /** 1683 * vmw_gb_surface_reference_internal - Ioctl function implementing 1684 * the user surface reference functionality. 1685 * 1686 * @dev: Pointer to a struct drm_device. 1687 * @req: Pointer to user-space request surface arg. 1688 * @rep: Pointer to response to user-space. 1689 * @file_priv: Pointer to a drm file private structure. 1690 */ 1691 static int 1692 vmw_gb_surface_reference_internal(struct drm_device *dev, 1693 struct drm_vmw_surface_arg *req, 1694 struct drm_vmw_gb_surface_ref_ext_rep *rep, 1695 struct drm_file *file_priv) 1696 { 1697 struct vmw_private *dev_priv = vmw_priv(dev); 1698 struct vmw_surface *srf; 1699 struct vmw_user_surface *user_srf; 1700 struct vmw_surface_metadata *metadata; 1701 struct ttm_base_object *base; 1702 u32 backup_handle; 1703 int ret; 1704 1705 ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, 1706 req->handle_type, &base); 1707 if (unlikely(ret != 0)) 1708 return ret; 1709 1710 user_srf = container_of(base, struct vmw_user_surface, prime.base); 1711 srf = &user_srf->srf; 1712 if (!srf->res.guest_memory_bo) { 1713 DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); 1714 goto out_bad_resource; 1715 } 1716 metadata = &srf->metadata; 1717 1718 mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ 1719 ret = drm_gem_handle_create(file_priv, &srf->res.guest_memory_bo->tbo.base, 1720 &backup_handle); 1721 mutex_unlock(&dev_priv->cmdbuf_mutex); 1722 if (ret != 0) { 1723 drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n", 1724 req->sid); 1725 goto out_bad_resource; 1726 } 1727 1728 rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags); 1729 rep->creq.base.format = metadata->format; 1730 rep->creq.base.mip_levels = metadata->mip_levels[0]; 1731 rep->creq.base.drm_surface_flags = 0; 1732 rep->creq.base.multisample_count = metadata->multisample_count; 1733 rep->creq.base.autogen_filter = metadata->autogen_filter; 1734 rep->creq.base.array_size = metadata->array_size; 1735 rep->creq.base.buffer_handle = backup_handle; 1736 rep->creq.base.base_size = metadata->base_size; 1737 rep->crep.handle = user_srf->prime.base.handle; 1738 rep->crep.backup_size = srf->res.guest_memory_size; 1739 rep->crep.buffer_handle = backup_handle; 1740 rep->crep.buffer_map_handle = 1741 
drm_vma_node_offset_addr(&srf->res.guest_memory_bo->tbo.base.vma_node); 1742 rep->crep.buffer_size = srf->res.guest_memory_bo->tbo.base.size; 1743 1744 rep->creq.version = drm_vmw_gb_surface_v1; 1745 rep->creq.svga3d_flags_upper_32_bits = 1746 SVGA3D_FLAGS_UPPER_32(metadata->flags); 1747 rep->creq.multisample_pattern = metadata->multisample_pattern; 1748 rep->creq.quality_level = metadata->quality_level; 1749 rep->creq.must_be_zero = 0; 1750 1751 out_bad_resource: 1752 ttm_base_object_unref(&base); 1753 1754 return ret; 1755 } 1756 1757 /** 1758 * vmw_subres_dirty_add - Add a dirty region to a subresource 1759 * @dirty: The surfaces's dirty tracker. 1760 * @loc_start: The location corresponding to the start of the region. 1761 * @loc_end: The location corresponding to the end of the region. 1762 * 1763 * As we are assuming that @loc_start and @loc_end represent a sequential 1764 * range of backing store memory, if the region spans multiple lines then 1765 * regardless of the x coordinate, the full lines are dirtied. 1766 * Correspondingly if the region spans multiple z slices, then full rather 1767 * than partial z slices are dirtied. 1768 */ 1769 static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty, 1770 const struct vmw_surface_loc *loc_start, 1771 const struct vmw_surface_loc *loc_end) 1772 { 1773 const struct vmw_surface_cache *cache = &dirty->cache; 1774 SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource]; 1775 u32 mip = loc_start->sub_resource % cache->num_mip_levels; 1776 const struct drm_vmw_size *size = &cache->mip[mip].size; 1777 u32 box_c2 = box->z + box->d; 1778 1779 if (WARN_ON(loc_start->sub_resource >= dirty->num_subres)) 1780 return; 1781 1782 if (box->d == 0 || box->z > loc_start->z) 1783 box->z = loc_start->z; 1784 if (box_c2 < loc_end->z) 1785 box->d = loc_end->z - box->z; 1786 1787 if (loc_start->z + 1 == loc_end->z) { 1788 box_c2 = box->y + box->h; 1789 if (box->h == 0 || box->y > loc_start->y) 1790 box->y = loc_start->y; 1791 if (box_c2 < loc_end->y) 1792 box->h = loc_end->y - box->y; 1793 1794 if (loc_start->y + 1 == loc_end->y) { 1795 box_c2 = box->x + box->w; 1796 if (box->w == 0 || box->x > loc_start->x) 1797 box->x = loc_start->x; 1798 if (box_c2 < loc_end->x) 1799 box->w = loc_end->x - box->x; 1800 } else { 1801 box->x = 0; 1802 box->w = size->width; 1803 } 1804 } else { 1805 box->y = 0; 1806 box->h = size->height; 1807 box->x = 0; 1808 box->w = size->width; 1809 } 1810 } 1811 1812 /** 1813 * vmw_subres_dirty_full - Mark a full subresource as dirty 1814 * @dirty: The surface's dirty tracker. 1815 * @subres: The subresource 1816 */ 1817 static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres) 1818 { 1819 const struct vmw_surface_cache *cache = &dirty->cache; 1820 u32 mip = subres % cache->num_mip_levels; 1821 const struct drm_vmw_size *size = &cache->mip[mip].size; 1822 SVGA3dBox *box = &dirty->boxes[subres]; 1823 1824 box->x = 0; 1825 box->y = 0; 1826 box->z = 0; 1827 box->w = size->width; 1828 box->h = size->height; 1829 box->d = size->depth; 1830 } 1831 1832 /* 1833 * vmw_surface_tex_dirty_add_range - The dirty_add_range callback for texture 1834 * surfaces. 
 */
static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
					    size_t start, size_t end)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t backup_end = res->guest_memory_offset + res->guest_memory_size;
	struct vmw_surface_loc loc1, loc2;
	const struct vmw_surface_cache *cache;

	start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
	end = min(end, backup_end) - res->guest_memory_offset;
	cache = &dirty->cache;
	vmw_surface_get_loc(cache, &loc1, start);
	vmw_surface_get_loc(cache, &loc2, end - 1);
	vmw_surface_inc_loc(cache, &loc2);

	if (loc1.sheet != loc2.sheet) {
		u32 sub_res;

		/*
		 * Multiple multisample sheets. Ideally we would compute the
		 * dirty region for each sheet and take the resulting union,
		 * but since this is not a common case, just dirty the whole
		 * surface.
		 */
		for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
			vmw_subres_dirty_full(dirty, sub_res);
		return;
	}
	if (loc1.sub_resource + 1 == loc2.sub_resource) {
		/* Dirty range covers a single sub-resource */
		vmw_subres_dirty_add(dirty, &loc1, &loc2);
	} else {
		/* Dirty range covers multiple sub-resources */
		struct vmw_surface_loc loc_min, loc_max;
		u32 sub_res;

		vmw_surface_max_loc(cache, loc1.sub_resource, &loc_max);
		vmw_subres_dirty_add(dirty, &loc1, &loc_max);
		vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
		vmw_subres_dirty_add(dirty, &loc_min, &loc2);
		for (sub_res = loc1.sub_resource + 1;
		     sub_res < loc2.sub_resource - 1; ++sub_res)
			vmw_subres_dirty_full(dirty, sub_res);
	}
}

/*
 * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
 * surfaces.
 */
static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
					    size_t start, size_t end)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	const struct vmw_surface_cache *cache = &dirty->cache;
	size_t backup_end = res->guest_memory_offset + cache->mip_chain_bytes;
	SVGA3dBox *box = &dirty->boxes[0];
	u32 box_c2;

	box->h = box->d = 1;
	start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
	end = min(end, backup_end) - res->guest_memory_offset;
	box_c2 = box->x + box->w;
	if (box->w == 0 || box->x > start)
		box->x = start;
	if (box_c2 < end)
		box->w = end - box->x;
}

/*
 * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
 */
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);

	if (WARN_ON(end <= res->guest_memory_offset ||
		    start >= res->guest_memory_offset + res->guest_memory_size))
		return;

	if (srf->metadata.format == SVGA3D_BUFFER)
		vmw_surface_buf_dirty_range_add(res, start, end);
	else
		vmw_surface_tex_dirty_range_add(res, start, end);
}

/*
 * vmw_surface_dirty_sync - The surface's dirty_sync callback.
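 *
 * Issues one DX_UPDATE_SUBRESOURCE (SM4+ devices) or UPDATE_GB_IMAGE command
 * per dirty subresource box and then clears the dirty tracker.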
 */
static int vmw_surface_dirty_sync(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	u32 i, num_dirty;
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t alloc_size;
	const struct vmw_surface_cache *cache = &dirty->cache;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXUpdateSubResource body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd2;
	void *cmd;

	num_dirty = 0;
	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (box->d)
			num_dirty++;
	}

	if (!num_dirty)
		goto out;

	alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd1 = cmd;
	cmd2 = cmd;

	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (!box->d)
			continue;

		/*
		 * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
		 * UPDATE_GB_IMAGE is not.
		 */
		if (has_sm4_context(dev_priv)) {
			cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
			cmd1->header.size = sizeof(cmd1->body);
			cmd1->body.sid = res->id;
			cmd1->body.subResource = i;
			cmd1->body.box = *box;
			cmd1++;
		} else {
			cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
			cmd2->header.size = sizeof(cmd2->body);
			cmd2->body.image.sid = res->id;
			cmd2->body.image.face = i / cache->num_mip_levels;
			cmd2->body.image.mipmap = i -
				(cache->num_mip_levels * cmd2->body.image.face);
			cmd2->body.box = *box;
			cmd2++;
		}
	}
	vmw_cmd_commit(dev_priv, alloc_size);
out:
	memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
	       dirty->num_subres);

	return 0;
}

/*
 * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
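 *
 * Allocates one SVGA3dBox per subresource (num_layers * num_mip_levels) and
 * sets up the surface layout cache used by the dirty-tracking callbacks.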
 */
static int vmw_surface_dirty_alloc(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	const struct vmw_surface_metadata *metadata = &srf->metadata;
	struct vmw_surface_dirty *dirty;
	u32 num_layers = 1;
	u32 num_mip;
	u32 num_subres;
	u32 num_samples;
	size_t dirty_size;
	int ret;

	if (metadata->array_size)
		num_layers = metadata->array_size;
	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers *= SVGA3D_MAX_SURFACE_FACES;

	num_mip = metadata->mip_levels[0];
	if (!num_mip)
		num_mip = 1;

	num_subres = num_layers * num_mip;
	dirty_size = struct_size(dirty, boxes, num_subres);

	dirty = kvzalloc(dirty_size, GFP_KERNEL);
	if (!dirty) {
		ret = -ENOMEM;
		goto out_no_dirty;
	}

	num_samples = max_t(u32, 1, metadata->multisample_count);
	ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
				      num_mip, num_layers, num_samples,
				      &dirty->cache);
	if (ret)
		goto out_no_cache;

	dirty->num_subres = num_subres;
	res->dirty = (struct vmw_resource_dirty *) dirty;

	return 0;

out_no_cache:
	kvfree(dirty);
out_no_dirty:
	return ret;
}

/*
 * vmw_surface_dirty_free - The surface's dirty_free callback
 */
static void vmw_surface_dirty_free(struct vmw_resource *res)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;

	kvfree(dirty);
	res->dirty = NULL;
}

/*
 * vmw_surface_clean - The surface's clean callback
 */
static int vmw_surface_clean(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	size_t alloc_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	alloc_size = sizeof(*cmd);
	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_cmd_commit(dev_priv, alloc_size);

	return 0;
}

/*
 * vmw_gb_surface_define - Define a private GB surface
 *
 * @dev_priv: Pointer to a device private.
 * @req: Metadata describing the surface to create.
 * @srf_out: Pointer to the allocated surface. Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle, and
 * thus will only be visible to vmwgfx. For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
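 *
 * Return: Zero on success, negative error code on failure.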
 */
int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata *metadata;
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	u32 sample_count = 1;
	u32 num_layers = 1;
	int ret;

	*srf_out = NULL;

	if (req->scanout) {
		if (!vmw_surface_is_screen_target_format(req->format)) {
			VMW_DEBUG_USER("Invalid Screen Target surface format.");
			return -EINVAL;
		}

		if (req->base_size.width > dev_priv->texture_max_width ||
		    req->base_size.height > dev_priv->texture_max_height) {
			VMW_DEBUG_USER("%ux%u exceeds max surface size %ux%u\n",
				       req->base_size.width,
				       req->base_size.height,
				       dev_priv->texture_max_width,
				       dev_priv->texture_max_height);
			return -EINVAL;
		}
	} else {
		const SVGA3dSurfaceDesc *desc =
			vmw_surface_get_desc(req->format);

		if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) {
			VMW_DEBUG_USER("Invalid surface format.\n");
			return -EINVAL;
		}
	}

	if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
		return -EINVAL;

	if (req->num_sizes != 1)
		return -EINVAL;

	if (req->sizes != NULL)
		return -EINVAL;

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	*srf_out = &user_srf->srf;

	srf = &user_srf->srf;
	srf->metadata = *req;
	srf->offsets = NULL;

	metadata = &srf->metadata;

	if (metadata->array_size)
		num_layers = req->array_size;
	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers = SVGA3D_MAX_SURFACE_FACES;

	if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
		sample_count = metadata->multisample_count;

	srf->res.guest_memory_size =
		vmw_surface_get_serialized_size_extended(
				metadata->format,
				metadata->base_size,
				metadata->mip_levels[0],
				num_layers,
				sample_count);

	if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
		srf->res.guest_memory_size += sizeof(SVGA3dDXSOState);

	/*
	 * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface
	 * with a size greater than the STDU max width/height. This is really a
	 * workaround to support creation of a big framebuffer requested by
	 * some user-space applications for the whole topology. That big
	 * framebuffer won't really be used for binding with a screen target,
	 * as a separate surface is created during prepare_fb, so it's safe to
	 * ignore the SVGA3D_SURFACE_SCREENTARGET flag.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target &&
	    metadata->scanout &&
	    metadata->base_size.width <= dev_priv->stdu_max_width &&
	    metadata->base_size.height <= dev_priv->stdu_max_height)
		metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
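	 * (vmw_surface_init() below registers vmw_user_surface_free() as the
	 * resource destructor.)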
	 */
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);

	return ret;

out_unlock:
	return ret;
}

static SVGA3dSurfaceFormat vmw_format_bpp_to_svga(struct vmw_private *vmw,
						  int bpp)
{
	switch (bpp) {
	case 8: /* DRM_FORMAT_C8 */
		return SVGA3D_P8;
	case 16: /* DRM_FORMAT_RGB565 */
		return SVGA3D_R5G6B5;
	case 32: /* DRM_FORMAT_XRGB8888 */
		if (has_sm4_context(vmw))
			return SVGA3D_B8G8R8X8_UNORM;
		return SVGA3D_X8R8G8B8;
	default:
		drm_warn(&vmw->drm, "Unsupported format bpp: %d\n", bpp);
		return SVGA3D_X8R8G8B8;
	}
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_bo *vbo = NULL;
	struct vmw_resource *res = NULL;
	union drm_vmw_gb_surface_create_ext_arg arg = { 0 };
	struct drm_vmw_gb_surface_create_ext_req *req = &arg.req;
	int ret;
	struct drm_vmw_size drm_size = {
		.width = args->width,
		.height = args->height,
		.depth = 1,
	};
	SVGA3dSurfaceFormat format = vmw_format_bpp_to_svga(dev_priv, args->bpp);
	const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
	SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
		SVGA3D_SURFACE_HINT_RENDERTARGET |
		SVGA3D_SURFACE_SCREENTARGET;

	if (vmw_surface_is_dx_screen_target_format(format)) {
		flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
			 SVGA3D_SURFACE_BIND_RENDER_TARGET;
	}

	/*
	 * Without mob support we're just going to use a raw memory buffer,
	 * because we wouldn't be able to support full surface coherency
	 * without mobs. There is also no reason to support surface coherency
	 * without 3d (i.e. gpu usage on the host) because then all the
	 * contents are going to be rendered guest side.
	 */
	if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) {
		int cpp = DIV_ROUND_UP(args->bpp, 8);

		switch (cpp) {
		case 1: /* DRM_FORMAT_C8 */
		case 2: /* DRM_FORMAT_RGB565 */
		case 4: /* DRM_FORMAT_XRGB8888 */
			break;
		default:
			/*
			 * Dumb buffers don't allow anything else.
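			 * (Only 1, 2 or 4 bytes per pixel are accepted here.)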
			 * This is tested via IGT's dumb_buffers
			 */
			return -EINVAL;
		}

		args->pitch = args->width * cpp;
		args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);

		ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
							args->size, &args->handle,
							&vbo);
		/* drop reference from allocate - handle holds it now */
		drm_gem_object_put(&vbo->tbo.base);
		return ret;
	}

	req->version = drm_vmw_gb_surface_v1;
	req->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	req->quality_level = SVGA3D_MS_QUALITY_NONE;
	req->buffer_byte_stride = 0;
	req->must_be_zero = 0;
	req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
	req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
	req->base.format = (uint32_t)format;
	req->base.drm_surface_flags = drm_vmw_surface_flag_scanout;
	req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
	req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
	req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;
	req->base.base_size.width = args->width;
	req->base.base_size.height = args->height;
	req->base.base_size.depth = 1;
	req->base.array_size = 0;
	req->base.mip_levels = 1;
	req->base.multisample_count = 0;
	req->base.buffer_handle = SVGA3D_INVALID_ID;
	req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;
	ret = vmw_gb_surface_define_ext_ioctl(dev, &arg, file_priv);
	if (ret) {
		drm_warn(dev, "Unable to create a dumb buffer\n");
		return ret;
	}

	args->handle = arg.rep.buffer_handle;
	args->size = arg.rep.buffer_size;
	args->pitch = vmw_surface_calculate_pitch(desc, &drm_size);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg.rep.handle,
					      user_surface_converter,
					      &res);
	if (ret) {
		drm_err(dev, "Created resource handle doesn't exist!\n");
		goto err;
	}

	vbo = res->guest_memory_bo;
	vbo->is_dumb = true;
	vbo->dumb_surface = vmw_res_to_srf(res);

err:
	if (res)
		vmw_resource_unreference(&res);
	if (ret)
		ttm_ref_object_base_unref(tfile, arg.rep.handle);

	return ret;
}