// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_cursor_plane.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmw_surface_cache.h"
#include "device_include/svga3d_surfacedefs.h"

#include <drm/ttm/ttm_placement.h>

#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime: The TTM prime object.
 * @srf: The surface metadata.
 * @master: Master of the creating client. Used for security check.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	struct drm_master *master;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face: Surface face.
 * @mip: Mip level.
 * @bo_offset: Offset into backing store of this mip level.
 *
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

/**
 * struct vmw_surface_dirty - Surface dirty-tracker
 * @cache: Cached layout information of the surface.
 * @num_subres: Number of subresources.
 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
 */
struct vmw_surface_dirty {
	struct vmw_surface_cache cache;
	u32 num_subres;
	SVGA3dBox boxes[] __counted_by(num_subres);
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv);
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv);

static void vmw_surface_dirty_free(struct vmw_resource *res);
static int vmw_surface_dirty_alloc(struct vmw_resource *res);
static int vmw_surface_dirty_sync(struct vmw_resource *res);
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end);
static int vmw_surface_clean(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;
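/*
 * Two resource backends are defined below. Roughly: legacy surfaces keep
 * their contents in GMR/VRAM backing store, are created and destroyed with
 * explicit surface define/destroy commands, and use surface DMA to copy
 * data in on bind and out on unbind. Guest-backed (GB) surfaces live in a
 * MOB that is bound to the surface directly, and additionally provide the
 * dirty-tracking callbacks used for coherent resources.
 */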
static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_guest_memory = false,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 1,
	.type_name = "legacy surfaces",
	.domain = VMW_BO_DOMAIN_GMR,
	.busy_domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_guest_memory = true,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 2,
	.type_name = "guest backed surfaces",
	.domain = VMW_BO_DOMAIN_MOB,
	.busy_domain = VMW_BO_DOMAIN_MOB,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind,
	.dirty_alloc = vmw_surface_dirty_alloc,
	.dirty_free = vmw_surface_dirty_free,
	.dirty_sync = vmw_surface_dirty_sync,
	.dirty_range_add = vmw_surface_dirty_range_add,
	.clean = vmw_surface_clean,
};

/*
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/*
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/*
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}
/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
		sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	/*
	 * surfaceFlags is downcast here; it was upcast when received from
	 * user-space, since the driver internally stores it as 64 bit.
	 * The legacy surface define command only supports 32-bit flags.
	 */
	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
	cmd->body.format = srf->metadata.format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->metadata.sizes;

	for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(srf->metadata.format);

	for (i = 0; i < srf->metadata.num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			vmw_surface_get_image_buffer_size(desc, cur_size,
							  body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface if
 * any, and adjusts resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
		if (unlikely(!cmd))
			return;

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * Consider making used_memory_size atomic, or using a
		 * separate lock, to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		dev_priv->used_memory_size -= res->guest_memory_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocates one and submits a
 * surface define command.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->guest_memory_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_cmd_commit(dev_priv, submit_size);
	vmw_fifo_resource_inc(dev_priv);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->guest_memory_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(!val_buf->bo);
	submit_size = vmw_surface_dma_size(srf);
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_cmd_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 * surface validation process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->guest_memory_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @readback: Readback - only true if dirty
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 * resource eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_cmd_commit(dev_priv, submit_size);
	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->guest_memory_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 * the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(!res_free);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	res->hw_destroy = vmw_hw_surface_destroy;
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 * user visible surfaces
 *
 * @base: Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	WARN_ON(res->dirty);
	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->metadata.sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;

	/*
	 * Dumb buffers own the resource and they'll unref the
	 * resource themselves
	 */
	WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);

	vmw_resource_unreference(&res);
}
/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_surface_metadata *metadata;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	const SVGA3dSurfaceDesc *desc;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
			return -EINVAL;
		num_sizes += req->mip_levels[i];
	}

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
	    num_sizes == 0)
		return -EINVAL;

	desc = vmw_surface_get_desc(req->format);
	if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
		VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
			       req->format);
		return -EINVAL;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	srf = &user_srf->srf;
	metadata = &srf->metadata;
	res = &srf->res;

	/* Driver internally stores as 64-bit flags */
	metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
	metadata->format = req->format;
	metadata->scanout = req->scanout;

	memcpy(metadata->mip_levels, req->mip_levels,
	       sizeof(metadata->mip_levels));
	metadata->num_sizes = num_sizes;
	metadata->sizes =
		memdup_array_user((struct drm_vmw_size __user *)(unsigned long)
				  req->size_addr,
				  metadata->num_sizes, sizeof(*metadata->sizes));
	if (IS_ERR(metadata->sizes)) {
		ret = PTR_ERR(metadata->sizes);
		goto out_no_sizes;
	}
	srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
				     GFP_KERNEL);
	if (unlikely(!srf->offsets)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	metadata->base_size = *srf->metadata.sizes;
	metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	metadata->multisample_count = 0;
	metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	metadata->quality_level = SVGA3D_MS_QUALITY_NONE;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = metadata->sizes;
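	/*
	 * Lay out the backing store face-major: for each face, walk its mip
	 * chain and record the byte offset of every image. The running total
	 * of image sizes becomes the backing store size required below.
	 */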
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < metadata->mip_levels[i]; ++j) {
			uint32_t stride = vmw_surface_calculate_pitch(
						  desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += vmw_surface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->guest_memory_size = cur_bo_offset;

	srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata);
	if (IS_ERR(srf->snooper.image)) {
		ret = PTR_ERR(srf->snooper.image);
		goto out_no_copy;
	}

	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_file_get_master(file_priv);

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a surface will expect a backup
	 * buffer to be present.
	 */
	if (dev_priv->has_mob) {
		struct vmw_bo_params params = {
			.domain = VMW_BO_DOMAIN_SYS,
			.busy_domain = VMW_BO_DOMAIN_SYS,
			.bo_type = ttm_bo_type_device,
			.size = res->guest_memory_size,
			.pin = false
		};

		ret = vmw_bo_create(dev_priv, &params, &res->guest_memory_bo);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}

		ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->guest_memory_size,
				    &user_srf->prime,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.handle;
	vmw_resource_unreference(&res);

	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(metadata->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_unlock:
	return ret;
}

static struct vmw_user_surface *
vmw_lookup_user_surface_for_buffer(struct vmw_private *vmw, struct vmw_bo *bo,
				   u32 handle)
{
	struct vmw_user_surface *user_srf = NULL;
	struct vmw_surface *surf;
	struct ttm_base_object *base;

	surf = vmw_bo_surface(bo);
	if (surf) {
		rcu_read_lock();
		user_srf = container_of(surf, struct vmw_user_surface, srf);
		base = &user_srf->prime.base;
		if (base && !kref_get_unless_zero(&base->refcount)) {
			drm_dbg_driver(&vmw->drm,
				       "%s: referencing a stale surface handle %d\n",
				       __func__, handle);
			base = NULL;
			user_srf = NULL;
		}
		rcu_read_unlock();
	}

	return user_srf;
}

struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
						  struct vmw_bo *bo,
						  u32 handle)
{
	struct vmw_user_surface *user_srf =
		vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
	struct vmw_surface *surf = NULL;
	struct ttm_base_object *base;

	if (user_srf) {
		surf = vmw_surface_reference(&user_srf->srf);
		base = &user_srf->prime.base;
		ttm_base_object_unref(&base);
	}
	return surf;
}
u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
					 struct vmw_bo *bo,
					 u32 handle)
{
	struct vmw_user_surface *user_srf =
		vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
	int surf_handle = 0;
	struct ttm_base_object *base;

	if (user_srf) {
		base = &user_srf->prime.base;
		surf_handle = (u32)base->handle;
		ttm_base_object_unref(&base);
	}
	return surf_handle;
}

static int vmw_buffer_prime_to_surface_base(struct vmw_private *dev_priv,
					    struct drm_file *file_priv,
					    u32 fd, u32 *handle,
					    struct ttm_base_object **base_p)
{
	struct ttm_base_object *base;
	struct vmw_bo *bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	int ret;

	ret = drm_gem_prime_fd_to_handle(&dev_priv->drm, file_priv, fd, handle);
	if (ret) {
		drm_warn(&dev_priv->drm,
			 "Wasn't able to find user buffer for fd = %u.\n", fd);
		return ret;
	}

	ret = vmw_user_bo_lookup(file_priv, *handle, &bo);
	if (ret) {
		drm_warn(&dev_priv->drm,
			 "Wasn't able to lookup user buffer for handle = %u.\n", *handle);
		return ret;
	}

	user_srf = vmw_lookup_user_surface_for_buffer(dev_priv, bo, *handle);
	if (WARN_ON(!user_srf)) {
		drm_warn(&dev_priv->drm,
			 "User surface fd %d (handle %d) is null.\n", fd, *handle);
		ret = -EINVAL;
		goto out;
	}

	base = &user_srf->prime.base;
	ret = ttm_ref_object_add(tfile, base, NULL, false);
	if (ret) {
		drm_warn(&dev_priv->drm,
			 "Couldn't add an object ref for the buffer (%d).\n", *handle);
		goto out;
	}

	*base_p = base;
out:
	vmw_user_bo_unref(&bo);

	return ret;
}

static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf = NULL;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (ret)
			return vmw_buffer_prime_to_surface_base(dev_priv,
								file_priv,
								u_handle,
								&handle,
								base_p);
	} else {
		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(!base)) {
		VMW_DEBUG_USER("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		VMW_DEBUG_USER("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}
	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		bool require_exist = false;

		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/* Error out if we are unauthenticated primary */
		if (drm_is_primary_client(file_priv) &&
		    !file_priv->authenticated) {
			ret = -EACCES;
			goto out_bad_resource;
		}
		/*
		 * Make sure the surface creator has the same
		 * authenticating master, or is already registered with us.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master)
			require_exist = true;

		if (unlikely(drm_is_render_client(file_priv)))
			require_exist = true;

		ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	/* Downcast of flags when sending back to user space */
	rep->flags = (uint32_t)srf->metadata.flags;
	rep->format = srf->metadata.format;
	memcpy(rep->mip_levels, srf->metadata.mip_levels,
	       sizeof(srf->metadata.mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->metadata.base_size,
				   sizeof(srf->metadata.base_size));
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
			       srf->metadata.num_sizes);
		ttm_ref_object_base_unref(tfile, base->handle);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}
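/*
 * The GB surface define command comes in several revisions; which one is
 * emitted by vmw_gb_surface_create() below depends on device capabilities:
 * v4 (SM5) adds bufferByteStride, v3 (SM4.1) adds the multisample pattern
 * and quality level, v2 (SM4) adds arraySize, and the base command is used
 * for everything else.
 */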
/**
 * vmw_gb_surface_create - Encode a surface_define command.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_surface_metadata *metadata = &srf->metadata;
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v3 body;
	} *cmd3;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v4 body;
	} *cmd4;

	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
		cmd_len = sizeof(cmd4->body);
		submit_len = sizeof(*cmd4);
	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
		cmd_len = sizeof(cmd3->body);
		submit_len = sizeof(*cmd3);
	} else if (metadata->array_size > 0) {
		/* VMW_SM_4 support verified at creation time. */
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

	cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
	cmd2 = (typeof(cmd2))cmd;
	cmd3 = (typeof(cmd3))cmd;
	cmd4 = (typeof(cmd4))cmd;
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
		cmd4->header.id = cmd_id;
		cmd4->header.size = cmd_len;
		cmd4->body.sid = srf->res.id;
		cmd4->body.surfaceFlags = metadata->flags;
		cmd4->body.format = metadata->format;
		cmd4->body.numMipLevels = metadata->mip_levels[0];
		cmd4->body.multisampleCount = metadata->multisample_count;
		cmd4->body.multisamplePattern = metadata->multisample_pattern;
		cmd4->body.qualityLevel = metadata->quality_level;
		cmd4->body.autogenFilter = metadata->autogen_filter;
		cmd4->body.size.width = metadata->base_size.width;
		cmd4->body.size.height = metadata->base_size.height;
		cmd4->body.size.depth = metadata->base_size.depth;
		cmd4->body.arraySize = metadata->array_size;
		cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
		cmd3->header.id = cmd_id;
		cmd3->header.size = cmd_len;
		cmd3->body.sid = srf->res.id;
		cmd3->body.surfaceFlags = metadata->flags;
		cmd3->body.format = metadata->format;
		cmd3->body.numMipLevels = metadata->mip_levels[0];
		cmd3->body.multisampleCount = metadata->multisample_count;
		cmd3->body.multisamplePattern = metadata->multisample_pattern;
		cmd3->body.qualityLevel = metadata->quality_level;
		cmd3->body.autogenFilter = metadata->autogen_filter;
		cmd3->body.size.width = metadata->base_size.width;
		cmd3->body.size.height = metadata->base_size.height;
		cmd3->body.size.depth = metadata->base_size.depth;
		cmd3->body.arraySize = metadata->array_size;
	} else if (metadata->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = metadata->flags;
		cmd2->body.format = metadata->format;
		cmd2->body.numMipLevels = metadata->mip_levels[0];
		cmd2->body.multisampleCount = metadata->multisample_count;
		cmd2->body.autogenFilter = metadata->autogen_filter;
		cmd2->body.size.width = metadata->base_size.width;
		cmd2->body.size.height = metadata->base_size.height;
		cmd2->body.size.depth = metadata->base_size.depth;
		cmd2->body.arraySize = metadata->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = metadata->flags;
		cmd->body.format = metadata->format;
		cmd->body.numMipLevels = metadata->mip_levels[0];
		cmd->body.multisampleCount = metadata->multisample_count;
		cmd->body.autogenFilter = metadata->autogen_filter;
		cmd->body.size.width = metadata->base_size.width;
		cmd->body.size.height = metadata->base_size.height;
		cmd->body.size.depth = metadata->base_size.depth;
	}

	vmw_cmd_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}


static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->guest_memory_dirty ? sizeof(*cmd2) : 0);

	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd1))
		return -ENOMEM;

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->resource->start;
	if (res->guest_memory_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
	}
	vmw_cmd_commit(dev_priv, submit_size);

	if (res->guest_memory_bo->dirty && res->guest_memory_dirty) {
		/* We've just made a full upload. Clear dirty regions. */
		vmw_bo_dirty_clear_res(res);
	}

	res->guest_memory_dirty = false;

	return 0;
}

static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));

	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(!cmd)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_create_ext_req req_ext;

	req_ext.base = arg->req;
	req_ext.version = drm_vmw_gb_surface_v1;
	req_ext.svga3d_flags_upper_32_bits = 0;
	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
	req_ext.buffer_byte_stride = 0;
	req_ext.must_be_zero = 0;

	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}
/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
	int ret;

	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);

	if (unlikely(ret != 0))
		return ret;

	rep->creq = rep_ext.creq.base;
	rep->crep = rep_ext.crep;

	return ret;
}

/**
 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_ext_arg *arg =
	    (union drm_vmw_gb_surface_create_ext_arg *)data;
	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;

	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_ext_arg *arg =
	    (union drm_vmw_gb_surface_reference_ext_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;

	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
}
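/*
 * Before a guest-backed surface is defined, the request is validated
 * against the device's shader-model capabilities: upper 32-bit flags,
 * multisampling parameters and quality levels require SM4.1, a non-zero
 * buffer byte stride requires SM5, and array surfaces require at least SM4.
 */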
/**
 * vmw_gb_surface_define_internal - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @req: Request argument from user-space.
 * @rep: Response argument to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface_metadata metadata = {0};
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	int ret = 0;
	uint32_t backup_handle = 0;
	SVGA3dSurfaceAllFlags svga3d_flags_64 =
		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
				req->base.svga3d_flags);

	/* array_size must be zero for a non-GL3 host. */
	if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
		VMW_DEBUG_USER("SM4 surface not supported.\n");
		return -EINVAL;
	}

	if (!has_sm4_1_context(dev_priv)) {
		if (req->svga3d_flags_upper_32_bits != 0)
			ret = -EINVAL;

		if (req->base.multisample_count != 0)
			ret = -EINVAL;

		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
			ret = -EINVAL;

		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
			ret = -EINVAL;

		if (ret) {
			VMW_DEBUG_USER("SM4.1 surface not supported.\n");
			return ret;
		}
	}

	if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
		VMW_DEBUG_USER("SM5 surface not supported.\n");
		return -EINVAL;
	}

	if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
	    req->base.multisample_count == 0) {
		VMW_DEBUG_USER("Invalid sample count.\n");
		return -EINVAL;
	}

	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
		VMW_DEBUG_USER("Invalid mip level.\n");
		return -EINVAL;
	}

	metadata.flags = svga3d_flags_64;
	metadata.format = req->base.format;
	metadata.mip_levels[0] = req->base.mip_levels;
	metadata.multisample_count = req->base.multisample_count;
	metadata.multisample_pattern = req->multisample_pattern;
	metadata.quality_level = req->quality_level;
	metadata.array_size = req->base.array_size;
	metadata.buffer_byte_stride = req->buffer_byte_stride;
	metadata.num_sizes = 1;
	metadata.base_size = req->base.base_size;
	metadata.scanout = req->base.drm_surface_flags &
		drm_vmw_surface_flag_scanout;

	/* Define a surface based on the parameters. */
	ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
	if (ret != 0) {
		VMW_DEBUG_USER("Failed to define surface.\n");
		return ret;
	}

	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_file_get_master(file_priv);

	res = &user_srf->srf.res;

	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
					 &res->guest_memory_bo);
		if (ret == 0) {
			if (res->guest_memory_bo->is_dumb) {
				VMW_DEBUG_USER("Can't backup surface with a dumb buffer.\n");
				vmw_user_bo_unref(&res->guest_memory_bo);
				ret = -EINVAL;
				goto out_unlock;
			} else if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
				VMW_DEBUG_USER("Surface backup buffer too small.\n");
				vmw_user_bo_unref(&res->guest_memory_bo);
				ret = -EINVAL;
				goto out_unlock;
			} else {
				backup_handle = req->base.buffer_handle;
			}
		}
	} else if (req->base.drm_surface_flags &
		   (drm_vmw_surface_flag_create_buffer |
		    drm_vmw_surface_flag_coherent)) {
		ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
							res->guest_memory_size,
							&backup_handle,
							&res->guest_memory_bo);
	}

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
		struct vmw_bo *backup = res->guest_memory_bo;

		ttm_bo_reserve(&backup->tbo, false, false, NULL);
		if (!res->func->dirty_alloc)
			ret = -EINVAL;
		if (!ret)
			ret = vmw_bo_dirty_add(backup);
		if (!ret) {
			res->coherent = true;
			ret = res->func->dirty_alloc(res);
		}
		ttm_bo_unreserve(&backup->tbo);
		if (ret) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}

	}

	if (res->guest_memory_bo) {
		ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(res);
	ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle = user_srf->prime.base.handle;
	rep->backup_size = res->guest_memory_size;
	if (res->guest_memory_bo) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
		rep->buffer_size = res->guest_memory_bo->tbo.base.size;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}
	vmw_resource_unreference(&res);

out_unlock:
	return ret;
}

/**
 * vmw_gb_surface_reference_internal - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @req: Pointer to user-space request surface arg.
 * @rep: Pointer to response to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct vmw_surface_metadata *metadata;
	struct ttm_base_object *base;
	u32 backup_handle;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (!srf->res.guest_memory_bo) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		goto out_bad_resource;
	}
	metadata = &srf->metadata;

	mutex_lock(&dev_priv->cmdbuf_mutex);	/* Protect res->backup */
	ret = drm_gem_handle_create(file_priv, &srf->res.guest_memory_bo->tbo.base,
				    &backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	if (ret != 0) {
		drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n",
			req->sid);
		goto out_bad_resource;
	}

	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
	rep->creq.base.format = metadata->format;
	rep->creq.base.mip_levels = metadata->mip_levels[0];
	rep->creq.base.drm_surface_flags = 0;
	rep->creq.base.multisample_count = metadata->multisample_count;
	rep->creq.base.autogen_filter = metadata->autogen_filter;
	rep->creq.base.array_size = metadata->array_size;
	rep->creq.base.buffer_handle = backup_handle;
	rep->creq.base.base_size = metadata->base_size;
	rep->crep.handle = user_srf->prime.base.handle;
	rep->crep.backup_size = srf->res.guest_memory_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.guest_memory_bo->tbo.base.vma_node);
	rep->crep.buffer_size = srf->res.guest_memory_bo->tbo.base.size;

	rep->creq.version = drm_vmw_gb_surface_v1;
	rep->creq.svga3d_flags_upper_32_bits =
		SVGA3D_FLAGS_UPPER_32(metadata->flags);
	rep->creq.multisample_pattern = metadata->multisample_pattern;
	rep->creq.quality_level = metadata->quality_level;
	rep->creq.must_be_zero = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_subres_dirty_add - Add a dirty region to a subresource
 * @dirty: The surface's dirty tracker.
 * @loc_start: The location corresponding to the start of the region.
 * @loc_end: The location corresponding to the end of the region.
 *
 * As we are assuming that @loc_start and @loc_end represent a sequential
 * range of backing store memory, if the region spans multiple lines then
 * regardless of the x coordinate, the full lines are dirtied.
 * Correspondingly if the region spans multiple z slices, then full rather
 * than partial z slices are dirtied.
 */
static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
				 const struct vmw_surface_loc *loc_start,
				 const struct vmw_surface_loc *loc_end)
{
	const struct vmw_surface_cache *cache = &dirty->cache;
	SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
	u32 mip = loc_start->sub_resource % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	u32 box_c2 = box->z + box->d;

	if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
		return;

	if (box->d == 0 || box->z > loc_start->z)
		box->z = loc_start->z;
	if (box_c2 < loc_end->z)
		box->d = loc_end->z - box->z;

	if (loc_start->z + 1 == loc_end->z) {
		box_c2 = box->y + box->h;
		if (box->h == 0 || box->y > loc_start->y)
			box->y = loc_start->y;
		if (box_c2 < loc_end->y)
			box->h = loc_end->y - box->y;

		if (loc_start->y + 1 == loc_end->y) {
			box_c2 = box->x + box->w;
			if (box->w == 0 || box->x > loc_start->x)
				box->x = loc_start->x;
			if (box_c2 < loc_end->x)
				box->w = loc_end->x - box->x;
		} else {
			box->x = 0;
			box->w = size->width;
		}
	} else {
		box->y = 0;
		box->h = size->height;
		box->x = 0;
		box->w = size->width;
	}
}

/**
 * vmw_subres_dirty_full - Mark a full subresource as dirty
 * @dirty: The surface's dirty tracker.
 * @subres: The subresource
 */
static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
{
	const struct vmw_surface_cache *cache = &dirty->cache;
	u32 mip = subres % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	SVGA3dBox *box = &dirty->boxes[subres];

	box->x = 0;
	box->y = 0;
	box->z = 0;
	box->w = size->width;
	box->h = size->height;
	box->d = size->depth;
}
1832 */
1833 static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
1834 size_t start, size_t end)
1835 {
1836 struct vmw_surface_dirty *dirty =
1837 (struct vmw_surface_dirty *) res->dirty;
1838 size_t backup_end = res->guest_memory_offset + res->guest_memory_size;
1839 struct vmw_surface_loc loc1, loc2;
1840 const struct vmw_surface_cache *cache;
1841
1842 start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
1843 end = min(end, backup_end) - res->guest_memory_offset;
1844 cache = &dirty->cache;
1845 vmw_surface_get_loc(cache, &loc1, start);
1846 vmw_surface_get_loc(cache, &loc2, end - 1);
1847 vmw_surface_inc_loc(cache, &loc2);
1848
1849 if (loc1.sheet != loc2.sheet) {
1850 u32 sub_res;
1851
1852 /*
1853 * Multiple multisample sheets. To do this in an optimized
1854 * fashion, compute the dirty region for each sheet and the
1855 * resulting union. Since this is not a common case, just dirty
1856 * the whole surface.
1857 */
1858 for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
1859 vmw_subres_dirty_full(dirty, sub_res);
1860 return;
1861 }
1862 if (loc1.sub_resource + 1 == loc2.sub_resource) {
1863 /* Dirty range covers a single sub-resource */
1864 vmw_subres_dirty_add(dirty, &loc1, &loc2);
1865 } else {
1866 /* Dirty range covers multiple sub-resources */
1867 struct vmw_surface_loc loc_min, loc_max;
1868 u32 sub_res;
1869
1870 vmw_surface_max_loc(cache, loc1.sub_resource, &loc_max);
1871 vmw_subres_dirty_add(dirty, &loc1, &loc_max);
1872 vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
1873 vmw_subres_dirty_add(dirty, &loc_min, &loc2);
1874 for (sub_res = loc1.sub_resource + 1;
1875 sub_res < loc2.sub_resource - 1; ++sub_res)
1876 vmw_subres_dirty_full(dirty, sub_res);
1877 }
1878 }
1879
1880 /*
1881 * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
1882 * surfaces.
1883 */
1884 static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
1885 size_t start, size_t end)
1886 {
1887 struct vmw_surface_dirty *dirty =
1888 (struct vmw_surface_dirty *) res->dirty;
1889 const struct vmw_surface_cache *cache = &dirty->cache;
1890 size_t backup_end = res->guest_memory_offset + cache->mip_chain_bytes;
1891 SVGA3dBox *box = &dirty->boxes[0];
1892 u32 box_c2;
1893
1894 box->h = box->d = 1;
1895 start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
1896 end = min(end, backup_end) - res->guest_memory_offset;
1897 box_c2 = box->x + box->w;
1898 if (box->w == 0 || box->x > start)
1899 box->x = start;
1900 if (box_c2 < end)
1901 box->w = end - box->x;
1902 }
1903
1904 /*
1905 * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
1906 */
1907 static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
1908 size_t end)
1909 {
1910 struct vmw_surface *srf = vmw_res_to_srf(res);
1911
1912 if (WARN_ON(end <= res->guest_memory_offset ||
1913 start >= res->guest_memory_offset + res->guest_memory_size))
1914 return;
1915
1916 if (srf->metadata.format == SVGA3D_BUFFER)
1917 vmw_surface_buf_dirty_range_add(res, start, end);
1918 else
1919 vmw_surface_tex_dirty_range_add(res, start, end);
1920 }
1921
1922 /*
1923 * vmw_surface_dirty_sync - The surface's dirty_sync callback.
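*
* Emits one SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE command per subresource
* with a non-empty dirty box when SM4 contexts are available, or one
* SVGA_3D_CMD_UPDATE_GB_IMAGE command otherwise (the latter is not
* array aware, so the face and mip level are derived from the
* subresource index), and then clears all dirty boxes.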
1924 */ 1925 static int vmw_surface_dirty_sync(struct vmw_resource *res) 1926 { 1927 struct vmw_private *dev_priv = res->dev_priv; 1928 u32 i, num_dirty; 1929 struct vmw_surface_dirty *dirty = 1930 (struct vmw_surface_dirty *) res->dirty; 1931 size_t alloc_size; 1932 const struct vmw_surface_cache *cache = &dirty->cache; 1933 struct { 1934 SVGA3dCmdHeader header; 1935 SVGA3dCmdDXUpdateSubResource body; 1936 } *cmd1; 1937 struct { 1938 SVGA3dCmdHeader header; 1939 SVGA3dCmdUpdateGBImage body; 1940 } *cmd2; 1941 void *cmd; 1942 1943 num_dirty = 0; 1944 for (i = 0; i < dirty->num_subres; ++i) { 1945 const SVGA3dBox *box = &dirty->boxes[i]; 1946 1947 if (box->d) 1948 num_dirty++; 1949 } 1950 1951 if (!num_dirty) 1952 goto out; 1953 1954 alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2)); 1955 cmd = VMW_CMD_RESERVE(dev_priv, alloc_size); 1956 if (!cmd) 1957 return -ENOMEM; 1958 1959 cmd1 = cmd; 1960 cmd2 = cmd; 1961 1962 for (i = 0; i < dirty->num_subres; ++i) { 1963 const SVGA3dBox *box = &dirty->boxes[i]; 1964 1965 if (!box->d) 1966 continue; 1967 1968 /* 1969 * DX_UPDATE_SUBRESOURCE is aware of array surfaces. 1970 * UPDATE_GB_IMAGE is not. 1971 */ 1972 if (has_sm4_context(dev_priv)) { 1973 cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE; 1974 cmd1->header.size = sizeof(cmd1->body); 1975 cmd1->body.sid = res->id; 1976 cmd1->body.subResource = i; 1977 cmd1->body.box = *box; 1978 cmd1++; 1979 } else { 1980 cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; 1981 cmd2->header.size = sizeof(cmd2->body); 1982 cmd2->body.image.sid = res->id; 1983 cmd2->body.image.face = i / cache->num_mip_levels; 1984 cmd2->body.image.mipmap = i - 1985 (cache->num_mip_levels * cmd2->body.image.face); 1986 cmd2->body.box = *box; 1987 cmd2++; 1988 } 1989 1990 } 1991 vmw_cmd_commit(dev_priv, alloc_size); 1992 out: 1993 memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) * 1994 dirty->num_subres); 1995 1996 return 0; 1997 } 1998 1999 /* 2000 * vmw_surface_dirty_alloc - The surface's dirty_alloc callback. 
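*
* Allocates one SVGA3dBox per subresource (layers or cube faces times
* mip levels) and sets up the surface layout cache that is later used
* to translate dirty byte ranges into image locations.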
2001 */
2002 static int vmw_surface_dirty_alloc(struct vmw_resource *res)
2003 {
2004 struct vmw_surface *srf = vmw_res_to_srf(res);
2005 const struct vmw_surface_metadata *metadata = &srf->metadata;
2006 struct vmw_surface_dirty *dirty;
2007 u32 num_layers = 1;
2008 u32 num_mip;
2009 u32 num_subres;
2010 u32 num_samples;
2011 size_t dirty_size;
2012 int ret;
2013
2014 if (metadata->array_size)
2015 num_layers = metadata->array_size;
2016 else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
2017 num_layers *= SVGA3D_MAX_SURFACE_FACES;
2018
2019 num_mip = metadata->mip_levels[0];
2020 if (!num_mip)
2021 num_mip = 1;
2022
2023 num_subres = num_layers * num_mip;
2024 dirty_size = struct_size(dirty, boxes, num_subres);
2025
2026 dirty = kvzalloc(dirty_size, GFP_KERNEL);
2027 if (!dirty) {
2028 ret = -ENOMEM;
2029 goto out_no_dirty;
2030 }
2031
2032 num_samples = max_t(u32, 1, metadata->multisample_count);
2033 ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
2034 num_mip, num_layers, num_samples,
2035 &dirty->cache);
2036 if (ret)
2037 goto out_no_cache;
2038
2039 dirty->num_subres = num_subres;
2040 res->dirty = (struct vmw_resource_dirty *) dirty;
2041
2042 return 0;
2043
2044 out_no_cache:
2045 kvfree(dirty);
2046 out_no_dirty:
2047 return ret;
2048 }
2049
2050 /*
2051 * vmw_surface_dirty_free - The surface's dirty_free callback
2052 */
2053 static void vmw_surface_dirty_free(struct vmw_resource *res)
2054 {
2055 struct vmw_surface_dirty *dirty =
2056 (struct vmw_surface_dirty *) res->dirty;
2057
2058 kvfree(dirty);
2059 res->dirty = NULL;
2060 }
2061
2062 /*
2063 * vmw_surface_clean - The surface's clean callback
2064 */
2065 static int vmw_surface_clean(struct vmw_resource *res)
2066 {
2067 struct vmw_private *dev_priv = res->dev_priv;
2068 size_t alloc_size;
2069 struct {
2070 SVGA3dCmdHeader header;
2071 SVGA3dCmdReadbackGBSurface body;
2072 } *cmd;
2073
2074 alloc_size = sizeof(*cmd);
2075 cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
2076 if (!cmd)
2077 return -ENOMEM;
2078
2079 cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
2080 cmd->header.size = sizeof(cmd->body);
2081 cmd->body.sid = res->id;
2082 vmw_cmd_commit(dev_priv, alloc_size);
2083
2084 return 0;
2085 }
2086
2087 /*
2088 * vmw_gb_surface_define - Define a private GB surface
2089 *
2090 * @dev_priv: Pointer to a device private.
2091 * @req: Metadata representing the surface to create.
2092 * @srf_out: The allocated surface. Set to NULL on failure.
2093 *
2094 * GB surfaces allocated by this function will not have a user mode handle, and
2095 * thus will only be visible to vmwgfx. For optimization reasons the
2096 * surface may later be given a user mode handle by another function to make
2097 * it available to user mode drivers.
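*
* Return: Zero on success, negative error code on failure.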
2098 */
2099 int vmw_gb_surface_define(struct vmw_private *dev_priv,
2100 const struct vmw_surface_metadata *req,
2101 struct vmw_surface **srf_out)
2102 {
2103 struct vmw_surface_metadata *metadata;
2104 struct vmw_user_surface *user_srf;
2105 struct vmw_surface *srf;
2106 u32 sample_count = 1;
2107 u32 num_layers = 1;
2108 int ret;
2109
2110 *srf_out = NULL;
2111
2112 if (req->scanout) {
2113 if (!vmw_surface_is_screen_target_format(req->format)) {
2114 VMW_DEBUG_USER("Invalid Screen Target surface format.");
2115 return -EINVAL;
2116 }
2117
2118 if (req->base_size.width > dev_priv->texture_max_width ||
2119 req->base_size.height > dev_priv->texture_max_height) {
2120 VMW_DEBUG_USER("%ux%u exceeds max surface size %ux%u\n",
2121 req->base_size.width,
2122 req->base_size.height,
2123 dev_priv->texture_max_width,
2124 dev_priv->texture_max_height);
2125 return -EINVAL;
2126 }
2127 } else {
2128 const SVGA3dSurfaceDesc *desc =
2129 vmw_surface_get_desc(req->format);
2130
2131 if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) {
2132 VMW_DEBUG_USER("Invalid surface format.\n");
2133 return -EINVAL;
2134 }
2135 }
2136
2137 if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
2138 return -EINVAL;
2139
2140 if (req->num_sizes != 1)
2141 return -EINVAL;
2142
2143 if (req->sizes != NULL)
2144 return -EINVAL;
2145
2146 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
2147 if (unlikely(!user_srf)) {
2148 ret = -ENOMEM;
2149 goto out_unlock;
2150 }
2151
2152 *srf_out = &user_srf->srf;
2153
2154 srf = &user_srf->srf;
2155 srf->metadata = *req;
2156 srf->offsets = NULL;
2157
2158 metadata = &srf->metadata;
2159
2160 if (metadata->array_size)
2161 num_layers = req->array_size;
2162 else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
2163 num_layers = SVGA3D_MAX_SURFACE_FACES;
2164
2165 if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
2166 sample_count = metadata->multisample_count;
2167
2168 srf->res.guest_memory_size =
2169 vmw_surface_get_serialized_size_extended(
2170 metadata->format,
2171 metadata->base_size,
2172 metadata->mip_levels[0],
2173 num_layers,
2174 sample_count);
2175
2176 if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
2177 srf->res.guest_memory_size += sizeof(SVGA3dDXSOState);
2178
2179 /*
2180 * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface
2181 * whose size exceeds the STDU max width/height. This is really a
2182 * workaround to support creation of a big framebuffer requested by some
2183 * user space for the whole topology. That big framebuffer won't really
2184 * be used for binding with a screen target, as a separate surface is
2185 * created during prepare_fb, so it's safe to ignore the flag here.
2186 */
2187 if (dev_priv->active_display_unit == vmw_du_screen_target &&
2188 metadata->scanout &&
2189 metadata->base_size.width <= dev_priv->stdu_max_width &&
2190 metadata->base_size.height <= dev_priv->stdu_max_height)
2191 metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;
2192
2193 /*
2194 * From this point, the generic resource management functions
2195 * destroy the object on failure.
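* vmw_surface_init() is handed vmw_user_surface_free as the resource
* destructor, so user_srf is released through resource reference
* counting from here on.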
2196 */
2197 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
2198
2199 return ret;
2200
2201 out_unlock:
2202 return ret;
2203 }
2204
2205 static SVGA3dSurfaceFormat vmw_format_bpp_to_svga(struct vmw_private *vmw,
2206 int bpp)
2207 {
2208 switch (bpp) {
2209 case 8: /* DRM_FORMAT_C8 */
2210 return SVGA3D_P8;
2211 case 16: /* DRM_FORMAT_RGB565 */
2212 return SVGA3D_R5G6B5;
2213 case 32: /* DRM_FORMAT_XRGB8888 */
2214 if (has_sm4_context(vmw))
2215 return SVGA3D_B8G8R8X8_UNORM;
2216 return SVGA3D_X8R8G8B8;
2217 default:
2218 drm_warn(&vmw->drm, "Unsupported format bpp: %d\n", bpp);
2219 return SVGA3D_X8R8G8B8;
2220 }
2221 }
2222
2223 /**
2224 * vmw_dumb_create - Create a dumb kms buffer
2225 *
2226 * @file_priv: Pointer to a struct drm_file identifying the caller.
2227 * @dev: Pointer to the drm device.
2228 * @args: Pointer to a struct drm_mode_create_dumb structure.
2229 * Return: Zero on success, negative error code on failure.
2230 *
2231 * This is a driver callback for the core drm create_dumb functionality.
2232 * Note that this is very similar to the vmw_bo_alloc ioctl, except
2233 * that the arguments have a different format.
2234 */
2235 int vmw_dumb_create(struct drm_file *file_priv,
2236 struct drm_device *dev,
2237 struct drm_mode_create_dumb *args)
2238 {
2239 struct vmw_private *dev_priv = vmw_priv(dev);
2240 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
2241 struct vmw_bo *vbo = NULL;
2242 struct vmw_resource *res = NULL;
2243 union drm_vmw_gb_surface_create_ext_arg arg = { 0 };
2244 struct drm_vmw_gb_surface_create_ext_req *req = &arg.req;
2245 int ret;
2246 struct drm_vmw_size drm_size = {
2247 .width = args->width,
2248 .height = args->height,
2249 .depth = 1,
2250 };
2251 SVGA3dSurfaceFormat format = vmw_format_bpp_to_svga(dev_priv, args->bpp);
2252 const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
2253 SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
2254 SVGA3D_SURFACE_HINT_RENDERTARGET |
2255 SVGA3D_SURFACE_SCREENTARGET;
2256
2257 if (vmw_surface_is_dx_screen_target_format(format)) {
2258 flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
2259 SVGA3D_SURFACE_BIND_RENDER_TARGET;
2260 }
2261
2262 /*
2263 * Without mob support we're just going to use a raw memory buffer
2264 * because we wouldn't be able to support full surface coherency
2265 * without mobs. There is also no reason to support surface coherency
2266 * without 3d (i.e. gpu usage on the host) because then all the
2267 * contents are going to be rendered guest side.
2268 */
2269 if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) {
2270 int cpp = DIV_ROUND_UP(args->bpp, 8);
2271
2272 switch (cpp) {
2273 case 1: /* DRM_FORMAT_C8 */
2274 case 2: /* DRM_FORMAT_RGB565 */
2275 case 4: /* DRM_FORMAT_XRGB8888 */
2276 break;
2277 default:
2278 /*
2279 * Dumb buffers don't allow anything else.
2280 * This is tested via IGT's dumb_buffers
2281 */
2282 return -EINVAL;
2283 }
2284
2285 args->pitch = args->width * cpp;
2286 args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
2287
2288 ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
2289 args->size, &args->handle,
2290 &vbo);
2291 /* drop reference from allocate - handle holds it now */
2292 drm_gem_object_put(&vbo->tbo.base);
2293 return ret;
2294 }
2295
2296 req->version = drm_vmw_gb_surface_v1;
2297 req->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
2298 req->quality_level = SVGA3D_MS_QUALITY_NONE;
2299 req->buffer_byte_stride = 0;
2300 req->must_be_zero = 0;
2301 req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
2302 req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
2303 req->base.format = (uint32_t)format;
2304 req->base.drm_surface_flags = drm_vmw_surface_flag_scanout;
2305 req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
2306 req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
2307 req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;
2308 req->base.base_size.width = args->width;
2309 req->base.base_size.height = args->height;
2310 req->base.base_size.depth = 1;
2311 req->base.array_size = 0;
2312 req->base.mip_levels = 1;
2313 req->base.multisample_count = 0;
2314 req->base.buffer_handle = SVGA3D_INVALID_ID;
2315 req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;
2316 ret = vmw_gb_surface_define_ext_ioctl(dev, &arg, file_priv);
2317 if (ret) {
2318 drm_warn(dev, "Unable to create a dumb buffer\n");
2319 return ret;
2320 }
2321
2322 args->handle = arg.rep.buffer_handle;
2323 args->size = arg.rep.buffer_size;
2324 args->pitch = vmw_surface_calculate_pitch(desc, &drm_size);
2325
2326 ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg.rep.handle,
2327 user_surface_converter,
2328 &res);
2329 if (ret) {
2330 drm_err(dev, "Created resource handle doesn't exist!\n");
2331 goto err;
2332 }
2333
2334 vbo = res->guest_memory_bo;
2335 vbo->is_dumb = true;
2336 vbo->dumb_surface = vmw_res_to_srf(res);
2337 drm_gem_object_put(&vbo->tbo.base);
2338 /*
2339 * Unset the user surface dtor since this is not actually exposed
2340 * to userspace. The surface is owned via the dumb buffer's GEM handle
2341 */
2342 struct vmw_user_surface *usurf = container_of(vbo->dumb_surface,
2343 struct vmw_user_surface, srf);
2344 usurf->prime.base.refcount_release = NULL;
2345 err:
2346 if (res)
2347 vmw_resource_unreference(&res);
2348
2349 ttm_ref_object_base_unref(tfile, arg.rep.handle);
2350
2351 return ret;
2352 }
2353
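/*
 * Example (illustrative sketch only, not part of the driver): user space
 * normally reaches vmw_dumb_create() through the generic dumb-buffer
 * ioctl. A minimal invocation, assuming an already opened DRM file
 * descriptor "fd", could look like this:
 *
 *	#include <sys/ioctl.h>
 *	#include <xf86drm.h>
 *	#include <drm/drm_mode.h>
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1024,
 *		.height = 768,
 *		.bpp = 32,	// XRGB8888
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) == 0) {
 *		// creq.handle, creq.pitch and creq.size have been filled in
 *		// by vmw_dumb_create(); the handle can then be attached to a
 *		// framebuffer or mapped via DRM_IOCTL_MODE_MAP_DUMB.
 *	}
 */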