// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_cursor_plane.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmw_surface_cache.h"
#include "device_include/svga3d_surfacedefs.h"

#include <drm/drm_dumb_buffers.h>
#include <drm/ttm/ttm_placement.h>

/* Assemble a 64-bit surface-flags value from its two 32-bit halves. */
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime: The TTM prime object.
 * @srf: The surface metadata.
 * @master: Master of the creating client. Used for security check.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	struct drm_master *master;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face: Surface face.
 * @mip: Mip level.
 * @bo_offset: Offset into backing store of this mip level.
 *
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

/**
 * struct vmw_surface_dirty - Surface dirty-tracker
 * @cache: Cached layout information of the surface.
 * @num_subres: Number of subresources.
 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
 */
struct vmw_surface_dirty {
	struct vmw_surface_cache cache;
	u32 num_subres;
	SVGA3dBox boxes[] __counted_by(num_subres);
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv);
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv);

static void vmw_surface_dirty_free(struct vmw_resource *res);
static int vmw_surface_dirty_alloc(struct vmw_resource *res);
static int vmw_surface_dirty_sync(struct vmw_resource *res);
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end);
static int vmw_surface_clean(struct vmw_resource *res);

/* Converter used to translate user-space surface handles to resources. */
static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;

/* Resource ops for legacy (non guest-backed) surfaces, backed by GMR/VRAM. */
static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_guest_memory = false,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 1,
	.type_name = "legacy surfaces",
	.domain = VMW_BO_DOMAIN_GMR,
	.busy_domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

/* Resource ops for guest-backed surfaces, backed by MOBs. */
static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_guest_memory = true,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 2,
	.type_name = "guest backed surfaces",
	.domain = VMW_BO_DOMAIN_MOB,
	.busy_domain = VMW_BO_DOMAIN_MOB,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind,
	.dirty_alloc = vmw_surface_dirty_alloc,
	.dirty_free = vmw_surface_dirty_free,
	.dirty_sync = vmw_surface_dirty_sync,
	.dirty_range_add = vmw_surface_dirty_range_add,
	.clean = vmw_surface_clean,
};

/*
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/*
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/*
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
170 * 171 * @srf: Pointer to a struct vmw_surface 172 * 173 * Computes the required size for a surface dma command for backup or 174 * restoration of the surface represented by @srf. 175 */ 176 static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) 177 { 178 return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma); 179 } 180 181 182 /** 183 * vmw_surface_define_size - Compute fifo size for a surface define command. 184 * 185 * @srf: Pointer to a struct vmw_surface 186 * 187 * Computes the required size for a surface define command for the definition 188 * of the surface represented by @srf. 189 */ 190 static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) 191 { 192 return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes * 193 sizeof(SVGA3dSize); 194 } 195 196 197 /** 198 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. 199 * 200 * Computes the required size for a surface destroy command for the destruction 201 * of a hw surface. 202 */ 203 static inline uint32_t vmw_surface_destroy_size(void) 204 { 205 return sizeof(struct vmw_surface_destroy); 206 } 207 208 /** 209 * vmw_surface_destroy_encode - Encode a surface_destroy command. 210 * 211 * @id: The surface id 212 * @cmd_space: Pointer to memory area in which the commands should be encoded. 213 */ 214 static void vmw_surface_destroy_encode(uint32_t id, 215 void *cmd_space) 216 { 217 struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) 218 cmd_space; 219 220 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; 221 cmd->header.size = sizeof(cmd->body); 222 cmd->body.sid = id; 223 } 224 225 /** 226 * vmw_surface_define_encode - Encode a surface_define command. 227 * 228 * @srf: Pointer to a struct vmw_surface object. 229 * @cmd_space: Pointer to memory area in which the commands should be encoded. 
230 */ 231 static void vmw_surface_define_encode(const struct vmw_surface *srf, 232 void *cmd_space) 233 { 234 struct vmw_surface_define *cmd = (struct vmw_surface_define *) 235 cmd_space; 236 struct drm_vmw_size *src_size; 237 SVGA3dSize *cmd_size; 238 uint32_t cmd_len; 239 int i; 240 241 cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes * 242 sizeof(SVGA3dSize); 243 244 cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; 245 cmd->header.size = cmd_len; 246 cmd->body.sid = srf->res.id; 247 /* 248 * Downcast of surfaceFlags, was upcasted when received from user-space, 249 * since driver internally stores as 64 bit. 250 * For legacy surface define only 32 bit flag is supported. 251 */ 252 cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags; 253 cmd->body.format = srf->metadata.format; 254 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 255 cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i]; 256 257 cmd += 1; 258 cmd_size = (SVGA3dSize *) cmd; 259 src_size = srf->metadata.sizes; 260 261 for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) { 262 cmd_size->width = src_size->width; 263 cmd_size->height = src_size->height; 264 cmd_size->depth = src_size->depth; 265 } 266 } 267 268 /** 269 * vmw_surface_dma_encode - Encode a surface_dma command. 270 * 271 * @srf: Pointer to a struct vmw_surface object. 272 * @cmd_space: Pointer to memory area in which the commands should be encoded. 273 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents 274 * should be placed or read from. 275 * @to_surface: Boolean whether to DMA to the surface or from the surface. 
276 */ 277 static void vmw_surface_dma_encode(struct vmw_surface *srf, 278 void *cmd_space, 279 const SVGAGuestPtr *ptr, 280 bool to_surface) 281 { 282 uint32_t i; 283 struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; 284 const struct SVGA3dSurfaceDesc *desc = 285 vmw_surface_get_desc(srf->metadata.format); 286 287 for (i = 0; i < srf->metadata.num_sizes; ++i) { 288 SVGA3dCmdHeader *header = &cmd->header; 289 SVGA3dCmdSurfaceDMA *body = &cmd->body; 290 SVGA3dCopyBox *cb = &cmd->cb; 291 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; 292 const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; 293 const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i]; 294 295 header->id = SVGA_3D_CMD_SURFACE_DMA; 296 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); 297 298 body->guest.ptr = *ptr; 299 body->guest.ptr.offset += cur_offset->bo_offset; 300 body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size); 301 body->host.sid = srf->res.id; 302 body->host.face = cur_offset->face; 303 body->host.mipmap = cur_offset->mip; 304 body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM : 305 SVGA3D_READ_HOST_VRAM); 306 cb->x = 0; 307 cb->y = 0; 308 cb->z = 0; 309 cb->srcx = 0; 310 cb->srcy = 0; 311 cb->srcz = 0; 312 cb->w = cur_size->width; 313 cb->h = cur_size->height; 314 cb->d = cur_size->depth; 315 316 suffix->suffixSize = sizeof(*suffix); 317 suffix->maximumOffset = 318 vmw_surface_get_image_buffer_size(desc, cur_size, 319 body->guest.pitch); 320 suffix->flags.discard = 0; 321 suffix->flags.unsynchronized = 0; 322 suffix->flags.reserved = 0; 323 ++cmd; 324 } 325 }; 326 327 328 /** 329 * vmw_hw_surface_destroy - destroy a Device surface 330 * 331 * @res: Pointer to a struct vmw_resource embedded in a struct 332 * vmw_surface. 333 * 334 * Destroys a the device surface associated with a struct vmw_surface if 335 * any, and adjusts resource count accordingly. 
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	void *cmd;

	/* Guest-backed surfaces have their own destroy path. */
	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
		if (unlikely(!cmd))
			return;

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: make used_memory_size atomic, or use a separate lock
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		dev_priv->used_memory_size -= res->guest_memory_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * Creates the device surface if the surface doesn't have a hw id yet.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	/* Already created? */
	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->guest_memory_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define- commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_cmd_commit(dev_priv, submit_size);
	vmw_fifo_resource_inc(dev_priv);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->guest_memory_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(!val_buf->bo);
	submit_size = vmw_surface_dma_size(srf);
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_cmd_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 * surface validation process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->guest_memory_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @readback: Readback - only true if dirty
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 * resource eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the dma- and surface destroy commands.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_cmd_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->guest_memory_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 * the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(!res_free);
	/* Pick guest-backed or legacy ops depending on device capability. */
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	res->hw_destroy = vmw_hw_surface_destroy;
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 * user visible surfaces
 *
 * @base: Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	WARN_ON(res->dirty);
	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->metadata.sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;

	/*
	 * Dumb buffers own the resource and they'll unref the
	 * resource themselves
	 */
	WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);

	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
703 */ 704 int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 705 struct drm_file *file_priv) 706 { 707 struct vmw_private *dev_priv = vmw_priv(dev); 708 struct vmw_user_surface *user_srf; 709 struct vmw_surface *srf; 710 struct vmw_surface_metadata *metadata; 711 struct vmw_resource *res; 712 struct vmw_resource *tmp; 713 union drm_vmw_surface_create_arg *arg = 714 (union drm_vmw_surface_create_arg *)data; 715 struct drm_vmw_surface_create_req *req = &arg->req; 716 struct drm_vmw_surface_arg *rep = &arg->rep; 717 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 718 int ret; 719 int i, j; 720 uint32_t cur_bo_offset; 721 struct drm_vmw_size *cur_size; 722 struct vmw_surface_offset *cur_offset; 723 uint32_t num_sizes; 724 const SVGA3dSurfaceDesc *desc; 725 726 num_sizes = 0; 727 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { 728 if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) 729 return -EINVAL; 730 num_sizes += req->mip_levels[i]; 731 } 732 733 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS || 734 num_sizes == 0) 735 return -EINVAL; 736 737 desc = vmw_surface_get_desc(req->format); 738 if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) { 739 VMW_DEBUG_USER("Invalid format %d for surface creation.\n", 740 req->format); 741 return -EINVAL; 742 } 743 744 user_srf = kzalloc_obj(*user_srf); 745 if (unlikely(!user_srf)) { 746 ret = -ENOMEM; 747 goto out_unlock; 748 } 749 750 srf = &user_srf->srf; 751 metadata = &srf->metadata; 752 res = &srf->res; 753 754 /* Driver internally stores as 64-bit flags */ 755 metadata->flags = (SVGA3dSurfaceAllFlags)req->flags; 756 metadata->format = req->format; 757 metadata->scanout = req->scanout; 758 759 memcpy(metadata->mip_levels, req->mip_levels, 760 sizeof(metadata->mip_levels)); 761 metadata->num_sizes = num_sizes; 762 metadata->sizes = 763 memdup_array_user((struct drm_vmw_size __user *)(unsigned long) 764 req->size_addr, 765 metadata->num_sizes, sizeof(*metadata->sizes)); 766 if 
(IS_ERR(metadata->sizes)) { 767 ret = PTR_ERR(metadata->sizes); 768 goto out_no_sizes; 769 } 770 srf->offsets = kmalloc_objs(*srf->offsets, metadata->num_sizes); 771 if (unlikely(!srf->offsets)) { 772 ret = -ENOMEM; 773 goto out_no_offsets; 774 } 775 776 metadata->base_size = *srf->metadata.sizes; 777 metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE; 778 metadata->multisample_count = 0; 779 metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE; 780 metadata->quality_level = SVGA3D_MS_QUALITY_NONE; 781 782 cur_bo_offset = 0; 783 cur_offset = srf->offsets; 784 cur_size = metadata->sizes; 785 786 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { 787 for (j = 0; j < metadata->mip_levels[i]; ++j) { 788 uint32_t stride = vmw_surface_calculate_pitch( 789 desc, cur_size); 790 791 cur_offset->face = i; 792 cur_offset->mip = j; 793 cur_offset->bo_offset = cur_bo_offset; 794 cur_bo_offset += vmw_surface_get_image_buffer_size 795 (desc, cur_size, stride); 796 ++cur_offset; 797 ++cur_size; 798 } 799 } 800 res->guest_memory_size = cur_bo_offset; 801 802 srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata); 803 if (IS_ERR(srf->snooper.image)) { 804 ret = PTR_ERR(srf->snooper.image); 805 goto out_no_copy; 806 } 807 808 if (drm_is_primary_client(file_priv)) 809 user_srf->master = drm_file_get_master(file_priv); 810 811 /** 812 * From this point, the generic resource management functions 813 * destroy the object on failure. 814 */ 815 816 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); 817 if (unlikely(ret != 0)) 818 goto out_unlock; 819 820 /* 821 * A gb-aware client referencing a surface will expect a backup 822 * buffer to be present. 
823 */ 824 if (dev_priv->has_mob) { 825 struct vmw_bo_params params = { 826 .domain = VMW_BO_DOMAIN_SYS, 827 .busy_domain = VMW_BO_DOMAIN_SYS, 828 .bo_type = ttm_bo_type_device, 829 .size = res->guest_memory_size, 830 .pin = false 831 }; 832 833 ret = vmw_bo_create(dev_priv, ¶ms, &res->guest_memory_bo); 834 if (unlikely(ret != 0)) { 835 vmw_resource_unreference(&res); 836 goto out_unlock; 837 } 838 839 ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res); 840 if (unlikely(ret != 0)) { 841 vmw_resource_unreference(&res); 842 goto out_unlock; 843 } 844 } 845 846 tmp = vmw_resource_reference(&srf->res); 847 ret = ttm_prime_object_init(tfile, res->guest_memory_size, 848 &user_srf->prime, 849 VMW_RES_SURFACE, 850 &vmw_user_surface_base_release); 851 852 if (unlikely(ret != 0)) { 853 vmw_resource_unreference(&tmp); 854 vmw_resource_unreference(&res); 855 goto out_unlock; 856 } 857 858 rep->sid = user_srf->prime.base.handle; 859 vmw_resource_unreference(&res); 860 861 return 0; 862 out_no_copy: 863 kfree(srf->offsets); 864 out_no_offsets: 865 kfree(metadata->sizes); 866 out_no_sizes: 867 ttm_prime_object_kfree(user_srf, prime); 868 out_unlock: 869 return ret; 870 } 871 872 static struct vmw_user_surface * 873 vmw_lookup_user_surface_for_buffer(struct vmw_private *vmw, struct vmw_bo *bo, 874 u32 handle) 875 { 876 struct vmw_user_surface *user_srf = NULL; 877 struct vmw_surface *surf; 878 struct ttm_base_object *base; 879 880 surf = vmw_bo_surface(bo); 881 if (surf) { 882 rcu_read_lock(); 883 user_srf = container_of(surf, struct vmw_user_surface, srf); 884 base = &user_srf->prime.base; 885 if (base && !kref_get_unless_zero(&base->refcount)) { 886 drm_dbg_driver(&vmw->drm, 887 "%s: referencing a stale surface handle %d\n", 888 __func__, handle); 889 base = NULL; 890 user_srf = NULL; 891 } 892 rcu_read_unlock(); 893 } 894 895 return user_srf; 896 } 897 898 struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw, 899 struct vmw_bo *bo, 900 u32 
 handle)
{
	struct vmw_user_surface *user_srf =
		vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
	struct vmw_surface *surf = NULL;
	struct ttm_base_object *base;

	if (user_srf) {
		surf = vmw_surface_reference(&user_srf->srf);
		/* Drop the base-object ref taken by the lookup helper. */
		base = &user_srf->prime.base;
		ttm_base_object_unref(&base);
	}
	return surf;
}

u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
					 struct vmw_bo *bo,
					 u32 handle)
{
	struct vmw_user_surface *user_srf =
		vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
	int surf_handle = 0;
	struct ttm_base_object *base;

	if (user_srf) {
		base = &user_srf->prime.base;
		surf_handle = (u32)base->handle;
		/* Drop the base-object ref taken by the lookup helper. */
		ttm_base_object_unref(&base);
	}
	return surf_handle;
}

/*
 * vmw_buffer_prime_to_surface_base - Resolve a prime fd that refers to a
 * buffer object into the base object of the surface backed by that buffer.
 * On success, a ref has been added on the base object and *base_p is set.
 */
static int vmw_buffer_prime_to_surface_base(struct vmw_private *dev_priv,
					    struct drm_file *file_priv,
					    u32 fd, u32 *handle,
					    struct ttm_base_object **base_p)
{
	struct ttm_base_object *base;
	struct vmw_bo *bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	int ret;

	ret = drm_gem_prime_fd_to_handle(&dev_priv->drm, file_priv, fd, handle);
	if (ret) {
		drm_warn(&dev_priv->drm,
			 "Wasn't able to find user buffer for fd = %u.\n", fd);
		return ret;
	}

	ret = vmw_user_bo_lookup(file_priv, *handle, &bo);
	if (ret) {
		drm_warn(&dev_priv->drm,
			 "Wasn't able to lookup user buffer for handle = %u.\n", *handle);
		return ret;
	}

	user_srf = vmw_lookup_user_surface_for_buffer(dev_priv, bo, *handle);
	if (WARN_ON(!user_srf)) {
		drm_warn(&dev_priv->drm,
			 "User surface fd %d (handle %d) is null.\n", fd, *handle);
		ret = -EINVAL;
		goto out;
	}

	base = &user_srf->prime.base;
	ret = ttm_ref_object_add(tfile, base, NULL, false);
	if (ret) {
		drm_warn(&dev_priv->drm,
			 "Couldn't add an object ref for the buffer (%d).\n", *handle);
		goto out;
	}

	*base_p = base;
out:
	vmw_user_bo_unref(&bo);

	return ret;
}

/*
 * vmw_surface_handle_reference - Look up a surface by handle or prime fd and
 * add a reference for the calling file, enforcing the master / authentication
 * security checks. On success *base_p holds a referenced base object.
 */
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf = NULL;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		/* Not a prime surface handle; maybe a prime buffer handle. */
		if (ret)
			return vmw_buffer_prime_to_surface_base(dev_priv,
								file_priv,
								u_handle,
								&handle,
								base_p);
	} else {
		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(!base)) {
		VMW_DEBUG_USER("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		VMW_DEBUG_USER("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}
	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		bool require_exist = false;

		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/* Error out if we are unauthenticated primary */
		if (drm_is_primary_client(file_priv) &&
		    !file_priv->authenticated) {
			ret = -EACCES;
			goto out_bad_resource;
		}

		/*
		 * Make sure the surface creator has the same
		 * authenticating master, or is already registered with us.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master)
			require_exist = true;

		if (unlikely(drm_is_render_client(file_priv)))
			require_exist = true;

		ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	/* Downcast of flags when sending back to user space */
	rep->flags = (uint32_t)srf->metadata.flags;
	rep->format = srf->metadata.format;
	memcpy(rep->mip_levels, srf->metadata.mip_levels,
	       sizeof(srf->metadata.mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	/*
	 * copy_to_user() returns the number of bytes NOT copied; any
	 * non-zero value is converted to -EFAULT below.
	 */
	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->metadata.base_size,
				   sizeof(srf->metadata.base_size));
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
			       srf->metadata.num_sizes);
		ttm_ref_object_base_unref(tfile, base->handle);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Encode a surface_define command.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Picks the newest DEFINE_GB_SURFACE command variant the device supports
 * (v4 for SM5, v3 for SM4.1, v2 for plain array surfaces, the base
 * command otherwise) and submits it. Returns 0 on success, negative error
 * code on failure.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_surface_metadata *metadata = &srf->metadata;
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v3 body;
	} *cmd3;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v4 body;
	} *cmd4;

	/* Already created on the device? Nothing to do. */
	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/* Select the command variant based on device capabilities. */
	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
		cmd_len = sizeof(cmd4->body);
		submit_len = sizeof(*cmd4);
	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
		cmd_len = sizeof(cmd3->body);
		submit_len = sizeof(*cmd3);
	} else if (metadata->array_size > 0) {
		/* VMW_SM_4 support verified at creation time. */
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

	cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
	/* All variants alias the same reserved command space. */
	cmd2 = (typeof(cmd2))cmd;
	cmd3 = (typeof(cmd3))cmd;
	cmd4 = (typeof(cmd4))cmd;
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
		cmd4->header.id = cmd_id;
		cmd4->header.size = cmd_len;
		cmd4->body.sid = srf->res.id;
		cmd4->body.surfaceFlags = metadata->flags;
		cmd4->body.format = metadata->format;
		cmd4->body.numMipLevels = metadata->mip_levels[0];
		cmd4->body.multisampleCount = metadata->multisample_count;
		cmd4->body.multisamplePattern = metadata->multisample_pattern;
		cmd4->body.qualityLevel = metadata->quality_level;
		cmd4->body.autogenFilter = metadata->autogen_filter;
		cmd4->body.size.width = metadata->base_size.width;
		cmd4->body.size.height = metadata->base_size.height;
		cmd4->body.size.depth = metadata->base_size.depth;
		cmd4->body.arraySize = metadata->array_size;
		cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
		cmd3->header.id = cmd_id;
		cmd3->header.size = cmd_len;
		cmd3->body.sid = srf->res.id;
		cmd3->body.surfaceFlags = metadata->flags;
		cmd3->body.format = metadata->format;
		cmd3->body.numMipLevels = metadata->mip_levels[0];
		cmd3->body.multisampleCount = metadata->multisample_count;
		cmd3->body.multisamplePattern = metadata->multisample_pattern;
		cmd3->body.qualityLevel = metadata->quality_level;
		cmd3->body.autogenFilter = metadata->autogen_filter;
		cmd3->body.size.width = metadata->base_size.width;
		cmd3->body.size.height = metadata->base_size.height;
		cmd3->body.size.depth = metadata->base_size.depth;
		cmd3->body.arraySize = metadata->array_size;
	} else if (metadata->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = metadata->flags;
		cmd2->body.format = metadata->format;
		cmd2->body.numMipLevels = metadata->mip_levels[0];
		cmd2->body.multisampleCount = metadata->multisample_count;
		cmd2->body.autogenFilter = metadata->autogen_filter;
		cmd2->body.size.width = metadata->base_size.width;
		cmd2->body.size.height = metadata->base_size.height;
		cmd2->body.size.depth = metadata->base_size.depth;
		cmd2->body.arraySize = metadata->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = metadata->flags;
		cmd->body.format = metadata->format;
		cmd->body.numMipLevels = metadata->mip_levels[0];
		cmd->body.multisampleCount = metadata->multisample_count;
		cmd->body.autogenFilter = metadata->autogen_filter;
		cmd->body.size.width = metadata->base_size.width;
		cmd->body.size.height = metadata->base_size.height;
		cmd->body.size.depth = metadata->base_size.depth;
	}

	vmw_cmd_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}


/*
 * vmw_gb_surface_bind - Bind the surface to a mob and, if the guest
 * backing is dirty, schedule an update of the device copy.
 */
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	/* Append an UPDATE_GB_SURFACE only when the guest backing is dirty. */
	submit_size = sizeof(*cmd1) + (res->guest_memory_dirty ? sizeof(*cmd2) : 0);

	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd1))
		return -ENOMEM;

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->resource->start;
	if (res->guest_memory_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
	}
	vmw_cmd_commit(dev_priv, submit_size);

	if (res->guest_memory_bo->dirty && res->guest_memory_dirty) {
		/* We've just made a full upload. Clear dirty regions. */
		vmw_bo_dirty_clear_res(res);
	}

	res->guest_memory_dirty = false;

	return 0;
}

/*
 * vmw_gb_surface_unbind - Detach the surface from its mob, optionally
 * reading device contents back into the guest backing store first, and
 * fence the backup buffer so it isn't reused before the device is done.
 */
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	/* Either readback or invalidate precedes the unbinding BIND command. */
	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	/* Bind to SVGA3D_INVALID_ID detaches the surface from its mob. */
	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/*
 * vmw_gb_surface_destroy - Scrub bindings and emit a DESTROY_GB_SURFACE
 * command, then release the surface id.
 */
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	/* Never created on the device; nothing to destroy. */
	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(!cmd)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 *
 * Legacy entry point: widens the v0 request into the extended request
 * format with neutral defaults and forwards to the common implementation.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_create_ext_req req_ext;

	req_ext.base = arg->req;
	req_ext.version = drm_vmw_gb_surface_v1;
	req_ext.svga3d_flags_upper_32_bits = 0;
	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
	req_ext.buffer_byte_stride = 0;
	req_ext.must_be_zero = 0;

	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 *
 * Legacy entry point: calls the extended implementation and narrows the
 * extended reply down to the v0 reply layout.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
	int ret;

	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);

	if (unlikely(ret != 0))
		return ret;

	rep->creq = rep_ext.creq.base;
	rep->crep = rep_ext.crep;

	return ret;
}

/**
 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_ext_arg *arg =
	    (union drm_vmw_gb_surface_create_ext_arg *)data;
	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;

	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
1483 */ 1484 int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data, 1485 struct drm_file *file_priv) 1486 { 1487 union drm_vmw_gb_surface_reference_ext_arg *arg = 1488 (union drm_vmw_gb_surface_reference_ext_arg *)data; 1489 struct drm_vmw_surface_arg *req = &arg->req; 1490 struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep; 1491 1492 return vmw_gb_surface_reference_internal(dev, req, rep, file_priv); 1493 } 1494 1495 /** 1496 * vmw_gb_surface_define_internal - Ioctl function implementing 1497 * the user surface define functionality. 1498 * 1499 * @dev: Pointer to a struct drm_device. 1500 * @req: Request argument from user-space. 1501 * @rep: Response argument to user-space. 1502 * @file_priv: Pointer to a drm file private structure. 1503 */ 1504 static int 1505 vmw_gb_surface_define_internal(struct drm_device *dev, 1506 struct drm_vmw_gb_surface_create_ext_req *req, 1507 struct drm_vmw_gb_surface_create_rep *rep, 1508 struct drm_file *file_priv) 1509 { 1510 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1511 struct vmw_private *dev_priv = vmw_priv(dev); 1512 struct vmw_user_surface *user_srf; 1513 struct vmw_surface_metadata metadata = {0}; 1514 struct vmw_surface *srf; 1515 struct vmw_resource *res; 1516 struct vmw_resource *tmp; 1517 int ret = 0; 1518 uint32_t backup_handle = 0; 1519 SVGA3dSurfaceAllFlags svga3d_flags_64 = 1520 SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits, 1521 req->base.svga3d_flags); 1522 1523 /* array_size must be null for non-GL3 host. 
*/ 1524 if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) { 1525 VMW_DEBUG_USER("SM4 surface not supported.\n"); 1526 return -EINVAL; 1527 } 1528 1529 if (!has_sm4_1_context(dev_priv)) { 1530 if (req->svga3d_flags_upper_32_bits != 0) 1531 ret = -EINVAL; 1532 1533 if (req->base.multisample_count != 0) 1534 ret = -EINVAL; 1535 1536 if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE) 1537 ret = -EINVAL; 1538 1539 if (req->quality_level != SVGA3D_MS_QUALITY_NONE) 1540 ret = -EINVAL; 1541 1542 if (ret) { 1543 VMW_DEBUG_USER("SM4.1 surface not supported.\n"); 1544 return ret; 1545 } 1546 } 1547 1548 if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) { 1549 VMW_DEBUG_USER("SM5 surface not supported.\n"); 1550 return -EINVAL; 1551 } 1552 1553 if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) && 1554 req->base.multisample_count == 0) { 1555 VMW_DEBUG_USER("Invalid sample count.\n"); 1556 return -EINVAL; 1557 } 1558 1559 if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) { 1560 VMW_DEBUG_USER("Invalid mip level.\n"); 1561 return -EINVAL; 1562 } 1563 1564 metadata.flags = svga3d_flags_64; 1565 metadata.format = req->base.format; 1566 metadata.mip_levels[0] = req->base.mip_levels; 1567 metadata.multisample_count = req->base.multisample_count; 1568 metadata.multisample_pattern = req->multisample_pattern; 1569 metadata.quality_level = req->quality_level; 1570 metadata.array_size = req->base.array_size; 1571 metadata.buffer_byte_stride = req->buffer_byte_stride; 1572 metadata.num_sizes = 1; 1573 metadata.base_size = req->base.base_size; 1574 metadata.scanout = req->base.drm_surface_flags & 1575 drm_vmw_surface_flag_scanout; 1576 1577 /* Define a surface based on the parameters. 
*/ 1578 ret = vmw_gb_surface_define(dev_priv, &metadata, &srf); 1579 if (ret != 0) { 1580 VMW_DEBUG_USER("Failed to define surface.\n"); 1581 return ret; 1582 } 1583 1584 user_srf = container_of(srf, struct vmw_user_surface, srf); 1585 if (drm_is_primary_client(file_priv)) 1586 user_srf->master = drm_file_get_master(file_priv); 1587 1588 res = &user_srf->srf.res; 1589 1590 if (req->base.buffer_handle != SVGA3D_INVALID_ID) { 1591 ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle, 1592 &res->guest_memory_bo); 1593 if (ret == 0) { 1594 if (res->guest_memory_bo->is_dumb) { 1595 VMW_DEBUG_USER("Can't backup surface with a dumb buffer.\n"); 1596 vmw_user_bo_unref(&res->guest_memory_bo); 1597 ret = -EINVAL; 1598 goto out_unlock; 1599 } else if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) { 1600 VMW_DEBUG_USER("Surface backup buffer too small.\n"); 1601 vmw_user_bo_unref(&res->guest_memory_bo); 1602 ret = -EINVAL; 1603 goto out_unlock; 1604 } else { 1605 backup_handle = req->base.buffer_handle; 1606 } 1607 } 1608 } else if (req->base.drm_surface_flags & 1609 (drm_vmw_surface_flag_create_buffer | 1610 drm_vmw_surface_flag_coherent)) { 1611 ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, 1612 res->guest_memory_size, 1613 &backup_handle, 1614 &res->guest_memory_bo); 1615 } 1616 1617 if (unlikely(ret != 0)) { 1618 vmw_resource_unreference(&res); 1619 goto out_unlock; 1620 } 1621 1622 if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) { 1623 struct vmw_bo *backup = res->guest_memory_bo; 1624 1625 ttm_bo_reserve(&backup->tbo, false, false, NULL); 1626 if (!res->func->dirty_alloc) 1627 ret = -EINVAL; 1628 if (!ret) 1629 ret = vmw_bo_dirty_add(backup); 1630 if (!ret) { 1631 res->coherent = true; 1632 ret = res->func->dirty_alloc(res); 1633 } 1634 ttm_bo_unreserve(&backup->tbo); 1635 if (ret) { 1636 vmw_resource_unreference(&res); 1637 goto out_unlock; 1638 } 1639 1640 } 1641 1642 if (res->guest_memory_bo) { 1643 ret = 
vmw_bo_add_detached_resource(res->guest_memory_bo, res); 1644 if (unlikely(ret != 0)) { 1645 vmw_resource_unreference(&res); 1646 goto out_unlock; 1647 } 1648 } 1649 1650 tmp = vmw_resource_reference(res); 1651 ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime, 1652 VMW_RES_SURFACE, 1653 &vmw_user_surface_base_release); 1654 1655 if (unlikely(ret != 0)) { 1656 vmw_resource_unreference(&tmp); 1657 vmw_resource_unreference(&res); 1658 goto out_unlock; 1659 } 1660 1661 rep->handle = user_srf->prime.base.handle; 1662 rep->backup_size = res->guest_memory_size; 1663 if (res->guest_memory_bo) { 1664 rep->buffer_map_handle = 1665 drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node); 1666 rep->buffer_size = res->guest_memory_bo->tbo.base.size; 1667 rep->buffer_handle = backup_handle; 1668 } else { 1669 rep->buffer_map_handle = 0; 1670 rep->buffer_size = 0; 1671 rep->buffer_handle = SVGA3D_INVALID_ID; 1672 } 1673 vmw_resource_unreference(&res); 1674 1675 out_unlock: 1676 return ret; 1677 } 1678 1679 /** 1680 * vmw_gb_surface_reference_internal - Ioctl function implementing 1681 * the user surface reference functionality. 1682 * 1683 * @dev: Pointer to a struct drm_device. 1684 * @req: Pointer to user-space request surface arg. 1685 * @rep: Pointer to response to user-space. 1686 * @file_priv: Pointer to a drm file private structure. 
1687 */ 1688 static int 1689 vmw_gb_surface_reference_internal(struct drm_device *dev, 1690 struct drm_vmw_surface_arg *req, 1691 struct drm_vmw_gb_surface_ref_ext_rep *rep, 1692 struct drm_file *file_priv) 1693 { 1694 struct vmw_private *dev_priv = vmw_priv(dev); 1695 struct vmw_surface *srf; 1696 struct vmw_user_surface *user_srf; 1697 struct vmw_surface_metadata *metadata; 1698 struct ttm_base_object *base; 1699 u32 backup_handle; 1700 int ret; 1701 1702 ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, 1703 req->handle_type, &base); 1704 if (unlikely(ret != 0)) 1705 return ret; 1706 1707 user_srf = container_of(base, struct vmw_user_surface, prime.base); 1708 srf = &user_srf->srf; 1709 if (!srf->res.guest_memory_bo) { 1710 DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); 1711 goto out_bad_resource; 1712 } 1713 metadata = &srf->metadata; 1714 1715 mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ 1716 ret = drm_gem_handle_create(file_priv, &srf->res.guest_memory_bo->tbo.base, 1717 &backup_handle); 1718 mutex_unlock(&dev_priv->cmdbuf_mutex); 1719 if (ret != 0) { 1720 drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n", 1721 req->sid); 1722 goto out_bad_resource; 1723 } 1724 1725 rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags); 1726 rep->creq.base.format = metadata->format; 1727 rep->creq.base.mip_levels = metadata->mip_levels[0]; 1728 rep->creq.base.drm_surface_flags = 0; 1729 rep->creq.base.multisample_count = metadata->multisample_count; 1730 rep->creq.base.autogen_filter = metadata->autogen_filter; 1731 rep->creq.base.array_size = metadata->array_size; 1732 rep->creq.base.buffer_handle = backup_handle; 1733 rep->creq.base.base_size = metadata->base_size; 1734 rep->crep.handle = user_srf->prime.base.handle; 1735 rep->crep.backup_size = srf->res.guest_memory_size; 1736 rep->crep.buffer_handle = backup_handle; 1737 rep->crep.buffer_map_handle = 1738 
drm_vma_node_offset_addr(&srf->res.guest_memory_bo->tbo.base.vma_node); 1739 rep->crep.buffer_size = srf->res.guest_memory_bo->tbo.base.size; 1740 1741 rep->creq.version = drm_vmw_gb_surface_v1; 1742 rep->creq.svga3d_flags_upper_32_bits = 1743 SVGA3D_FLAGS_UPPER_32(metadata->flags); 1744 rep->creq.multisample_pattern = metadata->multisample_pattern; 1745 rep->creq.quality_level = metadata->quality_level; 1746 rep->creq.must_be_zero = 0; 1747 1748 out_bad_resource: 1749 ttm_base_object_unref(&base); 1750 1751 return ret; 1752 } 1753 1754 /** 1755 * vmw_subres_dirty_add - Add a dirty region to a subresource 1756 * @dirty: The surfaces's dirty tracker. 1757 * @loc_start: The location corresponding to the start of the region. 1758 * @loc_end: The location corresponding to the end of the region. 1759 * 1760 * As we are assuming that @loc_start and @loc_end represent a sequential 1761 * range of backing store memory, if the region spans multiple lines then 1762 * regardless of the x coordinate, the full lines are dirtied. 1763 * Correspondingly if the region spans multiple z slices, then full rather 1764 * than partial z slices are dirtied. 
 */
static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
				 const struct vmw_surface_loc *loc_start,
				 const struct vmw_surface_loc *loc_end)
{
	const struct vmw_surface_cache *cache = &dirty->cache;
	/*
	 * NOTE(review): these addresses are computed from
	 * loc_start->sub_resource before the bounds WARN below; they are not
	 * dereferenced until after the early return, but verify the ordering
	 * is intentional.
	 */
	SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
	u32 mip = loc_start->sub_resource % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	u32 box_c2 = box->z + box->d;

	if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
		return;

	/* Grow the box along z to cover [loc_start->z, loc_end->z). */
	if (box->d == 0 || box->z > loc_start->z)
		box->z = loc_start->z;
	if (box_c2 < loc_end->z)
		box->d = loc_end->z - box->z;

	if (loc_start->z + 1 == loc_end->z) {
		/* Single z slice: grow y, and x only for a single line. */
		box_c2 = box->y + box->h;
		if (box->h == 0 || box->y > loc_start->y)
			box->y = loc_start->y;
		if (box_c2 < loc_end->y)
			box->h = loc_end->y - box->y;

		if (loc_start->y + 1 == loc_end->y) {
			box_c2 = box->x + box->w;
			if (box->w == 0 || box->x > loc_start->x)
				box->x = loc_start->x;
			if (box_c2 < loc_end->x)
				box->w = loc_end->x - box->x;
		} else {
			/* Multiple lines: dirty the full line width. */
			box->x = 0;
			box->w = size->width;
		}
	} else {
		/* Multiple z slices: dirty full slices. */
		box->y = 0;
		box->h = size->height;
		box->x = 0;
		box->w = size->width;
	}
}

/**
 * vmw_subres_dirty_full - Mark a full subresource as dirty
 * @dirty: The surface's dirty tracker.
 * @subres: The subresource
 */
static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
{
	const struct vmw_surface_cache *cache = &dirty->cache;
	u32 mip = subres % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	SVGA3dBox *box = &dirty->boxes[subres];

	box->x = 0;
	box->y = 0;
	box->z = 0;
	box->w = size->width;
	box->h = size->height;
	box->d = size->depth;
}

/*
 * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
 * surfaces.
 */
static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
					    size_t start, size_t end)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t backup_end = res->guest_memory_offset + res->guest_memory_size;
	struct vmw_surface_loc loc1, loc2;
	const struct vmw_surface_cache *cache;

	/* Clamp the byte range to the backing store and make it relative. */
	start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
	end = min(end, backup_end) - res->guest_memory_offset;
	cache = &dirty->cache;
	vmw_surface_get_loc(cache, &loc1, start);
	vmw_surface_get_loc(cache, &loc2, end - 1);
	vmw_surface_inc_loc(cache, &loc2);

	if (loc1.sheet != loc2.sheet) {
		u32 sub_res;

		/*
		 * Multiple multisample sheets. To do this in an optimized
		 * fashion, compute the dirty region for each sheet and the
		 * resulting union. Since this is not a common case, just dirty
		 * the whole surface.
		 */
		for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
			vmw_subres_dirty_full(dirty, sub_res);
		return;
	}
	if (loc1.sub_resource + 1 == loc2.sub_resource) {
		/* Dirty range covers a single sub-resource */
		vmw_subres_dirty_add(dirty, &loc1, &loc2);
	} else {
		/* Dirty range covers multiple sub-resources */
		struct vmw_surface_loc loc_min, loc_max;
		u32 sub_res;

		/* Partial first and last subresource, full ones in between. */
		vmw_surface_max_loc(cache, loc1.sub_resource, &loc_max);
		vmw_subres_dirty_add(dirty, &loc1, &loc_max);
		vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
		vmw_subres_dirty_add(dirty, &loc_min, &loc2);
		for (sub_res = loc1.sub_resource + 1;
		     sub_res < loc2.sub_resource - 1; ++sub_res)
			vmw_subres_dirty_full(dirty, sub_res);
	}
}

/*
 * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
 * surfaces.
 */
static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
					    size_t start, size_t end)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	const struct vmw_surface_cache *cache = &dirty->cache;
	size_t backup_end = res->guest_memory_offset + cache->mip_chain_bytes;
	SVGA3dBox *box = &dirty->boxes[0];
	u32 box_c2;

	/* A buffer surface is one-dimensional: a single box along x. */
	box->h = box->d = 1;
	start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
	end = min(end, backup_end) - res->guest_memory_offset;
	box_c2 = box->x + box->w;
	if (box->w == 0 || box->x > start)
		box->x = start;
	if (box_c2 < end)
		box->w = end - box->x;
}

/*
 * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces;
 * dispatches to the buffer or texture variant based on the surface format.
 */
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);

	if (WARN_ON(end <= res->guest_memory_offset ||
		    start >= res->guest_memory_offset + res->guest_memory_size))
		return;

	if (srf->metadata.format == SVGA3D_BUFFER)
		vmw_surface_buf_dirty_range_add(res, start, end);
	else
		vmw_surface_tex_dirty_range_add(res, start, end);
}

/*
 * vmw_surface_dirty_sync - The surface's dirty_sync callback.
 */
static int vmw_surface_dirty_sync(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	u32 i, num_dirty;
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t alloc_size;
	const struct vmw_surface_cache *cache = &dirty->cache;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXUpdateSubResource body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd2;
	void *cmd;

	/* Count dirty subresources; d != 0 marks a non-empty box. */
	num_dirty = 0;
	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (box->d)
			num_dirty++;
	}

	if (!num_dirty)
		goto out;

	alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd1 = cmd;
	cmd2 = cmd;

	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (!box->d)
			continue;

		/*
		 * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
		 * UPDATE_GB_IMAGE is not.
		 */
		if (has_sm4_context(dev_priv)) {
			cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
			cmd1->header.size = sizeof(cmd1->body);
			cmd1->body.sid = res->id;
			cmd1->body.subResource = i;
			cmd1->body.box = *box;
			cmd1++;
		} else {
			cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
			cmd2->header.size = sizeof(cmd2->body);
			cmd2->body.image.sid = res->id;
			cmd2->body.image.face = i / cache->num_mip_levels;
			cmd2->body.image.mipmap = i -
				(cache->num_mip_levels * cmd2->body.image.face);
			cmd2->body.box = *box;
			cmd2++;
		}

	}
	vmw_cmd_commit(dev_priv, alloc_size);
out:
	/* All regions flushed (or none present); reset the dirty boxes. */
	memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
	       dirty->num_subres);

	return 0;
}

/*
 * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
 */
static int vmw_surface_dirty_alloc(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	const struct vmw_surface_metadata *metadata = &srf->metadata;
	struct vmw_surface_dirty *dirty;
	u32 num_layers = 1;
	u32 num_mip;
	u32 num_subres;
	u32 num_samples;
	size_t dirty_size;
	int ret;

	/* One dirty box per (layer/face, mip) pair. */
	if (metadata->array_size)
		num_layers = metadata->array_size;
	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers *= SVGA3D_MAX_SURFACE_FACES;

	num_mip = metadata->mip_levels[0];
	if (!num_mip)
		num_mip = 1;

	num_subres = num_layers * num_mip;
	dirty_size = struct_size(dirty, boxes, num_subres);

	dirty = kvzalloc(dirty_size, GFP_KERNEL);
	if (!dirty) {
		ret = -ENOMEM;
		goto out_no_dirty;
	}

	num_samples = max_t(u32, 1, metadata->multisample_count);
	ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
				      num_mip, num_layers, num_samples,
				      &dirty->cache);
	if (ret)
		goto out_no_cache;

	dirty->num_subres = num_subres;
	res->dirty =
	    (struct vmw_resource_dirty *) dirty;

	return 0;

out_no_cache:
	kvfree(dirty);
out_no_dirty:
	return ret;
}

/*
 * vmw_surface_dirty_free - The surface's dirty_free callback
 */
static void vmw_surface_dirty_free(struct vmw_resource *res)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;

	kvfree(dirty);
	res->dirty = NULL;
}

/*
 * vmw_surface_clean - The surface's clean callback; issues a readback so
 * the guest backing store matches the device copy.
 */
static int vmw_surface_clean(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	size_t alloc_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	alloc_size = sizeof(*cmd);
	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_cmd_commit(dev_priv, alloc_size);

	return 0;
}

/*
 * vmw_gb_surface_define - Define a private GB surface
 *
 * @dev_priv: Pointer to a device private.
 * @req: Metadata representing the surface to create.
 * @srf_out: Allocated surface. Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle, and
 * thus will only be visible to vmwgfx.  For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
 *
 * Returns 0 on success, negative error code on failure.
 */
int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata *metadata;
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	u32 sample_count = 1;
	u32 num_layers = 1;
	int ret;

	*srf_out = NULL;

	if (req->scanout) {
		if (!vmw_surface_is_screen_target_format(req->format)) {
			VMW_DEBUG_USER("Invalid Screen Target surface format.");
			return -EINVAL;
		}

		if (req->base_size.width > dev_priv->texture_max_width ||
		    req->base_size.height > dev_priv->texture_max_height) {
			VMW_DEBUG_USER("%ux%u\n, exceed max surface size %ux%u",
				       req->base_size.width,
				       req->base_size.height,
				       dev_priv->texture_max_width,
				       dev_priv->texture_max_height);
			return -EINVAL;
		}
	} else {
		const SVGA3dSurfaceDesc *desc =
			vmw_surface_get_desc(req->format);

		if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) {
			VMW_DEBUG_USER("Invalid surface format.\n");
			return -EINVAL;
		}
	}

	/* Private GB surfaces only support a single, filter-less size. */
	if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
		return -EINVAL;

	if (req->num_sizes != 1)
		return -EINVAL;

	if (req->sizes != NULL)
		return -EINVAL;

	user_srf = kzalloc_obj(*user_srf);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	*srf_out = &user_srf->srf;

	srf = &user_srf->srf;
	srf->metadata = *req;
	srf->offsets = NULL;

	metadata = &srf->metadata;

	if (metadata->array_size)
		num_layers = req->array_size;
	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers = SVGA3D_MAX_SURFACE_FACES;

	if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
		sample_count = metadata->multisample_count;

	srf->res.guest_memory_size =
		vmw_surface_get_serialized_size_extended(
				metadata->format,
				metadata->base_size,
				metadata->mip_levels[0],
				num_layers,
				sample_count);

	if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
		srf->res.guest_memory_size += sizeof(SVGA3dDXSOState);

	/*
	 * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
	 * size greater than STDU max width/height. This is really a workaround
	 * to support creation of big framebuffer requested by some user-space
	 * for whole topology. That big framebuffer won't really be used for
	 * binding with screen target as during prepare_fb a separate surface is
	 * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target &&
	    metadata->scanout &&
	    metadata->base_size.width <= dev_priv->stdu_max_width &&
	    metadata->base_size.height <= dev_priv->stdu_max_height)
		metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);

	return ret;

out_unlock:
	return ret;
}

/*
 * vmw_format_bpp_to_svga - Map a dumb-buffer bits-per-pixel value to the
 * SVGA3D format used for its backing surface. Falls back to X8R8G8B8 with
 * a warning for unsupported depths.
 */
static SVGA3dSurfaceFormat vmw_format_bpp_to_svga(struct vmw_private *vmw,
						  int bpp)
{
	switch (bpp) {
	case 8: /* DRM_FORMAT_C8 */
		return SVGA3D_P8;
	case 16: /* DRM_FORMAT_RGB565 */
		return SVGA3D_R5G6B5;
	case 32: /* DRM_FORMAT_XRGB8888 */
		if (has_sm4_context(vmw))
			return SVGA3D_B8G8R8X8_UNORM;
		return SVGA3D_X8R8G8B8;
	default:
		drm_warn(&vmw->drm, "Unsupported format bpp: %d\n", bpp);
		return SVGA3D_X8R8G8B8;
	}
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
2228 * @args: Pointer to a struct drm_mode_create_dumb structure 2229 * Return: Zero on success, negative error code on failure. 2230 * 2231 * This is a driver callback for the core drm create_dumb functionality. 2232 * Note that this is very similar to the vmw_bo_alloc ioctl, except 2233 * that the arguments have a different format. 2234 */ 2235 int vmw_dumb_create(struct drm_file *file_priv, 2236 struct drm_device *dev, 2237 struct drm_mode_create_dumb *args) 2238 { 2239 struct vmw_private *dev_priv = vmw_priv(dev); 2240 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 2241 struct vmw_bo *vbo = NULL; 2242 struct vmw_resource *res = NULL; 2243 union drm_vmw_gb_surface_create_ext_arg arg = { 0 }; 2244 struct drm_vmw_gb_surface_create_ext_req *req = &arg.req; 2245 int ret; 2246 struct drm_vmw_size drm_size = { 2247 .width = args->width, 2248 .height = args->height, 2249 .depth = 1, 2250 }; 2251 SVGA3dSurfaceFormat format = vmw_format_bpp_to_svga(dev_priv, args->bpp); 2252 const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format); 2253 SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE | 2254 SVGA3D_SURFACE_HINT_RENDERTARGET | 2255 SVGA3D_SURFACE_SCREENTARGET; 2256 2257 if (vmw_surface_is_dx_screen_target_format(format)) { 2258 flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE | 2259 SVGA3D_SURFACE_BIND_RENDER_TARGET; 2260 } 2261 2262 /* 2263 * Without mob support we're just going to use raw memory buffer 2264 * because we wouldn't be able to support full surface coherency 2265 * without mobs. There also no reason to support surface coherency 2266 * without 3d (i.e. gpu usage on the host) because then all the 2267 * contents is going to be rendered guest side. 
2268 */ 2269 if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) { 2270 ret = drm_mode_size_dumb(dev, args, 0, 0); 2271 if (ret) 2272 return ret; 2273 2274 ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, 2275 args->size, &args->handle, 2276 &vbo); 2277 /* drop reference from allocate - handle holds it now */ 2278 drm_gem_object_put(&vbo->tbo.base); 2279 return ret; 2280 } 2281 2282 req->version = drm_vmw_gb_surface_v1; 2283 req->multisample_pattern = SVGA3D_MS_PATTERN_NONE; 2284 req->quality_level = SVGA3D_MS_QUALITY_NONE; 2285 req->buffer_byte_stride = 0; 2286 req->must_be_zero = 0; 2287 req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags); 2288 req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags); 2289 req->base.format = (uint32_t)format; 2290 req->base.drm_surface_flags = drm_vmw_surface_flag_scanout; 2291 req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable; 2292 req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer; 2293 req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent; 2294 req->base.base_size.width = args->width; 2295 req->base.base_size.height = args->height; 2296 req->base.base_size.depth = 1; 2297 req->base.array_size = 0; 2298 req->base.mip_levels = 1; 2299 req->base.multisample_count = 0; 2300 req->base.buffer_handle = SVGA3D_INVALID_ID; 2301 req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE; 2302 ret = vmw_gb_surface_define_ext_ioctl(dev, &arg, file_priv); 2303 if (ret) { 2304 drm_warn(dev, "Unable to create a dumb buffer\n"); 2305 return ret; 2306 } 2307 2308 args->handle = arg.rep.buffer_handle; 2309 args->size = arg.rep.buffer_size; 2310 args->pitch = vmw_surface_calculate_pitch(desc, &drm_size); 2311 2312 ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg.rep.handle, 2313 user_surface_converter, 2314 &res); 2315 if (ret) { 2316 drm_err(dev, "Created resource handle doesn't exist!\n"); 2317 goto err; 2318 } 2319 2320 vbo = res->guest_memory_bo; 2321 vbo->is_dumb = true; 2322 
vbo->dumb_surface = vmw_res_to_srf(res); 2323 drm_gem_object_put(&vbo->tbo.base); 2324 /* 2325 * Unset the user surface dtor since this in not actually exposed 2326 * to userspace. The suface is owned via the dumb_buffer's GEM handle 2327 */ 2328 struct vmw_user_surface *usurf = container_of(vbo->dumb_surface, 2329 struct vmw_user_surface, srf); 2330 usurf->prime.base.refcount_release = NULL; 2331 err: 2332 if (res) 2333 vmw_resource_unreference(&res); 2334 2335 ttm_ref_object_base_unref(tfile, arg.rep.handle); 2336 2337 return ret; 2338 } 2339