1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /************************************************************************** 3 * 4 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28 29 #include "vmwgfx_drv.h" 30 31 #include "vmwgfx_bo.h" 32 #include "vmwgfx_binding.h" 33 #include "vmwgfx_devcaps.h" 34 #include "vmwgfx_mksstat.h" 35 #include "vmwgfx_vkms.h" 36 #include "ttm_object.h" 37 38 #include <drm/drm_aperture.h> 39 #include <drm/drm_drv.h> 40 #include <drm/drm_fbdev_ttm.h> 41 #include <drm/drm_gem_ttm_helper.h> 42 #include <drm/drm_ioctl.h> 43 #include <drm/drm_module.h> 44 #include <drm/drm_sysfs.h> 45 #include <drm/ttm/ttm_range_manager.h> 46 #include <drm/ttm/ttm_placement.h> 47 #include <generated/utsrelease.h> 48 49 #ifdef CONFIG_X86 50 #include <asm/hypervisor.h> 51 #endif 52 #include <linux/cc_platform.h> 53 #include <linux/dma-mapping.h> 54 #include <linux/module.h> 55 #include <linux/pci.h> 56 #include <linux/version.h> 57 #include <linux/vmalloc.h> 58 59 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 60 61 /* 62 * Fully encoded drm commands. Might move to vmw_drm.h 63 */ 64 65 #define DRM_IOCTL_VMW_GET_PARAM \ 66 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ 67 struct drm_vmw_getparam_arg) 68 #define DRM_IOCTL_VMW_ALLOC_DMABUF \ 69 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ 70 union drm_vmw_alloc_dmabuf_arg) 71 #define DRM_IOCTL_VMW_UNREF_DMABUF \ 72 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ 73 struct drm_vmw_unref_dmabuf_arg) 74 #define DRM_IOCTL_VMW_CURSOR_BYPASS \ 75 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ 76 struct drm_vmw_cursor_bypass_arg) 77 78 #define DRM_IOCTL_VMW_CONTROL_STREAM \ 79 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ 80 struct drm_vmw_control_stream_arg) 81 #define DRM_IOCTL_VMW_CLAIM_STREAM \ 82 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ 83 struct drm_vmw_stream_arg) 84 #define DRM_IOCTL_VMW_UNREF_STREAM \ 85 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ 86 struct drm_vmw_stream_arg) 87 88 #define DRM_IOCTL_VMW_CREATE_CONTEXT \ 89 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ 90 struct drm_vmw_context_arg) 91 #define DRM_IOCTL_VMW_UNREF_CONTEXT \ 92 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ 93 struct drm_vmw_context_arg) 94 #define DRM_IOCTL_VMW_CREATE_SURFACE \ 95 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ 96 union drm_vmw_surface_create_arg) 97 #define DRM_IOCTL_VMW_UNREF_SURFACE \ 98 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ 99 struct drm_vmw_surface_arg) 100 #define DRM_IOCTL_VMW_REF_SURFACE \ 101 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ 102 union drm_vmw_surface_reference_arg) 103 #define DRM_IOCTL_VMW_EXECBUF \ 104 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ 105 struct drm_vmw_execbuf_arg) 106 #define DRM_IOCTL_VMW_GET_3D_CAP \ 107 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ 108 struct drm_vmw_get_3d_cap_arg) 109 #define DRM_IOCTL_VMW_FENCE_WAIT \ 110 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ 111 struct drm_vmw_fence_wait_arg) 112 #define DRM_IOCTL_VMW_FENCE_SIGNALED \ 113 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ 114 struct drm_vmw_fence_signaled_arg) 115 #define DRM_IOCTL_VMW_FENCE_UNREF \ 116 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ 117 struct drm_vmw_fence_arg) 118 #define DRM_IOCTL_VMW_FENCE_EVENT \ 119 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ 120 struct drm_vmw_fence_event_arg) 121 #define DRM_IOCTL_VMW_PRESENT \ 122 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ 123 struct drm_vmw_present_arg) 124 #define DRM_IOCTL_VMW_PRESENT_READBACK \ 125 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ 126 struct drm_vmw_present_readback_arg) 127 #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ 128 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ 129 struct drm_vmw_update_layout_arg) 130 #define DRM_IOCTL_VMW_CREATE_SHADER \ 131 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ 132 struct drm_vmw_shader_create_arg) 133 #define DRM_IOCTL_VMW_UNREF_SHADER \ 134 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ 135 struct drm_vmw_shader_arg) 136 #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ 137 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ 138 union drm_vmw_gb_surface_create_arg) 139 #define DRM_IOCTL_VMW_GB_SURFACE_REF \ 140 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ 141 union drm_vmw_gb_surface_reference_arg) 142 #define DRM_IOCTL_VMW_SYNCCPU \ 143 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ 144 struct drm_vmw_synccpu_arg) 145 #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \ 146 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \ 147 struct drm_vmw_context_arg) 148 #define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \ 149 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \ 150 union drm_vmw_gb_surface_create_ext_arg) 151 #define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \ 152 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \ 153 union drm_vmw_gb_surface_reference_ext_arg) 154 #define DRM_IOCTL_VMW_MSG \ 155 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \ 156 struct drm_vmw_msg_arg) 157 #define DRM_IOCTL_VMW_MKSSTAT_RESET \ 158 DRM_IO(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_RESET) 159 #define DRM_IOCTL_VMW_MKSSTAT_ADD \ 160 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD, \ 161 struct drm_vmw_mksstat_add_arg) 162 #define DRM_IOCTL_VMW_MKSSTAT_REMOVE \ 163 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE, \ 164 struct drm_vmw_mksstat_remove_arg) 165 166 /* 167 * Ioctl definitions. 168 */ 169 170 static const struct drm_ioctl_desc vmw_ioctls[] = { 171 DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl, 172 DRM_RENDER_ALLOW), 173 DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl, 174 DRM_RENDER_ALLOW), 175 DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl, 176 DRM_RENDER_ALLOW), 177 DRM_IOCTL_DEF_DRV(VMW_CURSOR_BYPASS, 178 vmw_kms_cursor_bypass_ioctl, 179 DRM_MASTER), 180 181 DRM_IOCTL_DEF_DRV(VMW_CONTROL_STREAM, vmw_overlay_ioctl, 182 DRM_MASTER), 183 DRM_IOCTL_DEF_DRV(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 184 DRM_MASTER), 185 DRM_IOCTL_DEF_DRV(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 186 DRM_MASTER), 187 188 DRM_IOCTL_DEF_DRV(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 189 DRM_RENDER_ALLOW), 190 DRM_IOCTL_DEF_DRV(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, 191 DRM_RENDER_ALLOW), 192 DRM_IOCTL_DEF_DRV(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, 193 DRM_RENDER_ALLOW), 194 DRM_IOCTL_DEF_DRV(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, 195 DRM_RENDER_ALLOW), 196 DRM_IOCTL_DEF_DRV(VMW_REF_SURFACE, vmw_surface_reference_ioctl, 197 DRM_RENDER_ALLOW), 198 DRM_IOCTL_DEF_DRV(VMW_EXECBUF, vmw_execbuf_ioctl, 199 DRM_RENDER_ALLOW), 200 DRM_IOCTL_DEF_DRV(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, 201 DRM_RENDER_ALLOW), 202 DRM_IOCTL_DEF_DRV(VMW_FENCE_SIGNALED, 203 vmw_fence_obj_signaled_ioctl, 204 DRM_RENDER_ALLOW), 205 DRM_IOCTL_DEF_DRV(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, 206 DRM_RENDER_ALLOW), 207 DRM_IOCTL_DEF_DRV(VMW_FENCE_EVENT, vmw_fence_event_ioctl, 208 DRM_RENDER_ALLOW), 209 DRM_IOCTL_DEF_DRV(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, 210 DRM_RENDER_ALLOW), 211 212 /* these allow direct access to the framebuffers mark as master only */ 213 DRM_IOCTL_DEF_DRV(VMW_PRESENT, vmw_present_ioctl, 214 DRM_MASTER | DRM_AUTH), 215 DRM_IOCTL_DEF_DRV(VMW_PRESENT_READBACK, 216 vmw_present_readback_ioctl, 217 DRM_MASTER | DRM_AUTH), 218 /* 219 * The permissions of the below ioctl are overridden in 220 * vmw_generic_ioctl(). We require either 221 * DRM_MASTER or capable(CAP_SYS_ADMIN). 222 */ 223 DRM_IOCTL_DEF_DRV(VMW_UPDATE_LAYOUT, 224 vmw_kms_update_layout_ioctl, 225 DRM_RENDER_ALLOW), 226 DRM_IOCTL_DEF_DRV(VMW_CREATE_SHADER, 227 vmw_shader_define_ioctl, 228 DRM_RENDER_ALLOW), 229 DRM_IOCTL_DEF_DRV(VMW_UNREF_SHADER, 230 vmw_shader_destroy_ioctl, 231 DRM_RENDER_ALLOW), 232 DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE, 233 vmw_gb_surface_define_ioctl, 234 DRM_RENDER_ALLOW), 235 DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF, 236 vmw_gb_surface_reference_ioctl, 237 DRM_RENDER_ALLOW), 238 DRM_IOCTL_DEF_DRV(VMW_SYNCCPU, 239 vmw_user_bo_synccpu_ioctl, 240 DRM_RENDER_ALLOW), 241 DRM_IOCTL_DEF_DRV(VMW_CREATE_EXTENDED_CONTEXT, 242 vmw_extended_context_define_ioctl, 243 DRM_RENDER_ALLOW), 244 DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE_EXT, 245 vmw_gb_surface_define_ext_ioctl, 246 DRM_RENDER_ALLOW), 247 DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF_EXT, 248 vmw_gb_surface_reference_ext_ioctl, 249 DRM_RENDER_ALLOW), 250 DRM_IOCTL_DEF_DRV(VMW_MSG, 251 vmw_msg_ioctl, 252 DRM_RENDER_ALLOW), 253 DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_RESET, 254 vmw_mksstat_reset_ioctl, 255 DRM_RENDER_ALLOW), 256 DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_ADD, 257 vmw_mksstat_add_ioctl, 258 DRM_RENDER_ALLOW), 259 DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_REMOVE, 260 vmw_mksstat_remove_ioctl, 261 DRM_RENDER_ALLOW), 262 }; 263 264 static const struct pci_device_id vmw_pci_id_list[] = { 265 { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) }, 266 { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) }, 267 { } 268 }; 269 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); 270 271 static int vmw_restrict_iommu; 272 static int vmw_force_coherent; 273 static int vmw_restrict_dma_mask; 274 static int vmw_assume_16bpp; 275 276 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 277 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 278 void *ptr); 279 280 MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); 281 module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); 282 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); 283 module_param_named(force_coherent, vmw_force_coherent, int, 0600); 284 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); 285 module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); 286 MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); 287 module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); 288 289 290 struct bitmap_name { 291 uint32 value; 292 const char *name; 293 }; 294 295 static const struct bitmap_name cap1_names[] = { 296 { SVGA_CAP_RECT_COPY, "rect copy" }, 297 { SVGA_CAP_CURSOR, "cursor" }, 298 { SVGA_CAP_CURSOR_BYPASS, "cursor bypass" }, 299 { SVGA_CAP_CURSOR_BYPASS_2, "cursor bypass 2" }, 300 { SVGA_CAP_8BIT_EMULATION, "8bit emulation" }, 301 { SVGA_CAP_ALPHA_CURSOR, "alpha cursor" }, 302 { SVGA_CAP_3D, "3D" }, 303 { SVGA_CAP_EXTENDED_FIFO, "extended fifo" }, 304 { SVGA_CAP_MULTIMON, "multimon" }, 305 { SVGA_CAP_PITCHLOCK, "pitchlock" }, 306 { SVGA_CAP_IRQMASK, "irq mask" }, 307 { SVGA_CAP_DISPLAY_TOPOLOGY, "display topology" }, 308 { SVGA_CAP_GMR, "gmr" }, 309 { SVGA_CAP_TRACES, "traces" }, 310 { SVGA_CAP_GMR2, "gmr2" }, 311 { SVGA_CAP_SCREEN_OBJECT_2, "screen object 2" }, 312 { SVGA_CAP_COMMAND_BUFFERS, "command buffers" }, 313 { SVGA_CAP_CMD_BUFFERS_2, "command buffers 2" }, 314 { SVGA_CAP_GBOBJECTS, "gbobject" }, 315 { SVGA_CAP_DX, "dx" }, 316 { SVGA_CAP_HP_CMD_QUEUE, "hp cmd queue" }, 317 { SVGA_CAP_NO_BB_RESTRICTION, "no bb restriction" }, 318 { SVGA_CAP_CAP2_REGISTER, "cap2 register" }, 319 }; 320 321 322 static const struct bitmap_name cap2_names[] = { 323 { SVGA_CAP2_GROW_OTABLE, "grow otable" }, 324 { SVGA_CAP2_INTRA_SURFACE_COPY, "intra surface copy" }, 325 { SVGA_CAP2_DX2, "dx2" }, 326 { SVGA_CAP2_GB_MEMSIZE_2, "gb memsize 2" }, 327 { SVGA_CAP2_SCREENDMA_REG, "screendma reg" }, 328 { SVGA_CAP2_OTABLE_PTDEPTH_2, "otable ptdepth2" }, 329 { SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT, "non ms to ms stretchblt" }, 330 { SVGA_CAP2_CURSOR_MOB, "cursor mob" }, 331 { SVGA_CAP2_MSHINT, "mshint" }, 332 { SVGA_CAP2_CB_MAX_SIZE_4MB, "cb max size 4mb" }, 333 { SVGA_CAP2_DX3, "dx3" }, 334 { SVGA_CAP2_FRAME_TYPE, "frame type" }, 335 { SVGA_CAP2_COTABLE_COPY, "cotable copy" }, 336 { SVGA_CAP2_TRACE_FULL_FB, "trace full fb" }, 337 { SVGA_CAP2_EXTRA_REGS, "extra regs" }, 338 { SVGA_CAP2_LO_STAGING, "lo staging" }, 339 }; 340 341 static void vmw_print_bitmap(struct drm_device *drm, 342 const char *prefix, uint32_t bitmap, 343 const struct bitmap_name *bnames, 344 uint32_t num_names) 345 { 346 char buf[512]; 347 uint32_t i; 348 uint32_t offset = 0; 349 for (i = 0; i < num_names; ++i) { 350 if ((bitmap & bnames[i].value) != 0) { 351 offset += snprintf(buf + offset, 352 ARRAY_SIZE(buf) - offset, 353 "%s, ", bnames[i].name); 354 bitmap &= ~bnames[i].value; 355 } 356 } 357 358 drm_info(drm, "%s: %s\n", prefix, buf); 359 if (bitmap != 0) 360 drm_dbg(drm, "%s: unknown enums: %x\n", prefix, bitmap); 361 } 362 363 364 static void vmw_print_sm_type(struct vmw_private *dev_priv) 365 { 366 static const char *names[] = { 367 [VMW_SM_LEGACY] = "Legacy", 368 [VMW_SM_4] = "SM4", 369 [VMW_SM_4_1] = "SM4_1", 370 [VMW_SM_5] = "SM_5", 371 [VMW_SM_5_1X] = "SM_5_1X", 372 [VMW_SM_MAX] = "Invalid" 373 }; 374 BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1)); 375 drm_info(&dev_priv->drm, "Available shader model: %s.\n", 376 names[dev_priv->sm_type]); 377 } 378 379 /** 380 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result 381 * 382 * @dev_priv: A device private structure. 383 * 384 * This function creates a small buffer object that holds the query 385 * result for dummy queries emitted as query barriers. 386 * The function will then map the first page and initialize a pending 387 * occlusion query result structure, Finally it will unmap the buffer. 388 * No interruptible waits are done within this function. 389 * 390 * Returns an error if bo creation or initialization fails. 391 */ 392 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) 393 { 394 int ret; 395 struct vmw_bo *vbo; 396 struct ttm_bo_kmap_obj map; 397 volatile SVGA3dQueryResult *result; 398 bool dummy; 399 struct vmw_bo_params bo_params = { 400 .domain = VMW_BO_DOMAIN_SYS, 401 .busy_domain = VMW_BO_DOMAIN_SYS, 402 .bo_type = ttm_bo_type_kernel, 403 .size = PAGE_SIZE, 404 .pin = true 405 }; 406 407 /* 408 * Create the vbo as pinned, so that a tryreserve will 409 * immediately succeed. This is because we're the only 410 * user of the bo currently. 411 */ 412 ret = vmw_bo_create(dev_priv, &bo_params, &vbo); 413 if (unlikely(ret != 0)) 414 return ret; 415 416 ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL); 417 BUG_ON(ret != 0); 418 vmw_bo_pin_reserved(vbo, true); 419 420 ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map); 421 if (likely(ret == 0)) { 422 result = ttm_kmap_obj_virtual(&map, &dummy); 423 result->totalSize = sizeof(*result); 424 result->state = SVGA3D_QUERYSTATE_PENDING; 425 result->result32 = 0xff; 426 ttm_bo_kunmap(&map); 427 } 428 vmw_bo_pin_reserved(vbo, false); 429 ttm_bo_unreserve(&vbo->tbo); 430 431 if (unlikely(ret != 0)) { 432 DRM_ERROR("Dummy query buffer map failed.\n"); 433 vmw_bo_unreference(&vbo); 434 } else 435 dev_priv->dummy_query_bo = vbo; 436 437 return ret; 438 } 439 440 static int vmw_device_init(struct vmw_private *dev_priv) 441 { 442 bool uses_fb_traces = false; 443 444 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 445 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 446 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); 447 448 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE | 449 SVGA_REG_ENABLE_HIDE); 450 451 uses_fb_traces = !vmw_cmd_supported(dev_priv) && 452 (dev_priv->capabilities & SVGA_CAP_TRACES) != 0; 453 454 vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces); 455 dev_priv->fifo = vmw_fifo_create(dev_priv); 456 if (IS_ERR(dev_priv->fifo)) { 457 int err = PTR_ERR(dev_priv->fifo); 458 dev_priv->fifo = NULL; 459 return err; 460 } else if (!dev_priv->fifo) { 461 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); 462 } 463 464 dev_priv->last_read_seqno = vmw_fence_read(dev_priv); 465 atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); 466 return 0; 467 } 468 469 static void vmw_device_fini(struct vmw_private *vmw) 470 { 471 /* 472 * Legacy sync 473 */ 474 vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); 475 while (vmw_read(vmw, SVGA_REG_BUSY) != 0) 476 ; 477 478 vmw->last_read_seqno = vmw_fence_read(vmw); 479 480 vmw_write(vmw, SVGA_REG_CONFIG_DONE, 481 vmw->config_done_state); 482 vmw_write(vmw, SVGA_REG_ENABLE, 483 vmw->enable_state); 484 vmw_write(vmw, SVGA_REG_TRACES, 485 vmw->traces_state); 486 487 vmw_fifo_destroy(vmw); 488 } 489 490 /** 491 * vmw_request_device_late - Perform late device setup 492 * 493 * @dev_priv: Pointer to device private. 494 * 495 * This function performs setup of otables and enables large command 496 * buffer submission. These tasks are split out to a separate function 497 * because it reverts vmw_release_device_early and is intended to be used 498 * by an error path in the hibernation code. 499 */ 500 static int vmw_request_device_late(struct vmw_private *dev_priv) 501 { 502 int ret; 503 504 if (dev_priv->has_mob) { 505 ret = vmw_otables_setup(dev_priv); 506 if (unlikely(ret != 0)) { 507 DRM_ERROR("Unable to initialize " 508 "guest Memory OBjects.\n"); 509 return ret; 510 } 511 } 512 513 if (dev_priv->cman) { 514 ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096); 515 if (ret) { 516 struct vmw_cmdbuf_man *man = dev_priv->cman; 517 518 dev_priv->cman = NULL; 519 vmw_cmdbuf_man_destroy(man); 520 } 521 } 522 523 return 0; 524 } 525 526 static int vmw_request_device(struct vmw_private *dev_priv) 527 { 528 int ret; 529 530 ret = vmw_device_init(dev_priv); 531 if (unlikely(ret != 0)) { 532 DRM_ERROR("Unable to initialize the device.\n"); 533 return ret; 534 } 535 vmw_fence_fifo_up(dev_priv->fman); 536 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); 537 if (IS_ERR(dev_priv->cman)) { 538 dev_priv->cman = NULL; 539 dev_priv->sm_type = VMW_SM_LEGACY; 540 } 541 542 ret = vmw_request_device_late(dev_priv); 543 if (ret) 544 goto out_no_mob; 545 546 ret = vmw_dummy_query_bo_create(dev_priv); 547 if (unlikely(ret != 0)) 548 goto out_no_query_bo; 549 550 return 0; 551 552 out_no_query_bo: 553 if (dev_priv->cman) 554 vmw_cmdbuf_remove_pool(dev_priv->cman); 555 if (dev_priv->has_mob) { 556 struct ttm_resource_manager *man; 557 558 man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); 559 ttm_resource_manager_evict_all(&dev_priv->bdev, man); 560 vmw_otables_takedown(dev_priv); 561 } 562 if (dev_priv->cman) 563 vmw_cmdbuf_man_destroy(dev_priv->cman); 564 out_no_mob: 565 vmw_fence_fifo_down(dev_priv->fman); 566 vmw_device_fini(dev_priv); 567 return ret; 568 } 569 570 /** 571 * vmw_release_device_early - Early part of fifo takedown. 572 * 573 * @dev_priv: Pointer to device private struct. 574 * 575 * This is the first part of command submission takedown, to be called before 576 * buffer management is taken down. 577 */ 578 static void vmw_release_device_early(struct vmw_private *dev_priv) 579 { 580 /* 581 * Previous destructions should've released 582 * the pinned bo. 583 */ 584 585 BUG_ON(dev_priv->pinned_bo != NULL); 586 587 vmw_bo_unreference(&dev_priv->dummy_query_bo); 588 if (dev_priv->cman) 589 vmw_cmdbuf_remove_pool(dev_priv->cman); 590 591 if (dev_priv->has_mob) { 592 struct ttm_resource_manager *man; 593 594 man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); 595 ttm_resource_manager_evict_all(&dev_priv->bdev, man); 596 vmw_otables_takedown(dev_priv); 597 } 598 } 599 600 /** 601 * vmw_release_device_late - Late part of fifo takedown. 602 * 603 * @dev_priv: Pointer to device private struct. 604 * 605 * This is the last part of the command submission takedown, to be called when 606 * command submission is no longer needed. It may wait on pending fences. 607 */ 608 static void vmw_release_device_late(struct vmw_private *dev_priv) 609 { 610 vmw_fence_fifo_down(dev_priv->fman); 611 if (dev_priv->cman) 612 vmw_cmdbuf_man_destroy(dev_priv->cman); 613 614 vmw_device_fini(dev_priv); 615 } 616 617 /* 618 * Sets the initial_[width|height] fields on the given vmw_private. 619 * 620 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then 621 * clamping the value to fb_max_[width|height] fields and the 622 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 623 * If the values appear to be invalid, set them to 624 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 625 */ 626 static void vmw_get_initial_size(struct vmw_private *dev_priv) 627 { 628 uint32_t width; 629 uint32_t height; 630 631 width = vmw_read(dev_priv, SVGA_REG_WIDTH); 632 height = vmw_read(dev_priv, SVGA_REG_HEIGHT); 633 634 width = max_t(uint32_t, width, VMWGFX_MIN_INITIAL_WIDTH); 635 height = max_t(uint32_t, height, VMWGFX_MIN_INITIAL_HEIGHT); 636 637 if (width > dev_priv->fb_max_width || 638 height > dev_priv->fb_max_height) { 639 640 /* 641 * This is a host error and shouldn't occur. 642 */ 643 644 width = VMWGFX_MIN_INITIAL_WIDTH; 645 height = VMWGFX_MIN_INITIAL_HEIGHT; 646 } 647 648 dev_priv->initial_width = width; 649 dev_priv->initial_height = height; 650 } 651 652 /** 653 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 654 * system. 655 * 656 * @dev_priv: Pointer to a struct vmw_private 657 * 658 * This functions tries to determine what actions need to be taken by the 659 * driver to make system pages visible to the device. 660 * If this function decides that DMA is not possible, it returns -EINVAL. 661 * The driver may then try to disable features of the device that require 662 * DMA. 663 */ 664 static int vmw_dma_select_mode(struct vmw_private *dev_priv) 665 { 666 static const char *names[vmw_dma_map_max] = { 667 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 668 [vmw_dma_map_populate] = "Caching DMA mappings.", 669 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 670 671 /* 672 * When running with SEV we always want dma mappings, because 673 * otherwise ttm tt pool pages will bounce through swiotlb running 674 * out of available space. 675 */ 676 if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT)) 677 dev_priv->map_mode = vmw_dma_alloc_coherent; 678 else if (vmw_restrict_iommu) 679 dev_priv->map_mode = vmw_dma_map_bind; 680 else 681 dev_priv->map_mode = vmw_dma_map_populate; 682 683 drm_info(&dev_priv->drm, 684 "DMA map mode: %s\n", names[dev_priv->map_mode]); 685 return 0; 686 } 687 688 /** 689 * vmw_dma_masks - set required page- and dma masks 690 * 691 * @dev_priv: Pointer to struct drm-device 692 * 693 * With 32-bit we can only handle 32 bit PFNs. Optionally set that 694 * restriction also for 64-bit systems. 695 */ 696 static int vmw_dma_masks(struct vmw_private *dev_priv) 697 { 698 struct drm_device *dev = &dev_priv->drm; 699 int ret = 0; 700 701 ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); 702 if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) { 703 drm_info(&dev_priv->drm, 704 "Restricting DMA addresses to 44 bits.\n"); 705 return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); 706 } 707 708 return ret; 709 } 710 711 static int vmw_vram_manager_init(struct vmw_private *dev_priv) 712 { 713 int ret; 714 ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false, 715 dev_priv->vram_size >> PAGE_SHIFT); 716 ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); 717 return ret; 718 } 719 720 static void vmw_vram_manager_fini(struct vmw_private *dev_priv) 721 { 722 ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM); 723 } 724 725 static int vmw_setup_pci_resources(struct vmw_private *dev, 726 u32 pci_id) 727 { 728 resource_size_t rmmio_start; 729 resource_size_t rmmio_size; 730 resource_size_t fifo_start; 731 resource_size_t fifo_size; 732 int ret; 733 struct pci_dev *pdev = to_pci_dev(dev->drm.dev); 734 735 pci_set_master(pdev); 736 737 ret = pci_request_regions(pdev, "vmwgfx probe"); 738 if (ret) 739 return ret; 740 741 dev->pci_id = pci_id; 742 if (pci_id == VMWGFX_PCI_ID_SVGA3) { 743 rmmio_start = pci_resource_start(pdev, 0); 744 rmmio_size = pci_resource_len(pdev, 0); 745 dev->vram_start = pci_resource_start(pdev, 2); 746 dev->vram_size = pci_resource_len(pdev, 2); 747 748 drm_info(&dev->drm, 749 "Register MMIO at 0x%pa size is %llu KiB\n", 750 &rmmio_start, (uint64_t)rmmio_size / 1024); 751 dev->rmmio = devm_ioremap(dev->drm.dev, 752 rmmio_start, 753 rmmio_size); 754 if (!dev->rmmio) { 755 drm_err(&dev->drm, 756 "Failed mapping registers mmio memory.\n"); 757 pci_release_regions(pdev); 758 return -ENOMEM; 759 } 760 } else if (pci_id == VMWGFX_PCI_ID_SVGA2) { 761 dev->io_start = pci_resource_start(pdev, 0); 762 dev->vram_start = pci_resource_start(pdev, 1); 763 dev->vram_size = pci_resource_len(pdev, 1); 764 fifo_start = pci_resource_start(pdev, 2); 765 fifo_size = pci_resource_len(pdev, 2); 766 767 drm_info(&dev->drm, 768 "FIFO at %pa size is %llu KiB\n", 769 &fifo_start, (uint64_t)fifo_size / 1024); 770 dev->fifo_mem = devm_memremap(dev->drm.dev, 771 fifo_start, 772 fifo_size, 773 MEMREMAP_WB); 774 775 if (IS_ERR(dev->fifo_mem)) { 776 drm_err(&dev->drm, 777 "Failed mapping FIFO memory.\n"); 778 pci_release_regions(pdev); 779 return PTR_ERR(dev->fifo_mem); 780 } 781 } else { 782 pci_release_regions(pdev); 783 return -EINVAL; 784 } 785 786 /* 787 * This is approximate size of the vram, the exact size will only 788 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource 789 * size will be equal to or bigger than the size reported by 790 * SVGA_REG_VRAM_SIZE. 791 */ 792 drm_info(&dev->drm, 793 "VRAM at %pa size is %llu KiB\n", 794 &dev->vram_start, (uint64_t)dev->vram_size / 1024); 795 796 return 0; 797 } 798 799 static int vmw_detect_version(struct vmw_private *dev) 800 { 801 uint32_t svga_id; 802 803 vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ? 804 SVGA_ID_3 : SVGA_ID_2); 805 svga_id = vmw_read(dev, SVGA_REG_ID); 806 if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) { 807 drm_err(&dev->drm, 808 "Unsupported SVGA ID 0x%x on chipset 0x%x\n", 809 svga_id, dev->pci_id); 810 return -ENOSYS; 811 } 812 BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3)); 813 drm_info(&dev->drm, 814 "Running on SVGA version %d.\n", (svga_id & 0xff)); 815 return 0; 816 } 817 818 static void vmw_write_driver_id(struct vmw_private *dev) 819 { 820 if ((dev->capabilities2 & SVGA_CAP2_DX2) != 0) { 821 vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID, 822 SVGA_REG_GUEST_DRIVER_ID_LINUX); 823 824 vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION1, 825 LINUX_VERSION_MAJOR << 24 | 826 LINUX_VERSION_PATCHLEVEL << 16 | 827 LINUX_VERSION_SUBLEVEL); 828 vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION2, 829 VMWGFX_DRIVER_MAJOR << 24 | 830 VMWGFX_DRIVER_MINOR << 16 | 831 VMWGFX_DRIVER_PATCHLEVEL); 832 vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION3, 0); 833 834 vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID, 835 SVGA_REG_GUEST_DRIVER_ID_SUBMIT); 836 } 837 } 838 839 static void vmw_sw_context_init(struct vmw_private *dev_priv) 840 { 841 struct vmw_sw_context *sw_context = &dev_priv->ctx; 842 843 hash_init(sw_context->res_ht); 844 } 845 846 static void vmw_sw_context_fini(struct vmw_private *dev_priv) 847 { 848 struct vmw_sw_context *sw_context = &dev_priv->ctx; 849 850 vfree(sw_context->cmd_bounce); 851 if (sw_context->staged_bindings) 852 vmw_binding_state_free(sw_context->staged_bindings); 853 } 854 855 static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) 856 { 857 int ret; 858 enum vmw_res_type i; 859 bool refuse_dma = false; 860 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 861 862 dev_priv->drm.dev_private = dev_priv; 863 864 vmw_sw_context_init(dev_priv); 865 866 mutex_init(&dev_priv->cmdbuf_mutex); 867 mutex_init(&dev_priv->binding_mutex); 868 spin_lock_init(&dev_priv->resource_lock); 869 spin_lock_init(&dev_priv->hw_lock); 870 spin_lock_init(&dev_priv->waiter_lock); 871 spin_lock_init(&dev_priv->cursor_lock); 872 873 ret = vmw_setup_pci_resources(dev_priv, pci_id); 874 if (ret) 875 return ret; 876 ret = vmw_detect_version(dev_priv); 877 if (ret) 878 goto out_no_pci_or_version; 879 880 881 for (i = vmw_res_context; i < vmw_res_max; ++i) { 882 idr_init_base(&dev_priv->res_idr[i], 1); 883 INIT_LIST_HEAD(&dev_priv->res_lru[i]); 884 } 885 886 init_waitqueue_head(&dev_priv->fence_queue); 887 init_waitqueue_head(&dev_priv->fifo_queue); 888 dev_priv->fence_queue_waiters = 0; 889 dev_priv->fifo_queue_waiters = 0; 890 891 dev_priv->used_memory_size = 0; 892 893 dev_priv->assume_16bpp = !!vmw_assume_16bpp; 894 895 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); 896 vmw_print_bitmap(&dev_priv->drm, "Capabilities", 897 dev_priv->capabilities, 898 cap1_names, ARRAY_SIZE(cap1_names)); 899 if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) { 900 dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2); 901 vmw_print_bitmap(&dev_priv->drm, "Capabilities2", 902 dev_priv->capabilities2, 903 cap2_names, ARRAY_SIZE(cap2_names)); 904 } 905 906 if (!vmwgfx_supported(dev_priv)) { 907 vmw_disable_backdoor(); 908 drm_err_once(&dev_priv->drm, 909 "vmwgfx seems to be running on an unsupported hypervisor."); 910 drm_err_once(&dev_priv->drm, 911 "This configuration is likely broken."); 912 drm_err_once(&dev_priv->drm, 913 "Please switch to a supported graphics device to avoid problems."); 914 } 915 916 vmw_vkms_init(dev_priv); 917 918 ret = vmw_dma_select_mode(dev_priv); 919 if (unlikely(ret != 0)) { 920 drm_info(&dev_priv->drm, 921 "Restricting capabilities since DMA not available.\n"); 922 refuse_dma = true; 923 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) 924 drm_info(&dev_priv->drm, 925 "Disabling 3D acceleration.\n"); 926 } 927 928 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); 929 dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); 930 dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); 931 dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); 932 933 vmw_get_initial_size(dev_priv); 934 935 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 936 dev_priv->max_gmr_ids = 937 vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); 938 dev_priv->max_gmr_pages = 939 vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); 940 dev_priv->memory_size = 941 vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); 942 dev_priv->memory_size -= dev_priv->vram_size; 943 } else { 944 /* 945 * An arbitrary limit of 512MiB on surface 946 * memory. But all HWV8 hardware supports GMR2. 947 */ 948 dev_priv->memory_size = 512*1024*1024; 949 } 950 dev_priv->max_mob_pages = 0; 951 dev_priv->max_mob_size = 0; 952 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 953 uint64_t mem_size; 954 955 if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2) 956 mem_size = vmw_read(dev_priv, 957 SVGA_REG_GBOBJECT_MEM_SIZE_KB); 958 else 959 mem_size = 960 vmw_read(dev_priv, 961 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); 962 963 dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; 964 dev_priv->max_primary_mem = 965 vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM); 966 dev_priv->max_mob_size = 967 vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); 968 dev_priv->stdu_max_width = 969 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH); 970 dev_priv->stdu_max_height = 971 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT); 972 973 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 974 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH); 975 dev_priv->texture_max_width = vmw_read(dev_priv, 976 SVGA_REG_DEV_CAP); 977 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 978 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT); 979 dev_priv->texture_max_height = vmw_read(dev_priv, 980 SVGA_REG_DEV_CAP); 981 } else { 982 dev_priv->texture_max_width = 8192; 983 dev_priv->texture_max_height = 8192; 984 dev_priv->max_primary_mem = dev_priv->vram_size; 985 } 986 drm_info(&dev_priv->drm, 987 "Legacy memory limits: VRAM = %llu KiB, FIFO = %llu KiB, surface = %u KiB\n", 988 (u64)dev_priv->vram_size / 1024, 989 (u64)dev_priv->fifo_mem_size / 1024, 990 dev_priv->memory_size / 1024); 991 992 drm_info(&dev_priv->drm, 993 "MOB limits: max mob size = %u KiB, max mob pages = %u\n", 994 dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages); 995 996 ret = vmw_dma_masks(dev_priv); 997 if (unlikely(ret != 0)) 998 goto out_err0; 999 1000 dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX); 1001 1002 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 1003 drm_info(&dev_priv->drm, 1004 "Max GMR ids is %u\n", 1005 (unsigned)dev_priv->max_gmr_ids); 1006 drm_info(&dev_priv->drm, 1007 "Max number of GMR pages is %u\n", 1008 (unsigned)dev_priv->max_gmr_pages); 1009 } 1010 drm_info(&dev_priv->drm, 1011 "Maximum display memory size is %llu KiB\n", 1012 (uint64_t)dev_priv->max_primary_mem / 1024); 1013 1014 /* Need mmio memory to check for fifo pitchlock cap. */ 1015 if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && 1016 !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && 1017 !vmw_fifo_have_pitchlock(dev_priv)) { 1018 ret = -ENOSYS; 1019 DRM_ERROR("Hardware has no pitchlock\n"); 1020 goto out_err0; 1021 } 1022 1023 dev_priv->tdev = ttm_object_device_init(&vmw_prime_dmabuf_ops); 1024 1025 if (unlikely(dev_priv->tdev == NULL)) { 1026 drm_err(&dev_priv->drm, 1027 "Unable to initialize TTM object management.\n"); 1028 ret = -ENOMEM; 1029 goto out_err0; 1030 } 1031 1032 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 1033 ret = vmw_irq_install(dev_priv); 1034 if (ret != 0) { 1035 drm_err(&dev_priv->drm, 1036 "Failed installing irq: %d\n", ret); 1037 goto out_no_irq; 1038 } 1039 } 1040 1041 dev_priv->fman = vmw_fence_manager_init(dev_priv); 1042 if (unlikely(dev_priv->fman == NULL)) { 1043 ret = -ENOMEM; 1044 goto out_no_fman; 1045 } 1046 1047 ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver, 1048 dev_priv->drm.dev, 1049 dev_priv->drm.anon_inode->i_mapping, 1050 dev_priv->drm.vma_offset_manager, 1051 dev_priv->map_mode == vmw_dma_alloc_coherent, 1052 false); 1053 if (unlikely(ret != 0)) { 1054 drm_err(&dev_priv->drm, 1055 "Failed initializing TTM buffer object driver.\n"); 1056 goto out_no_bdev; 1057 } 1058 1059 /* 1060 * Enable VRAM, but initially don't use it until SVGA is enabled and 1061 * unhidden. 1062 */ 1063 1064 ret = vmw_vram_manager_init(dev_priv); 1065 if (unlikely(ret != 0)) { 1066 drm_err(&dev_priv->drm, 1067 "Failed initializing memory manager for VRAM.\n"); 1068 goto out_no_vram; 1069 } 1070 1071 ret = vmw_devcaps_create(dev_priv); 1072 if (unlikely(ret != 0)) { 1073 drm_err(&dev_priv->drm, 1074 "Failed initializing device caps.\n"); 1075 goto out_no_vram; 1076 } 1077 1078 /* 1079 * "Guest Memory Regions" is an aperture like feature with 1080 * one slot per bo. There is an upper limit of the number of 1081 * slots as well as the bo size. 1082 */ 1083 dev_priv->has_gmr = true; 1084 /* TODO: This is most likely not correct */ 1085 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || 1086 refuse_dma || 1087 vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) { 1088 drm_info(&dev_priv->drm, 1089 "No GMR memory available. " 1090 "Graphics memory resources are very limited.\n"); 1091 dev_priv->has_gmr = false; 1092 } 1093 1094 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) { 1095 dev_priv->has_mob = true; 1096 1097 if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) { 1098 drm_info(&dev_priv->drm, 1099 "No MOB memory available. " 1100 "3D will be disabled.\n"); 1101 dev_priv->has_mob = false; 1102 } 1103 if (vmw_sys_man_init(dev_priv) != 0) { 1104 drm_info(&dev_priv->drm, 1105 "No MOB page table memory available. " 1106 "3D will be disabled.\n"); 1107 dev_priv->has_mob = false; 1108 } 1109 } 1110 1111 if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) { 1112 if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_DXCONTEXT)) 1113 dev_priv->sm_type = VMW_SM_4; 1114 } 1115 1116 /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */ 1117 if (has_sm4_context(dev_priv) && 1118 (dev_priv->capabilities2 & SVGA_CAP2_DX2)) { 1119 if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM41)) 1120 dev_priv->sm_type = VMW_SM_4_1; 1121 if (has_sm4_1_context(dev_priv) && 1122 (dev_priv->capabilities2 & SVGA_CAP2_DX3)) { 1123 if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) { 1124 dev_priv->sm_type = VMW_SM_5; 1125 if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43)) 1126 dev_priv->sm_type = VMW_SM_5_1X; 1127 } 1128 } 1129 } 1130 1131 ret = vmw_kms_init(dev_priv); 1132 if (unlikely(ret != 0)) 1133 goto out_no_kms; 1134 vmw_overlay_init(dev_priv); 1135 1136 ret = vmw_request_device(dev_priv); 1137 if (ret) 1138 goto out_no_fifo; 1139 1140 vmw_print_sm_type(dev_priv); 1141 vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)", 1142 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, 1143 VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE); 1144 vmw_write_driver_id(dev_priv); 1145 1146 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 1147 register_pm_notifier(&dev_priv->pm_nb); 1148 1149 return 0; 1150 1151 out_no_fifo: 1152 vmw_overlay_close(dev_priv); 1153 vmw_kms_close(dev_priv); 1154 out_no_kms: 1155 if (dev_priv->has_mob) { 1156 vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); 1157 vmw_sys_man_fini(dev_priv); 1158 } 1159 if (dev_priv->has_gmr) 1160 vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); 1161 vmw_devcaps_destroy(dev_priv); 1162 vmw_vram_manager_fini(dev_priv); 1163 out_no_vram: 1164 ttm_device_fini(&dev_priv->bdev); 1165 out_no_bdev: 1166 vmw_fence_manager_takedown(dev_priv->fman); 1167 out_no_fman: 1168 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 1169 vmw_irq_uninstall(&dev_priv->drm); 1170 out_no_irq: 1171 ttm_object_device_release(&dev_priv->tdev); 1172 out_err0: 1173 for (i = vmw_res_context; i < vmw_res_max; ++i) 1174 idr_destroy(&dev_priv->res_idr[i]); 1175 1176 if (dev_priv->ctx.staged_bindings) 1177 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 1178 out_no_pci_or_version: 1179 pci_release_regions(pdev); 1180 return ret; 1181 } 1182 1183 static void vmw_driver_unload(struct drm_device *dev) 1184 { 1185 struct vmw_private *dev_priv = vmw_priv(dev); 1186 struct pci_dev *pdev = to_pci_dev(dev->dev); 1187 enum vmw_res_type i; 1188 1189 unregister_pm_notifier(&dev_priv->pm_nb); 1190 1191 vmw_sw_context_fini(dev_priv); 1192 vmw_fifo_resource_dec(dev_priv); 1193 1194 vmw_svga_disable(dev_priv); 1195 1196 vmw_vkms_cleanup(dev_priv); 1197 vmw_kms_close(dev_priv); 1198 vmw_overlay_close(dev_priv); 1199 1200 if (dev_priv->has_gmr) 1201 vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); 1202 1203 vmw_release_device_early(dev_priv); 1204 if (dev_priv->has_mob) { 1205 vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); 1206 vmw_sys_man_fini(dev_priv); 1207 } 1208 vmw_devcaps_destroy(dev_priv); 1209 vmw_vram_manager_fini(dev_priv); 1210 ttm_device_fini(&dev_priv->bdev); 1211 vmw_release_device_late(dev_priv); 1212 vmw_fence_manager_takedown(dev_priv->fman); 1213 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 1214 vmw_irq_uninstall(&dev_priv->drm); 1215 1216 ttm_object_device_release(&dev_priv->tdev); 1217 1218 for (i = vmw_res_context; i < vmw_res_max; ++i) 1219 idr_destroy(&dev_priv->res_idr[i]); 1220 1221 vmw_mksstat_remove_all(dev_priv); 1222 1223 pci_release_regions(pdev); 1224 } 1225 1226 static void vmw_postclose(struct drm_device *dev, 1227 struct drm_file *file_priv) 1228 { 1229 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1230 1231 ttm_object_file_release(&vmw_fp->tfile); 1232 kfree(vmw_fp); 1233 } 1234 1235 static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) 1236 { 1237 struct vmw_private *dev_priv = vmw_priv(dev); 1238 struct vmw_fpriv *vmw_fp; 1239 int ret = -ENOMEM; 1240 1241 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); 1242 if (unlikely(!vmw_fp)) 1243 return ret; 1244 1245 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev); 1246 if (unlikely(vmw_fp->tfile == NULL)) 1247 goto out_no_tfile; 1248 1249 file_priv->driver_priv = vmw_fp; 1250 1251 return 0; 1252 1253 out_no_tfile: 1254 kfree(vmw_fp); 1255 return ret; 1256 } 1257 1258 static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, 1259 unsigned long arg, 1260 long (*ioctl_func)(struct file *, unsigned int, 1261 unsigned long)) 1262 { 1263 struct drm_file *file_priv = filp->private_data; 1264 struct drm_device *dev = file_priv->minor->dev; 1265 unsigned int nr = DRM_IOCTL_NR(cmd); 1266 unsigned int flags; 1267 1268 /* 1269 * Do extra checking on driver private ioctls. 1270 */ 1271 1272 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) 1273 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { 1274 const struct drm_ioctl_desc *ioctl = 1275 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 1276 1277 if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { 1278 return ioctl_func(filp, cmd, arg); 1279 } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) { 1280 if (!drm_is_current_master(file_priv) && 1281 !capable(CAP_SYS_ADMIN)) 1282 return -EACCES; 1283 } 1284 1285 if (unlikely(ioctl->cmd != cmd)) 1286 goto out_io_encoding; 1287 1288 flags = ioctl->flags; 1289 } else if (!drm_ioctl_flags(nr, &flags)) 1290 return -EINVAL; 1291 1292 return ioctl_func(filp, cmd, arg); 1293 1294 out_io_encoding: 1295 DRM_ERROR("Invalid command format, ioctl %d\n", 1296 nr - DRM_COMMAND_BASE); 1297 1298 return -EINVAL; 1299 } 1300 1301 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, 1302 unsigned long arg) 1303 { 1304 return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl); 1305 } 1306 1307 #ifdef CONFIG_COMPAT 1308 static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, 1309 unsigned long arg) 1310 { 1311 return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl); 1312 } 1313 #endif 1314 1315 static void vmw_master_set(struct drm_device *dev, 1316 struct drm_file *file_priv, 1317 bool from_open) 1318 { 1319 /* 1320 * Inform a new master that the layout may have changed while 1321 * it was gone. 1322 */ 1323 if (!from_open) 1324 drm_sysfs_hotplug_event(dev); 1325 } 1326 1327 static void vmw_master_drop(struct drm_device *dev, 1328 struct drm_file *file_priv) 1329 { 1330 struct vmw_private *dev_priv = vmw_priv(dev); 1331 1332 vmw_kms_legacy_hotspot_clear(dev_priv); 1333 } 1334 1335 bool vmwgfx_supported(struct vmw_private *vmw) 1336 { 1337 #if defined(CONFIG_X86) 1338 return hypervisor_is_type(X86_HYPER_VMWARE); 1339 #elif defined(CONFIG_ARM64) 1340 /* 1341 * On aarch64 only svga3 is supported 1342 */ 1343 return vmw->pci_id == VMWGFX_PCI_ID_SVGA3; 1344 #else 1345 drm_warn_once(&vmw->drm, 1346 "vmwgfx is running on an unknown architecture."); 1347 return false; 1348 #endif 1349 } 1350 1351 /** 1352 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1353 * 1354 * @dev_priv: Pointer to device private struct. 1355 * Needs the reservation sem to be held in non-exclusive mode. 1356 */ 1357 static void __vmw_svga_enable(struct vmw_private *dev_priv) 1358 { 1359 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1360 1361 if (!ttm_resource_manager_used(man)) { 1362 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE); 1363 ttm_resource_manager_set_used(man, true); 1364 } 1365 } 1366 1367 /** 1368 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1369 * 1370 * @dev_priv: Pointer to device private struct. 1371 */ 1372 void vmw_svga_enable(struct vmw_private *dev_priv) 1373 { 1374 __vmw_svga_enable(dev_priv); 1375 } 1376 1377 /** 1378 * __vmw_svga_disable - Disable SVGA mode and use of VRAM. 1379 * 1380 * @dev_priv: Pointer to device private struct. 1381 * Needs the reservation sem to be held in exclusive mode. 1382 * Will not empty VRAM. VRAM must be emptied by caller. 1383 */ 1384 static void __vmw_svga_disable(struct vmw_private *dev_priv) 1385 { 1386 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1387 1388 if (ttm_resource_manager_used(man)) { 1389 ttm_resource_manager_set_used(man, false); 1390 vmw_write(dev_priv, SVGA_REG_ENABLE, 1391 SVGA_REG_ENABLE_HIDE | 1392 SVGA_REG_ENABLE_ENABLE); 1393 } 1394 } 1395 1396 /** 1397 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo 1398 * running. 1399 * 1400 * @dev_priv: Pointer to device private struct. 1401 * Will empty VRAM. 1402 */ 1403 void vmw_svga_disable(struct vmw_private *dev_priv) 1404 { 1405 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1406 /* 1407 * Disabling SVGA will turn off device modesetting capabilities, so 1408 * notify KMS about that so that it doesn't cache atomic state that 1409 * isn't valid anymore, for example crtcs turned on. 1410 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex), 1411 * but vmw_kms_lost_device() takes the reservation sem and thus we'll 1412 * end up with lock order reversal. Thus, a master may actually perform 1413 * a new modeset just after we call vmw_kms_lost_device() and race with 1414 * vmw_svga_disable(), but that should at worst cause atomic KMS state 1415 * to be inconsistent with the device, causing modesetting problems. 1416 * 1417 */ 1418 vmw_kms_lost_device(&dev_priv->drm); 1419 if (ttm_resource_manager_used(man)) { 1420 if (ttm_resource_manager_evict_all(&dev_priv->bdev, man)) 1421 DRM_ERROR("Failed evicting VRAM buffers.\n"); 1422 ttm_resource_manager_set_used(man, false); 1423 vmw_write(dev_priv, SVGA_REG_ENABLE, 1424 SVGA_REG_ENABLE_HIDE | 1425 SVGA_REG_ENABLE_ENABLE); 1426 } 1427 } 1428 1429 static void vmw_remove(struct pci_dev *pdev) 1430 { 1431 struct drm_device *dev = pci_get_drvdata(pdev); 1432 1433 drm_dev_unregister(dev); 1434 vmw_driver_unload(dev); 1435 } 1436 1437 static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw) 1438 { 1439 struct drm_minor *minor = vmw->drm.primary; 1440 struct dentry *root = minor->debugfs_root; 1441 1442 ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_SYSTEM), 1443 root, "system_ttm"); 1444 ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM), 1445 root, "vram_ttm"); 1446 if (vmw->has_gmr) 1447 ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR), 1448 root, "gmr_ttm"); 1449 if (vmw->has_mob) { 1450 ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB), 1451 root, "mob_ttm"); 1452 ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM), 1453 root, "system_mob_ttm"); 1454 } 1455 } 1456 1457 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 1458 void *ptr) 1459 { 1460 struct vmw_private *dev_priv = 1461 container_of(nb, struct vmw_private, pm_nb); 1462 1463 switch (val) { 1464 case PM_HIBERNATION_PREPARE: 1465 /* 1466 * Take the reservation sem in write mode, which will make sure 1467 * there are no other processes holding a buffer object 1468 * reservation, meaning we should be able to evict all buffer 1469 * objects if needed. 1470 * Once user-space processes have been frozen, we can release 1471 * the lock again. 1472 */ 1473 dev_priv->suspend_locked = true; 1474 break; 1475 case PM_POST_HIBERNATION: 1476 case PM_POST_RESTORE: 1477 if (READ_ONCE(dev_priv->suspend_locked)) { 1478 dev_priv->suspend_locked = false; 1479 } 1480 break; 1481 default: 1482 break; 1483 } 1484 return 0; 1485 } 1486 1487 static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1488 { 1489 struct drm_device *dev = pci_get_drvdata(pdev); 1490 struct vmw_private *dev_priv = vmw_priv(dev); 1491 1492 if (dev_priv->refuse_hibernation) 1493 return -EBUSY; 1494 1495 pci_save_state(pdev); 1496 pci_disable_device(pdev); 1497 pci_set_power_state(pdev, PCI_D3hot); 1498 return 0; 1499 } 1500 1501 static int vmw_pci_resume(struct pci_dev *pdev) 1502 { 1503 pci_set_power_state(pdev, PCI_D0); 1504 pci_restore_state(pdev); 1505 return pci_enable_device(pdev); 1506 } 1507 1508 static int vmw_pm_suspend(struct device *kdev) 1509 { 1510 struct pci_dev *pdev = to_pci_dev(kdev); 1511 struct pm_message dummy; 1512 1513 dummy.event = 0; 1514 1515 return vmw_pci_suspend(pdev, dummy); 1516 } 1517 1518 static int vmw_pm_resume(struct device *kdev) 1519 { 1520 struct pci_dev *pdev = to_pci_dev(kdev); 1521 1522 return vmw_pci_resume(pdev); 1523 } 1524 1525 static int vmw_pm_freeze(struct device *kdev) 1526 { 1527 struct pci_dev *pdev = to_pci_dev(kdev); 1528 struct drm_device *dev = pci_get_drvdata(pdev); 1529 struct vmw_private *dev_priv = vmw_priv(dev); 1530 struct ttm_operation_ctx ctx = { 1531 .interruptible = false, 1532 .no_wait_gpu = false 1533 }; 1534 int ret; 1535 1536 /* 1537 * No user-space processes should be running now. 1538 */ 1539 ret = vmw_kms_suspend(&dev_priv->drm); 1540 if (ret) { 1541 DRM_ERROR("Failed to freeze modesetting.\n"); 1542 return ret; 1543 } 1544 1545 vmw_execbuf_release_pinned_bo(dev_priv); 1546 vmw_resource_evict_all(dev_priv); 1547 vmw_release_device_early(dev_priv); 1548 while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0); 1549 vmw_fifo_resource_dec(dev_priv); 1550 if (atomic_read(&dev_priv->num_fifo_resources) != 0) { 1551 DRM_ERROR("Can't hibernate while 3D resources are active.\n"); 1552 vmw_fifo_resource_inc(dev_priv); 1553 WARN_ON(vmw_request_device_late(dev_priv)); 1554 dev_priv->suspend_locked = false; 1555 if (dev_priv->suspend_state) 1556 vmw_kms_resume(dev); 1557 return -EBUSY; 1558 } 1559 1560 vmw_fence_fifo_down(dev_priv->fman); 1561 __vmw_svga_disable(dev_priv); 1562 1563 vmw_release_device_late(dev_priv); 1564 return 0; 1565 } 1566 1567 static int vmw_pm_restore(struct device *kdev) 1568 { 1569 struct pci_dev *pdev = to_pci_dev(kdev); 1570 struct drm_device *dev = pci_get_drvdata(pdev); 1571 struct vmw_private *dev_priv = vmw_priv(dev); 1572 int ret; 1573 1574 vmw_detect_version(dev_priv); 1575 1576 vmw_fifo_resource_inc(dev_priv); 1577 1578 ret = vmw_request_device(dev_priv); 1579 if (ret) 1580 return ret; 1581 1582 __vmw_svga_enable(dev_priv); 1583 1584 vmw_fence_fifo_up(dev_priv->fman); 1585 dev_priv->suspend_locked = false; 1586 if (dev_priv->suspend_state) 1587 vmw_kms_resume(&dev_priv->drm); 1588 1589 return 0; 1590 } 1591 1592 static const struct dev_pm_ops vmw_pm_ops = { 1593 .freeze = vmw_pm_freeze, 1594 .thaw = vmw_pm_restore, 1595 .restore = vmw_pm_restore, 1596 .suspend = vmw_pm_suspend, 1597 .resume = vmw_pm_resume, 1598 }; 1599 1600 static const struct file_operations vmwgfx_driver_fops = { 1601 .owner = THIS_MODULE, 1602 .open = drm_open, 1603 .release = drm_release, 1604 .unlocked_ioctl = vmw_unlocked_ioctl, 1605 .mmap = drm_gem_mmap, 1606 .poll = drm_poll, 1607 .read = drm_read, 1608 #if defined(CONFIG_COMPAT) 1609 .compat_ioctl = vmw_compat_ioctl, 1610 #endif 1611 .llseek = noop_llseek, 1612 }; 1613 1614 static const struct drm_driver driver = { 1615 .driver_features = 1616 DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM | DRIVER_CURSOR_HOTSPOT, 1617 .ioctls = vmw_ioctls, 1618 .num_ioctls = ARRAY_SIZE(vmw_ioctls), 1619 .master_set = vmw_master_set, 1620 .master_drop = vmw_master_drop, 1621 .open = vmw_driver_open, 1622 .postclose = vmw_postclose, 1623 1624 .dumb_create = vmw_dumb_create, 1625 .dumb_map_offset = drm_gem_ttm_dumb_map_offset, 1626 1627 .prime_fd_to_handle = vmw_prime_fd_to_handle, 1628 .prime_handle_to_fd = vmw_prime_handle_to_fd, 1629 .gem_prime_import_sg_table = vmw_prime_import_sg_table, 1630 1631 .fops = &vmwgfx_driver_fops, 1632 .name = VMWGFX_DRIVER_NAME, 1633 .desc = VMWGFX_DRIVER_DESC, 1634 .date = VMWGFX_DRIVER_DATE, 1635 .major = VMWGFX_DRIVER_MAJOR, 1636 .minor = VMWGFX_DRIVER_MINOR, 1637 .patchlevel = VMWGFX_DRIVER_PATCHLEVEL 1638 }; 1639 1640 static struct pci_driver vmw_pci_driver = { 1641 .name = VMWGFX_DRIVER_NAME, 1642 .id_table = vmw_pci_id_list, 1643 .probe = vmw_probe, 1644 .remove = vmw_remove, 1645 .driver = { 1646 .pm = &vmw_pm_ops 1647 } 1648 }; 1649 1650 static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1651 { 1652 struct vmw_private *vmw; 1653 int ret; 1654 1655 ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); 1656 if (ret) 1657 goto out_error; 1658 1659 ret = pcim_enable_device(pdev); 1660 if (ret) 1661 goto out_error; 1662 1663 vmw = devm_drm_dev_alloc(&pdev->dev, &driver, 1664 struct vmw_private, drm); 1665 if (IS_ERR(vmw)) { 1666 ret = PTR_ERR(vmw); 1667 goto out_error; 1668 } 1669 1670 pci_set_drvdata(pdev, &vmw->drm); 1671 1672 ret = vmw_driver_load(vmw, ent->device); 1673 if (ret) 1674 goto out_error; 1675 1676 ret = drm_dev_register(&vmw->drm, 0); 1677 if (ret) 1678 goto out_unload; 1679 1680 vmw_fifo_resource_inc(vmw); 1681 vmw_svga_enable(vmw); 1682 drm_fbdev_ttm_setup(&vmw->drm, 0); 1683 1684 vmw_debugfs_gem_init(vmw); 1685 vmw_debugfs_resource_managers_init(vmw); 1686 1687 return 0; 1688 out_unload: 1689 vmw_driver_unload(&vmw->drm); 1690 out_error: 1691 return ret; 1692 } 1693 1694 drm_module_pci_driver(vmw_pci_driver); 1695 1696 MODULE_AUTHOR("VMware Inc. and others"); 1697 MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); 1698 MODULE_LICENSE("GPL and additional rights"); 1699 MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." 1700 __stringify(VMWGFX_DRIVER_MINOR) "." 1701 __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." 1702 "0"); 1703