/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_file.h>

#include "virtgpu_drv.h"

static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		virtio_gpu_notify(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
			 events_clear, &events_clear);
}

static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;
	bool invalid_capset_id = false;

	vgdev->capsets = kcalloc(num_capsets,
				 sizeof(struct virtio_gpu_drv_capset),
				 GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}
	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		virtio_gpu_notify(vgdev);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);
		/*
		 * Capability ids are defined in the virtio-gpu spec and are
		 * between 1 and 63, inclusive.
		 */
		if (!vgdev->capsets[i].id ||
		    vgdev->capsets[i].id > MAX_CAPSET_ID)
			invalid_capset_id = true;

		if (ret == 0)
			DRM_ERROR("timed out waiting for cap set %d\n", i);
		else if (invalid_capset_id)
			DRM_ERROR("invalid capset id %u\n", vgdev->capsets[i].id);

		if (ret == 0 || invalid_capset_id) {
			spin_lock(&vgdev->display_info_lock);
			kfree(vgdev->capsets);
			vgdev->capsets = NULL;
			spin_unlock(&vgdev->display_info_lock);
			return;
		}

		/* capset ids can be up to 63, so use a 64-bit shift */
		vgdev->capset_id_mask |= 1ULL << vgdev->capsets[i].id;
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}

	vgdev->num_capsets = num_capsets;
}

int virtio_gpu_init(struct drm_device *dev)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char * const names[] = { "control", "cursor" };

	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret = 0;

	if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = dev_to_virtio(dev->dev);
	vgdev->dev = dev->dev;

	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->resource_export_lock);
	spin_lock_init(&vgdev->host_visible_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	INIT_WORK(&vgdev->obj_free_work,
		  virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);

	/* virgl 3D mode is only supported on little-endian guests */
#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
		vgdev->has_edid = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
		vgdev->has_indirect = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
		vgdev->has_resource_assign_uuid = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
		vgdev->has_resource_blob = true;
	}
	if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
				  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
		if (!devm_request_mem_region(&vgdev->vdev->dev,
					     vgdev->host_visible_region.addr,
					     vgdev->host_visible_region.len,
					     dev_name(&vgdev->vdev->dev))) {
			DRM_ERROR("Could not reserve host visible region\n");
			ret = -EBUSY;
			goto err_vqs;
		}

		DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
			 (unsigned long)vgdev->host_visible_region.addr,
			 (unsigned long)vgdev->host_visible_region.len);
		vgdev->has_host_visible = true;
		drm_mm_init(&vgdev->host_visible_mm,
			    (unsigned long)vgdev->host_visible_region.addr,
			    (unsigned long)vgdev->host_visible_region.len);
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
		vgdev->has_context_init = true;
	}

	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
		 vgdev->has_virgl_3d ? '+' : '-',
		 vgdev->has_edid ? '+' : '-',
		 vgdev->has_resource_blob ? '+' : '-',
		 vgdev->has_host_visible ? '+' : '-');

	DRM_INFO("features: %ccontext_init\n",
		 vgdev->has_context_init ? '+' : '-');

	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	/* get display info */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
	if (!vgdev->num_scanouts) {
		DRM_ERROR("num_scanouts is zero\n");
		ret = -EINVAL;
		goto err_scanouts;
	}
	DRM_INFO("number of scanouts: %d\n", num_scanouts);

	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret) {
		DRM_ERROR("modeset init failed\n");
		goto err_scanouts;
	}

	virtio_device_ready(vgdev->vdev);

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->has_edid)
		virtio_gpu_cmd_get_edids(vgdev);
	virtio_gpu_cmd_get_display_info(vgdev);
	virtio_gpu_notify(vgdev);
	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
	return 0;

err_scanouts:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	dev->dev_private = NULL;
	kfree(vgdev);
	return ret;
}

static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	flush_work(&vgdev->obj_free_work);
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	virtio_reset_device(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
}

void virtio_gpu_release(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	if (!vgdev)
		return;

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);

	if (vgdev->has_host_visible)
		drm_mm_takedown(&vgdev->host_visible_mm);

	kfree(vgdev->capsets);
	kfree(vgdev);
}

int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	int handle;

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	mutex_init(&vfpriv->context_lock);

	handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
	if (handle < 0) {
		kfree(vfpriv);
		return handle;
	}

	vfpriv->ctx_id = handle + 1;
	file->driver_priv = vfpriv;
	return 0;
}

void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	if (!vgdev->has_virgl_3d)
		return;

	if (vfpriv->context_created) {
		virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
		virtio_gpu_notify(vgdev);
	}

	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
	mutex_destroy(&vfpriv->context_lock);
	kfree(vfpriv);
	file->driver_priv = NULL;
}