1 /* 2 * Copyright 2013 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Dave Airlie 23 * Alon Levy 24 */ 25 26 #include <linux/pci.h> 27 #include <linux/uaccess.h> 28 29 #include <drm/drm_print.h> 30 31 #include "qxl_drv.h" 32 #include "qxl_object.h" 33 34 /* 35 * TODO: allocating a new gem(in qxl_bo) for each request. 36 * This is wasteful since bo's are page aligned. 
37 */ 38 int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) 39 { 40 struct qxl_device *qdev = to_qxl(dev); 41 struct drm_qxl_alloc *qxl_alloc = data; 42 int ret; 43 uint32_t handle; 44 u32 domain = QXL_GEM_DOMAIN_VRAM; 45 46 if (qxl_alloc->size == 0) { 47 DRM_ERROR("invalid size %d\n", qxl_alloc->size); 48 return -EINVAL; 49 } 50 ret = qxl_gem_object_create_with_handle(qdev, file_priv, 51 domain, 52 qxl_alloc->size, 53 NULL, 54 NULL, &handle); 55 if (ret) { 56 DRM_ERROR("%s: failed to create gem ret=%d\n", 57 __func__, ret); 58 return -ENOMEM; 59 } 60 qxl_alloc->handle = handle; 61 return 0; 62 } 63 64 int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) 65 { 66 struct qxl_device *qdev = to_qxl(dev); 67 struct drm_qxl_map *qxl_map = data; 68 69 return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle, 70 &qxl_map->offset); 71 } 72 73 struct qxl_reloc_info { 74 int type; 75 struct qxl_bo *dst_bo; 76 uint32_t dst_offset; 77 struct qxl_bo *src_bo; 78 int src_offset; 79 }; 80 81 /* 82 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's 83 * are on vram). 
84 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) 85 */ 86 static void 87 apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) 88 { 89 void *reloc_page; 90 91 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); 92 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, 93 info->src_bo, 94 info->src_offset); 95 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); 96 } 97 98 static void 99 apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) 100 { 101 uint32_t id = 0; 102 void *reloc_page; 103 104 if (info->src_bo && !info->src_bo->is_primary) 105 id = info->src_bo->surface_id; 106 107 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); 108 *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id; 109 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); 110 } 111 112 /* return holding the reference to this object */ 113 static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle, 114 struct qxl_release *release, struct qxl_bo **qbo_p) 115 { 116 struct drm_gem_object *gobj; 117 struct qxl_bo *qobj; 118 int ret; 119 120 gobj = drm_gem_object_lookup(file_priv, handle); 121 if (!gobj) 122 return -EINVAL; 123 124 qobj = gem_to_qxl_bo(gobj); 125 126 ret = qxl_release_list_add(release, qobj); 127 drm_gem_object_put(gobj); 128 if (ret) 129 return ret; 130 131 *qbo_p = qobj; 132 return 0; 133 } 134 135 /* 136 * Usage of execbuffer: 137 * Relocations need to take into account the full QXLDrawable size. 
 * However, the command as passed from user space must *not* contain the initial
 * QXLReleaseInfo struct (first XXX bytes)
 */
static int qxl_process_single_command(struct qxl_device *qdev,
				      struct drm_qxl_command *cmd,
				      struct drm_file *file_priv)
{
	struct qxl_reloc_info *reloc_info;
	int release_type;
	struct qxl_release *release;
	struct qxl_bo *cmd_bo;
	void *fb_cmd;
	int i, ret;
	int unwritten;

	/* Only drawables may come in through execbuffer; surface and
	 * cursor commands are rejected here. */
	switch (cmd->type) {
	case QXL_CMD_DRAW:
		release_type = QXL_RELEASE_DRAWABLE;
		break;
	case QXL_CMD_SURFACE:
	case QXL_CMD_CURSOR:
	default:
		DRM_DEBUG("Only draw commands in execbuffers\n");
		return -EINVAL;
	}

	/* Command plus release-info header must fit in a single page,
	 * since it is copied through one atomic kmap window below. */
	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
		return -EINVAL;

	if (!access_ok(u64_to_user_ptr(cmd->command),
		       cmd->command_size))
		return -EFAULT;

	reloc_info = kmalloc_array(cmd->relocs_num,
				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
	if (!reloc_info)
		return -ENOMEM;

	/* Reserve a release slot plus the BO that will hold the command. */
	ret = qxl_alloc_release_reserved(qdev,
					 sizeof(union qxl_release_info) +
					 cmd->command_size,
					 release_type,
					 &release,
					 &cmd_bo);
	if (ret)
		goto out_free_reloc;

	/* TODO copy slow path code from i915 */
	/* Copy the user command into the command BO, just past the
	 * release-info header; nocache copy inside an atomic mapping. */
	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
	unwritten = __copy_from_user_inatomic_nocache
		(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
		 u64_to_user_ptr(cmd->command), cmd->command_size);

	{
		/* Stamp the drawable with the current device clock. */
		struct qxl_drawable *draw = fb_cmd;

		draw->mm_time = qdev->rom->mm_clock;
	}

	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
	if (unwritten) {
		DRM_ERROR("got unwritten %d\n", unwritten);
		ret = -EFAULT;
		goto out_free_release;
	}

	/* fill out reloc info structs */
	for (i = 0; i < cmd->relocs_num; ++i) {
		struct drm_qxl_reloc reloc;
		struct drm_qxl_reloc __user *u =
			u64_to_user_ptr(cmd->relocs);

		if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
			ret = -EFAULT;
			goto out_free_bos;
		}

		/* add the bos to the list of bos to validate -
		   need to validate first then process relocs? */
		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);

			ret = -EINVAL;
			goto out_free_bos;
		}
		reloc_info[i].type = reloc.reloc_type;

		if (reloc.dst_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
						 &reloc_info[i].dst_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].dst_offset = reloc.dst_offset;
		} else {
			/* No destination handle: patch the command BO
			 * itself, past the release-info header. */
			reloc_info[i].dst_bo = cmd_bo;
			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
		}

		/* reserve and validate the reloc dst bo */
		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
						 &reloc_info[i].src_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].src_offset = reloc.src_offset;
		} else {
			reloc_info[i].src_bo = NULL;
			reloc_info[i].src_offset = 0;
		}
	}

	/* validate all buffers */
	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_bos;

	/* All BOs are resident now; apply the patches. */
	for (i = 0; i < cmd->relocs_num; ++i) {
		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
			apply_reloc(qdev, &reloc_info[i]);
		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
			apply_surf_reloc(qdev, &reloc_info[i]);
	}

	/* Fence the BOs and push the command to the device ring; on
	 * success the release is owned by the device from here on. */
	qxl_release_fence_buffer_objects(release);
	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);

out_free_bos:
out_free_release:
	if (ret)
		qxl_release_free(qdev, release);
out_free_reloc:
	kfree(reloc_info);
	return ret;
}

/*
 * DRM_IOCTL_QXL_EXECBUFFER: copy each command descriptor from userspace
 * and submit it, stopping (and returning the error) at the first failure.
 * Commands already submitted are not unwound.
 */
int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_execbuffer *execbuffer = data;
	struct drm_qxl_command user_cmd;
	int cmd_num;
	int ret;

	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {

		struct drm_qxl_command __user *commands =
			u64_to_user_ptr(execbuffer->commands);

		if (copy_from_user(&user_cmd, commands + cmd_num,
				   sizeof(user_cmd)))
			return -EFAULT;

		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * DRM_IOCTL_QXL_UPDATE_AREA: ask the device to redraw a rectangle of the
 * surface backing @update_area->handle. Validates the BO into its
 * placement first if it is not pinned.
 */
int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_update_area *update_area = data;
	struct qxl_rect area = {.left = update_area->left,
				.top = update_area->top,
				.right = update_area->right,
				.bottom = update_area->bottom};
	int ret;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qobj = NULL;
	struct ttm_operation_ctx ctx = { true, false };

	/* Reject empty or inverted rectangles. */
	if (update_area->left >= update_area->right ||
	    update_area->top >= update_area->bottom)
		return -EINVAL;

	gobj = drm_gem_object_lookup(file, update_area->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_bo_reserve(qobj);
	if (ret)
		goto out;

	/* Unpinned BO may have been evicted; validate it back into the
	 * placement matching its type before the device touches it. */
	if (!qobj->tbo.pin_count) {
		qxl_ttm_placement_from_domain(qobj, qobj->type);
		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
		if (unlikely(ret))
			goto out;
	}

	ret = qxl_bo_check_id(qdev, qobj);
	if (ret)
		goto out2;
	if (!qobj->surface_id)
		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
	ret = qxl_io_update_area(qdev, qobj, &area);

out2:
	qxl_bo_unreserve(qobj);

out:
	drm_gem_object_put(gobj);
	return ret;
}

/*
 * DRM_IOCTL_QXL_GETPARAM: report device parameters to userspace.
 */
int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_getparam *param = data;

	switch (param->param) {
	case QXL_PARAM_NUM_SURFACES:
		param->value = qdev->rom->n_surfaces;
		break;
	case QXL_PARAM_MAX_RELOCS:
		param->value = QXL_MAX_RES;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * DRM_IOCTL_QXL_CLIENTCAP: returns 0 if the given client capability bit
 * is advertised in the device ROM, -ENOSYS otherwise.
 */
int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct drm_qxl_clientcap *param = data;
	int byte, idx;

	byte = param->index / 8;
	idx = param->index % 8;

	/* Capability bits only exist on revision 4+ devices. */
	if (pdev->revision < 4)
		return -ENOSYS;

	/* 58 bounds the ROM capability array — presumably
	 * sizeof(qdev->rom->client_capabilities); TODO confirm against
	 * the qxl_rom definition. */
	if (byte >= 58)
		return -ENOSYS;

	if (qdev->rom->client_capabilities[byte] & (1 << idx))
		return 0;
	return -ENOSYS;
}

/*
 * DRM_IOCTL_QXL_ALLOC_SURF: allocate a GEM object in the surface domain
 * sized for the requested stride/height, and return its handle.
 */
int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_alloc_surf *param = data;
	int handle;
	int ret;
	int size, actual_stride;
	struct qxl_surface surf;

	/* work out size allocate bo with handle */
	/* stride may be negative for bottom-up surfaces; one extra row
	 * of slack is allocated past height * |stride|. */
	actual_stride = param->stride < 0 ? -param->stride : param->stride;
	size = actual_stride * param->height + actual_stride;

	surf.format = param->format;
	surf.width = param->width;
	surf.height = param->height;
	surf.stride = param->stride;
	surf.data = 0;

	ret = qxl_gem_object_create_with_handle(qdev, file,
						QXL_GEM_DOMAIN_SURFACE,
						size,
						&surf,
						NULL, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		return -ENOMEM;
	} else
		param->handle = handle;
	return ret;
}