/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"
#include <trace/events/fence.h>

/*
 * Drawable command cache: allocate a bunch of VRAM pages and suballocate
 * them into 256-byte chunks for now, which gives 16 commands per page.
 *
 * Use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - the drawable object is 191 bytes */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct fence *fence)
{
        return "qxl";
}

static const char *qxl_get_timeline_name(struct fence *fence)
{
        return "release";
}

static bool qxl_nop_signaling(struct fence *fence)
{
        /* fences are always automatically signaled, so just pretend we did something here */
        return true;
}
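/*
 * Illustrative note (not driver code): because the device signals releases on
 * its own schedule, the wait callback below polls instead of sleeping on an
 * interrupt.  Each pass nudges the device (qxl_io_notify_oom) and drains the
 * release ring (qxl_queue_garbage_collect) before rechecking the fence.  A
 * caller would normally get here through the generic fence API, e.g.:
 *
 *      long r = fence_wait_timeout(&release->base, false, timeout);
 *
 * which dispatches to qxl_fence_wait() through qxl_fence_ops.wait.
 */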
static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
{
        struct qxl_device *qdev;
        struct qxl_release *release;
        int count = 0, sc = 0;
        bool have_drawable_releases;
        unsigned long cur, end = jiffies + timeout;

        qdev = container_of(fence->lock, struct qxl_device, release_lock);
        release = container_of(fence, struct qxl_release, base);
        have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
        sc++;

        if (fence_is_signaled(fence))
                goto signaled;

        qxl_io_notify_oom(qdev);

        for (count = 0; count < 11; count++) {
                if (!qxl_queue_garbage_collect(qdev, true))
                        break;

                if (fence_is_signaled(fence))
                        goto signaled;
        }

        if (fence_is_signaled(fence))
                goto signaled;

        if (have_drawable_releases || sc < 4) {
                if (sc > 2)
                        /* back off */
                        usleep_range(500, 1000);

                if (time_after(jiffies, end))
                        return 0;

                if (have_drawable_releases && sc > 300) {
                        FENCE_WARN(fence, "failed to wait on release %d after spincount %d\n",
                                   fence->context & ~0xf0000000, sc);
                        goto signaled;
                }
                goto retry;
        }
        /*
         * The original sync_obj_wait implementation gave up after 3 spins
         * when have_drawable_releases was not set.
         */

signaled:
        cur = jiffies;
        if (time_after(cur, end))
                return 0;
        return end - cur;
}

static const struct fence_ops qxl_fence_ops = {
        .get_driver_name = qxl_get_driver_name,
        .get_timeline_name = qxl_get_timeline_name,
        .enable_signaling = qxl_nop_signaling,
        .wait = qxl_fence_wait,
};
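/*
 * Illustrative note (not driver code): every release is indexed in
 * qdev->release_idr, and the handle is written into the command's
 * qxl_release_info so the device can hand it back on the release ring.
 * The garbage collector then resolves and frees it roughly like:
 *
 *      struct qxl_release *release =
 *              qxl_release_from_id_locked(qdev, id_from_release_ring);
 *      if (release)
 *              qxl_release_free(qdev, release);
 */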
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
                  struct qxl_release **ret)
{
        struct qxl_release *release;
        int handle;
        size_t size = sizeof(*release);

        release = kmalloc(size, GFP_KERNEL);
        if (!release) {
                DRM_ERROR("Out of memory\n");
                /* callers check for a negative handle, so don't return 0 here */
                return -ENOMEM;
        }
        release->base.ops = NULL;
        release->type = type;
        release->release_offset = 0;
        release->surface_release_id = 0;
        INIT_LIST_HEAD(&release->bos);

        idr_preload(GFP_KERNEL);
        spin_lock(&qdev->release_idr_lock);
        handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
        release->base.seqno = ++qdev->release_seqno;
        spin_unlock(&qdev->release_idr_lock);
        idr_preload_end();
        if (handle < 0) {
                kfree(release);
                *ret = NULL;
                return handle;
        }
        *ret = release;
        QXL_INFO(qdev, "allocated release %d\n", handle);
        release->id = handle;
        return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
        while (!list_empty(&release->bos)) {
                struct qxl_bo_list *entry;
                struct qxl_bo *bo;

                entry = container_of(release->bos.next,
                                     struct qxl_bo_list, tv.head);
                bo = to_qxl_bo(entry->tv.bo);
                qxl_bo_unref(&bo);
                list_del(&entry->tv.head);
                kfree(entry);
        }
}

void
qxl_release_free(struct qxl_device *qdev,
                 struct qxl_release *release)
{
        QXL_INFO(qdev, "release %d, type %d\n", release->id,
                 release->type);

        if (release->surface_release_id)
                qxl_surface_id_dealloc(qdev, release->surface_release_id);

        spin_lock(&qdev->release_idr_lock);
        idr_remove(&qdev->release_idr, release->id);
        spin_unlock(&qdev->release_idr_lock);

        if (release->base.ops) {
                WARN_ON(list_empty(&release->bos));
                qxl_release_free_list(release);

                fence_signal(&release->base);
                fence_put(&release->base);
        } else {
                qxl_release_free_list(release);
                kfree(release);
        }
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
                                struct qxl_bo **bo)
{
        int ret;

        /* pin release BOs - they are too messy to evict */
        ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
                            QXL_GEM_DOMAIN_VRAM, NULL,
                            bo);
        return ret;
}

int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
        struct qxl_bo_list *entry;

        list_for_each_entry(entry, &release->bos, tv.head) {
                if (entry->tv.bo == &bo->tbo)
                        return 0;
        }

        entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        qxl_bo_ref(bo);
        entry->tv.bo = &bo->tbo;
        entry->tv.shared = false;
        list_add_tail(&entry->tv.head, &release->bos);
        return 0;
}

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
        int ret;

        if (!bo->pin_count) {
                qxl_ttm_placement_from_domain(bo, bo->type, false);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement,
                                      true, false);
                if (ret)
                        return ret;
        }

        ret = reservation_object_reserve_shared(bo->tbo.resv);
        if (ret)
                return ret;

        /* allocate a surface for reserved + validated buffers */
        ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
        if (ret)
                return ret;
        return 0;
}

int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
        int ret;
        struct qxl_bo_list *entry;

        /* if there is only one object in the release, it is the release BO
         * itself; since these objects are pinned there is no need to reserve */
        if (list_is_singular(&release->bos))
                return 0;

        ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
                                     !no_intr, NULL);
        if (ret)
                return ret;

        list_for_each_entry(entry, &release->bos, tv.head) {
                struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

                ret = qxl_release_validate_bo(bo);
                if (ret) {
                        ttm_eu_backoff_reservation(&release->ticket, &release->bos);
                        return ret;
                }
        }
        return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
        /* if there is only one object in the release, it is the release BO
         * itself; since these objects are pinned there is no need to reserve */
        if (list_is_singular(&release->bos))
                return;

        ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}


int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                                       enum qxl_surface_cmd_type surface_cmd_type,
                                       struct qxl_release *create_rel,
                                       struct qxl_release **release)
{
        if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
                int idr_ret, ret;
                struct qxl_bo_list *entry = list_first_entry(&create_rel->bos,
                                                             struct qxl_bo_list, tv.head);
                struct qxl_bo *bo;
                union qxl_release_info *info;

                /* stash the release after the create command */
                idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
                if (idr_ret < 0)
                        return idr_ret;
                bo = to_qxl_bo(entry->tv.bo);

                (*release)->release_offset = create_rel->release_offset + 64;

                ret = qxl_release_list_add(*release, bo);
                if (ret) {
                        qxl_release_free(qdev, *release);
                        return ret;
                }

                info = qxl_release_map(qdev, *release);
                info->id = idr_ret;
                qxl_release_unmap(qdev, *release, info);
                return 0;
        }

        return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
                                          QXL_RELEASE_SURFACE_CMD, release, NULL);
}
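/*
 * Illustrative note (not driver code): a surface create command and its
 * matching destroy command share one 128-byte SURFACE_RELEASE_SIZE slot.
 * The destroy release is stashed 64 bytes after the create command in the
 * same BO, which is why qxl_alloc_surface_release_reserved() above sets
 *
 *      (*release)->release_offset = create_rel->release_offset + 64;
 *
 * instead of carving a fresh slot out of the current release BO.
 */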
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
                               int type, struct qxl_release **release,
                               struct qxl_bo **rbo)
{
        struct qxl_bo *bo;
        int idr_ret;
        int ret = 0;
        union qxl_release_info *info;
        int cur_idx;

        if (type == QXL_RELEASE_DRAWABLE)
                cur_idx = 0;
        else if (type == QXL_RELEASE_SURFACE_CMD)
                cur_idx = 1;
        else if (type == QXL_RELEASE_CURSOR_CMD)
                cur_idx = 2;
        else {
                DRM_ERROR("got illegal type: %d\n", type);
                return -EINVAL;
        }

        idr_ret = qxl_release_alloc(qdev, type, release);
        if (idr_ret < 0) {
                if (rbo)
                        *rbo = NULL;
                return idr_ret;
        }

        mutex_lock(&qdev->release_mutex);
        if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
                qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
                qdev->current_release_bo_offset[cur_idx] = 0;
                qdev->current_release_bo[cur_idx] = NULL;
        }
        if (!qdev->current_release_bo[cur_idx]) {
                ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
                if (ret) {
                        mutex_unlock(&qdev->release_mutex);
                        qxl_release_free(qdev, *release);
                        return ret;
                }
        }

        bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

        (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] *
                                     release_size_per_bo[cur_idx];
        qdev->current_release_bo_offset[cur_idx]++;

        if (rbo)
                *rbo = bo;

        mutex_unlock(&qdev->release_mutex);

        ret = qxl_release_list_add(*release, bo);
        qxl_bo_unref(&bo);
        if (ret) {
                qxl_release_free(qdev, *release);
                return ret;
        }

        info = qxl_release_map(qdev, *release);
        info->id = idr_ret;
        qxl_release_unmap(qdev, *release, info);

        return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
                                               uint64_t id)
{
        struct qxl_release *release;

        spin_lock(&qdev->release_idr_lock);
        release = idr_find(&qdev->release_idr, id);
        spin_unlock(&qdev->release_idr_lock);
        if (!release) {
                DRM_ERROR("failed to find id in release_idr\n");
                return NULL;
        }

        return release;
}

union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
                                        struct qxl_release *release)
{
        void *ptr;
        union qxl_release_info *info;
        struct qxl_bo_list *entry = list_first_entry(&release->bos,
                                                     struct qxl_bo_list, tv.head);
        struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

        /* split release_offset into a page-aligned base and the offset
         * within that page; PAGE_MASK (not PAGE_SIZE) is the right mask */
        ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
        if (!ptr)
                return NULL;
        info = ptr + (release->release_offset & ~PAGE_MASK);
        return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
                       struct qxl_release *release,
                       union qxl_release_info *info)
{
        struct qxl_bo_list *entry = list_first_entry(&release->bos,
                                                     struct qxl_bo_list, tv.head);
        struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
        void *ptr;

        ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
        qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
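/*
 * Illustrative note (not driver code): qxl_release_map()/qxl_release_unmap()
 * bracket short accesses to the release_info header of a command, as the
 * callers above do:
 *
 *      union qxl_release_info *info = qxl_release_map(qdev, release);
 *      if (info) {
 *              info->id = release->id;
 *              qxl_release_unmap(qdev, release, info);
 *      }
 *
 * The mapping is atomic (see qxl_bo_kmap_atomic_page), so nothing between
 * map and unmap may sleep.
 */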
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;
        struct qxl_bo *qbo;
        struct ttm_validate_buffer *entry;
        struct qxl_device *qdev;

        /* if there is only one object in the release, it is the release BO
         * itself; since these objects are pinned there is no need to reserve */
        if (list_is_singular(&release->bos) || list_empty(&release->bos))
                return;

        bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        qdev = container_of(bdev, struct qxl_device, mman.bdev);

        /*
         * Since we never really allocated a context and we don't want to
         * conflict, set the highest bits. This will break if we really
         * allow exporting of dma-bufs.
         */
        fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
                   release->id | 0xf0000000, release->base.seqno);
        trace_fence_emit(&release->base);

        driver = bdev->driver;
        glob = bo->glob;

        spin_lock(&glob->lru_lock);

        list_for_each_entry(entry, &release->bos, head) {
                bo = entry->bo;
                qbo = to_qxl_bo(bo);

                reservation_object_add_shared_fence(bo->resv, &release->base);
                ttm_bo_add_to_lru(bo);
                __ttm_bo_unreserve(bo);
        }
        spin_unlock(&glob->lru_lock);
        ww_acquire_fini(&release->ticket);
}
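/*
 * Illustrative summary (not driver code): pieced together from the helpers in
 * this file, the typical lifetime of a release in the rest of the driver is
 * roughly:
 *
 *      qxl_alloc_release_reserved(qdev, size, type, &release, &cmd_bo);
 *      qxl_release_reserve_list(release, false);
 *      ... build the command, using qxl_release_map()/qxl_release_unmap()
 *          to fill in the release_info header ...
 *      ... push the command to the device ring ...
 *      qxl_release_fence_buffer_objects(release);
 *
 * Once the device reports the command consumed on the release ring, the
 * garbage collector ends the cycle with qxl_release_free(qdev, release),
 * which signals and drops the fence installed above.
 */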