/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/iosys-map.h>
#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = to_qxl(bo->tbo.base.dev);

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM) {
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c].mem_type = TTM_PL_PRIV;
		qbo->placements[c++].flags = pflag;
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = pflag;
	}
	if (!c) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = 0;
	}
	qbo->placement.num_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};
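
/*
 * Usage sketch (illustrative only, not called by the driver): the helpers
 * below typically follow a create / pin+vmap / vunmap+unpin / unref
 * lifecycle. "qdev" and "size" are placeholders for a caller's device
 * pointer and byte size:
 *
 *	struct qxl_bo *bo;
 *	struct iosys_map map;
 *	int r;
 *
 *	r = qxl_bo_create(qdev, size, true, false, QXL_GEM_DOMAIN_VRAM, 0,
 *			  NULL, &bo);
 *	if (r)
 *		return r;
 *	r = qxl_bo_pin_and_vmap(bo, &map);
 *	if (r) {
 *		qxl_bo_unref(&bo);
 *		return r;
 *	}
 *	... access the object through map.vaddr or map.vaddr_iomem ...
 *	qxl_bo_vunmap_and_unpin(bo);
 *	qxl_bo_unref(&bo);
 */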

int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
		  bool kernel, bool pinned, u32 domain, u32 priority,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = { !kernel, false };
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain);

	bo->tbo.priority = priority;
	r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type,
				 &bo->placement, 0, &ctx, NULL, NULL,
				 &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	if (pinned)
		ttm_bo_pin(&bo->tbo);
	ttm_bo_unreserve(&bo->tbo);
	*bo_ptr = bo;
	return 0;
}

int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr) {
		bo->map_count++;
		goto out;
	}

	r = ttm_bo_vmap(&bo->tbo, &bo->map);
	if (r)
		return r;
	bo->map_count = 1;

	/* TODO: Remove kptr in favor of map everywhere. */
	if (bo->map.is_iomem)
		bo->kptr = (void *)bo->map.vaddr_iomem;
	else
		bo->kptr = bo->map.vaddr;

out:
	*map = bo->map;
	return 0;
}

int qxl_bo_pin_and_vmap(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = qxl_bo_pin_locked(bo);
	if (r) {
		qxl_bo_unreserve(bo);
		return r;
	}

	r = qxl_bo_vmap_locked(bo, map);
	if (r)
		qxl_bo_unpin_locked(bo);
	qxl_bo_unreserve(bo);
	return r;
}

void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	unsigned long offset;
	void *rptr;
	int ret;
	struct io_mapping *map;
	struct iosys_map bo_map;

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	offset = bo->tbo.resource->start << PAGE_SHIFT;
	return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_vmap_locked(bo, &bo_map);
	if (ret)
		return NULL;
	rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_vunmap(&bo->tbo, &bo->map);
}

int qxl_bo_vunmap_and_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	qxl_bo_vunmap_locked(bo);
	qxl_bo_unpin_locked(bo);
	qxl_bo_unreserve(bo);
	return 0;
}
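
/*
 * Usage sketch (illustrative only, not called by the driver): the atomic
 * page helpers above and below are meant to be used as a strictly nested
 * pair around a short critical section. "cmd_bo" is a placeholder for a
 * reserved qxl_bo:
 *
 *	void *ptr;
 *
 *	ptr = qxl_bo_kmap_atomic_page(qdev, cmd_bo, 0);
 *	if (ptr) {
 *		... patch the first page of the object ...
 *		qxl_bo_kunmap_atomic_page(qdev, cmd_bo, ptr);
 *	}
 */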

void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.resource->mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
fallback:
	qxl_bo_vunmap_locked(bo);
}

void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

int qxl_bo_pin_locked(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		ttm_bo_pin(&bo->tbo);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

void qxl_bo_unpin_locked(struct qxl_bo *bo)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	ttm_bo_unpin(&bo->tbo);
}

/*
 * Reserves the BO before pinning the object. If the BO has already been
 * reserved, call qxl_bo_pin_locked() directly instead.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = qxl_bo_pin_locked(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserves the BO before unpinning the object. If the BO has already been
 * reserved, call qxl_bo_unpin_locked() directly instead.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	qxl_bo_unpin_locked(bo);
	qxl_bo_unreserve(bo);
	return 0;
}

void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

int qxl_surf_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}