// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_dma_buf.h"

#include <kunit/test.h>
#include <linux/dma-buf.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_device.h>
#include <drm/drm_prime.h>
#include <drm/ttm/ttm_tt.h>

#include "tests/xe_test.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_pm.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"

MODULE_IMPORT_NS("DMA_BUF");

static int xe_dma_buf_attach(struct dma_buf *dmabuf,
			     struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	if (attach->peer2peer &&
	    pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
		attach->peer2peer = false;

	if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
		return -EOPNOTSUPP;

	xe_pm_runtime_get(to_xe_device(obj->dev));
	return 0;
}

static void xe_dma_buf_detach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	xe_pm_runtime_put(to_xe_device(obj->dev));
}

static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_device *xe = xe_bo_device(bo);
	int ret;

	/*
	 * For now only support pinning in TT memory, for two reasons:
	 * 1) Avoid pinning in a placement not accessible to some importers.
	 * 2) Pinning in VRAM requires PIN accounting which is a to-do.
	 */
	if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) {
		drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
		return -EINVAL;
	}

	ret = xe_bo_migrate(bo, XE_PL_TT);
	if (ret) {
		if (ret != -EINTR && ret != -ERESTARTSYS)
			drm_dbg(&xe->drm,
				"Failed migrating dma-buf to TT memory: %pe\n",
				ERR_PTR(ret));
		return ret;
	}

	ret = xe_bo_pin_external(bo, true);
	xe_assert(xe, !ret);

	return 0;
}

static void xe_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	xe_bo_unpin_external(bo);
}

static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct sg_table *sgt;
	int r = 0;

	if (!attach->peer2peer && !xe_bo_can_migrate(bo, XE_PL_TT))
		return ERR_PTR(-EOPNOTSUPP);

	if (!xe_bo_is_pinned(bo)) {
		if (!attach->peer2peer)
			r = xe_bo_migrate(bo, XE_PL_TT);
		else
			r = xe_bo_validate(bo, NULL, false);
		if (r)
			return ERR_PTR(r);
	}

	switch (bo->ttm.resource->mem_type) {
	case XE_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->ttm.ttm->pages,
					    bo->ttm.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		r = xe_ttm_vram_mgr_alloc_sgt(xe_bo_device(bo),
					      bo->ttm.resource, 0,
					      bo->ttm.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}

static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	if (sg_page(sgt->sgl)) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		xe_ttm_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}

static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads)
		return 0;

	/* Can we do interruptible lock here? */
	xe_bo_lock(bo, false);
	(void)xe_bo_migrate(bo, XE_PL_TT);
	xe_bo_unlock(bo);

	return 0;
}

static const struct dma_buf_ops xe_dmabuf_ops = {
	.attach = xe_dma_buf_attach,
	.detach = xe_dma_buf_detach,
	.pin = xe_dma_buf_pin,
	.unpin = xe_dma_buf_unpin,
	.map_dma_buf = xe_dma_buf_map,
	.unmap_dma_buf = xe_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = xe_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

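/**
 * xe_gem_prime_export() - Export an xe GEM object as a dma-buf
 * @obj: The GEM object to export.
 * @flags: Flags forwarded to drm_gem_prime_export().
 *
 * GEM objects that are private to a VM cannot be exported. The backing
 * TTM BO is first set up for export via ttm_bo_setup_export(), after which
 * the default PRIME export path is used and the resulting dma-buf is
 * switched over to the xe-specific &dma_buf_ops.
 *
 * Return: A pointer to the new dma-buf on success, an ERR_PTR() on failure.
 */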
struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct dma_buf *buf;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = true,
		/* We opt to avoid OOM on system pages allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = false,
	};
	int ret;

	if (bo->vm)
		return ERR_PTR(-EPERM);

	ret = ttm_bo_setup_export(&bo->ttm, &ctx);
	if (ret)
		return ERR_PTR(ret);

	buf = drm_gem_prime_export(obj, flags);
	if (!IS_ERR(buf))
		buf->ops = &xe_dmabuf_ops;

	return buf;
}

static struct drm_gem_object *
xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
		    struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct xe_device *xe = to_xe_device(dev);
	struct xe_bo *bo;
	int ret;

	dma_resv_lock(resv, NULL);
	bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
				    0, /* Will require 1way or 2way for vm_bind */
				    ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto error;
	}
	dma_resv_unlock(resv);

	return &bo->ttm.base;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	XE_WARN_ON(xe_bo_evict(bo));
}

static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = xe_dma_buf_move_notify
};

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)

struct dma_buf_test_params {
	struct xe_test_priv base;
	const struct dma_buf_attach_ops *attach_ops;
	bool force_different_devices;
	u32 mem_mask;
};

#define to_dma_buf_test_params(_priv) \
	container_of(_priv, struct dma_buf_test_params, base)
#endif

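/**
 * xe_gem_prime_import() - Import a dma-buf as an xe GEM object
 * @dev: The importing drm device.
 * @dma_buf: The dma-buf to import.
 *
 * If the dma-buf was exported by @dev itself, a reference is taken on the
 * existing GEM object and it is returned directly. Otherwise a new BO is
 * allocated, a dynamic attachment is created (allowing peer-to-peer where
 * the attach ops permit it), and the BO is initialized to share the
 * dma-buf's reservation object.
 *
 * Return: A pointer to the GEM object on success, an ERR_PTR() on failure.
 */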
struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
					   struct dma_buf *dma_buf)
{
	XE_TEST_DECLARE(struct dma_buf_test_params *test =
			to_dma_buf_test_params
			(xe_cur_kunit_priv(XE_TEST_LIVE_DMA_BUF));)
	const struct dma_buf_attach_ops *attach_ops;
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct xe_bo *bo;

	if (dma_buf->ops == &xe_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev &&
		    !XE_TEST_ONLY(test && test->force_different_devices)) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	/*
	 * Don't publish the bo until we have a valid attachment, and a
	 * valid attachment needs the bo address. So pre-create a bo before
	 * creating the attachment and publish.
	 */
	bo = xe_bo_alloc();
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	attach_ops = &xe_dma_buf_attach_ops;
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	if (test)
		attach_ops = test->attach_ops;
#endif

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, &bo->ttm.base);
	if (IS_ERR(attach)) {
		obj = ERR_CAST(attach);
		goto out_err;
	}

	/* Errors here will take care of freeing the bo. */
	obj = xe_dma_buf_init_obj(dev, bo, dma_buf);
	if (IS_ERR(obj))
		return obj;

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;

out_err:
	xe_bo_free(bo);

	return obj;
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_dma_buf.c"
#endif