// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */

#include <linux/dma-buf.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_dumb_buffers.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

#include "lsdc_drv.h"
#include "lsdc_gem.h"
#include "lsdc_ttm.h"

static int lsdc_gem_prime_pin(struct drm_gem_object *obj)
{
        struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
        int ret;

        dma_resv_assert_held(obj->resv);

        ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL);
        if (likely(ret == 0))
                lbo->sharing_count++;

        return ret;
}

static void lsdc_gem_prime_unpin(struct drm_gem_object *obj)
{
        struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);

        dma_resv_assert_held(obj->resv);

        lsdc_bo_unpin(lbo);
        if (lbo->sharing_count)
                lbo->sharing_count--;
}

static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct ttm_buffer_object *tbo = to_ttm_bo(obj);
        struct ttm_tt *tt = tbo->ttm;

        if (!tt) {
                drm_err(obj->dev, "sharing a buffer without backing memory\n");
                return ERR_PTR(-ENOMEM);
        }

        return drm_prime_pages_to_sg(obj->dev, tt->pages, tt->num_pages);
}

static void lsdc_gem_object_free(struct drm_gem_object *obj)
{
        struct ttm_buffer_object *tbo = to_ttm_bo(obj);

        if (tbo)
                ttm_bo_fini(tbo);
}

static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        struct ttm_buffer_object *tbo = to_ttm_bo(obj);
        struct lsdc_bo *lbo = to_lsdc_bo(tbo);
        int ret;

        if (lbo->vmap_count > 0) {
                ++lbo->vmap_count;
                goto out;
        }

        ret = lsdc_bo_pin(lbo, 0, NULL);
        if (unlikely(ret)) {
                drm_err(obj->dev, "pin %p for vmap failed\n", lbo);
                return ret;
        }

        ret = ttm_bo_vmap(tbo, &lbo->map);
        if (ret) {
                drm_err(obj->dev, "ttm bo vmap failed\n");
                lsdc_bo_unpin(lbo);
                return ret;
        }

        lbo->vmap_count = 1;

out:
        *map = lbo->map;

        return 0;
}

static void lsdc_gem_object_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        struct ttm_buffer_object *tbo = to_ttm_bo(obj);
        struct lsdc_bo *lbo = to_lsdc_bo(tbo);

        if (unlikely(!lbo->vmap_count)) {
                drm_warn(obj->dev, "%p is not mapped\n", lbo);
                return;
        }

        --lbo->vmap_count;
        if (lbo->vmap_count == 0) {
                ttm_bo_vunmap(tbo, &lbo->map);

                lsdc_bo_unpin(lbo);
        }
}

static int lsdc_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct ttm_buffer_object *tbo = to_ttm_bo(obj);
        int ret;

        ret = ttm_bo_mmap_obj(vma, tbo);
        if (unlikely(ret)) {
                drm_warn(obj->dev, "mmap %p failed\n", tbo);
                return ret;
        }

        drm_gem_object_put(obj);

        return 0;
}

static const struct drm_gem_object_funcs lsdc_gem_object_funcs = {
        .free = lsdc_gem_object_free,
        .export = drm_gem_prime_export,
        .pin = lsdc_gem_prime_pin,
        .unpin = lsdc_gem_prime_unpin,
        .get_sg_table = lsdc_gem_prime_get_sg_table,
        .vmap = lsdc_gem_object_vmap,
        .vunmap = lsdc_gem_object_vunmap,
        .mmap = lsdc_gem_object_mmap,
};
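/*
 * lsdc_gem_object_create() - create a TTM-backed GEM buffer object.
 *
 * @domain selects the initial placement (LSDC_GEM_DOMAIN_VRAM or
 * LSDC_GEM_DOMAIN_GTT); @kernel is forwarded to lsdc_bo_create() and, by the
 * usual TTM convention, presumably selects a kernel-internal BO type; @sg and
 * @resv carry the sg table and reservation object of an imported dma-buf and
 * are NULL for a fresh allocation.
 *
 * Freshly allocated BOs are cleared because VRAM comes up with random
 * contents; imported buffers (sg != NULL) are left untouched. Every BO
 * created here is added to the per-device gem.objects list so that it shows
 * up in the debugfs dump at the bottom of this file.
 *
 * Returns the embedded drm_gem_object on success, an ERR_PTR() on failure.
 */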
struct drm_gem_object *lsdc_gem_object_create(struct drm_device *ddev,
                                              u32 domain,
                                              size_t size,
                                              bool kernel,
                                              struct sg_table *sg,
                                              struct dma_resv *resv)
{
        struct lsdc_device *ldev = to_lsdc(ddev);
        struct drm_gem_object *gobj;
        struct lsdc_bo *lbo;
        int ret;

        lbo = lsdc_bo_create(ddev, domain, size, kernel, sg, resv);
        if (IS_ERR(lbo)) {
                ret = PTR_ERR(lbo);
                return ERR_PTR(ret);
        }

        if (!sg) {
                /* VRAM is filled with random data */
                lsdc_bo_clear(lbo);
        }

        gobj = &lbo->tbo.base;
        gobj->funcs = &lsdc_gem_object_funcs;

        /* Track the BOs we created */
        mutex_lock(&ldev->gem.mutex);
        list_add_tail(&lbo->list, &ldev->gem.objects);
        mutex_unlock(&ldev->gem.mutex);

        return gobj;
}

struct drm_gem_object *
lsdc_prime_import_sg_table(struct drm_device *ddev,
                           struct dma_buf_attachment *attach,
                           struct sg_table *sg)
{
        struct dma_resv *resv = attach->dmabuf->resv;
        u64 size = attach->dmabuf->size;
        struct drm_gem_object *gobj;
        struct lsdc_bo *lbo;

        dma_resv_lock(resv, NULL);
        gobj = lsdc_gem_object_create(ddev, LSDC_GEM_DOMAIN_GTT, size, false,
                                      sg, resv);
        dma_resv_unlock(resv);

        if (IS_ERR(gobj)) {
                drm_err(ddev, "Failed to import sg table\n");
                return gobj;
        }

        lbo = gem_to_lsdc_bo(gobj);
        lbo->sharing_count = 1;

        return gobj;
}

int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev,
                     struct drm_mode_create_dumb *args)
{
        struct lsdc_device *ldev = to_lsdc(ddev);
        const struct lsdc_desc *descp = ldev->descp;
        u32 domain = LSDC_GEM_DOMAIN_VRAM;
        struct drm_gem_object *gobj;
        int ret;

        ret = drm_mode_size_dumb(ddev, args, descp->pitch_align, 0);
        if (ret)
                return ret;

        /* The maximum size allowed for a single BO is half of the available VRAM */
        if (args->size > ldev->vram_size / 2) {
                drm_err(ddev, "Requesting %zu MiB of VRAM failed\n",
                        (size_t)(args->size >> 20));
                return -ENOMEM;
        }

        gobj = lsdc_gem_object_create(ddev, domain, args->size, false, NULL, NULL);
        if (IS_ERR(gobj)) {
                drm_err(ddev, "Failed to create gem object\n");
                return PTR_ERR(gobj);
        }

        ret = drm_gem_handle_create(file, gobj, &args->handle);

        /* Drop the reference from allocation, the handle holds it now */
        drm_gem_object_put(gobj);
        if (ret)
                return ret;

        return 0;
}

int lsdc_dumb_map_offset(struct drm_file *filp, struct drm_device *ddev,
                         u32 handle, uint64_t *offset)
{
        struct drm_gem_object *gobj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (!gobj)
                return -ENOENT;

        *offset = drm_vma_node_offset_addr(&gobj->vma_node);

        drm_gem_object_put(gobj);

        return 0;
}

void lsdc_gem_init(struct drm_device *ddev)
{
        struct lsdc_device *ldev = to_lsdc(ddev);

        mutex_init(&ldev->gem.mutex);
        INIT_LIST_HEAD(&ldev->gem.objects);
}
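/*
 * Debugfs helper: walk the per-device gem.objects list under the gem mutex
 * and print each BO's size, current memory type and GPU offset, followed by
 * the total amount of pinned VRAM and GTT memory.
 */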
%zu KiB\n", 287 ldev->vram_pinned_size >> 10, ldev->gtt_pinned_size >> 10); 288 289 return 0; 290 } 291