xref: /linux/drivers/accel/amdxdna/amdxdna_ubuf.c (revision 8457669db968c98edb781892d73fa559e1efcbd4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2025, Advanced Micro Devices, Inc.
4  */
5 
6 #include <drm/amdxdna_accel.h>
7 #include <drm/drm_device.h>
8 #include <drm/drm_print.h>
9 #include <linux/dma-buf.h>
10 #include <linux/overflow.h>
11 #include <linux/pagemap.h>
12 #include <linux/vmalloc.h>
13 
14 #include "amdxdna_pci_drv.h"
15 #include "amdxdna_ubuf.h"
16 
/*
 * Private payload attached to a dma-buf exported by amdxdna_get_ubuf().
 * Owns the pinned user pages and a grabbed reference on the pinning mm
 * so pinned_vm accounting can be undone at release time.
 */
struct amdxdna_ubuf_priv {
	struct page **pages;		/* pinned user pages backing the buffer */
	u64 nr_pages;			/* number of entries in @pages */
	enum amdxdna_ubuf_flag flags;	/* export flags, e.g. AMDXDNA_UBUF_FLAG_MAP_DMA */
	struct mm_struct *mm;		/* mm charged for the pins (mmgrab'd) */
};
23 
amdxdna_ubuf_map(struct dma_buf_attachment * attach,enum dma_data_direction direction)24 static struct sg_table *amdxdna_ubuf_map(struct dma_buf_attachment *attach,
25 					 enum dma_data_direction direction)
26 {
27 	struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
28 	struct sg_table *sg;
29 	int ret;
30 
31 	sg = kzalloc_obj(*sg);
32 	if (!sg)
33 		return ERR_PTR(-ENOMEM);
34 
35 	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->nr_pages, 0,
36 					ubuf->nr_pages << PAGE_SHIFT, GFP_KERNEL);
37 	if (ret)
38 		goto err_free_sg;
39 
40 	if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA) {
41 		ret = dma_map_sgtable(attach->dev, sg, direction, 0);
42 		if (ret)
43 			goto err_free_table;
44 	}
45 
46 	return sg;
47 
48 err_free_table:
49 	sg_free_table(sg);
50 err_free_sg:
51 	kfree(sg);
52 	return ERR_PTR(ret);
53 }
54 
amdxdna_ubuf_unmap(struct dma_buf_attachment * attach,struct sg_table * sg,enum dma_data_direction direction)55 static void amdxdna_ubuf_unmap(struct dma_buf_attachment *attach,
56 			       struct sg_table *sg,
57 			       enum dma_data_direction direction)
58 {
59 	struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
60 
61 	if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA)
62 		dma_unmap_sgtable(attach->dev, sg, direction, 0);
63 
64 	sg_free_table(sg);
65 	kfree(sg);
66 }
67 
/*
 * dma_buf .release: final teardown when the last dma-buf reference drops.
 * Unpins the user pages, returns the pinned_vm charge taken in
 * amdxdna_get_ubuf(), and drops the mm reference grabbed there.
 */
static void amdxdna_ubuf_release(struct dma_buf *dbuf)
{
	struct amdxdna_ubuf_priv *ubuf = dbuf->priv;

	/* Unpin before freeing the page array that records the pins. */
	unpin_user_pages(ubuf->pages, ubuf->nr_pages);
	kvfree(ubuf->pages);
	atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
	mmdrop(ubuf->mm);
	kfree(ubuf);
}
78 
amdxdna_ubuf_vm_fault(struct vm_fault * vmf)79 static vm_fault_t amdxdna_ubuf_vm_fault(struct vm_fault *vmf)
80 {
81 	struct vm_area_struct *vma = vmf->vma;
82 	struct amdxdna_ubuf_priv *ubuf;
83 	unsigned long pfn;
84 	pgoff_t pgoff;
85 
86 	ubuf = vma->vm_private_data;
87 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
88 
89 	pfn = page_to_pfn(ubuf->pages[pgoff]);
90 	return vmf_insert_pfn(vma, vmf->address, pfn);
91 }
92 
/* VM operations for userspace mmaps of the exported buffer (PFN faults). */
static const struct vm_operations_struct amdxdna_ubuf_vm_ops = {
	.fault = amdxdna_ubuf_vm_fault,
};
96 
amdxdna_ubuf_mmap(struct dma_buf * dbuf,struct vm_area_struct * vma)97 static int amdxdna_ubuf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
98 {
99 	struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
100 
101 	vma->vm_ops = &amdxdna_ubuf_vm_ops;
102 	vma->vm_private_data = ubuf;
103 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
104 
105 	return 0;
106 }
107 
amdxdna_ubuf_vmap(struct dma_buf * dbuf,struct iosys_map * map)108 static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map)
109 {
110 	struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
111 	void *kva;
112 
113 	kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL);
114 	if (!kva)
115 		return -EINVAL;
116 
117 	iosys_map_set_vaddr(map, kva);
118 	return 0;
119 }
120 
/* dma_buf .vunmap: tear down the kernel mapping built by amdxdna_ubuf_vmap(). */
static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	vunmap(map->vaddr);
}
125 
/* dma-buf exporter operations for user-buffer-backed (ubuf) objects. */
static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
	.map_dma_buf = amdxdna_ubuf_map,
	.unmap_dma_buf = amdxdna_ubuf_unmap,
	.release = amdxdna_ubuf_release,
	.mmap = amdxdna_ubuf_mmap,
	.vmap = amdxdna_ubuf_vmap,
	.vunmap = amdxdna_ubuf_vunmap,
};
134 
/*
 * amdxdna_get_ubuf - export user VA ranges as a dma-buf
 * @dev: DRM device the buffer belongs to
 * @flags: export behavior flags (e.g. AMDXDNA_UBUF_FLAG_MAP_DMA)
 * @num_entries: number of entries in @va_entries; must be non-zero
 * @va_entries: userspace array of page-aligned {vaddr, len} ranges
 *
 * Pins the user pages long-term (charged against RLIMIT_MEMLOCK) and
 * wraps them in a dma-buf whose ops serve map/mmap/vmap from the pinned
 * pages. On success the dma-buf owns the pins, the pinned_vm charge and
 * an mm reference, all released in amdxdna_ubuf_release().
 *
 * Return: the exported dma-buf, or ERR_PTR() on failure.
 */
struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
				 enum amdxdna_ubuf_flag flags,
				 u32 num_entries, void __user *va_entries)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	unsigned long lock_limit, new_pinned;
	struct amdxdna_drm_va_entry *va_ent;
	struct amdxdna_ubuf_priv *ubuf;
	u32 npages, start = 0;
	struct dma_buf *dbuf;
	int i, ret;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	/* An empty range list cannot describe a buffer. */
	if (!num_entries)
		return ERR_PTR(-EINVAL);

	ubuf = kzalloc_obj(*ubuf);
	if (!ubuf)
		return ERR_PTR(-ENOMEM);

	ubuf->flags = flags;
	ubuf->mm = current->mm;
	mmgrab(ubuf->mm);

	va_ent = kvzalloc_objs(*va_ent, num_entries);
	if (!va_ent) {
		ret = -ENOMEM;
		goto free_ubuf;
	}

	/* array_size() saturates on overflow, making copy_from_user() fail. */
	if (copy_from_user(va_ent, va_entries,
			   array_size(num_entries, sizeof(*va_ent)))) {
		XDNA_DBG(xdna, "Access va entries failed");
		ret = -EINVAL;
		goto free_ent;
	}

	/* Validate alignment and accumulate the total size overflow-safely. */
	for (i = 0, exp_info.size = 0; i < num_entries; i++) {
		if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
		    !IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) {
			XDNA_ERR(xdna, "Invalid address or len %llx, %llx",
				 va_ent[i].vaddr, va_ent[i].len);
			ret = -EINVAL;
			goto free_ent;
		}

		if (check_add_overflow(exp_info.size, va_ent[i].len, &exp_info.size)) {
			ret = -EINVAL;
			goto free_ent;
		}
	}

	/* Charge the pins against RLIMIT_MEMLOCK before pinning. */
	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
			 new_pinned, lock_limit, capable(CAP_IPC_LOCK));
		ret = -ENOMEM;
		goto sub_pin_cnt;
	}

	ubuf->pages = kvmalloc_objs(*ubuf->pages, ubuf->nr_pages);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto sub_pin_cnt;
	}

	for (i = 0; i < num_entries; i++) {
		npages = va_ent[i].len >> PAGE_SHIFT;

		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
					  FOLL_WRITE | FOLL_LONGTERM,
					  &ubuf->pages[start]);
		if (ret < 0 || ret != npages) {
			/* Log the real return value before replacing it. */
			XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
			/*
			 * A short pin still pinned @ret pages; count them so
			 * the error path unpins them too.
			 */
			if (ret > 0)
				start += ret;
			ret = -ENOMEM;
			goto destroy_pages;
		}

		start += ret;
	}

	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR | O_CLOEXEC;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf)) {
		ret = PTR_ERR(dbuf);
		goto destroy_pages;
	}
	kvfree(va_ent);

	return dbuf;

destroy_pages:
	if (start)
		unpin_user_pages(ubuf->pages, start);
	kvfree(ubuf->pages);
sub_pin_cnt:
	atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
free_ent:
	kvfree(va_ent);
free_ubuf:
	mmdrop(ubuf->mm);
	kfree(ubuf);
	return ERR_PTR(ret);
}
243