// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <linux/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/iova.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "ipu6.h"
#include "ipu6-bus.h"
#include "ipu6-dma.h"
#include "ipu6-mmu.h"

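/*
 * Book-keeping for one coherent allocation: the backing pages, the
 * kernel vmap() address, and the IPU6 IOVA range it occupies.  Entries
 * are kept on mmu->vma_list so mmap, sync and get_sgtable can look an
 * allocation up by its IOVA.
 */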
struct vm_info {
	struct list_head list;
	struct page **pages;
	dma_addr_t ipu6_iova;
	void *vaddr;
	unsigned long size;
};

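/*
 * Find the vm_info whose IOVA range [ipu6_iova, ipu6_iova + size)
 * contains @iova, or NULL if no allocation covers it.
 */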
static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova)
{
	struct vm_info *info, *save;

	list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
		if (iova >= info->ipu6_iova &&
		    iova < (info->ipu6_iova + info->size))
			return info;
	}

	return NULL;
}

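/*
 * Zero a freshly allocated page range and, unless the caller passed
 * DMA_ATTR_SKIP_CPU_SYNC, flush it out of the CPU caches so the device
 * never observes stale cache lines.
 */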
static void __dma_clear_buffer(struct page *page, size_t size,
			       unsigned long attrs)
{
	void *ptr;

	if (!page)
		return;
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		clflush_cache_range(ptr, size);
}

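/*
 * Allocate @size bytes as an array of 0-order pages.  Higher-order
 * allocations are tried first and broken up with split_page() so the
 * result can always be handled as individual pages; on failure the
 * order is lowered until single pages are used.  Each chunk is zeroed
 * and flushed before it is handed out.
 */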
static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
					gfp_t gfp, unsigned long attrs)
{
	int count = PHYS_PFN(size);
	int array_size = count * sizeof(struct page *);
	struct page **pages;
	int i = 0;

	pages = kvzalloc(array_size, GFP_KERNEL);
	if (!pages)
		return NULL;

	gfp |= __GFP_NOWARN;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (j--)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}

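/*
 * Free the page array built by __dma_alloc_buffer(), clearing and
 * flushing each page before it is returned to the allocator.
 */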
static void __dma_free_buffer(struct device *dev, struct page **pages,
			      size_t size, unsigned long attrs)
{
	int count = PHYS_PFN(size);
	unsigned int i;

	for (i = 0; i < count && pages[i]; i++) {
		__dma_clear_buffer(pages[i], PAGE_SIZE, attrs);
		__free_pages(pages[i], 0);
	}

	kvfree(pages);
}

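/*
 * Flush the CPU cache for the part of a coherent allocation covered by
 * [dma_handle, dma_handle + size).  clflush both writes back and
 * invalidates, so the same helper also serves as the for_device sync.
 */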
static void ipu6_dma_sync_single_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 size_t size,
					 enum dma_data_direction dir)
{
	void *vaddr;
	u32 offset;
	struct vm_info *info;
	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;

	info = get_vm_info(mmu, dma_handle);
	if (WARN_ON(!info))
		return;

	offset = dma_handle - info->ipu6_iova;
	if (WARN_ON(size > (info->size - offset)))
		return;

	vaddr = info->vaddr + offset;
	clflush_cache_range(vaddr, size);
}

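/* Flush the CPU cache for every entry of a mapped scatterlist. */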
static void ipu6_dma_sync_sg_for_cpu(struct device *dev,
				     struct scatterlist *sglist,
				     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
}

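/*
 * Allocate a coherent buffer for the IPU6.  Every backing page is
 * mapped twice: dma_map_page_attrs() on the PCI device yields a PCI DMA
 * address, and ipu6_mmu_map() then maps that address into a contiguous
 * IOVA range inside the IPU6 MMU.  The returned dma_handle is the IPU6
 * IOVA; the CPU view is a vmap() of the pages, tracked in a vm_info.
 */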
static void *ipu6_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp,
			    unsigned long attrs)
{
	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
	dma_addr_t pci_dma_addr, ipu6_iova;
	struct vm_info *info;
	unsigned long count;
	struct page **pages;
	struct iova *iova;
	unsigned int i;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;

	size = PAGE_ALIGN(size);
	count = PHYS_PFN(size);

	iova = alloc_iova(&mmu->dmap->iovad, count,
			  PHYS_PFN(dma_get_mask(dev)), 0);
	if (!iova)
		goto out_kfree;

	pages = __dma_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		goto out_free_iova;

	dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n",
		size, iova->pfn_lo, iova->pfn_hi);
	for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
		pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0,
						  PAGE_SIZE, DMA_BIDIRECTIONAL,
						  attrs);
		dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n",
			&pci_dma_addr);
		if (dma_mapping_error(&pdev->dev, pci_dma_addr)) {
			dev_err(dev, "pci_dma_mapping for page[%d] failed", i);
			goto out_unmap;
		}

		ret = ipu6_mmu_map(mmu->dmap->mmu_info,
				   PFN_PHYS(iova->pfn_lo + i), pci_dma_addr,
				   PAGE_SIZE);
		if (ret) {
			dev_err(dev, "ipu6_mmu_map for pci_dma[%d] %pad failed",
				i, &pci_dma_addr);
			dma_unmap_page_attrs(&pdev->dev, pci_dma_addr,
					     PAGE_SIZE, DMA_BIDIRECTIONAL,
					     attrs);
			goto out_unmap;
		}
	}

	info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
	if (!info->vaddr)
		goto out_unmap;

	*dma_handle = PFN_PHYS(iova->pfn_lo);

	info->pages = pages;
	info->ipu6_iova = *dma_handle;
	info->size = size;
	list_add(&info->list, &mmu->vma_list);

	return info->vaddr;

out_unmap:
	while (i--) {
		ipu6_iova = PFN_PHYS(iova->pfn_lo + i);
		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
						     ipu6_iova);
		dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, attrs);

		ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE);
	}

	__dma_free_buffer(dev, pages, size, attrs);

out_free_iova:
	__free_iova(&mmu->dmap->iovad, iova);
out_kfree:
	kfree(info);

	return NULL;
}

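/*
 * Tear down a buffer from ipu6_dma_alloc(): drop the vmap(), undo the
 * per-page PCI DMA mappings, unmap the IOVA range from the IPU6 MMU,
 * free the pages and release the IOVA.
 */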
static void ipu6_dma_free(struct device *dev, size_t size, void *vaddr,
			  dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
	struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle));
	dma_addr_t pci_dma_addr, ipu6_iova;
	struct vm_info *info;
	struct page **pages;
	unsigned int i;

	if (WARN_ON(!iova))
		return;

	info = get_vm_info(mmu, dma_handle);
	if (WARN_ON(!info))
		return;

	if (WARN_ON(!info->vaddr))
		return;

	if (WARN_ON(!info->pages))
		return;

	list_del(&info->list);

	size = PAGE_ALIGN(size);

	pages = info->pages;

	vunmap(vaddr);

	for (i = 0; i < PHYS_PFN(size); i++) {
		ipu6_iova = PFN_PHYS(iova->pfn_lo + i);
		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
						     ipu6_iova);
		dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, attrs);
	}

	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
		       PFN_PHYS(iova_size(iova)));

	__dma_free_buffer(dev, pages, size, attrs);

	mmu->tlb_invalidate(mmu);

	__free_iova(&mmu->dmap->iovad, iova);

	kfree(info);
}

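/*
 * Map a coherent allocation into user space by inserting its pages one
 * by one into the VMA.
 */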
static int ipu6_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			 void *addr, dma_addr_t iova, size_t size,
			 unsigned long attrs)
{
	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
	size_t count = PHYS_PFN(PAGE_ALIGN(size));
	struct vm_info *info;
	size_t i;
	int ret;

	info = get_vm_info(mmu, iova);
	if (!info)
		return -EFAULT;

	if (!info->vaddr)
		return -EFAULT;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	if (size > info->size)
		return -EFAULT;

	for (i = 0; i < count; i++) {
		ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i),
				     info->pages[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

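/*
 * Undo ipu6_dma_map_sg(): restore the PCI DMA address into each sg
 * entry, unmap the IOVA range from the IPU6 MMU, then hand the list
 * back to the PCI DMA layer and release the IOVA.
 */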
static void ipu6_dma_unmap_sg(struct device *dev,
			      struct scatterlist *sglist,
			      int nents, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
	struct iova *iova = find_iova(&mmu->dmap->iovad,
				      PHYS_PFN(sg_dma_address(sglist)));
	int i, npages, count;
	struct scatterlist *sg;
	dma_addr_t pci_dma_addr;

	if (!nents)
		return;

	if (WARN_ON(!iova))
		return;

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);

	/* the caller passes orig_nents; work out how many were mapped */
	count = 0;
	npages = iova_size(iova);
	for_each_sg(sglist, sg, nents, i) {
		if (sg_dma_len(sg) == 0 ||
		    sg_dma_address(sg) == DMA_MAPPING_ERROR)
			break;

		npages -= PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
		count++;
		if (npages <= 0)
			break;
	}

	/*
	 * Before the IPU6 MMU unmap, put the PCI DMA address back into
	 * each sg entry.  The mapped nents may be smaller than
	 * orig_nents, since the smallest mapping granule is one 4 KiB
	 * page.
	 */
	dev_dbg(dev, "trying to unmap concatenated %u ents\n", count);
	for_each_sg(sglist, sg, count, i) {
		dev_dbg(dev, "ipu unmap sg[%d] %pad\n", i, &sg_dma_address(sg));
		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
						     sg_dma_address(sg));
		dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n",
			&pci_dma_addr, i);
		sg_dma_address(sg) = pci_dma_addr;
	}

	dev_dbg(dev, "ipu6_mmu_unmap low pfn %lu high pfn %lu\n",
		iova->pfn_lo, iova->pfn_hi);
	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
		       PFN_PHYS(iova_size(iova)));

	mmu->tlb_invalidate(mmu);

	dma_unmap_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);

	__free_iova(&mmu->dmap->iovad, iova);
}

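/*
 * Map a scatterlist for the IPU6.  The list is first mapped on the PCI
 * device with dma_map_sg_attrs(); a single contiguous IOVA range is
 * then allocated and each PCI DMA segment is mapped into it with
 * ipu6_mmu_map().  On return every sg_dma_address() is an IPU6 IOVA.
 * Entries with a non-zero sg offset are rejected.
 */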
static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			   int nents, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
	struct scatterlist *sg;
	struct iova *iova;
	size_t npages = 0;
	unsigned long iova_addr;
	int i, count;

	for_each_sg(sglist, sg, nents, i) {
		if (sg->offset) {
			dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n",
				i, sg->offset);
			return -EFAULT;
		}
	}

	dev_dbg(dev, "pci_dma_map_sg trying to map %d ents\n", nents);
	count = dma_map_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
	if (count <= 0) {
		dev_err(dev, "pci_dma_map_sg %d ents failed\n", nents);
		return 0;
	}

	dev_dbg(dev, "pci_dma_map_sg %d ents mapped\n", count);

	for_each_sg(sglist, sg, count, i)
		npages += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));

	iova = alloc_iova(&mmu->dmap->iovad, npages,
			  PHYS_PFN(dma_get_mask(dev)), 0);
	if (!iova)
		return 0;

	dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
		iova->pfn_hi);

	iova_addr = iova->pfn_lo;
	for_each_sg(sglist, sg, count, i) {
		int ret;

		dev_dbg(dev, "mapping entry %d: iova 0x%llx phy %pad size %d\n",
			i, PFN_PHYS(iova_addr), &sg_dma_address(sg),
			sg_dma_len(sg));

		ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
				   sg_dma_address(sg),
				   PAGE_ALIGN(sg_dma_len(sg)));
		if (ret)
			goto out_fail;

		sg_dma_address(sg) = PFN_PHYS(iova_addr);

		iova_addr += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
	}

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);

	return count;

out_fail:
	ipu6_dma_unmap_sg(dev, sglist, i, dir, attrs);

	return 0;
}

/*
 * Create scatter-list for the already allocated DMA buffer
 */
static int ipu6_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
				void *cpu_addr, dma_addr_t handle, size_t size,
				unsigned long attrs)
{
	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
	struct vm_info *info;
	int n_pages;
	int ret = 0;

	info = get_vm_info(mmu, handle);
	if (!info)
		return -EFAULT;

	if (!info->vaddr)
		return -EFAULT;

	if (WARN_ON(!info->pages))
		return -ENOMEM;

	n_pages = PHYS_PFN(PAGE_ALIGN(size));

	ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
					GFP_KERNEL);
	if (ret)
		dev_warn(dev, "IPU6 get sgt table failed\n");

	return ret;
}

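/*
 * The dma_map_ops used for IPU6 bus devices.  The same clflush-based
 * helpers serve as both the for_cpu and for_device sync callbacks,
 * since clflush both writes back and invalidates the affected cache
 * lines.
 */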
const struct dma_map_ops ipu6_dma_ops = {
	.alloc = ipu6_dma_alloc,
	.free = ipu6_dma_free,
	.mmap = ipu6_dma_mmap,
	.map_sg = ipu6_dma_map_sg,
	.unmap_sg = ipu6_dma_unmap_sg,
	.sync_single_for_cpu = ipu6_dma_sync_single_for_cpu,
	.sync_single_for_device = ipu6_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = ipu6_dma_sync_sg_for_cpu,
	.sync_sg_for_device = ipu6_dma_sync_sg_for_cpu,
	.get_sgtable = ipu6_dma_get_sgtable,
};
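
/*
 * Illustrative sketch only, assuming these ops are installed on an IPU6
 * bus device: a consumer goes through the generic DMA API and the
 * handle it gets back is an IPU6 IOVA, e.g.:
 *
 *	dma_addr_t iova;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_attrs(&adev->auxdev.dev, size, &iova,
 *				   GFP_KERNEL, 0);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(&adev->auxdev.dev, size, cpu_addr, iova, 0);
 *
 * The "adev" device pointer above is an assumption for the example; the
 * actual struct device comes from the IPU6 bus code.
 */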