xref: /linux/drivers/media/pci/intel/ipu6/ipu6-dma.c (revision cdd30ebb1b9f36159d66f088b61aee264e649d7a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013--2024 Intel Corporation
4  */
5 
6 #include <linux/cacheflush.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/iova.h>
9 #include <linux/list.h>
10 #include <linux/mm.h>
11 #include <linux/vmalloc.h>
12 #include <linux/scatterlist.h>
13 #include <linux/slab.h>
14 #include <linux/types.h>
15 
16 #include "ipu6.h"
17 #include "ipu6-bus.h"
18 #include "ipu6-dma.h"
19 #include "ipu6-mmu.h"
20 
21 struct vm_info {
22 	struct list_head list;
23 	struct page **pages;
24 	dma_addr_t ipu6_iova;
25 	void *vaddr;
26 	unsigned long size;
27 };
28 
29 static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova)
30 {
31 	struct vm_info *info, *save;
32 
33 	list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
34 		if (iova >= info->ipu6_iova &&
35 		    iova < (info->ipu6_iova + info->size))
36 			return info;
37 	}
38 
39 	return NULL;
40 }
41 
42 static void __clear_buffer(struct page *page, size_t size, unsigned long attrs)
43 {
44 	void *ptr;
45 
46 	if (!page)
47 		return;
48 	/*
49 	 * Ensure that the allocated pages are zeroed, and that any data
50 	 * lurking in the kernel direct-mapped region is invalidated.
51 	 */
52 	ptr = page_address(page);
53 	memset(ptr, 0, size);
54 	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
55 		clflush_cache_range(ptr, size);
56 }
57 
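/*
 * Allocate the backing store as an array of order-0 pages: the loop below
 * requests the largest power-of-two block that still fits the remaining
 * page count, retries with progressively smaller orders on failure, and
 * then split_page()s the block so every pages[] entry can later be freed
 * individually with __free_pages(page, 0).
 */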
58 static struct page **__alloc_buffer(size_t size, gfp_t gfp, unsigned long attrs)
59 {
60 	int count = PHYS_PFN(size);
61 	int array_size = count * sizeof(struct page *);
62 	struct page **pages;
63 	int i = 0;
64 
65 	pages = kvzalloc(array_size, GFP_KERNEL);
66 	if (!pages)
67 		return NULL;
68 
69 	gfp |= __GFP_NOWARN;
70 
71 	while (count) {
72 		int j, order = __fls(count);
73 
74 		pages[i] = alloc_pages(gfp, order);
75 		while (!pages[i] && order)
76 			pages[i] = alloc_pages(gfp, --order);
77 		if (!pages[i])
78 			goto error;
79 
80 		if (order) {
81 			split_page(pages[i], order);
82 			j = 1 << order;
83 			while (j--)
84 				pages[i + j] = pages[i] + j;
85 		}
86 
87 		__clear_buffer(pages[i], PAGE_SIZE << order, attrs);
88 		i += 1 << order;
89 		count -= 1 << order;
90 	}
91 
92 	return pages;
93 error:
94 	while (i--)
95 		if (pages[i])
96 			__free_pages(pages[i], 0);
97 	kvfree(pages);
98 	return NULL;
99 }
100 
101 static void __free_buffer(struct page **pages, size_t size, unsigned long attrs)
102 {
103 	int count = PHYS_PFN(size);
104 	unsigned int i;
105 
106 	for (i = 0; i < count && pages[i]; i++) {
107 		__clear_buffer(pages[i], PAGE_SIZE, attrs);
108 		__free_pages(pages[i], 0);
109 	}
110 
111 	kvfree(pages);
112 }
113 
114 void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle,
115 			  size_t size)
116 {
117 	void *vaddr;
118 	u32 offset;
119 	struct vm_info *info;
120 	struct ipu6_mmu *mmu = sys->mmu;
121 
122 	info = get_vm_info(mmu, dma_handle);
123 	if (WARN_ON(!info))
124 		return;
125 
126 	offset = dma_handle - info->ipu6_iova;
127 	if (WARN_ON(size > (info->size - offset)))
128 		return;
129 
130 	vaddr = info->vaddr + offset;
131 	clflush_cache_range(vaddr, size);
132 }
133 EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_single, "INTEL_IPU6");
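/*
 * Example (hypothetical sketch, not taken from this driver): flush CPU
 * writes to a buffer obtained from ipu6_dma_alloc() before the device
 * reads it.  "adev", "blob" and "blob_size" are illustrative names only.
 *
 *	dma_addr_t iova;
 *	void *cpu_addr;
 *
 *	cpu_addr = ipu6_dma_alloc(adev, blob_size, &iova, GFP_KERNEL, 0);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	memcpy(cpu_addr, blob, blob_size);
 *	ipu6_dma_sync_single(adev, iova, blob_size);
 *	... hand "iova" to the firmware, which accesses it through the IPU MMU ...
 */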
134 
135 void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
136 		      int nents)
137 {
138 	struct scatterlist *sg;
139 	int i;
140 
141 	for_each_sg(sglist, sg, nents, i)
142 		clflush_cache_range(sg_virt(sg), sg->length);
143 }
144 EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sg, "INTEL_IPU6");
145 
146 void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt)
147 {
148 	ipu6_dma_sync_sg(sys, sgt->sgl, sgt->orig_nents);
149 }
150 EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sgtable, "INTEL_IPU6");
151 
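/*
 * Allocate "size" bytes for the IPU: the pages are DMA-mapped to the parent
 * PCI device, those PCI DMA addresses are then mapped into the IPU MMU at a
 * driver-allocated IOVA, and that IOVA is returned in *dma_handle.  The
 * kernel virtual address of the vmap()ed buffer is the return value.
 */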
152 void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size,
153 		     dma_addr_t *dma_handle, gfp_t gfp,
154 		     unsigned long attrs)
155 {
156 	struct device *dev = &sys->auxdev.dev;
157 	struct pci_dev *pdev = sys->isp->pdev;
158 	dma_addr_t pci_dma_addr, ipu6_iova;
159 	struct ipu6_mmu *mmu = sys->mmu;
160 	struct vm_info *info;
161 	unsigned long count;
162 	struct page **pages;
163 	struct iova *iova;
164 	unsigned int i;
165 	int ret;
166 
167 	info = kzalloc(sizeof(*info), GFP_KERNEL);
168 	if (!info)
169 		return NULL;
170 
171 	size = PAGE_ALIGN(size);
172 	count = PHYS_PFN(size);
173 
174 	iova = alloc_iova(&mmu->dmap->iovad, count,
175 			  PHYS_PFN(dma_get_mask(dev)), 0);
176 	if (!iova)
177 		goto out_kfree;
178 
179 	pages = __alloc_buffer(size, gfp, attrs);
180 	if (!pages)
181 		goto out_free_iova;
182 
183 	dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n",
184 		size, iova->pfn_lo, iova->pfn_hi);
185 	for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
186 		pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0,
187 						  PAGE_SIZE, DMA_BIDIRECTIONAL,
188 						  attrs);
189 		dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n",
190 			&pci_dma_addr);
191 		if (dma_mapping_error(&pdev->dev, pci_dma_addr)) {
192 			dev_err(dev, "pci_dma_mapping for page[%d] failed\n", i);
193 			goto out_unmap;
194 		}
195 
196 		ret = ipu6_mmu_map(mmu->dmap->mmu_info,
197 				   PFN_PHYS(iova->pfn_lo + i), pci_dma_addr,
198 				   PAGE_SIZE);
199 		if (ret) {
200 			dev_err(dev, "ipu6_mmu_map for pci_dma[%d] %pad failed\n",
201 				i, &pci_dma_addr);
202 			dma_unmap_page_attrs(&pdev->dev, pci_dma_addr,
203 					     PAGE_SIZE, DMA_BIDIRECTIONAL,
204 					     attrs);
205 			goto out_unmap;
206 		}
207 	}
208 
209 	info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
210 	if (!info->vaddr)
211 		goto out_unmap;
212 
213 	*dma_handle = PFN_PHYS(iova->pfn_lo);
214 
215 	info->pages = pages;
216 	info->ipu6_iova = *dma_handle;
217 	info->size = size;
218 	list_add(&info->list, &mmu->vma_list);
219 
220 	return info->vaddr;
221 
222 out_unmap:
223 	while (i--) {
224 		ipu6_iova = PFN_PHYS(iova->pfn_lo + i);
225 		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
226 						     ipu6_iova);
227 		dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
228 				     DMA_BIDIRECTIONAL, attrs);
229 
230 		ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE);
231 	}
232 
233 	__free_buffer(pages, size, attrs);
234 
235 out_free_iova:
236 	__free_iova(&mmu->dmap->iovad, iova);
237 out_kfree:
238 	kfree(info);
239 
240 	return NULL;
241 }
242 EXPORT_SYMBOL_NS_GPL(ipu6_dma_alloc, "INTEL_IPU6");
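/*
 * Example (hypothetical usage sketch): the returned handle is an IPU IOVA,
 * only meaningful to the IPU MMU, and must be released with ipu6_dma_free()
 * using the same size.  "adev" stands for some struct ipu6_bus_device.
 *
 *	dma_addr_t iova;
 *	void *buf;
 *
 *	buf = ipu6_dma_alloc(adev, SZ_64K, &iova, GFP_KERNEL, 0);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ipu6_dma_free(adev, SZ_64K, buf, iova, 0);
 */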
243 
244 void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr,
245 		   dma_addr_t dma_handle, unsigned long attrs)
246 {
247 	struct ipu6_mmu *mmu = sys->mmu;
248 	struct pci_dev *pdev = sys->isp->pdev;
249 	struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle));
250 	dma_addr_t pci_dma_addr, ipu6_iova;
251 	struct vm_info *info;
252 	struct page **pages;
253 	unsigned int i;
254 
255 	if (WARN_ON(!iova))
256 		return;
257 
258 	info = get_vm_info(mmu, dma_handle);
259 	if (WARN_ON(!info))
260 		return;
261 
262 	if (WARN_ON(!info->vaddr))
263 		return;
264 
265 	if (WARN_ON(!info->pages))
266 		return;
267 
268 	list_del(&info->list);
269 
270 	size = PAGE_ALIGN(size);
271 
272 	pages = info->pages;
273 
274 	vunmap(vaddr);
275 
276 	for (i = 0; i < PHYS_PFN(size); i++) {
277 		ipu6_iova = PFN_PHYS(iova->pfn_lo + i);
278 		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
279 						     ipu6_iova);
280 		dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
281 				     DMA_BIDIRECTIONAL, attrs);
282 	}
283 
284 	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
285 		       PFN_PHYS(iova_size(iova)));
286 
287 	__free_buffer(pages, size, attrs);
288 
289 	mmu->tlb_invalidate(mmu);
290 
291 	__free_iova(&mmu->dmap->iovad, iova);
292 
293 	kfree(info);
294 }
295 EXPORT_SYMBOL_NS_GPL(ipu6_dma_free, "INTEL_IPU6");
296 
297 int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma,
298 		  void *addr, dma_addr_t iova, size_t size,
299 		  unsigned long attrs)
300 {
301 	struct ipu6_mmu *mmu = sys->mmu;
302 	size_t count = PFN_UP(size);
303 	struct vm_info *info;
304 	size_t i;
305 	int ret;
306 
307 	info = get_vm_info(mmu, iova);
308 	if (!info)
309 		return -EFAULT;
310 
311 	if (!info->vaddr)
312 		return -EFAULT;
313 
314 	if (vma->vm_start & ~PAGE_MASK)
315 		return -EINVAL;
316 
317 	if (size > info->size)
318 		return -EFAULT;
319 
320 	for (i = 0; i < count; i++) {
321 		ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i),
322 				     info->pages[i]);
323 		if (ret < 0)
324 			return ret;
325 	}
326 
327 	return 0;
328 }
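/*
 * Example (hypothetical sketch): mapping a buffer that was allocated with
 * ipu6_dma_alloc() into user space from an mmap() handler.  "adev", "vaddr",
 * "iova" and "size" are assumed to have been saved by the caller at
 * allocation time; they are not defined in this file.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return ipu6_dma_mmap(adev, vma, vaddr, iova, size, 0);
 *	}
 */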
329 
330 void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
331 		       int nents, enum dma_data_direction dir,
332 		       unsigned long attrs)
333 {
334 	struct device *dev = &sys->auxdev.dev;
335 	struct ipu6_mmu *mmu = sys->mmu;
336 	struct iova *iova = find_iova(&mmu->dmap->iovad,
337 				      PHYS_PFN(sg_dma_address(sglist)));
338 	struct scatterlist *sg;
339 	dma_addr_t pci_dma_addr;
340 	unsigned int i;
341 
342 	if (!nents)
343 		return;
344 
345 	if (WARN_ON(!iova))
346 		return;
347 
348 	/*
349 	 * Before the IPU6 MMU unmap, restore each entry's PCI DMA address
350 	 * back to sg_dma_address().  nents is assumed not to exceed
351 	 * orig_nents, since the smallest mapping granule is one 4 KiB page.
352 	 */
353 	dev_dbg(dev, "trying to unmap concatenated %u ents\n", nents);
354 	for_each_sg(sglist, sg, nents, i) {
355 		dev_dbg(dev, "unmap sg[%d] %pad size %u\n", i,
356 			&sg_dma_address(sg), sg_dma_len(sg));
357 		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
358 						     sg_dma_address(sg));
359 		dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n",
360 			&pci_dma_addr, i);
361 		sg_dma_address(sg) = pci_dma_addr;
362 	}
363 
364 	dev_dbg(dev, "ipu6_mmu_unmap low pfn %lu high pfn %lu\n",
365 		iova->pfn_lo, iova->pfn_hi);
366 	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
367 		       PFN_PHYS(iova_size(iova)));
368 
369 	mmu->tlb_invalidate(mmu);
370 	__free_iova(&mmu->dmap->iovad, iova);
371 }
372 EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sg, "INTEL_IPU6");
373 
374 int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
375 		    int nents, enum dma_data_direction dir,
376 		    unsigned long attrs)
377 {
378 	struct device *dev = &sys->auxdev.dev;
379 	struct ipu6_mmu *mmu = sys->mmu;
380 	struct scatterlist *sg;
381 	struct iova *iova;
382 	size_t npages = 0;
383 	unsigned long iova_addr;
384 	int i;
385 
386 	for_each_sg(sglist, sg, nents, i) {
387 		if (sg->offset) {
388 			dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n",
389 				i, sg->offset);
390 			return -EFAULT;
391 		}
392 	}
393 
394 	for_each_sg(sglist, sg, nents, i)
395 		npages += PFN_UP(sg_dma_len(sg));
396 
397 	dev_dbg(dev, "dmamap trying to map %d ents %zu pages\n",
398 		nents, npages);
399 
400 	iova = alloc_iova(&mmu->dmap->iovad, npages,
401 			  PHYS_PFN(dma_get_mask(dev)), 0);
402 	if (!iova)
403 		return 0;
404 
405 	dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
406 		iova->pfn_hi);
407 
408 	iova_addr = iova->pfn_lo;
409 	for_each_sg(sglist, sg, nents, i) {
410 		phys_addr_t iova_pa;
411 		int ret;
412 
413 		iova_pa = PFN_PHYS(iova_addr);
414 		dev_dbg(dev, "mapping entry %d: iova %pap phy %pad size %u\n",
415 			i, &iova_pa, &sg_dma_address(sg), sg_dma_len(sg));
416 
417 		ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
418 				   sg_dma_address(sg),
419 				   PAGE_ALIGN(sg_dma_len(sg)));
420 		if (ret)
421 			goto out_fail;
422 
423 		sg_dma_address(sg) = PFN_PHYS(iova_addr);
424 
425 		iova_addr += PFN_UP(sg_dma_len(sg));
426 	}
427 
428 	dev_dbg(dev, "dmamap %d ents %zu pages mapped\n", nents, npages);
429 
430 	return nents;
431 
432 out_fail:
433 	ipu6_dma_unmap_sg(sys, sglist, i, dir, attrs);
434 
435 	return 0;
436 }
437 EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sg, "INTEL_IPU6");
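/*
 * Example (hypothetical sketch): the scatterlist handed to ipu6_dma_map_sg()
 * must already carry PCI DMA addresses in sg_dma_address() (e.g. from a
 * prior DMA mapping against the parent PCI device); on success those fields
 * are rewritten to IPU IOVAs.  "adev" and "sgt" are illustrative only.
 *
 *	int mapped;
 *
 *	mapped = ipu6_dma_map_sg(adev, sgt->sgl, sgt->nents,
 *				 DMA_BIDIRECTIONAL, 0);
 *	if (mapped <= 0)
 *		return -ENOMEM;
 *	...
 *	ipu6_dma_unmap_sg(adev, sgt->sgl, mapped, DMA_BIDIRECTIONAL, 0);
 */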
438 
439 int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
440 			 enum dma_data_direction dir, unsigned long attrs)
441 {
442 	int nents;
443 
444 	nents = ipu6_dma_map_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
445 	if (nents <= 0)
446 		return nents ? nents : -ENOMEM;
447 
448 	sgt->nents = nents;
449 
450 	return 0;
451 }
452 EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sgtable, "INTEL_IPU6");
453 
454 void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
455 			    enum dma_data_direction dir, unsigned long attrs)
456 {
457 	ipu6_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
458 }
459 EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, "INTEL_IPU6");
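/*
 * Example (hypothetical sketch): the sg_table wrappers above follow the
 * usual 0-on-success convention.  "adev" and "sgt" are assumed to come from
 * the caller, with sgt already DMA-mapped for the parent PCI device.
 *
 *	ret = ipu6_dma_map_sgtable(adev, sgt, DMA_TO_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	ipu6_dma_sync_sgtable(adev, sgt);
 *	...
 *	ipu6_dma_unmap_sgtable(adev, sgt, DMA_TO_DEVICE, 0);
 */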
460 
461 /*
462  * Create scatter-list for the already allocated DMA buffer
463  */
464 int ipu6_dma_get_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
465 			 void *cpu_addr, dma_addr_t handle, size_t size,
466 			 unsigned long attrs)
467 {
468 	struct device *dev = &sys->auxdev.dev;
469 	struct ipu6_mmu *mmu = sys->mmu;
470 	struct vm_info *info;
471 	int n_pages;
472 	int ret = 0;
473 
474 	info = get_vm_info(mmu, handle);
475 	if (!info)
476 		return -EFAULT;
477 
478 	if (!info->vaddr)
479 		return -EFAULT;
480 
481 	if (WARN_ON(!info->pages))
482 		return -ENOMEM;
483 
484 	n_pages = PFN_UP(size);
485 
486 	ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
487 					GFP_KERNEL);
488 	if (ret)
489 		dev_warn(dev, "get sgt table failed\n");
490 
491 	return ret;
492 }
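/*
 * Example (hypothetical sketch): describing an existing ipu6_dma_alloc()
 * buffer with a scatterlist, e.g. before exporting it.  "adev", "vaddr",
 * "iova" and "size" are the values recorded at allocation time.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = ipu6_dma_get_sgtable(adev, &sgt, vaddr, iova, size, 0);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */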
493