// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */
#include <asm/barrier.h>

#include <linux/align.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "ipu6.h"
#include "ipu6-dma.h"
#include "ipu6-mmu.h"
#include "ipu6-platform-regs.h"

#define ISP_PAGE_SHIFT		12
#define ISP_PAGE_SIZE		BIT(ISP_PAGE_SHIFT)
#define ISP_PAGE_MASK		(~(ISP_PAGE_SIZE - 1))

#define ISP_L1PT_SHIFT		22
#define ISP_L1PT_MASK		(~((1U << ISP_L1PT_SHIFT) - 1))

#define ISP_L2PT_SHIFT		12
#define ISP_L2PT_MASK		(~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))

#define ISP_L1PT_PTES           1024
#define ISP_L2PT_PTES           1024

#define ISP_PADDR_SHIFT		12

#define REG_TLB_INVALIDATE	0x0000

#define REG_L1_PHYS		0x0004	/* 27-bit pfn */
#define REG_INFO		0x0008

#define TBL_PHYS_ADDR(a)	((phys_addr_t)(a) << ISP_PADDR_SHIFT)

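/*
 * The IPU6 MMU uses a two-level page table for 32-bit IOVAs: bits 31..22
 * index the L1 table, bits 21..12 index an L2 table and bits 11..0 are
 * the offset within a 4 KiB page. Both tables hold 1024 32-bit entries,
 * each storing a physical page frame number (physical address >>
 * ISP_PADDR_SHIFT).
 *
 * For example, iova 0x12345678 decomposes as:
 *   l1_idx = iova >> ISP_L1PT_SHIFT                   = 0x048
 *   l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT = 0x345
 *   offset = iova & ~ISP_PAGE_MASK                    = 0x678
 */
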
static void tlb_invalidate(struct ipu6_mmu *mmu)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	if (!mmu->ready) {
		spin_unlock_irqrestore(&mmu->ready_lock, flags);
		return;
	}

	for (i = 0; i < mmu->nr_mmus; i++) {
		/*
		 * To avoid a HW bug induced deadlock in some of the IPU6
		 * MMUs on successive invalidate calls, first read the page
		 * table base before writing the invalidate register. MMUs
		 * which need this workaround have the
		 * insert_read_before_invalidate flag set to true.
		 * Disregard the return value of the read.
		 */
		if (mmu->mmu_hw[i].insert_read_before_invalidate)
			readl(mmu->mmu_hw[i].base + REG_L1_PHYS);

		writel(0xffffffff, mmu->mmu_hw[i].base +
		       REG_TLB_INVALIDATE);
		/*
		 * The TLB invalidation is a "single cycle" (IOMMU clock
		 * cycles) operation. wmb() ensures the MMIO write reaching
		 * the IPU6 TLB invalidate register is ordered before any
		 * subsequent CPU update of the IOMMU page table.
		 */
		wmb();
	}
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}

#ifdef DEBUG
static void page_table_dump(struct ipu6_mmu_info *mmu_info)
{
	u32 l1_idx;

	dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n");

	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
		u32 l2_idx;
		u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;
		phys_addr_t l2_phys;

		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval)
			continue;

		l2_phys = TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]);
		dev_dbg(mmu_info->dev,
			"l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pap\n",
			l1_idx, iova, iova + ISP_PAGE_SIZE, &l2_phys);

		for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
			u32 *l2_pt = mmu_info->l2_pts[l1_idx];
			u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);

			if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval)
				continue;

			dev_dbg(mmu_info->dev,
				"\tl2 entry %u; iova 0x%8.8x, phys %pa\n",
				l2_idx, iova2,
				TBL_PHYS_ADDR(l2_pt[l2_idx]));
		}
	}

	dev_dbg(mmu_info->dev, "end IOMMU page table dump\n");
}
#endif /* DEBUG */

static dma_addr_t map_single(struct ipu6_mmu_info *mmu_info, void *ptr)
{
	dma_addr_t dma;

	dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmu_info->dev, dma))
		return 0;

	return dma;
}

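/*
 * A single zeroed "dummy" page backs every address that is not
 * explicitly mapped: its page frame number (dummy_page_pteval) is what
 * L2 entries are initialised to and reset to when a mapping is torn
 * down in l2_unmap().
 */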
static int get_dummy_page(struct ipu6_mmu_info *mmu_info)
{
	void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;

	if (!pt)
		return -ENOMEM;

	dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt);

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map dummy page\n");
		goto err_free_page;
	}

	mmu_info->dummy_page = pt;
	mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT;

	return 0;

err_free_page:
	free_page((unsigned long)pt);
	return -ENOMEM;
}

static void free_dummy_page(struct ipu6_mmu_info *mmu_info)
{
	dma_unmap_single(mmu_info->dev,
			 TBL_PHYS_ADDR(mmu_info->dummy_page_pteval),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_page);
}

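/*
 * The dummy L2 page table plays the same role one level up: L1 entries
 * that have no real L2 table behind them hold dummy_l2_pteval, and all
 * of the dummy table's entries point at the dummy page.
 */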
static int alloc_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;
	unsigned int i;

	if (!pt)
		return -ENOMEM;

	dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt);

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map l2pt page\n");
		goto err_free_page;
	}

	for (i = 0; i < ISP_L2PT_PTES; i++)
		pt[i] = mmu_info->dummy_page_pteval;

	mmu_info->dummy_l2_pt = pt;
	mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT;

	return 0;

err_free_page:
	free_page((unsigned long)pt);
	return -ENOMEM;
}

static void free_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
{
	dma_unmap_single(mmu_info->dev,
			 TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
}

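/*
 * The L1 page table is a single page whose 1024 entries initially all
 * point at the dummy L2 table. Its DMA'd page frame number (l1_pt_dma)
 * is what ipu6_mmu_hw_init() later writes to REG_L1_PHYS of every MMU
 * HW unit.
 */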
static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	dma_addr_t dma;
	unsigned int i;

	if (!pt)
		return NULL;

	dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt);

	for (i = 0; i < ISP_L1PT_PTES; i++)
		pt[i] = mmu_info->dummy_l2_pteval;

	dma = map_single(mmu_info, pt);
	if (!dma) {
		dev_err(mmu_info->dev, "Failed to map l1pt page\n");
		goto err_free_page;
	}

	mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
	dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma);

	return pt;

err_free_page:
	free_page((unsigned long)pt);
	return NULL;
}

static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info)
{
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	unsigned int i;

	if (!pt)
		return NULL;

	dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt);

	for (i = 0; i < ISP_L2PT_PTES; i++)
		pt[i] = mmu_info->dummy_page_pteval;

	return pt;
}

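/*
 * Tear down [iova, iova + size): walk the covered L1 entries and point
 * each covered L2 entry back at the dummy page, then flush the modified
 * L2 entries from the CPU cache. L2 tables themselves are kept for
 * reuse; they are only freed in ipu6_mmu_destroy().
 */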
static void l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		     phys_addr_t dummy, size_t size)
{
	unsigned int l2_entries;
	unsigned int l2_idx;
	unsigned long flags;
	u32 l1_idx;
	u32 *l2_pt;

	spin_lock_irqsave(&mmu_info->lock, flags);
	for (l1_idx = iova >> ISP_L1PT_SHIFT;
	     size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) {
		dev_dbg(mmu_info->dev,
			"unmapping l2 pgtable (l1 index %u (iova 0x%8.8lx))\n",
			l1_idx, iova);

		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
			dev_err(mmu_info->dev,
				"unmap not mapped iova 0x%8.8lx l1 index %u\n",
				iova, l1_idx);
			continue;
		}
		l2_pt = mmu_info->l2_pts[l1_idx];

		l2_entries = 0;
		for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
		     size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) {
			phys_addr_t pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);

			dev_dbg(mmu_info->dev,
				"unmap l2 index %u with pteval 0x%p\n",
				l2_idx, &pteval);
			l2_pt[l2_idx] = mmu_info->dummy_page_pteval;

			iova += ISP_PAGE_SIZE;
			size -= ISP_PAGE_SIZE;

			l2_entries++;
		}

		WARN_ON_ONCE(!l2_entries);
		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
				    sizeof(l2_pt[0]) * l2_entries);
	}

	WARN_ON_ONCE(size);
	spin_unlock_irqrestore(&mmu_info->lock, flags);
}

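/*
 * Map [iova, iova + size) to paddr one 4 KiB page at a time. An L2
 * table is allocated and hooked into the L1 table on demand the first
 * time its L1 slot is used; the touched L1 and L2 entries are flushed
 * from the CPU cache so the hardware sees them. On failure, everything
 * mapped so far is unrolled via l2_unmap().
 */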
static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		  phys_addr_t paddr, size_t size)
{
	struct device *dev = mmu_info->dev;
	unsigned int l2_entries;
	u32 *l2_pt, *l2_virt;
	unsigned int l2_idx;
	unsigned long flags;
	size_t mapped = 0;
	dma_addr_t dma;
	u32 l1_entry;
	u32 l1_idx;
	int err = 0;

	spin_lock_irqsave(&mmu_info->lock, flags);

	paddr = ALIGN(paddr, ISP_PAGE_SIZE);
	for (l1_idx = iova >> ISP_L1PT_SHIFT;
	     size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) {
		dev_dbg(dev,
			"mapping l2 page table for l1 index %u (iova %8.8x)\n",
			l1_idx, (u32)iova);

		l1_entry = mmu_info->l1_pt[l1_idx];
		if (l1_entry == mmu_info->dummy_l2_pteval) {
			l2_virt = mmu_info->l2_pts[l1_idx];
			if (likely(!l2_virt)) {
				l2_virt = alloc_l2_pt(mmu_info);
				if (!l2_virt) {
					err = -ENOMEM;
					goto error;
				}
			}

			dma = map_single(mmu_info, l2_virt);
			if (!dma) {
				dev_err(dev, "Failed to map l2pt page\n");
				free_page((unsigned long)l2_virt);
				err = -EINVAL;
				goto error;
			}

			l1_entry = dma >> ISP_PADDR_SHIFT;

			dev_dbg(dev, "page for l1_idx %u %p allocated\n",
				l1_idx, l2_virt);
			mmu_info->l1_pt[l1_idx] = l1_entry;
			mmu_info->l2_pts[l1_idx] = l2_virt;

			clflush_cache_range(&mmu_info->l1_pt[l1_idx],
					    sizeof(mmu_info->l1_pt[l1_idx]));
		}

		l2_pt = mmu_info->l2_pts[l1_idx];
		l2_entries = 0;

		for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
		     size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) {
			l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;

			dev_dbg(dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
				l2_pt[l2_idx]);

			iova += ISP_PAGE_SIZE;
			paddr += ISP_PAGE_SIZE;
			mapped += ISP_PAGE_SIZE;
			size -= ISP_PAGE_SIZE;

			l2_entries++;
		}

		WARN_ON_ONCE(!l2_entries);
		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
				    sizeof(l2_pt[0]) * l2_entries);
	}

	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return 0;

error:
	spin_unlock_irqrestore(&mmu_info->lock, flags);
	/* unroll mapping in case something went wrong */
	if (mapped)
		l2_unmap(mmu_info, iova - mapped, paddr - mapped, mapped);

	return err;
}

static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
			  phys_addr_t paddr, size_t size)
{
	u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
	u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);

	dev_dbg(mmu_info->dev,
		"mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n",
		iova_start, iova_end, size, &paddr);

	return l2_map(mmu_info, iova_start, paddr, size);
}

static void __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
			     unsigned long iova, size_t size)
{
	l2_unmap(mmu_info, iova, 0, size);
}

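/*
 * Reserve an IPU6_MMUV2_TRASH_RANGE (8 MB) IOVA region and map every
 * page of it to the single trash page allocated just before this call
 * in ipu6_mmu_hw_init(). As the name suggests, this gives device
 * accesses to stale IOVAs a harmless landing place.
 */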
static int allocate_trash_buffer(struct ipu6_mmu *mmu)
{
	unsigned int n_pages = PFN_UP(IPU6_MMUV2_TRASH_RANGE);
	struct iova *iova;
	unsigned int i;
	dma_addr_t dma;
	unsigned long iova_addr;
	int ret;

	/* Allocate 8MB in iova range */
	iova = alloc_iova(&mmu->dmap->iovad, n_pages,
			  PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
	if (!iova) {
		dev_err(mmu->dev, "cannot allocate iova range for trash\n");
		return -ENOMEM;
	}

	dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
			   PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
		dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
		ret = -ENOMEM;
		goto out_free_iova;
	}

	mmu->pci_trash_page = dma;

	/*
	 * Map the 8MB iova address range to the same physical trash page
	 * mmu->trash_page which is already reserved at the probe
	 */
	iova_addr = iova->pfn_lo;
	for (i = 0; i < n_pages; i++) {
		ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
				   mmu->pci_trash_page, PAGE_SIZE);
		if (ret) {
			dev_err(mmu->dev,
				"mapping trash buffer range failed\n");
			goto out_unmap;
		}

		iova_addr++;
	}

	mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo);
	dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
		mmu->mmid, (unsigned int)mmu->iova_trash_page);
	return 0;

out_unmap:
	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
		       PFN_PHYS(iova_size(iova)));
	dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_iova:
	__free_iova(&mmu->dmap->iovad, iova);
	return ret;
}

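/*
 * Program each MMU HW unit with the shared L1 page table base
 * (REG_L1_PHYS), its info bits (REG_INFO) and the block start addresses
 * of its L1 and L2 TLB streams. The trash buffer is set up on the first
 * call, and setting "ready" finally arms tlb_invalidate().
 */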
int ipu6_mmu_hw_init(struct ipu6_mmu *mmu)
{
	struct ipu6_mmu_info *mmu_info;
	unsigned long flags;
	unsigned int i;

	mmu_info = mmu->dmap->mmu_info;

	/* Initialise each MMU HW block */
	for (i = 0; i < mmu->nr_mmus; i++) {
		struct ipu6_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
		unsigned int j;
		u16 block_addr;

		/* Write page table address per MMU */
		writel((phys_addr_t)mmu_info->l1_pt_dma,
		       mmu->mmu_hw[i].base + REG_L1_PHYS);

		/* Set info bits per MMU */
		writel(mmu->mmu_hw[i].info_bits,
		       mmu->mmu_hw[i].base + REG_INFO);

		/* Configure MMU TLB stream configuration for L1 */
		for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
		     block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
			if (block_addr > IPU6_MAX_LI_BLOCK_ADDR) {
				dev_err(mmu->dev, "invalid L1 configuration\n");
				return -EINVAL;
			}

			/* Write block start address for each stream */
			writel(block_addr, mmu_hw->base +
			       mmu_hw->l1_stream_id_reg_offset + 4 * j);
		}

		/* Configure MMU TLB stream configuration for L2 */
		for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
		     block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
			if (block_addr > IPU6_MAX_L2_BLOCK_ADDR) {
				dev_err(mmu->dev, "invalid L2 configuration\n");
				return -EINVAL;
			}

			writel(block_addr, mmu_hw->base +
			       mmu_hw->l2_stream_id_reg_offset + 4 * j);
		}
	}

	if (!mmu->trash_page) {
		int ret;

		mmu->trash_page = alloc_page(GFP_KERNEL);
		if (!mmu->trash_page) {
			dev_err(mmu->dev, "insufficient memory for trash buffer\n");
			return -ENOMEM;
		}

		ret = allocate_trash_buffer(mmu);
		if (ret) {
			__free_page(mmu->trash_page);
			mmu->trash_page = NULL;
			dev_err(mmu->dev, "trash buffer allocation failed\n");
			return ret;
		}
	}

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = true;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, "INTEL_IPU6");

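/*
 * Build the software state for one MMU address space: aperture limits,
 * dummy page, dummy L2 table, the array of per-L1-slot L2 table
 * pointers and the L1 page table itself.
 */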
static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp)
{
	struct ipu6_mmu_info *mmu_info;
	int ret;

	mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
	if (!mmu_info)
		return NULL;

	mmu_info->aperture_start = 0;
	mmu_info->aperture_end =
		(dma_addr_t)DMA_BIT_MASK(isp->secure_mode ?
					 IPU6_MMU_ADDR_BITS :
					 IPU6_MMU_ADDR_BITS_NON_SECURE);
	mmu_info->pgsize_bitmap = SZ_4K;
	mmu_info->dev = &isp->pdev->dev;

	ret = get_dummy_page(mmu_info);
	if (ret)
		goto err_free_info;

	ret = alloc_dummy_l2_pt(mmu_info);
	if (ret)
		goto err_free_dummy_page;

	mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
	if (!mmu_info->l2_pts)
		goto err_free_dummy_l2_pt;

	/*
	 * We always map the L1 page table (a single page), as well as
	 * the L2 page tables.
	 */
	mmu_info->l1_pt = alloc_l1_pt(mmu_info);
	if (!mmu_info->l1_pt)
		goto err_free_l2_pts;

	spin_lock_init(&mmu_info->lock);

	dev_dbg(mmu_info->dev, "domain initialised\n");

	return mmu_info;

err_free_l2_pts:
	vfree(mmu_info->l2_pts);
err_free_dummy_l2_pt:
	free_dummy_l2_pt(mmu_info);
err_free_dummy_page:
	free_dummy_page(mmu_info);
err_free_info:
	kfree(mmu_info);

	return NULL;
}

void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = false;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}
EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, "INTEL_IPU6");

static struct ipu6_dma_mapping *alloc_dma_mapping(struct ipu6_device *isp)
{
	struct ipu6_dma_mapping *dmap;

	dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
	if (!dmap)
		return NULL;

	dmap->mmu_info = ipu6_mmu_alloc(isp);
	if (!dmap->mmu_info) {
		kfree(dmap);
		return NULL;
	}

	init_iova_domain(&dmap->iovad, SZ_4K, 1);
	dmap->mmu_info->dmap = dmap;

	dev_dbg(&isp->pdev->dev, "alloc mapping\n");

	iova_cache_get();

	return dmap;
}

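/*
 * Software page table walk mirroring what the hardware does: the L1
 * index selects an L2 table, the L2 index selects a PTE, and the stored
 * page frame number is shifted back up by ISP_PAGE_SHIFT.
 */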
phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
				  dma_addr_t iova)
{
	phys_addr_t phy_addr;
	unsigned long flags;
	u32 *l2_pt;

	spin_lock_irqsave(&mmu_info->lock, flags);
	l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
	phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
	phy_addr <<= ISP_PAGE_SHIFT;
	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return phy_addr;
}

void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		    size_t size)
{
	unsigned int min_pagesz;

	dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size);

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * The virtual address and the size of the mapping must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware.
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
			iova, size, min_pagesz);
		return;
	}

	__ipu6_mmu_unmap(mmu_info, iova, size);
}

int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	unsigned int min_pagesz;

	if (mmu_info->pgsize_bitmap == 0UL)
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova %lx pa %pa size %zx min_pagesz %x\n",
			iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	return __ipu6_mmu_map(mmu_info, iova, paddr, size);
}

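/*
 * Undo ipu6_mmu_alloc() and allocate_trash_buffer(): release the trash
 * buffer IOVA and DMA mapping, unmap and free every real L2 table still
 * referenced from the L1 table, then free the dummy page, the dummy L2
 * table and the L1 table.
 */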
static void ipu6_mmu_destroy(struct ipu6_mmu *mmu)
{
	struct ipu6_dma_mapping *dmap = mmu->dmap;
	struct ipu6_mmu_info *mmu_info = dmap->mmu_info;
	struct iova *iova;
	u32 l1_idx;

	if (mmu->iova_trash_page) {
		iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
		if (iova) {
			/* unmap and free the trash buffer iova */
			ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),
				       PFN_PHYS(iova_size(iova)));
			__free_iova(&dmap->iovad, iova);
		} else {
			dev_err(mmu->dev, "trash buffer iova not found.\n");
		}

		mmu->iova_trash_page = 0;
		dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		mmu->pci_trash_page = 0;
		__free_page(mmu->trash_page);
	}

	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
		if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
			dma_unmap_single(mmu_info->dev,
					 TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
					 PAGE_SIZE, DMA_BIDIRECTIONAL);
			free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
		}
	}

	vfree(mmu_info->l2_pts);
	free_dummy_page(mmu_info);
	dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
	free_page((unsigned long)mmu_info->l1_pt);
	kfree(mmu_info);
}

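/*
 * Create the ipu6_mmu instance for one hardware variant: copy the MMU
 * HW descriptors while rebasing their register offsets onto the given
 * MMIO base, then set up the IOVA domain and page tables through
 * alloc_dma_mapping().
 */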
struct ipu6_mmu *ipu6_mmu_init(struct device *dev,
			       void __iomem *base, int mmid,
			       const struct ipu6_hw_variants *hw)
{
	struct ipu6_device *isp = pci_get_drvdata(to_pci_dev(dev));
	struct ipu6_mmu_pdata *pdata;
	struct ipu6_mmu *mmu;
	unsigned int i;

	if (hw->nr_mmus > IPU6_MMU_MAX_DEVICES)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < hw->nr_mmus; i++) {
		struct ipu6_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
		const struct ipu6_mmu_hw *src_mmu = &hw->mmu_hw[i];

		if (src_mmu->nr_l1streams > IPU6_MMU_MAX_TLB_L1_STREAMS ||
		    src_mmu->nr_l2streams > IPU6_MMU_MAX_TLB_L2_STREAMS)
			return ERR_PTR(-EINVAL);

		*pdata_mmu = *src_mmu;
		pdata_mmu->base = base + src_mmu->offset;
	}

	mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->mmid = mmid;
	mmu->mmu_hw = pdata->mmu_hw;
	mmu->nr_mmus = hw->nr_mmus;
	mmu->tlb_invalidate = tlb_invalidate;
	mmu->ready = false;
	INIT_LIST_HEAD(&mmu->vma_list);
	spin_lock_init(&mmu->ready_lock);

	mmu->dmap = alloc_dma_mapping(isp);
	if (!mmu->dmap) {
		dev_err(dev, "can't alloc dma mapping\n");
		return ERR_PTR(-ENOMEM);
	}

	return mmu;
}

void ipu6_mmu_cleanup(struct ipu6_mmu *mmu)
{
	struct ipu6_dma_mapping *dmap = mmu->dmap;

	ipu6_mmu_destroy(mmu);
	mmu->dmap = NULL;
	iova_cache_put();
	put_iova_domain(&dmap->iovad);
	kfree(dmap);
}