// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 **************************************************************************/

#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include "mmu.h"
#include "psb_drv.h"
#include "psb_reg.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system, so issuing it on the local CPU is sufficient.
 */

/*
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock, and
 * no other functions that may be using the locks for other purposes may be
 * called from within the locked region.
 * Since the slots are per processor, this will guarantee that we are the only
 * user.
 */

/*
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault in
 * needed pages. For that, we need to make an atomic insert_pages function that
 * may fail.
 * If it fails, the caller needs to insert the page using a workqueue function,
 * but on average it should be fast.
 */

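/*
 * The SGX MMU uses a two-level layout: a single page of 32-bit PDEs, each
 * pointing at a page table that is itself a single page of 32-bit PTEs
 * (PAGE_SIZE / sizeof(uint32_t) entries per level, i.e. 1024 with 4 KiB
 * pages). The helpers below extract the page-table and page-directory
 * index from a GPU virtual address.
 */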
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}

static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}

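/*
 * Pulse the _PSB_CB_CTRL_INVALDC bit in the BIF control register if a TLB
 * flush is pending (or @force is set), and flag the MSVDX MMU so it knows
 * its mappings may be stale. Caller must hold driver->sem for writing.
 */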
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

		/* Make sure data cache is turned off before enabling it */
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}

#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
#endif

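/*
 * Flush the SGX MMU: pulse either the invalidate bit (when a page-directory
 * reload is pending) or the flush bit in the BIF control register, clear
 * the pending-flush state and flag the MSVDX MMU. Takes driver->sem itself,
 * so do not call this with the semaphore already held.
 */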
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t val;

	down_write(&driver->sem);
	val = PSB_RSGX32(PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	else
		PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

	/* Make sure data cache is turned off and MMU is flushed before
	   restoring bank interface control register */
	wmb();
	PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		   PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	atomic_set(&driver->needs_tlbflush, 0);
	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);
	up_write(&driver->sem);
}

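/*
 * Bind @pd to hardware context @hw_context by writing the page-directory
 * page address into the matching BIF directory-list base register, then
 * force a directory flush so the hardware picks it up.
 */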
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	struct drm_device *dev = pd->driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	down_write(&pd->driver->sem);
	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}

static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
	return (addr < end) ? addr : end;
}

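/*
 * Build a PTE value from a page frame number and the PSB_MMU_*_MEMORY type
 * flags. The entry is always marked valid; cached, read-only and write-only
 * attributes are added on request.
 */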
static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}

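/*
 * Allocate a page directory. Unless @trap_pagefaults is set, unused PDEs
 * point at a dummy page table and unused PTEs at a dummy page (both built
 * with @invalid_type), so stray GPU accesses hit a harmless page instead of
 * faulting. The directory is not bound to a hardware context until
 * psb_mmu_set_pd_context() is called.
 */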
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

	if (!trap_pagefaults) {
		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
						   invalid_type);
		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
						   invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	v = kmap_local_page(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pte;

	kunmap_local(v);

	v = kmap_local_page(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pde;

	kunmap_local(v);

	clear_page(kmap(pd->dummy_page));
	kunmap(pd->dummy_page);

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}

static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}

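/*
 * Tear down a page directory: detach it from its hardware context if it is
 * bound, free every remaining page table, the dummy pages and the directory
 * page itself.
 */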
void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}

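/*
 * Allocate and initialize a page table. Every entry starts out as
 * pd->invalid_pte, and the fresh table is clflushed when the directory is
 * already live on a hardware context so the GPU never sees stale data.
 */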
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
	kunmap_atomic(v);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}

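/*
 * Return the page table covering @addr, allocating one and hooking it into
 * the directory if none exists yet. On success the table page is mapped at
 * pt->v and the driver spinlock is held; release both with
 * psb_mmu_pt_unmap_unlock(). Returns NULL (unlocked) on allocation failure.
 */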
static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
						    unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;
		kunmap_atomic((void *) v);

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

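/*
 * Like psb_mmu_pt_alloc_map_lock(), but never allocates: returns NULL with
 * the spinlock released when no page table exists for @addr.
 */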
static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

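/*
 * Undo psb_mmu_pt_map_lock()/psb_mmu_pt_alloc_map_lock(): unmap the table
 * and drop the spinlock. If the table no longer maps any pages it is
 * unhooked from the directory and freed.
 */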
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	kunmap_atomic(pt->v);
	if (pt->count == 0) {
		v = kmap_atomic(pd->p);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic(v);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	down_read(&driver->sem);
	pd = driver->default_pd;
	up_read(&driver->sem);

	return pd;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}

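/*
 * Allocate and set up the MMU driver: create the default page directory,
 * pulse the _PSB_CB_CTRL_CLEAR_FAULT bit in the BIF control register and,
 * when the CPU supports clflush, work out the cache-line stride used for
 * flushing PTEs.
 */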
struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
					   int trap_pagefaults,
					   int invalid_type,
					   atomic_t *msvdx_mmu_invaldc)
{
	struct psb_mmu_driver *driver;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	driver = kmalloc(sizeof(*driver), GFP_KERNEL);

	if (!driver)
		return NULL;

	driver->dev = dev;
	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;

	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	down_write(&driver->sem);
	atomic_set(&driver->needs_tlbflush, 1);
	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

	driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);

	driver->has_clflush = 0;

	if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;

		/*
		 * clflush size is determined at kernel setup for x86_64 but not
		 * for i386. We have to do it here.
		 */

		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}

	up_write(&driver->sem);
	return driver;

out_err1:
	kfree(driver);
	return NULL;
}

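/*
 * clflush the cache lines holding the PTEs for an (optionally tiled)
 * address range so the GPU sees the updated entries. A no-op when the CPU
 * has no clflush support.
 */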
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush)
		return;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}

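/*
 * Unmap @num_pages linearly mapped pages starting at @address: reset the
 * PTEs to the invalid entry and drop the per-table use counts (empty tables
 * are freed on unlock). Caches and the MMU TLB are flushed when the
 * directory is bound to a hardware context.
 */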
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

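/*
 * Tiled variant of psb_mmu_remove_pfn_sequence(): unmap rows of
 * @desired_tile_stride pages, stepping @hw_tile_stride pages between rows.
 * With @hw_tile_stride == 0 the whole range is treated as one linear row.
 */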
void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

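/*
 * Map a physically contiguous range of @num_pages pages starting at
 * @start_pfn into the GPU address space at @address, using the PSB_MMU_*
 * type flags in @type. Returns -ENOMEM if a page table cannot be allocated;
 * entries written before the failure are not rolled back.
 */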
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);
	ret = 0;

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

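/*
 * Map an array of pages at @address, optionally in a tiled layout: with a
 * non-zero @hw_tile_stride, @num_pages must be a whole number of rows of
 * @desired_tile_stride pages, each row starting @hw_tile_stride pages after
 * the previous one. Returns -EINVAL on a bad stride and -ENOMEM if a page
 * table cannot be allocated (already written entries are not rolled back).
 */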
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
			 int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}

	ret = 0;
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

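/*
 * Translate GPU virtual address @virtual into the pfn that currently backs
 * it. If no page table exists, the dummy-page pfn is returned, provided
 * invalid entries are set up as valid mappings; otherwise -EINVAL.
 */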
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v);
		spin_unlock(lock);

		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}