xref: /linux/drivers/gpu/drm/gma500/mmu.c (revision 36ec807b627b4c0a0a382f0ae48eac7187d14b2b)
1a61127c2SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
28c8f1c95SAlan Cox /**************************************************************************
38c8f1c95SAlan Cox  * Copyright (c) 2007, Intel Corporation.
48c8f1c95SAlan Cox  *
58c8f1c95SAlan Cox  **************************************************************************/
60c7b178aSSam Ravnborg 
70c7b178aSSam Ravnborg #include <linux/highmem.h>
8*0069455bSKent Overstreet #include <linux/vmalloc.h>
90c7b178aSSam Ravnborg 
100c7b178aSSam Ravnborg #include "mmu.h"
118c8f1c95SAlan Cox #include "psb_drv.h"
128c8f1c95SAlan Cox #include "psb_reg.h"
138c8f1c95SAlan Cox 
148c8f1c95SAlan Cox /*
158c8f1c95SAlan Cox  * Code for the SGX MMU:
168c8f1c95SAlan Cox  */
178c8f1c95SAlan Cox 
188c8f1c95SAlan Cox /*
198c8f1c95SAlan Cox  * clflush on one processor only:
208c8f1c95SAlan Cox  * clflush should apparently flush the cache line on all processors in an
218c8f1c95SAlan Cox  * SMP system.
228c8f1c95SAlan Cox  */
238c8f1c95SAlan Cox 
248c8f1c95SAlan Cox /*
258c8f1c95SAlan Cox  * kmap atomic:
268c8f1c95SAlan Cox  * The usage of the slots must be completely encapsulated within a spinlock, and
 * no other functions that may be using the locks for other purposes may be
288c8f1c95SAlan Cox  * called from within the locked region.
298c8f1c95SAlan Cox  * Since the slots are per processor, this will guarantee that we are the only
308c8f1c95SAlan Cox  * user.
318c8f1c95SAlan Cox  */
328c8f1c95SAlan Cox 
338c8f1c95SAlan Cox /*
348c8f1c95SAlan Cox  * TODO: Inserting ptes from an interrupt handler:
358c8f1c95SAlan Cox  * This may be desirable for some SGX functionality where the GPU can fault in
368c8f1c95SAlan Cox  * needed pages. For that, we need to make an atomic insert_pages function, that
378c8f1c95SAlan Cox  * may fail.
388c8f1c95SAlan Cox  * If it fails, the caller need to insert the page using a workqueue function,
398c8f1c95SAlan Cox  * but on average it should be fast.
408c8f1c95SAlan Cox  */
418c8f1c95SAlan Cox 
428c8f1c95SAlan Cox static inline uint32_t psb_mmu_pt_index(uint32_t offset)
438c8f1c95SAlan Cox {
448c8f1c95SAlan Cox 	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
458c8f1c95SAlan Cox }
468c8f1c95SAlan Cox 
478c8f1c95SAlan Cox static inline uint32_t psb_mmu_pd_index(uint32_t offset)
488c8f1c95SAlan Cox {
498c8f1c95SAlan Cox 	return offset >> PSB_PDE_SHIFT;
508c8f1c95SAlan Cox }
518c8f1c95SAlan Cox 
/* Flush the CPU cache line containing @addr (x86 CLFLUSH instruction). */
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}
568c8f1c95SAlan Cox 
57b219372dSPatrik Jakobsson static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
588c8f1c95SAlan Cox {
598c8f1c95SAlan Cox 	if (!driver->has_clflush)
608c8f1c95SAlan Cox 		return;
618c8f1c95SAlan Cox 
628c8f1c95SAlan Cox 	mb();
638c8f1c95SAlan Cox 	psb_clflush(addr);
648c8f1c95SAlan Cox 	mb();
658c8f1c95SAlan Cox }
66b219372dSPatrik Jakobsson 
/*
 * Invalidate the SGX MMU directory cache if a TLB flush is pending (or
 * @force is set), then clear the pending flag.  Caller must hold
 * driver->sem in write mode.
 */
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	struct drm_device *dev = driver->dev;
	/* dev_priv is presumably referenced by the PSB_RSGX32/PSB_WSGX32
	 * macros below — it is otherwise unused here. */
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

		/* Make sure data cache is turned off before enabling it */
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		/* Read back — presumably to post the register write. */
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}
858c8f1c95SAlan Cox 
#if 0
/* Currently unused: psb_mmu_flush_pd_locked() with the semaphore taken. */
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
#endif
948c8f1c95SAlan Cox 
/*
 * Flush the SGX MMU: if a TLB flush is pending, invalidate the directory
 * cache; otherwise just flush the MMU data cache.  Afterwards restore
 * PSB_CR_BIF_CTRL, clear the pending flag and, when present, signal the
 * MSVDX MMU to invalidate as well.
 */
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t val;

	down_write(&driver->sem);
	val = PSB_RSGX32(PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	else
		PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

	/* Make sure data cache is turned off and MMU is flushed before
	   restoring bank interface control register */
	wmb();
	PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		   PSB_CR_BIF_CTRL);
	/* Read back — presumably to post the register write. */
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	atomic_set(&driver->needs_tlbflush, 0);
	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);
	up_write(&driver->sem);
}
1208c8f1c95SAlan Cox 
/*
 * Bind page directory @pd to hardware context @hw_context by writing the
 * directory page's physical address into the corresponding
 * PSB_CR_BIF_DIR_LIST_BASE register, then force a directory-cache flush.
 */
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	struct drm_device *dev = pd->driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	/* Context 0 uses BASE0; contexts >= 1 index off BASE1. */
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	down_write(&pd->driver->sem);
	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);

}
1368c8f1c95SAlan Cox 
1378c8f1c95SAlan Cox static inline unsigned long psb_pd_addr_end(unsigned long addr,
1388c8f1c95SAlan Cox 					    unsigned long end)
1398c8f1c95SAlan Cox {
1408c8f1c95SAlan Cox 	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
1418c8f1c95SAlan Cox 	return (addr < end) ? addr : end;
1428c8f1c95SAlan Cox }
1438c8f1c95SAlan Cox 
1448c8f1c95SAlan Cox static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
1458c8f1c95SAlan Cox {
1468c8f1c95SAlan Cox 	uint32_t mask = PSB_PTE_VALID;
1478c8f1c95SAlan Cox 
1488c8f1c95SAlan Cox 	if (type & PSB_MMU_CACHED_MEMORY)
1498c8f1c95SAlan Cox 		mask |= PSB_PTE_CACHED;
1508c8f1c95SAlan Cox 	if (type & PSB_MMU_RO_MEMORY)
1518c8f1c95SAlan Cox 		mask |= PSB_PTE_RO;
1528c8f1c95SAlan Cox 	if (type & PSB_MMU_WO_MEMORY)
1538c8f1c95SAlan Cox 		mask |= PSB_PTE_WO;
1548c8f1c95SAlan Cox 
1558c8f1c95SAlan Cox 	return (pfn << PAGE_SHIFT) | mask;
1568c8f1c95SAlan Cox }
1578c8f1c95SAlan Cox 
1588c8f1c95SAlan Cox struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
1598c8f1c95SAlan Cox 				    int trap_pagefaults, int invalid_type)
1608c8f1c95SAlan Cox {
1618c8f1c95SAlan Cox 	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
1628c8f1c95SAlan Cox 	uint32_t *v;
1638c8f1c95SAlan Cox 	int i;
1648c8f1c95SAlan Cox 
1658c8f1c95SAlan Cox 	if (!pd)
1668c8f1c95SAlan Cox 		return NULL;
1678c8f1c95SAlan Cox 
1688c8f1c95SAlan Cox 	pd->p = alloc_page(GFP_DMA32);
1698c8f1c95SAlan Cox 	if (!pd->p)
1708c8f1c95SAlan Cox 		goto out_err1;
1718c8f1c95SAlan Cox 	pd->dummy_pt = alloc_page(GFP_DMA32);
1728c8f1c95SAlan Cox 	if (!pd->dummy_pt)
1738c8f1c95SAlan Cox 		goto out_err2;
1748c8f1c95SAlan Cox 	pd->dummy_page = alloc_page(GFP_DMA32);
1758c8f1c95SAlan Cox 	if (!pd->dummy_page)
1768c8f1c95SAlan Cox 		goto out_err3;
1778c8f1c95SAlan Cox 
1788c8f1c95SAlan Cox 	if (!trap_pagefaults) {
179b219372dSPatrik Jakobsson 		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
1808c8f1c95SAlan Cox 						   invalid_type);
181b219372dSPatrik Jakobsson 		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
1828c8f1c95SAlan Cox 						   invalid_type);
1838c8f1c95SAlan Cox 	} else {
1848c8f1c95SAlan Cox 		pd->invalid_pde = 0;
1858c8f1c95SAlan Cox 		pd->invalid_pte = 0;
1868c8f1c95SAlan Cox 	}
1878c8f1c95SAlan Cox 
1888b250cd3SIra Weiny 	v = kmap_local_page(pd->dummy_pt);
1898c8f1c95SAlan Cox 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
1908c8f1c95SAlan Cox 		v[i] = pd->invalid_pte;
1918c8f1c95SAlan Cox 
1928b250cd3SIra Weiny 	kunmap_local(v);
1938c8f1c95SAlan Cox 
1948b250cd3SIra Weiny 	v = kmap_local_page(pd->p);
1958c8f1c95SAlan Cox 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
1968c8f1c95SAlan Cox 		v[i] = pd->invalid_pde;
1978c8f1c95SAlan Cox 
1988b250cd3SIra Weiny 	kunmap_local(v);
1998c8f1c95SAlan Cox 
2008c8f1c95SAlan Cox 	clear_page(kmap(pd->dummy_page));
2018c8f1c95SAlan Cox 	kunmap(pd->dummy_page);
2028c8f1c95SAlan Cox 
2038c8f1c95SAlan Cox 	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
2048c8f1c95SAlan Cox 	if (!pd->tables)
2058c8f1c95SAlan Cox 		goto out_err4;
2068c8f1c95SAlan Cox 
2078c8f1c95SAlan Cox 	pd->hw_context = -1;
2088c8f1c95SAlan Cox 	pd->pd_mask = PSB_PTE_VALID;
2098c8f1c95SAlan Cox 	pd->driver = driver;
2108c8f1c95SAlan Cox 
2118c8f1c95SAlan Cox 	return pd;
2128c8f1c95SAlan Cox 
2138c8f1c95SAlan Cox out_err4:
2148c8f1c95SAlan Cox 	__free_page(pd->dummy_page);
2158c8f1c95SAlan Cox out_err3:
2168c8f1c95SAlan Cox 	__free_page(pd->dummy_pt);
2178c8f1c95SAlan Cox out_err2:
2188c8f1c95SAlan Cox 	__free_page(pd->p);
2198c8f1c95SAlan Cox out_err1:
2208c8f1c95SAlan Cox 	kfree(pd);
2218c8f1c95SAlan Cox 	return NULL;
2228c8f1c95SAlan Cox }
2238c8f1c95SAlan Cox 
/* Free a page table's backing page and its descriptor. */
static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}
2298c8f1c95SAlan Cox 
/*
 * Tear down a page directory: unbind it from its hardware context (if
 * bound), free all page tables, the dummy pages, the directory page and
 * the descriptor itself.
 */
void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		/* Clear the directory base register for this context. */
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}
2608c8f1c95SAlan Cox 
/*
 * Allocate a page table for @pd with every entry set to the directory's
 * invalid PTE.  If the directory is live on hardware and the CPU has
 * CLFLUSH, the freshly written entries are flushed from the CPU cache
 * while still mapped.  Returns NULL on allocation failure.
 */
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	/* NOTE(review): clflush_add/clflush_count are computed even when
	 * driver->has_clflush is 0, in which case clflush_add may be
	 * uninitialized in the driver struct — confirm against
	 * psb_mmu_driver_init(). */
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	/* kmap_atomic slot usage must stay inside the spinlock (see the
	 * "kmap atomic" note at the top of this file). */
	spin_lock(lock);

	v = kmap_atomic(pt->p);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
	kunmap_atomic(v);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}
3068c8f1c95SAlan Cox 
3075461bdc5SLee Jones static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
3088c8f1c95SAlan Cox 						    unsigned long addr)
3098c8f1c95SAlan Cox {
3108c8f1c95SAlan Cox 	uint32_t index = psb_mmu_pd_index(addr);
3118c8f1c95SAlan Cox 	struct psb_mmu_pt *pt;
3128c8f1c95SAlan Cox 	uint32_t *v;
3138c8f1c95SAlan Cox 	spinlock_t *lock = &pd->driver->lock;
3148c8f1c95SAlan Cox 
3158c8f1c95SAlan Cox 	spin_lock(lock);
3168c8f1c95SAlan Cox 	pt = pd->tables[index];
3178c8f1c95SAlan Cox 	while (!pt) {
3188c8f1c95SAlan Cox 		spin_unlock(lock);
3198c8f1c95SAlan Cox 		pt = psb_mmu_alloc_pt(pd);
3208c8f1c95SAlan Cox 		if (!pt)
3218c8f1c95SAlan Cox 			return NULL;
3228c8f1c95SAlan Cox 		spin_lock(lock);
3238c8f1c95SAlan Cox 
3248c8f1c95SAlan Cox 		if (pd->tables[index]) {
3258c8f1c95SAlan Cox 			spin_unlock(lock);
3268c8f1c95SAlan Cox 			psb_mmu_free_pt(pt);
3278c8f1c95SAlan Cox 			spin_lock(lock);
3288c8f1c95SAlan Cox 			pt = pd->tables[index];
3298c8f1c95SAlan Cox 			continue;
3308c8f1c95SAlan Cox 		}
3318c8f1c95SAlan Cox 
332f0c5b592SCong Wang 		v = kmap_atomic(pd->p);
3338c8f1c95SAlan Cox 		pd->tables[index] = pt;
3348c8f1c95SAlan Cox 		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
3358c8f1c95SAlan Cox 		pt->index = index;
336f0c5b592SCong Wang 		kunmap_atomic((void *) v);
3378c8f1c95SAlan Cox 
3388c8f1c95SAlan Cox 		if (pd->hw_context != -1) {
3398c8f1c95SAlan Cox 			psb_mmu_clflush(pd->driver, (void *)&v[index]);
3408c8f1c95SAlan Cox 			atomic_set(&pd->driver->needs_tlbflush, 1);
3418c8f1c95SAlan Cox 		}
3428c8f1c95SAlan Cox 	}
343f0c5b592SCong Wang 	pt->v = kmap_atomic(pt->p);
3448c8f1c95SAlan Cox 	return pt;
3458c8f1c95SAlan Cox }
3468c8f1c95SAlan Cox 
/*
 * Look up the existing page table covering @addr.  On success the driver
 * spinlock is held and the table page is kmapped into pt->v; the caller
 * must release both via psb_mmu_pt_unmap_unlock().  Returns NULL (lock
 * released) if no table exists for @addr.
 */
static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}
3638c8f1c95SAlan Cox 
/*
 * Counterpart of psb_mmu_pt_map_lock()/psb_mmu_pt_alloc_map_lock():
 * unmap the table page and drop the driver spinlock.  If the table has
 * become empty (count == 0), detach it from the directory — flushing the
 * PDE update for live contexts — and free it.
 */
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	kunmap_atomic(pt->v);
	if (pt->count == 0) {
		v = kmap_atomic(pd->p);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic(v);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}
3868c8f1c95SAlan Cox 
/* Write @pte into the slot for @addr; pt->v must be mapped (see
 * psb_mmu_pt_map_lock()). */
static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}
3928c8f1c95SAlan Cox 
/* Reset the slot for @addr to the directory's invalid PTE; pt->v must be
 * mapped. */
static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}
3988c8f1c95SAlan Cox 
3998c8f1c95SAlan Cox struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
4008c8f1c95SAlan Cox {
4018c8f1c95SAlan Cox 	struct psb_mmu_pd *pd;
4028c8f1c95SAlan Cox 
403b219372dSPatrik Jakobsson 	down_read(&driver->sem);
4048c8f1c95SAlan Cox 	pd = driver->default_pd;
405b219372dSPatrik Jakobsson 	up_read(&driver->sem);
4068c8f1c95SAlan Cox 
4078c8f1c95SAlan Cox 	return pd;
4088c8f1c95SAlan Cox }
4098c8f1c95SAlan Cox 
/*
 * Shut down the MMU driver: restore the bank-interface control register
 * saved at init time, free the default page directory and the driver.
 */
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}
4198c8f1c95SAlan Cox 
420b219372dSPatrik Jakobsson struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
4218c8f1c95SAlan Cox 					   int trap_pagefaults,
4228c8f1c95SAlan Cox 					   int invalid_type,
423b219372dSPatrik Jakobsson 					   atomic_t *msvdx_mmu_invaldc)
4248c8f1c95SAlan Cox {
4258c8f1c95SAlan Cox 	struct psb_mmu_driver *driver;
426f71635e8SThomas Zimmermann 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
4278c8f1c95SAlan Cox 
4288c8f1c95SAlan Cox 	driver = kmalloc(sizeof(*driver), GFP_KERNEL);
4298c8f1c95SAlan Cox 
4308c8f1c95SAlan Cox 	if (!driver)
4318c8f1c95SAlan Cox 		return NULL;
4328c8f1c95SAlan Cox 
433b219372dSPatrik Jakobsson 	driver->dev = dev;
4348c8f1c95SAlan Cox 	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
4358c8f1c95SAlan Cox 					      invalid_type);
4368c8f1c95SAlan Cox 	if (!driver->default_pd)
4378c8f1c95SAlan Cox 		goto out_err1;
4388c8f1c95SAlan Cox 
4398c8f1c95SAlan Cox 	spin_lock_init(&driver->lock);
4408c8f1c95SAlan Cox 	init_rwsem(&driver->sem);
4418c8f1c95SAlan Cox 	down_write(&driver->sem);
4428c8f1c95SAlan Cox 	atomic_set(&driver->needs_tlbflush, 1);
443b219372dSPatrik Jakobsson 	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
444b219372dSPatrik Jakobsson 
445b219372dSPatrik Jakobsson 	driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
446b219372dSPatrik Jakobsson 	PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
447b219372dSPatrik Jakobsson 		   PSB_CR_BIF_CTRL);
448b219372dSPatrik Jakobsson 	PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
449b219372dSPatrik Jakobsson 		   PSB_CR_BIF_CTRL);
4508c8f1c95SAlan Cox 
4518c8f1c95SAlan Cox 	driver->has_clflush = 0;
4528c8f1c95SAlan Cox 
453840d2830SH. Peter Anvin 	if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
4548c8f1c95SAlan Cox 		uint32_t tfms, misc, cap0, cap4, clflush_size;
4558c8f1c95SAlan Cox 
4568c8f1c95SAlan Cox 		/*
457b219372dSPatrik Jakobsson 		 * clflush size is determined at kernel setup for x86_64 but not
458b219372dSPatrik Jakobsson 		 * for i386. We have to do it here.
4598c8f1c95SAlan Cox 		 */
4608c8f1c95SAlan Cox 
4618c8f1c95SAlan Cox 		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
4628c8f1c95SAlan Cox 		clflush_size = ((misc >> 8) & 0xff) * 8;
4638c8f1c95SAlan Cox 		driver->has_clflush = 1;
4648c8f1c95SAlan Cox 		driver->clflush_add =
4658c8f1c95SAlan Cox 		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
4668c8f1c95SAlan Cox 		driver->clflush_mask = driver->clflush_add - 1;
4678c8f1c95SAlan Cox 		driver->clflush_mask = ~driver->clflush_mask;
4688c8f1c95SAlan Cox 	}
4698c8f1c95SAlan Cox 
4708c8f1c95SAlan Cox 	up_write(&driver->sem);
4718c8f1c95SAlan Cox 	return driver;
4728c8f1c95SAlan Cox 
4738c8f1c95SAlan Cox out_err1:
4748c8f1c95SAlan Cox 	kfree(driver);
4758c8f1c95SAlan Cox 	return NULL;
4768c8f1c95SAlan Cox }
4778c8f1c95SAlan Cox 
/*
 * CLFLUSH all PTEs covering @num_pages starting at @address out of the
 * CPU cache so the GPU sees them.  When @hw_tile_stride is non-zero the
 * region is walked as @rows rows of @desired_tile_stride pages.  No-op
 * when the CPU lacks CLFLUSH.  Caller holds pd->driver->sem.
 */
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush)
		return;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			/* Step one cache line at a time within this table. */
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}
5258c8f1c95SAlan Cox 
5268c8f1c95SAlan Cox void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
5278c8f1c95SAlan Cox 				 unsigned long address, uint32_t num_pages)
5288c8f1c95SAlan Cox {
5298c8f1c95SAlan Cox 	struct psb_mmu_pt *pt;
5308c8f1c95SAlan Cox 	unsigned long addr;
5318c8f1c95SAlan Cox 	unsigned long end;
5328c8f1c95SAlan Cox 	unsigned long next;
5338c8f1c95SAlan Cox 	unsigned long f_address = address;
5348c8f1c95SAlan Cox 
5358c8f1c95SAlan Cox 	down_read(&pd->driver->sem);
5368c8f1c95SAlan Cox 
5378c8f1c95SAlan Cox 	addr = address;
5388c8f1c95SAlan Cox 	end = addr + (num_pages << PAGE_SHIFT);
5398c8f1c95SAlan Cox 
5408c8f1c95SAlan Cox 	do {
5418c8f1c95SAlan Cox 		next = psb_pd_addr_end(addr, end);
5428c8f1c95SAlan Cox 		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
5438c8f1c95SAlan Cox 		if (!pt)
5448c8f1c95SAlan Cox 			goto out;
5458c8f1c95SAlan Cox 		do {
5468c8f1c95SAlan Cox 			psb_mmu_invalidate_pte(pt, addr);
5478c8f1c95SAlan Cox 			--pt->count;
5488c8f1c95SAlan Cox 		} while (addr += PAGE_SIZE, addr < next);
5498c8f1c95SAlan Cox 		psb_mmu_pt_unmap_unlock(pt);
5508c8f1c95SAlan Cox 
5518c8f1c95SAlan Cox 	} while (addr = next, next != end);
5528c8f1c95SAlan Cox 
5538c8f1c95SAlan Cox out:
5548c8f1c95SAlan Cox 	if (pd->hw_context != -1)
5558c8f1c95SAlan Cox 		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
5568c8f1c95SAlan Cox 
5578c8f1c95SAlan Cox 	up_read(&pd->driver->sem);
5588c8f1c95SAlan Cox 
5598c8f1c95SAlan Cox 	if (pd->hw_context != -1)
560b219372dSPatrik Jakobsson 		psb_mmu_flush(pd->driver);
5618c8f1c95SAlan Cox 
5628c8f1c95SAlan Cox 	return;
5638c8f1c95SAlan Cox }
5648c8f1c95SAlan Cox 
/*
 * Unmap @num_pages pages starting at @address, optionally laid out as
 * tiled rows (@desired_tile_stride pages per row, @hw_tile_stride pages
 * between row starts).  PTEs are reset to the invalid PTE; caches and
 * the GPU MMU are flushed afterwards if the directory is live.
 */
void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			/* No table means nothing mapped here; skip ahead. */
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;

			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}
6208c8f1c95SAlan Cox 
/*
 * Map @num_pages physically contiguous pages, starting at @start_pfn,
 * at GPU virtual @address with PSB_MMU_* @type flags.  Page tables are
 * allocated on demand; caches and the GPU MMU are flushed if the
 * directory is live.  Returns 0 on success or -ENOMEM.
 */
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);
	ret = 0;

out:
	/* Flush even on failure: some PTEs may already be written. */
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}
6668c8f1c95SAlan Cox 
/*
 * Map the @num_pages struct pages in @pages at GPU virtual @address with
 * PSB_MMU_* @type flags, optionally as tiled rows (see
 * psb_mmu_remove_pages() for the stride semantics).  Returns 0 on
 * success, -EINVAL for a num_pages/stride mismatch, or -ENOMEM.
 */
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
			 int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}

	ret = 0;
out:
	/* Flush even on failure: some PTEs may already be written. */
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}
7338c8f1c95SAlan Cox 
/*
 * Translate GPU virtual address @virtual to a host page frame number.
 * Returns 0 and stores the pfn in *pfn, or -EINVAL if the address is not
 * mapped to a valid PTE.
 */
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		/* No page table: inspect the directory entry directly. */
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v);
		spin_unlock(lock);

		/* NOTE(review): the first clause reads `tmp !=
		 * pd->invalid_pde` — combined with `||` this rejects any
		 * PDE that is not the invalid one, which looks inverted;
		 * confirm intent before changing (kept as-is). */
		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}
774