xref: /linux/drivers/gpu/drm/gma500/mmu.h (revision 2697b79a469b68e3ad3640f55284359c1396278d)
/* SPDX-License-Identifier: GPL-2.0-only */
/**************************************************************************
 * Copyright (c) 2007-2011, Intel Corporation.
 * All Rights Reserved.
 *
 **************************************************************************/

#ifndef __MMU_H
#define __MMU_H

struct psb_mmu_driver {
	/* protects driver and pd structures. Always take in read mode
	 * before taking the page table spinlock.
	 */
	struct rw_semaphore sem;

	/* protects page directory tables, page tables and pt structures. */
	spinlock_t lock;

	atomic_t needs_tlbflush;
	atomic_t *msvdx_mmu_invaldc;
	struct psb_mmu_pd *default_pd;
	uint32_t bif_ctrl;
	int has_clflush;
	int clflush_add;
	unsigned long clflush_mask;

	struct drm_device *dev;
};
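
/*
 * Lock ordering sketch (added here for illustration, not part of the
 * original header): per the comment on @sem above, code inside mmu.c is
 * expected to take the semaphore in read mode before the page table
 * spinlock, roughly:
 *
 *	down_read(&driver->sem);
 *	spin_lock(&driver->lock);
 *	... walk or update page tables ...
 *	spin_unlock(&driver->lock);
 *	up_read(&driver->sem);
 *
 * The call sites are illustrative only; see mmu.c for the actual users.
 */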

struct psb_mmu_pd;

struct psb_mmu_pt {
	struct psb_mmu_pd *pd;
	uint32_t index;
	uint32_t count;
	struct page *p;
	uint32_t *v;
};

struct psb_mmu_pd {
	struct psb_mmu_driver *driver;
	int hw_context;
	struct psb_mmu_pt **tables;
	struct page *p;
	struct page *dummy_pt;
	struct page *dummy_page;
	uint32_t pd_mask;
	uint32_t invalid_pde;
	uint32_t invalid_pte;
};

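/*
 * How the structures relate (an informal note, inferred from the fields
 * above rather than taken from the original header): psb_mmu_pd is the
 * top-level page directory, its @tables array holds one psb_mmu_pt per
 * directory entry, and each psb_mmu_pt wraps a single page of page table
 * entries (@p is the backing page, @v its kernel mapping, @count the
 * number of live entries). The exact index and shift arithmetic lives in
 * mmu.c.
 */
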
extern struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
						  int trap_pagefaults,
						  int invalid_type,
						  atomic_t *msvdx_mmu_invaldc);
extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
						 *driver);
extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
					   int trap_pagefaults,
					   int invalid_type);
extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
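
/*
 * Driver lifecycle sketch (illustrative only, not from the original
 * header): the calling context, the argument values and the error
 * handling below are placeholders for whatever the device setup path
 * really uses.
 *
 *	struct psb_mmu_driver *mmu;
 *	struct psb_mmu_pd *pd;
 *
 *	mmu = psb_mmu_driver_init(dev, 1, 0, NULL);
 *	if (!mmu)
 *		return -ENOMEM;
 *	pd = psb_mmu_get_default_pd(mmu);
 *	...
 *	psb_mmu_driver_takedown(mmu);
 */
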
extern void psb_mmu_flush(struct psb_mmu_driver *driver);
extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
					unsigned long address,
					uint32_t num_pages);
extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
				       uint32_t start_pfn,
				       unsigned long address,
				       uint32_t num_pages, int type);
extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
				  unsigned long *pfn);
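
/*
 * Mapping sketch for a physically contiguous PFN range (illustrative only,
 * not from the original header; @pd, @start_pfn, @gpu_addr and @num_pages
 * are assumed to come from the caller and the type value is a placeholder):
 *
 *	unsigned long pfn;
 *	int ret;
 *
 *	ret = psb_mmu_insert_pfn_sequence(pd, start_pfn, gpu_addr,
 *					  num_pages, 0);
 *	if (!ret)
 *		ret = psb_mmu_virtual_to_pfn(pd, gpu_addr, &pfn);
 *	...
 *	psb_mmu_remove_pfn_sequence(pd, gpu_addr, num_pages);
 */
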
extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
				unsigned long address, uint32_t num_pages,
				uint32_t desired_tile_stride,
				uint32_t hw_tile_stride, int type);
extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages,
				 uint32_t desired_tile_stride,
				 uint32_t hw_tile_stride);
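
/*
 * Mapping sketch for an array of struct page pointers (illustrative only,
 * not from the original header; the zero stride and type arguments are
 * placeholders, real callers elsewhere in the driver pick their own):
 *
 *	ret = psb_mmu_insert_pages(pd, pages, gpu_addr, num_pages, 0, 0, 0);
 *	...
 *	psb_mmu_remove_pages(pd, gpu_addr, num_pages, 0, 0);
 */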

#endif