/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  KVM guest address space mapping code
 *
 *    Copyright IBM Corp. 2007, 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef _ASM_S390_GMAP_H
#define _ASM_S390_GMAP_H

#include <linux/radix-tree.h>
#include <linux/refcount.h>

/* Generic bits for GMAP notification on DAT table entry changes. */
#define GMAP_NOTIFY_SHADOW	0x2
#define GMAP_NOTIFY_MPROT	0x1

/* Status bits only for huge segment entries */
#define _SEGMENT_ENTRY_GMAP_IN		0x0800	/* invalidation notify bit */
#define _SEGMENT_ENTRY_GMAP_UC		0x0002	/* dirty (migration) */

/**
 * struct gmap - guest address space
 * @list: list head for the mm->context gmap list
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @ref_count: reference counter for the gmap structure
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @asce_end: highest guest address covered by @asce
 * @private: pointer to data private to the user of the gmap; KVM stores
 *	its struct kvm pointer here
 * @pfault_enabled: defines if pfaults are applicable for the guest
 * @guest_handle: protected virtual machine handle for the ultravisor
 * @host_to_rmap: radix tree with gmap_rmap lists
 * @children: list of shadow gmap structures
 * @shadow_lock: spinlock to protect the shadow gmap list
 * @parent: pointer to the parent gmap for shadow guest address spaces
 * @orig_asce: ASCE for which the shadow page table has been created
 * @edat_level: edat level to be used for the shadow translation
 * @removed: flag to indicate if a shadow guest address space has been removed
 * @initialized: flag to indicate if a shadow guest address space can be used
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	refcount_t ref_count;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
	/* only set for protected virtual machines */
	unsigned long guest_handle;
	/* Additional data for shadow guest address spaces */
	struct radix_tree_root host_to_rmap;
	struct list_head children;
	spinlock_t shadow_lock;
	struct gmap *parent;
	unsigned long orig_asce;
	int edat_level;
	bool removed;
	bool initialized;
};

/**
 * struct gmap_rmap - reverse mapping for shadow page table entries
 * @next: pointer to next rmap in the list
 * @raddr: virtual rmap address in the shadow guest address space
 */
struct gmap_rmap {
	struct gmap_rmap *next;
	unsigned long raddr;
};

#define gmap_for_each_rmap(pos, head) \
	for (pos = (head); pos; pos = pos->next)

#define gmap_for_each_rmap_safe(pos, n, head) \
	for (pos = (head); n = pos ? pos->next : NULL, pos; pos = n)
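
/*
 * Illustrative sketch only (not part of this header): tearing down an rmap
 * chain with the _safe iterator, which caches the next pointer so that the
 * current entry may be freed while walking the list. The variable names
 * are made up for this example.
 *
 *	struct gmap_rmap *rmap, *rnext;
 *
 *	gmap_for_each_rmap_safe(rmap, rnext, head)
 *		kfree(rmap);
 */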

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @list: list head for the list of registered notifiers
 * @rcu: rcu head for RCU-delayed teardown of the notifier
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	struct rcu_head rcu;
	void (*notifier_call)(struct gmap *gmap, unsigned long start,
			      unsigned long end);
};
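/**
 * gmap_is_shadow - check if the gmap is a shadow guest address space
 * @gmap: pointer to the guest gmap structure
 *
 * Returns 1 if the gmap is a shadow guest address space, 0 otherwise.
 */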
static inline int gmap_is_shadow(struct gmap *gmap)
{
	return !!gmap->parent;
}

struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
void gmap_remove(struct gmap *gmap);
struct gmap *gmap_get(struct gmap *gmap);
void gmap_put(struct gmap *gmap);
void gmap_free(struct gmap *gmap);
struct gmap *gmap_alloc(unsigned long limit);
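
/*
 * Illustrative sketch only: a typical gmap lifetime as used by a
 * hypervisor. gmap_create() allocates a gmap for the given mm with the
 * given address space limit and takes the initial reference;
 * gmap_get()/gmap_put() adjust the reference count; gmap_remove() detaches
 * the gmap from the mm and drops the initial reference. The limit below is
 * just an example value; error handling is elided.
 *
 *	struct gmap *gmap;
 *
 *	gmap = gmap_create(current->mm, (1UL << 42) - 1);
 *	if (!gmap)
 *		return -ENOMEM;
 *	...
 *	gmap_remove(gmap);
 */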

int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
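
/*
 * Illustrative sketch only: making a range of the host (userspace) address
 * space visible in a guest address space. gmap_map_segment() maps @len
 * bytes starting at host address @from to guest address @to;
 * gmap_unmap_segment() removes the mapping again. All values must be
 * 1 MB segment aligned; the names below are made up for this example.
 *
 *	if (gmap_map_segment(gmap, userspace_addr, guest_addr, size))
 *		return -EFAULT;
 *	...
 *	gmap_unmap_segment(gmap, guest_addr, size);
 */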

int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);
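
/*
 * Illustrative sketch only: gmap_read_table() reads an 8-byte value from
 * guest memory, e.g. to fetch a guest DAT table entry while shadowing.
 * It returns 0 on success or a negative error code if the translation or
 * the access fails.
 *
 *	unsigned long entry;
 *
 *	if (gmap_read_table(gmap, gaddr, &entry))
 *		return -EFAULT;
 */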

void gmap_unshadow(struct gmap *sg);
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake);
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake);
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake);
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake);
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
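
/*
 * Illustrative note: the gmap_shadow_*() functions build a shadow DAT
 * structure top-down on a shadow fault. For a faulting shadow address
 * @saddr, the caller walks the guest's own tables and shadows one level at
 * a time: region-2 table (gmap_shadow_r2t), region-3 table
 * (gmap_shadow_r3t), segment table (gmap_shadow_sgt), page table
 * (gmap_shadow_pgt) and finally the page table entry itself
 * (gmap_shadow_page). @fake indicates that the given table origin
 * references a contiguous block of guest memory rather than a real table,
 * e.g. when shadowing a real-space designation.
 */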

void gmap_register_pte_notifier(struct gmap_notifier *);
void gmap_unregister_pte_notifier(struct gmap_notifier *);
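
/*
 * Illustrative sketch only (callback and variable names are made up):
 * registering a notifier whose callback is invoked when a pte with a
 * notification bit set is invalidated in a guest address space.
 *
 *	static void my_invalidation_cb(struct gmap *gmap, unsigned long start,
 *				       unsigned long end)
 *	{
 *		...
 *	}
 *
 *	static struct gmap_notifier my_notifier = {
 *		.notifier_call = my_invalidation_cb,
 *	};
 *
 *	gmap_register_pte_notifier(&my_notifier);
 *	...
 *	gmap_unregister_pte_notifier(&my_notifier);
 */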

int gmap_protect_one(struct gmap *gmap, unsigned long gaddr, int prot, unsigned long bits);

void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
			     unsigned long gaddr, unsigned long vmaddr);
int s390_disable_cow_sharing(void);
int s390_replace_asce(struct gmap *gmap);
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
			    unsigned long end, bool interruptible);
int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split);
unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
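
/*
 * Illustrative note: gmap_table_walk() returns a pointer to the DAT table
 * entry for @gaddr at the requested @level (0 for a page table entry up to
 * 4 for a region-1 table entry), or NULL if the tables are not present to
 * that depth; see arch/s390/mm/gmap.c for the authoritative description.
 */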

/**
 * s390_uv_destroy_range - Destroy a range of pages in the given mm.
 * @mm: the mm to operate on
 * @start: the start of the range
 * @end: the end of the range
 *
 * This function will call cond_resched(), so it should not generate stalls,
 * but it will otherwise only return once it has completed destroying the
 * whole range.
 */
static inline void s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
					 unsigned long end)
{
	(void)__s390_uv_destroy_range(mm, start, end, false);
}

/**
 * s390_uv_destroy_range_interruptible - Destroy a range of pages in the
 * given mm, but stop when a fatal signal is received.
 * @mm: the mm to operate on
 * @start: the start of the range
 * @end: the end of the range
 *
 * This function will call cond_resched(), so it should not generate stalls.
 * If a fatal signal is received, it will return with -EINTR immediately,
 * without finishing destroying the whole range. Upon successful completion,
 * 0 is returned.
 */
static inline int s390_uv_destroy_range_interruptible(struct mm_struct *mm, unsigned long start,
						      unsigned long end)
{
	return __s390_uv_destroy_range(mm, start, end, true);
}
#endif /* _ASM_S390_GMAP_H */