/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		"	bras	1,0f\n"			/* r1 = address of the mvc template below */
		"	mvc	0(1,%4),0(%5)\n"	/* template: copy from src into tmp at offset */
		"0:	mvc	0(8,%3),0(%0)\n"	/* read aligned doubleword at dst into tmp */
		"	ex	%1,0(1)\n"		/* execute template: merges size bytes into tmp */
		"	lg	%1,0(%3)\n"		/* pick up the merged doubleword */
		"	lra	%0,0(%0)\n"		/* get the real address of the destination */
		"	sturg	%1,%0\n"		/* store real: bypasses DAT and write protection */
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination with the sturg instruction,
 * which can only store aligned doublewords. Each chunk is therefore a
 * read-modify-write sequence: the function reads eight bytes from the
 * destination at an eight byte boundary, modifies the requested bytes and
 * writes the result back, looping until all of @src has been copied.
 *
 * Note: this means that this function may not be called concurrently on
 *	 several cpus with overlapping words, since this may potentially
 *	 cause data corruption.
 */
void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
	long copied;

	while (size) {
		copied = s390_kernel_write_odd(dst, src, size);
		dst += copied;
		src += copied;
		size -= copied;
	}
}
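
/*
 * Usage sketch (illustrative, not part of the original file): code that
 * patches otherwise read-only kernel text can rely on s390_kernel_write().
 * The function below and the nop opcode are assumptions for the example
 * only. Note how an unaligned five byte write starting two bytes before a
 * doubleword boundary is transparently split by the loop above into a two
 * byte plus a three byte read-modify-write.
 */
#if 0	/* example only, not compiled */
static void example_patch_nop(void *code)
{
	/* "bc 0,0" encodes a four byte nop on s390 */
	static const unsigned char nop[4] = { 0x47, 0x00, 0x00, 0x00 };

	/* works even if the page containing code is write protected */
	s390_kernel_write(code, nop, sizeof(nop));
}
#endif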

static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src  asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"	/* copy; cc == 3 means not yet done */
		"1:	jo	0b\n"		/* resume an interrupted mvcle */
		"	lhi	%0,0x0\n"	/* everything copied: rc = 0 */
		"2:\n"
		EX_TABLE(1b,2b)			/* on a fault skip to 2:, rc stays -EFAULT */
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	unsigned long flags;
	int rc;

	if (!count)
		return 0;
	local_irq_save(flags);
	__arch_local_irq_stnsm(0xfbUL);	/* switch DAT off */
	rc = __memcpy_real(dest, src, count);
	local_irq_restore(flags);
	return rc;
}
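
/*
 * Usage sketch (illustrative): dump support code, such as the zfcpdump
 * zcore driver, reads memory of the previous kernel with memcpy_real()
 * so that no page tables are required. The wrapper below is hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_read_oldmem(void *buf, unsigned long src, size_t count)
{
	/* src is interpreted as a real (physical) address */
	return memcpy_real(buf, (void *) src, count);
}
#endif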

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28); /* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);	/* with prefix 0 no addresses get swapped */
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
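
/*
 * Usage sketch (illustrative): a caller that wants to update a single
 * variable in absolute memory could wrap memcpy_absolute() like the
 * hypothetical macro below; the kernel provides a similar helper named
 * mem_assign_absolute().
 */
#if 0	/* example only, not compiled */
#define example_assign_absolute(dest, val) do {			\
	typeof(dest) __tmp = (val);				\
	memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));	\
} while (0)
#endif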

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	/* memcpy_real() runs with DAT off, so copy via a kernel bounce page */
	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
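
/*
 * Usage sketch (illustrative): a read() style handler that exposes real
 * memory to user space, similar to what the zcore dump interface does,
 * could be built on copy_to_user_real(). All names are hypothetical.
 */
#if 0	/* example only, not compiled */
static ssize_t example_read(char __user *ubuf, unsigned long src,
			    unsigned long count)
{
	if (copy_to_user_real(ubuf, (void *) src, count))
		return -EFAULT;
	return count;
}
#endif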

/*
 * Check if physical address is within prefix or zero page
 *
 * Accesses to such addresses are modified by prefixing and would not
 * return the absolute memory contents, hence xlate_dev_mem_ptr() copies
 * them via memcpy_absolute().
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct _lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is at most one page.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);	/* bytes up to the page end */
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
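
/*
 * Usage sketch (illustrative): the pattern a /dev/mem style read path
 * follows when using the two functions above. The wrapper is hypothetical;
 * note that a bounce buffer only covers the page containing addr, so count
 * must not cross a page boundary here.
 */
#if 0	/* example only, not compiled */
static int example_read_dev_mem(void *dst, phys_addr_t addr, size_t count)
{
	void *ptr = xlate_dev_mem_ptr(addr);	/* may return a bounce buffer */

	if (!ptr)
		return -EFAULT;
	memcpy(dst, ptr, count);
	unxlate_dev_mem_ptr(addr, ptr);		/* frees the bounce buffer, if any */
	return 0;
}
#endif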