// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/stacktrace.h>

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;	/* doubleword containing dst */
	offset = (unsigned long) dst & 7UL;	/* offset of dst therein */
	size = min(8UL - offset, size);		/* bytes handled this round */
	count = size - 1;			/* 'ex' length code is size - 1 */
	asm volatile(
		"	bras	1,0f\n"		/* %r1 = address of mvc template */
		"	mvc	0(1,%4),0(%5)\n" /* template: patch src bytes into tmp */
		"0:	mvc	0(8,%3),0(%0)\n" /* read aligned doubleword into tmp */
		"	ex	%1,0(1)\n"	/* run template with length 'count' */
		"	lg	%1,0(%3)\n"	/* load back the modified doubleword */
		"	lra	%0,0(%0)\n"	/* real address of the destination */
		"	sturg	%1,%0\n"	/* store real, bypassing DAT */
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from the destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	while (size) {
		copied = s390_kernel_write_odd(dst, src, size);
		dst += copied;
		src += copied;
		size -= copied;
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
}
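
/*
 * A minimal usage sketch (hypothetical caller and address): runtime code
 * patching, for instance by the jump label or ftrace code, can use this
 * to overwrite an instruction in write-protected kernel text:
 *
 *	u16 nop = 0x0700;	// "bcr 0,0", a 2-byte s390 nop
 *
 *	s390_kernel_write(code_addr, &nop, sizeof(nop));
 *
 * The store succeeds even though the page is mapped read-only, because
 * sturg stores via the real address and thus ignores DAT protection.
 */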

static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src  asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"	/* copy, may stop early with CC 3 */
		"1:	jo	0b\n"		/* CC 3: not done yet, continue */
		"	lhi	%0,0x0\n"	/* everything copied: rc = 0 */
		"2:\n"
		EX_TABLE(1b,2b)			/* fault: resume at 2:, rc = -EFAULT */
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
							unsigned long src,
							unsigned long count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = arch_local_irq_save();
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	__arch_local_irq_stnsm(0xf8);	/* disable DAT */
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (flags & PSW_MASK_DAT)
		__arch_local_irq_stosm(0x04);	/* enable DAT */
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	if (S390_lowcore.nodat_stack != 0)
		return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
				     3, dest, src, count);
	/*
	 * This is a really early memcpy_real call, the stacks are
	 * not set up yet. Just call _memcpy_real on the early boot
	 * stack.
	 */
	return _memcpy_real((unsigned long) dest, (unsigned long) src,
			    (unsigned long) count);
}
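
/*
 * A minimal usage sketch (hypothetical source address): copy from real
 * storage while ignoring the current page tables, as dump code does when
 * it reads memory of a previous kernel:
 *
 *	unsigned long buf[16];
 *
 *	if (memcpy_real(buf, (void *) 0x10000, sizeof(buf)))
 *		return -EFAULT;
 *
 * The copy runs on the nodat stack with DAT disabled, so both source and
 * destination are interpreted as real addresses; the destination must
 * therefore be identity-mapped kernel memory, never a user or vmalloc
 * address.
 */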

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
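
/*
 * A minimal usage sketch (hypothetical destination pointer): update a
 * word in absolute storage, i.e. the memory that the low real address
 * range maps to once prefixing is taken out of the picture:
 *
 *	unsigned long word = 42;
 *
 *	memcpy_absolute(absolute_dst, &word, sizeof(word));
 *
 * With a non-zero prefix a plain memcpy() to the same address would be
 * redirected into the prefix area; temporarily setting the prefix to 0
 * makes the store go to absolute memory instead.
 */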

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
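
/*
 * The page-sized bounce buffer above is needed because memcpy_real()
 * runs with DAT disabled and therefore cannot store to user (virtual)
 * addresses directly. A minimal usage sketch (hypothetical addresses):
 *
 *	if (copy_to_user_real(ubuf, (void *) 0x10000, len))
 *		return -EFAULT;
 */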

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}
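
/*
 * Worked example: with a prefix register of 0x10000, a CPU's accesses
 * to real addresses 0x0-0x1fff are redirected to 0x10000-0x11fff, and
 * accesses to 0x10000-0x11fff go to 0x0-0x1fff. is_swapped() therefore
 * reports the zero page range and every online CPU's lowcore, since a
 * plain load from those physical ranges would not return the absolute
 * memory contents that /dev/mem is expected to show.
 */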

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is at most one page.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
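
/*
 * Calling pattern, as in the /dev/mem read path in drivers/char/mem.c
 * (simplified sketch):
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 *
 * unxlate_dev_mem_ptr() frees the bounce page only if one was actually
 * allocated; otherwise buf is the identity-mapped address and nothing
 * needs to be done.
 */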