// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>

/*
 * Write up to eight bytes to @dst, but never across the next eight byte
 * boundary. Returns the number of bytes written.
 */
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		/* Load the address of the mvc template into r1 and skip it. */
		"	bras	1,0f\n"
		/* Template, run via ex: copy count+1 bytes from src to tmp+offset. */
		"	mvc	0(1,%4),0(%5)\n"
		/* Read the aligned eight byte word that contains dst. */
		"0:	mvc	0(8,%3),0(%0)\n"
		"	ex	%1,0(1)\n"
		/* Load the modified word and the real address of the target. */
		"	lg	%1,0(%3)\n"
		"	lra	%0,0(%0)\n"
		/* Store with real address, bypassing DAT and write protection. */
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction,
 * which stores eight byte aligned words. This results in a read-modify-write
 * sequence: eight bytes are read from the destination at an eight byte
 * boundary, the requested bytes are modified and the result is written back,
 * in a loop, until all bytes are copied.
 *
 * Note: this function may not be called concurrently on several CPUs with
 *	 overlapping words, since that could cause data corruption.
 */
void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
	long copied;

	while (size) {
		copied = s390_kernel_write_odd(dst, src, size);
		dst += copied;
		src += copied;
		size -= copied;
	}
}
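
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * uses s390_kernel_write() to patch a word that sits in a read-only
 * mapping, e.g. kernel text. The function and the opcode below are made
 * up for the example; only the s390_kernel_write() call itself is real.
 */
#if 0
static void example_patch_nop(void *ro_addr)
{
	u16 nop = 0x0700;	/* bcr 0,0 */

	/* Bypasses write protection; callers must serialize writes that
	 * touch overlapping eight byte words. */
	s390_kernel_write(ro_addr, &nop, sizeof(nop));
}
#endif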

static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src  asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		/* Move long extended; cc == 3 means the copy is incomplete. */
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		/* The copy completed without a fault: report success. */
		"	lhi	%0,0x0\n"
		"2:\n"
		/* On a fault in the mvcle, continue at 2: with rc == -EFAULT. */
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel): the copy runs with DAT
 * and interrupts disabled.
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = __arch_local_irq_stnsm(0xf8UL);
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	rc = __memcpy_real(dest, src, count);
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}

/*
 * Copy memory in absolute mode (kernel to kernel): lowcore protection
 * is switched off and, if a prefix is set, the prefix register is
 * temporarily set to zero so that the copy operates on absolute
 * addresses.
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28); /* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
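
/*
 * Illustrative sketch, not part of the original file: a store into the
 * absolute zero page has to go through memcpy_absolute(), otherwise
 * prefixing would redirect the store into this CPU's lowcore. The
 * wrapper below is made up for the example.
 */
#if 0
static void example_set_absolute_word(unsigned long abs_addr,
				      unsigned long val)
{
	/* abs_addr is an absolute address within the zero page */
	memcpy_absolute((void *) abs_addr, &val, sizeof(val));
}
#endif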

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
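
/*
 * Illustrative sketch, not part of the original file: a dump interface
 * handing real memory to user space, similar to what the s390 zcore
 * driver does. The function and its parameters are made up for the
 * example.
 */
#if 0
static ssize_t example_dump_read(char __user *ubuf, unsigned long real_addr,
				 size_t len)
{
	if (copy_to_user_real(ubuf, (void *) real_addr, len))
		return -EFAULT;
	return len;
}
#endif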

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is at most one page.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
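
/*
 * Illustrative sketch, not part of the original file: how a /dev/mem
 * style read path would use the two hooks above; the real caller is the
 * generic memory driver. The function below is made up for the example.
 */
#if 0
static int example_dev_mem_read(char __user *ubuf, phys_addr_t addr,
				size_t size)
{
	void *ptr;
	int rc = 0;

	ptr = xlate_dev_mem_ptr(addr);	/* may return a bounce buffer */
	if (!ptr)
		return -EFAULT;
	if (copy_to_user(ubuf, ptr, size))
		rc = -EFAULT;
	unxlate_dev_mem_ptr(addr, ptr);	/* frees the bounce buffer, if any */
	return rc;
}
#endif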