// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/stacktrace.h>

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		/* Save the address of the mvc template in r1, skip over it. */
		"	bras	1,0f\n"
		/* Template executed via "ex": copy count+1 bytes from src. */
		"	mvc	0(1,%4),0(%5)\n"
		/* Read the aligned eight bytes containing dst into tmp. */
		"0:	mvc	0(8,%3),0(%0)\n"
		/* Merge the requested bytes from src into tmp ... */
		"	ex	%1,0(1)\n"
		/* ... reload the modified eight bytes ... */
		"	lg	%1,0(%3)\n"
		/* ... and store them back via the real address, so that */
		/* sturg bypasses DAT and page table write protection. */
		"	lra	%0,0(%0)\n"
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from the destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	if (!(flags & PSW_MASK_DAT)) {
		/* DAT is off, so page table write protection does not apply. */
		memcpy(dst, src, size);
	} else {
		while (size) {
			copied = s390_kernel_write_odd(tmp, src, size);
			tmp += copied;
			src += copied;
			size -= copied;
		}
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}

static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		/* mvcle copies in chunks, jo loops until it is done. */
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		"	lhi	%0,0x0\n"
		"2:\n"
		/* A fault continues at 2 and leaves rc at -EFAULT. */
		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
							unsigned long src,
							unsigned long count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = arch_local_irq_save();
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	__arch_local_irq_stnsm(0xf8); // disable DAT
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (flags & PSW_MASK_DAT)
		__arch_local_irq_stosm(0x04); // enable DAT
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}
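/*
 * Illustrative sketch, not part of the original file: a typical user of
 * s390_kernel_write() is code that patches otherwise write-protected
 * kernel text, e.g. the jump label code. The function name and the nop
 * encoding below are assumptions chosen for this example only.
 */
static void __maybe_unused example_patch_nop(void *code)
{
	/* brcl 0,0 - a six byte no-op on s390 (assumed encoding) */
	static const u8 nop[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };

	/*
	 * The write goes through sturg and therefore succeeds even if
	 * "code" is mapped read-only; the spinlock in s390_kernel_write()
	 * serializes concurrent patchers.
	 */
	s390_kernel_write(code, nop, sizeof(nop));
}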
/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	int rc;

	if (S390_lowcore.nodat_stack != 0) {
		preempt_disable();
		rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3,
				   dest, src, count);
		preempt_enable();
		return rc;
	}
	/*
	 * This is a really early memcpy_real call, the stacks are
	 * not set up yet. Just call _memcpy_real on the early boot
	 * stack.
	 */
	return _memcpy_real((unsigned long) dest, (unsigned long) src,
			    (unsigned long) count);
}

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28); /* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		/*
		 * Temporarily set the prefix to zero so that the first 8KB
		 * are accessed with their absolute addresses.
		 */
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is maximum one page large.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
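/*
 * Illustrative sketch, not part of the original file: the pattern in which
 * a /dev/mem style reader uses the two helpers above. xlate_dev_mem_ptr()
 * may return a bounce buffer copy of the swapped prefix page, which
 * unxlate_dev_mem_ptr() frees again. The function name is an assumption;
 * "count" is assumed not to cross the page boundary of "addr".
 */
static ssize_t __maybe_unused example_read_dev_mem(phys_addr_t addr,
						   char __user *ubuf,
						   size_t count)
{
	void *ptr;
	ssize_t rc = count;

	ptr = xlate_dev_mem_ptr(addr);
	if (!ptr)
		return -EFAULT;
	if (copy_to_user(ubuf, ptr, count))
		rc = -EFAULT;
	/* Frees ptr only if it is a bounce buffer, not a plain address. */
	unxlate_dev_mem_ptr(addr, ptr);
	return rc;
}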