1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef _ASM_UACCESS_H 3 #define _ASM_UACCESS_H 4 5 /* 6 * User space memory access functions 7 */ 8 9 #include <linux/compiler.h> 10 #include <linux/string.h> 11 #include <linux/mm_types.h> 12 #include <asm/asi.h> 13 #include <asm/spitfire.h> 14 #include <asm/pgtable.h> 15 16 #include <asm/processor.h> 17 #include <asm-generic/access_ok.h> 18 19 /* 20 * Sparc64 is segmented, though more like the M68K than the I386. 21 * We use the secondary ASI to address user memory, which references a 22 * completely different VM map, thus there is zero chance of the user 23 * doing something queer and tricking us into poking kernel memory. 24 */ 25 26 /* 27 * Test whether a block of memory is a valid user space address. 28 * Returns 0 if the range is valid, nonzero otherwise. 29 */ 30 static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit) 31 { 32 if (__builtin_constant_p(size)) 33 return addr > limit - size; 34 35 addr += size; 36 if (addr < size) 37 return true; 38 39 return addr > limit; 40 } 41 42 #define __range_not_ok(addr, size, limit) \ 43 ({ \ 44 __chk_user_ptr(addr); \ 45 __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ 46 }) 47 48 void __retl_efault(void); 49 50 /* Uh, these should become the main single-value transfer routines.. 51 * They automatically use the right size if we just have the right 52 * pointer type.. 53 * 54 * This gets kind of ugly. We want to return _two_ values in "get_user()" 55 * and yet we don't want to do any pointers, because that is too much 56 * of a performance impact. Thus we have a few rather ugly macros here, 57 * and hide all the ugliness from the user. 58 */ 59 #define put_user(x, ptr) ({ \ 60 unsigned long __pu_addr = (unsigned long)(ptr); \ 61 __chk_user_ptr(ptr); \ 62 __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\ 63 }) 64 65 #define get_user(x, ptr) ({ \ 66 unsigned long __gu_addr = (unsigned long)(ptr); \ 67 __chk_user_ptr(ptr); \ 68 __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\ 69 }) 70 71 #define __put_user(x, ptr) put_user(x, ptr) 72 #define __get_user(x, ptr) get_user(x, ptr) 73 74 struct __large_struct { unsigned long buf[100]; }; 75 #define __m(x) ((struct __large_struct *)(x)) 76 77 #define __put_kernel_nofault(dst, src, type, label) \ 78 do { \ 79 type *addr = (type __force *)(dst); \ 80 type data = *(type *)src; \ 81 register int __pu_ret; \ 82 switch (sizeof(type)) { \ 83 case 1: __put_kernel_asm(data, b, addr, __pu_ret); break; \ 84 case 2: __put_kernel_asm(data, h, addr, __pu_ret); break; \ 85 case 4: __put_kernel_asm(data, w, addr, __pu_ret); break; \ 86 case 8: __put_kernel_asm(data, x, addr, __pu_ret); break; \ 87 default: __pu_ret = __put_user_bad(); break; \ 88 } \ 89 if (__pu_ret) \ 90 goto label; \ 91 } while (0) 92 93 #define __put_kernel_asm(x, size, addr, ret) \ 94 __asm__ __volatile__( \ 95 "/* Put kernel asm, inline. */\n" \ 96 "1:\t" "st"#size " %1, [%2]\n\t" \ 97 "clr %0\n" \ 98 "2:\n\n\t" \ 99 ".section .fixup,#alloc,#execinstr\n\t" \ 100 ".align 4\n" \ 101 "3:\n\t" \ 102 "sethi %%hi(2b), %0\n\t" \ 103 "jmpl %0 + %%lo(2b), %%g0\n\t" \ 104 " mov %3, %0\n\n\t" \ 105 ".previous\n\t" \ 106 ".section __ex_table,\"a\"\n\t" \ 107 ".align 4\n\t" \ 108 ".word 1b, 3b\n\t" \ 109 ".previous\n\n\t" \ 110 : "=r" (ret) : "r" (x), "r" (__m(addr)), \ 111 "i" (-EFAULT)) 112 113 #define __put_user_nocheck(data, addr, size) ({ \ 114 register int __pu_ret; \ 115 switch (size) { \ 116 case 1: __put_user_asm(data, b, addr, __pu_ret); break; \ 117 case 2: __put_user_asm(data, h, addr, __pu_ret); break; \ 118 case 4: __put_user_asm(data, w, addr, __pu_ret); break; \ 119 case 8: __put_user_asm(data, x, addr, __pu_ret); break; \ 120 default: __pu_ret = __put_user_bad(); break; \ 121 } \ 122 __pu_ret; \ 123 }) 124 125 #define __put_user_asm(x, size, addr, ret) \ 126 __asm__ __volatile__( \ 127 "/* Put user asm, inline. */\n" \ 128 "1:\t" "st"#size "a %1, [%2] %%asi\n\t" \ 129 "clr %0\n" \ 130 "2:\n\n\t" \ 131 ".section .fixup,#alloc,#execinstr\n\t" \ 132 ".align 4\n" \ 133 "3:\n\t" \ 134 "sethi %%hi(2b), %0\n\t" \ 135 "jmpl %0 + %%lo(2b), %%g0\n\t" \ 136 " mov %3, %0\n\n\t" \ 137 ".previous\n\t" \ 138 ".section __ex_table,\"a\"\n\t" \ 139 ".align 4\n\t" \ 140 ".word 1b, 3b\n\t" \ 141 ".previous\n\n\t" \ 142 : "=r" (ret) : "r" (x), "r" (__m(addr)), \ 143 "i" (-EFAULT)) 144 145 int __put_user_bad(void); 146 147 #define __get_kernel_nofault(dst, src, type, label) \ 148 do { \ 149 type *addr = (type __force *)(src); \ 150 register int __gu_ret; \ 151 register unsigned long __gu_val; \ 152 switch (sizeof(type)) { \ 153 case 1: __get_kernel_asm(__gu_val, ub, addr, __gu_ret); break; \ 154 case 2: __get_kernel_asm(__gu_val, uh, addr, __gu_ret); break; \ 155 case 4: __get_kernel_asm(__gu_val, uw, addr, __gu_ret); break; \ 156 case 8: __get_kernel_asm(__gu_val, x, addr, __gu_ret); break; \ 157 default: \ 158 __gu_val = 0; \ 159 __gu_ret = __get_user_bad(); \ 160 break; \ 161 } \ 162 if (__gu_ret) \ 163 goto label; \ 164 *(type *)dst = (__force type) __gu_val; \ 165 } while (0) 166 #define __get_kernel_asm(x, size, addr, ret) \ 167 __asm__ __volatile__( \ 168 "/* Get kernel asm, inline. */\n" \ 169 "1:\t" "ld"#size " [%2], %1\n\t" \ 170 "clr %0\n" \ 171 "2:\n\n\t" \ 172 ".section .fixup,#alloc,#execinstr\n\t" \ 173 ".align 4\n" \ 174 "3:\n\t" \ 175 "sethi %%hi(2b), %0\n\t" \ 176 "clr %1\n\t" \ 177 "jmpl %0 + %%lo(2b), %%g0\n\t" \ 178 " mov %3, %0\n\n\t" \ 179 ".previous\n\t" \ 180 ".section __ex_table,\"a\"\n\t" \ 181 ".align 4\n\t" \ 182 ".word 1b, 3b\n\n\t" \ 183 ".previous\n\t" \ 184 : "=r" (ret), "=r" (x) : "r" (__m(addr)), \ 185 "i" (-EFAULT)) 186 187 #define __get_user_nocheck(data, addr, size, type) ({ \ 188 register int __gu_ret; \ 189 register unsigned long __gu_val; \ 190 switch (size) { \ 191 case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \ 192 case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \ 193 case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \ 194 case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \ 195 default: \ 196 __gu_val = 0; \ 197 __gu_ret = __get_user_bad(); \ 198 break; \ 199 } \ 200 data = (__force type) __gu_val; \ 201 __gu_ret; \ 202 }) 203 204 #define __get_user_asm(x, size, addr, ret) \ 205 __asm__ __volatile__( \ 206 "/* Get user asm, inline. */\n" \ 207 "1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \ 208 "clr %0\n" \ 209 "2:\n\n\t" \ 210 ".section .fixup,#alloc,#execinstr\n\t" \ 211 ".align 4\n" \ 212 "3:\n\t" \ 213 "sethi %%hi(2b), %0\n\t" \ 214 "clr %1\n\t" \ 215 "jmpl %0 + %%lo(2b), %%g0\n\t" \ 216 " mov %3, %0\n\n\t" \ 217 ".previous\n\t" \ 218 ".section __ex_table,\"a\"\n\t" \ 219 ".align 4\n\t" \ 220 ".word 1b, 3b\n\n\t" \ 221 ".previous\n\t" \ 222 : "=r" (ret), "=r" (x) : "r" (__m(addr)), \ 223 "i" (-EFAULT)) 224 225 int __get_user_bad(void); 226 227 unsigned long __must_check raw_copy_from_user(void *to, 228 const void __user *from, 229 unsigned long size); 230 231 unsigned long __must_check raw_copy_to_user(void __user *to, 232 const void *from, 233 unsigned long size); 234 #define INLINE_COPY_FROM_USER 235 #define INLINE_COPY_TO_USER 236 237 unsigned long __must_check raw_copy_in_user(void __user *to, 238 const void __user *from, 239 unsigned long size); 240 241 unsigned long __must_check __clear_user(void __user *, unsigned long); 242 243 #define clear_user __clear_user 244 245 __must_check long strnlen_user(const char __user *str, long n); 246 247 struct pt_regs; 248 unsigned long compute_effective_address(struct pt_regs *, 249 unsigned int insn, 250 unsigned int rd); 251 252 #endif /* _ASM_UACCESS_H */ 253