/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/extable.h>

#include <linux/bug.h>
#include <linux/string.h>

#define TASK_SIZE_MAX DEFAULT_TASK_SIZE
#include <asm/pgtable.h>
#include <asm-generic/access_ok.h>

#define put_user __put_user
#define get_user __get_user

#if !defined(CONFIG_64BIT)
#define LDD_USER(sr, val, ptr)	__get_user_asm64(sr, val, ptr)
#define STD_USER(sr, x, ptr)	__put_user_asm64(sr, x, ptr)
#else
#define LDD_USER(sr, val, ptr)	__get_user_asm(sr, val, "ldd", ptr)
#define STD_USER(sr, x, ptr)	__put_user_asm(sr, "std", x, ptr)
#endif

#define __get_user_internal(sr, val, ptr)		\
({							\
	ASM_EXCEPTIONTABLE_VAR(__gu_err);		\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
	case 2: __get_user_asm(sr, val, "ldh", ptr); break; \
	case 4: __get_user_asm(sr, val, "ldw", ptr); break; \
	case 8: LDD_USER(sr, val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

#define __probe_user_internal(sr, error, ptr)		\
({							\
	__asm__("\tproberi (%%sr%1,%2),%3,%0\n"		\
		"\tcmpiclr,= 1,%0,%0\n"			\
		"\tldi %4,%0\n"				\
		: "=r"(error)				\
		: "i"(sr), "r"(ptr), "i"(PRIV_USER),	\
		  "i"(-EFAULT));			\
})

#define __get_user(val, ptr)				\
({							\
	register long __gu_err;				\
							\
	__gu_err = __get_user_internal(SR_USER, val, ptr);	\
	if (likely(!__gu_err))				\
		__probe_user_internal(SR_USER, __gu_err, ptr); \
	__gu_err;					\
})

#define __get_user_asm(sr, val, ldx, ptr)		\
{							\
	register long __gu_val;			\
							\
	__asm__("1: " ldx " 0(%%sr%2,%3),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \
		: "=r"(__gu_val), "+r"(__gu_err)	\
		: "i"(sr), "r"(ptr));			\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}

#define __get_kernel_nofault(dst, src, type, err_label)	\
{							\
	type __z;					\
	long __err;					\
	__err = __get_user_internal(SR_KERNEL, __z, (type *)(src)); \
	if (unlikely(__err))				\
		goto err_label;				\
	else						\
		*(type *)(dst) = __z;			\
}


#if !defined(CONFIG_64BIT)

#define __get_user_asm64(sr, val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr%2,%3),%0\n"		\
		"2: ldw 4(%%sr%2,%3),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%1") \
		: "=&r"(__gu_tmp.l), "+r"(__gu_err)	\
		: "i"(sr), "r"(ptr));			\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */


#define __put_user_internal(sr, x, ptr)			\
({							\
	ASM_EXCEPTIONTABLE_VAR(__pu_err);		\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __put_user_asm(sr, "stb", x, ptr); break; \
	case 2: __put_user_asm(sr, "sth", x, ptr); break; \
	case 4: __put_user_asm(sr, "stw", x, ptr); break; \
	case 8: STD_USER(sr, x, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__pu_err;					\
})

#define __put_user(x, ptr)				\
({							\
	__typeof__(&*(ptr)) __ptr = ptr;		\
	__typeof__(*(__ptr)) __x = (__typeof__(*(__ptr)))(x); \
	__put_user_internal(SR_USER, __x, __ptr);	\
})

#define __put_kernel_nofault(dst, src, type, err_label)	\
{							\
	type __z = *(type *)(src);			\
	long __err;					\
	__err = __put_user_internal(SR_KERNEL, __z, (type *)(dst)); \
	if (unlikely(__err))				\
		goto err_label;				\
}
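
/*
 * Illustrative sketch (not part of this header): the *_nofault accessors
 * above jump to the caller-supplied error label on a fault instead of
 * returning an error code.  A hypothetical caller, with made-up names
 * (read_kernel_long, addr, out), might look like:
 *
 *	static long read_kernel_long(const long *addr, long *out)
 *	{
 *		long v;
 *
 *		__get_kernel_nofault(&v, addr, long, fault);	// goto fault on a bad access
 *		*out = v;
 *		return 0;
 *	fault:
 *		return -EFAULT;
 *	}
 *
 * In practice such accesses normally go through the generic
 * get_kernel_nofault()/copy_from_kernel_nofault() helpers rather than
 * using these macros directly.
 */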

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * The register holding the possible EFAULT error (ASM_EXCEPTIONTABLE_REG)
 * is already listed as input and output register.
 */

#define __put_user_asm(sr, stx, x, ptr)			\
	__asm__ __volatile__ (				\
		"1: " stx " %1,0(%%sr%2,%3)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \
		: "+r"(__pu_err)			\
		: "r"(x), "i"(sr), "r"(ptr))


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(sr, __val, ptr) do {		\
	__asm__ __volatile__ (				\
		"1: stw %1,0(%%sr%2,%3)\n"		\
		"2: stw %R1,4(%%sr%2,%3)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%0") \
		: "+r"(__pu_err)			\
		: "r"(__val), "i"(sr), "r"(ptr));	\
} while (0)

#endif /* !defined(CONFIG_64BIT) */

/*
 * Complex access routines -- external declarations
 */

extern long strncpy_from_user(char *, const char __user *, long);
extern __must_check unsigned lclear_user(void __user *, unsigned long);
extern __must_check long strnlen_user(const char __user *src, long n);

/*
 * Complex access routines -- macros
 */

#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
					    unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
					    unsigned long len);

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

#endif /* __PARISC_UACCESS_H */
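
/*
 * Illustrative only: raw_copy_{to,from}_user() declared above are the
 * arch backends for the generic copy_{to,from}_user() helpers, which
 * return the number of bytes that could NOT be copied (0 on success).
 * A hypothetical caller, with made-up names (fetch_from_user, dst, src):
 *
 *	static int fetch_from_user(void *dst, const void __user *src,
 *				   unsigned long len)
 *	{
 *		if (copy_from_user(dst, src, len))	// nonzero => partial copy
 *			return -EFAULT;
 *		return 0;
 *	}
 */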