#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>
#include <linux/string.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

static inline long access_ok(int type, const void __user * addr,
		unsigned long size)
{
	return 1;
}

#define put_user __put_user
#define get_user __get_user

#if !defined(CONFIG_64BIT)
#define LDD_USER(ptr)		__get_user_asm64(ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32bit values are sufficient even on 64bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};

#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr)		\
	".section __ex_table,\"aw\"\n"					\
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t"	\
	".previous\n"

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_gp;
	unsigned long fault_space;
	unsigned long fault_addr;
};
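
/*
 * Illustrative sketch, not part of the original header: with
 * ARCH_HAS_RELATIVE_EXTABLE each field of an exception_table_entry holds
 * "target - &field", so absolute addresses are recovered by adding the
 * field's own address back in.  The helper names below are hypothetical and
 * only spell out the arithmetic implied by ASM_EXCEPTIONTABLE_ENTRY() above.
 */
static inline unsigned long
example_extable_insn_addr(const struct exception_table_entry *x)
{
	/* absolute address of the instruction allowed to fault */
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long
example_extable_fixup_addr(const struct exception_table_entry *x)
{
	/* absolute address of the corresponding fixup routine */
	return (unsigned long)&x->fixup + x->fixup;
}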

/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() functions have
 * %%sr2 hard-coded to access the requested memory.
 */
#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )

#define __get_user(x, ptr)					\
({								\
	register long __gu_err __asm__ ("r8") = 0;		\
	register long __gu_val __asm__ ("r9") = 0;		\
								\
	load_sr2();						\
	switch (sizeof(*(ptr))) {				\
	    case 1: __get_user_asm("ldb", ptr); break;		\
	    case 2: __get_user_asm("ldh", ptr); break;		\
	    case 4: __get_user_asm("ldw", ptr); break;		\
	    case 8: LDD_USER(ptr); break;			\
	    default: BUILD_BUG(); break;			\
	}							\
								\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})

#define __get_user_asm(ldx, ptr)				\
	__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t"		\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)		\
		: "r"(ptr), "1"(__gu_err)			\
		: "r1");

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(ptr)					\
	__asm__("\n1:\tldw 0(%%sr2,%2),%0"			\
		"\n2:\tldw 4(%%sr2,%2),%R0\n\t"			\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)		\
		: "r"(ptr), "1"(__gu_err)			\
		: "r1");

#endif /* !defined(CONFIG_64BIT) */


#define __put_user(x, ptr)					\
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	load_sr2();						\
	switch (sizeof(*(ptr))) {				\
	    case 1: __put_user_asm("stb", __x, ptr); break;	\
	    case 2: __put_user_asm("sth", __x, ptr); break;	\
	    case 4: __put_user_asm("stw", __x, ptr); break;	\
	    case 8: STD_USER(__x, ptr); break;			\
	    default: BUILD_BUG(); break;			\
	}							\
								\
	__pu_err;						\
})

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that "fixup_put_user_skip_[12]" are executed in the
 * context of the fault, and any registers used there must be listed
 * as clobbers. In this case only "r1" is used by the current routines.
 * r8/r9 are already listed as err/val.
 */

#define __put_user_asm(stx, x, ptr)				\
	__asm__ __volatile__ (					\
		"\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t"		\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(x), "0"(__pu_err)		\
		: "r1")


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {			\
	__asm__ __volatile__ (					\
		"\n1:\tstw %2,0(%%sr2,%1)"			\
		"\n2:\tstw %R2,4(%%sr2,%1)\n\t"			\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(__val), "0"(__pu_err)		\
		: "r1");					\
} while (0)

#endif /* !defined(CONFIG_64BIT) */
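
/*
 * Illustrative usage sketch, not part of the original header: on parisc
 * put_user()/get_user() are simply the __-prefixed variants above, and both
 * evaluate to 0 on success or -EFAULT after a fixup routine has run.  The
 * function below is a hypothetical example only.
 */
static inline int example_increment_user_int(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* expands to __get_user() here */
		return -EFAULT;

	return put_user(val + 1, uptr);	/* expands to __put_user() here */
}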

/*
 * Complex access routines -- external declarations
 */

extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
#define __copy_to_user copy_to_user
unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);
	unsigned long ret = n;

	if (likely(sz == -1 || sz >= n))
		ret = __copy_from_user(to, from, n);
	else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	if (unlikely(ret))
		memset(to + (n - ret), 0, ret);
	return ret;
}

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */