// SPDX-License-Identifier: GPL-2.0
/*
 * User-space Probes (UProbes) for s390
 *
 * Copyright IBM Corp. 2014
 * Author(s): Jan Willeke,
 */

#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>

#include <asm/facility.h>
#include <asm/kprobes.h>
#include <asm/dis.h>
#include "entry.h"

/* regs->int_code marker: no trap seen while single-stepping the xol copy */
#define UPROBE_TRAP_NR	UINT_MAX

int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	return probe_is_prohibited_opcode(auprobe->insn);
}

int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT)
		return -EINVAL;
	if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
		return -EINVAL;
	clear_thread_flag(TIF_PER_TRAP);
	auprobe->saved_per = psw_bits(regs->psw).per;
	auprobe->saved_int_code = regs->int_code;
	regs->int_code = UPROBE_TRAP_NR;
	regs->psw.addr = current->utask->xol_vaddr;
	set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
	update_cr_regs(current);
	return 0;
}

bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	if (regs->int_code != UPROBE_TRAP_NR)
		return true;
	return false;
}

/*
 * Check whether the single-stepped instruction would have triggered a
 * PER event with the tracer's current PER settings, so the event can be
 * re-reported for the original instruction address.
 */
static int check_per_event(unsigned short cause, unsigned long control,
			   struct pt_regs *regs)
{
	if (!(regs->psw.mask & PSW_MASK_PER))
		return 0;
	/* user space single step */
	if (control == 0)
		return 1;
	/* over indication for storage alteration */
	if ((control & 0x20200000) && (cause & 0x2000))
		return 1;
	if (cause & 0x8000) {
		/* all branches */
		if ((control & 0x80800000) == 0x80000000)
			return 1;
		/* branch into selected range */
		if (((control & 0x80800000) == 0x80800000) &&
		    regs->psw.addr >= current->thread.per_user.start &&
		    regs->psw.addr <= current->thread.per_user.end)
			return 1;
	}
	return 0;
}

int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	int fixup = probe_get_fixup_type(auprobe->insn);
	struct uprobe_task *utask = current->utask;

	clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
	update_cr_regs(current);
	psw_bits(regs->psw).per = auprobe->saved_per;
	regs->int_code = auprobe->saved_int_code;

	if (fixup & FIXUP_PSW_NORMAL)
		regs->psw.addr += utask->vaddr - utask->xol_vaddr;
	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (auprobe->insn[0] & 0xf0) >> 4;

		regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
	}
	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(auprobe->insn[0] >> 8);

		if (regs->psw.addr - utask->xol_vaddr == ilen)
			regs->psw.addr = utask->vaddr + ilen;
	}
	if (check_per_event(current->thread.per_event.cause,
			    current->thread.per_user.control, regs)) {
		/* fix per address */
		current->thread.per_event.address = utask->vaddr;
		/* trigger per event */
		set_thread_flag(TIF_PER_TRAP);
	}
	return 0;
}

int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
				 void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	if (!user_mode(regs))
		return NOTIFY_DONE;
	if (regs->int_code & 0x200) /* Trap during transaction */
		return NOTIFY_DONE;
	switch (val) {
	case DIE_BPT:
		if (uprobe_pre_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	case DIE_SSTEP:
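		/*
		 * Single-step of the xol copy is done; tell the common
		 * uprobes code so arch_uprobe_post_xol() runs before the
		 * task returns to user space.
		 */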
		if (uprobe_post_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	clear_thread_flag(TIF_UPROBE_SINGLESTEP);
	regs->int_code = auprobe->saved_int_code;
	regs->psw.addr = current->utask->vaddr;
	current->thread.per_event.address = current->utask->vaddr;
}

unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
						struct pt_regs *regs)
{
	unsigned long orig;

	orig = regs->gprs[14];
	regs->gprs[14] = trampoline;
	return orig;
}

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CHAIN_CALL)
		return user_stack_pointer(regs) <= ret->stack;
	else
		return user_stack_pointer(regs) < ret->stack;
}

/* Instruction Emulation */

#define EMU_ILLEGAL_OP		1
#define EMU_SPECIFICATION	2
#define EMU_ADDRESSING		3

#define emu_load_ril(ptr, output)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(*(ptr)) input;			\
	int __rc = 0;					\
							\
	if ((u64 __force)ptr & mask)			\
		__rc = EMU_SPECIFICATION;		\
	else if (get_user(input, ptr))			\
		__rc = EMU_ADDRESSING;			\
	else						\
		*(output) = input;			\
	__rc;						\
})

#define emu_store_ril(regs, ptr, input)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(ptr) __ptr = (ptr);			\
	int __rc = 0;					\
							\
	if ((u64 __force)__ptr & mask)			\
		__rc = EMU_SPECIFICATION;		\
	else if (put_user(*(input), __ptr))		\
		__rc = EMU_ADDRESSING;			\
	if (__rc == 0)					\
		sim_stor_event(regs,			\
			       (void __force *)__ptr,	\
			       mask + 1);		\
	__rc;						\
})

#define emu_cmp_ril(regs, ptr, cmp)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(*(ptr)) input;			\
	int __rc = 0;					\
							\
	if ((u64 __force)ptr & mask)			\
		__rc = EMU_SPECIFICATION;		\
	else if (get_user(input, ptr))			\
		__rc = EMU_ADDRESSING;			\
	else if (input > *(cmp))			\
		psw_bits((regs)->psw).cc = 1;		\
	else if (input < *(cmp))			\
		psw_bits((regs)->psw).cc = 2;		\
	else						\
		psw_bits((regs)->psw).cc = 0;		\
	__rc;						\
})

struct insn_ril {
	u8 opc0;
	u8 reg	: 4;
	u8 opc1	: 4;
	s32 disp;
} __packed;

union split_register {
	u64 u64;
	u32 u32[2];
	u16 u16[4];
	s64 s64;
	s32 s32[2];
	s16 s16[4];
};

/*
 * If user PER registers are set up to trace storage alterations and an
 * emulated store took place on a matching address, a user trap is generated.
 */
static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
{
	if (!(regs->psw.mask & PSW_MASK_PER))
		return;
	if (!(current->thread.per_user.control & PER_EVENT_STORE))
		return;
	if ((void *)current->thread.per_user.start > (addr + len))
		return;
	if ((void *)current->thread.per_user.end < addr)
		return;
	current->thread.per_event.address = regs->psw.addr;
	current->thread.per_event.cause = PER_EVENT_STORE >> 16;
	set_thread_flag(TIF_PER_TRAP);
}

/*
 * PC relative instructions are emulated, since parameters may not be
 * accessible from the xol area due to range limitations.
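 *
 * Only the relative-long (RIL-format) instructions selected by
 * probe_is_insn_relative_long() reach handle_insn_ril(); everything
 * else is single-stepped out of line as usual.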
 */
static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	union split_register *rx;
	struct insn_ril *insn;
	unsigned int ilen;
	void *uptr;
	int rc = 0;

	insn = (struct insn_ril *) &auprobe->insn;
	rx = (union split_register *) &regs->gprs[insn->reg];
	uptr = (void *)(regs->psw.addr + (insn->disp * 2));
	ilen = insn_length(insn->opc0);

	switch (insn->opc0) {
	case 0xc0:
		switch (insn->opc1) {
		case 0x00: /* larl */
			rx->u64 = (unsigned long)uptr;
			break;
		}
		break;
	case 0xc4:
		switch (insn->opc1) {
		case 0x02: /* llhrl */
			rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
			break;
		case 0x04: /* lghrl */
			rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
			break;
		case 0x05: /* lhrl */
			rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
			break;
		case 0x06: /* llghrl */
			rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
			break;
		case 0x08: /* lgrl */
			rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
			break;
		case 0x0c: /* lgfrl */
			rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
			break;
		case 0x0d: /* lrl */
			rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
			break;
		case 0x0e: /* llgfrl */
			rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
			break;
		case 0x07: /* sthrl */
			rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
			break;
		case 0x0b: /* stgrl */
			rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
			break;
		case 0x0f: /* strl */
			rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
			break;
		}
		break;
	case 0xc6:
		switch (insn->opc1) {
		case 0x04: /* cghrl */
			rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
			break;
		case 0x05: /* chrl */
			rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
			break;
		case 0x06: /* clghrl */
			rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
			break;
		case 0x07: /* clhrl */
			rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
			break;
		case 0x08: /* cgrl */
			rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
			break;
		case 0x0a: /* clgrl */
			rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
			break;
		case 0x0c: /* cgfrl */
			rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
			break;
		case 0x0d: /* crl */
			rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
			break;
		case 0x0e: /* clgfrl */
			rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
			break;
		case 0x0f: /* clrl */
			rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
			break;
		}
		break;
	}
	regs->psw.addr = __forward_psw(regs->psw, ilen);
	switch (rc) {
	case EMU_ILLEGAL_OP:
		regs->int_code = ilen << 16 | 0x0001;
		do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
		break;
	case EMU_SPECIFICATION:
		regs->int_code = ilen << 16 | 0x0006;
		do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
		break;
	case EMU_ADDRESSING:
		regs->int_code = ilen << 16 | 0x0005;
		do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
		break;
	}
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) ||
	    (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)) {
		regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
		do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
		return true;
	}
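	/*
	 * Relative-long instructions cannot be stepped out of line: their
	 * displacement would resolve relative to the xol slot instead of
	 * the original instruction address, so emulate them in place.
	 */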
	if (probe_is_insn_relative_long(auprobe->insn)) {
		handle_insn_ril(auprobe, regs);
		return true;
	}
	return false;
}