/linux/arch/sh/drivers/

push-switch.c
    switch_timer():
        28   struct push_switch *psw = from_timer(psw, t, debounce);
        30   schedule_work(&psw->work);
    switch_work_handler():
        35   struct push_switch *psw = container_of(work, struct push_switch, work);
        36   struct platform_device *pdev = psw->pdev;
        38   psw->state = 0;
    switch_drv_probe():
        46   struct push_switch *psw;
        49   psw = kzalloc(sizeof(struct push_switch), GFP_KERNEL);
        50   if (unlikely(!psw))
        77   INIT_WORK(&psw->work, switch_work_handler);
        78   timer_setup(&psw->debounce, switch_timer, 0);
    [all …]

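The push-switch core above implements a classic debounce: the board IRQ handler samples the switch and arms a timer, the timer callback defers to a workqueue, and the work handler reports the now-stable state. A minimal sketch of that pattern, assuming kernel context; the *_demo names are hypothetical, and the real driver additionally registers sysfs attributes and platform hooks.

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/container_of.h>

struct push_switch_demo {
	int state;			/* last sampled switch state */
	struct timer_list debounce;	/* debounce timer */
	struct work_struct work;	/* deferred reporting */
};

static void demo_work_handler(struct work_struct *work)
{
	struct push_switch_demo *psw =
		container_of(work, struct push_switch_demo, work);

	/* report the debounced event here, then clear the state */
	psw->state = 0;
}

static void demo_timer(struct timer_list *t)
{
	struct push_switch_demo *psw = from_timer(psw, t, debounce);

	schedule_work(&psw->work);	/* move out of timer context */
}

static void demo_init(struct push_switch_demo *psw)
{
	INIT_WORK(&psw->work, demo_work_handler);
	timer_setup(&psw->debounce, demo_timer, 0);
}

/* called from the board's psw_irq_handler() after reading the raw state */
static void demo_debounce(struct push_switch_demo *psw, int raw_state)
{
	psw->state = raw_state;
	if (psw->state)
		mod_timer(&psw->debounce, jiffies + 50);	/* ~50 ticks of settling time */
}
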
/linux/arch/s390/boot/

pgm_check_info.c
    print_pgm_check_info():
        38   struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area);
        56   psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck,
        57   psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri, psw->eaba);

startup.c
    startup_kernel():
        437  psw_t psw;
        569  psw.addr = __kaslr_offset + vmlinux.entry;
        570  psw.mask = PSW_KERNEL_BITS;
        571  boot_debug("Starting kernel at: 0x%016lx\n", psw.addr);
        572  __load_psw(psw);

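startup_kernel() above ends the decompressor by building a PSW and loading it, which transfers control to the relocated kernel entry point. A minimal sketch of that hand-off, assuming the s390 boot environment; demo_jump_to_kernel() is a hypothetical wrapper around the psw_t, PSW_KERNEL_BITS and __load_psw() primitives shown in the excerpt.

static void demo_jump_to_kernel(unsigned long kaslr_offset, unsigned long entry)
{
	psw_t psw;

	psw.addr = kaslr_offset + entry;	/* randomized kernel entry address */
	psw.mask = PSW_KERNEL_BITS;		/* kernel-mode PSW mask */
	__load_psw(psw);			/* loads the PSW; control does not come back */
}
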
/linux/arch/s390/kernel/

dumpstack.c
    show_registers():
        156  struct psw_bits *psw = &psw_bits(regs->psw);
        160  printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
        162  pr_cont(" (%pSR)", (void *)regs->psw.addr);
        165  "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
        166  psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm);
        167  pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);

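show_registers() above views the PSW through the psw_bits() accessor to print its individual fields. A small sketch of that pattern; demo_print_psw() is a hypothetical helper and the field names are taken from the excerpts.

static void demo_print_psw(struct pt_regs *regs)
{
	struct psw_bits *psw = &psw_bits(regs->psw);

	pr_info("PSW addr=%px mask=%px R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
		"P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n",
		(void *)regs->psw.addr, (void *)regs->psw.mask,
		psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck,
		psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri, psw->eaba);
}
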
uprobes.c
    arch_uprobe_pre_xol():
        30   if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT)
        32   if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
        35   auprobe->saved_per = psw_bits(regs->psw).per;
        38   regs->psw.addr = current->utask->xol_vaddr;
    check_per_event():
        56   if (!(regs->psw.mask & PSW_MASK_PER))
        70   regs->psw.addr >= current->thread.per_user.start &&
        71   regs->psw.addr <= current->thread.per_user.end)
    arch_uprobe_post_xol():
        84   psw_bits(regs->psw).per = auprobe->saved_per;
        88   regs->psw.addr += utask->vaddr - utask->xol_vaddr;
        97   if (regs->psw.addr - utask->xol_vaddr == ilen)
    [all …]

signal.c
    save_sigregs():
        128  user_sregs.regs.psw.mask = PSW_USER_BITS |
        129  (regs->psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
        130  user_sregs.regs.psw.addr = regs->psw.addr;
    restore_sigregs():
        150  if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))
        154  regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
        155  (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
        157  if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
        158  regs->psw.mask = PSW_ASC_PRIMARY |
        159  (regs->psw.mask & ~PSW_MASK_ASC);
        161  if (regs->psw.mask & PSW_MASK_EA)
    [all …]

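restore_sigregs() above shows the sigreturn sanitization of the PSW mask: only the user-modifiable bits are taken from the saved user context, and a home-space ASC is downgraded to primary. A compact sketch of that logic, assuming the PSW_* constants from the excerpts; demo_restore_user_psw_mask() is a hypothetical name.

static void demo_restore_user_psw_mask(struct pt_regs *regs, unsigned long user_mask)
{
	/* keep the kernel-owned bits, take only the user-modifiable ones */
	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
			 (user_mask & (PSW_MASK_USER | PSW_MASK_RI));

	/* user space must not run with the home address space */
	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
		regs->psw.mask = PSW_ASC_PRIMARY |
				 (regs->psw.mask & ~PSW_MASK_ASC);
}
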
compat_signal.c
    save_sigregs32():
        73   user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
        74   user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
        75   user_sregs.regs.psw.mask |= PSW32_USER_BITS;
        76   user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
        77   (__u32)(regs->psw.mask & PSW_MASK_BA);
    restore_sigregs32():
        99   if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
        103  regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
        104  (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
        105  (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
        106  (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
    [all …]

early.c
    __do_early_pgm_check():
        187  ip = __rewind_psw(regs->psw, regs->int_code >> 16);
        203  regs->int_code & 0xffff, regs->psw.mask, regs->psw.addr);
    setup_lowcore_early():
        211  psw_t psw;
        213  psw.addr = (unsigned long)early_pgm_check_handler;
        214  psw.mask = PSW_KERNEL_BITS;
        215  lc->program_new_psw = psw;

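__do_early_pgm_check() above recovers the faulting instruction address: on a program check the PSW already points past the instruction, and the excerpt's int_code >> 16 yields the instruction length. A simplified sketch of that step (the real __rewind_psw() also handles addressing-mode wrap-around, omitted here); demo_faulting_ip() is a hypothetical name.

static unsigned long demo_faulting_ip(struct pt_regs *regs)
{
	unsigned long ilen = regs->int_code >> 16;	/* instruction length in bytes */

	return regs->psw.addr - ilen;			/* step back to the faulting instruction */
}
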
traps.c
    get_trap_ip():
        44   address = regs->psw.addr;
    space_switch_exception():
        243  regs->psw.mask |= PSW_ASC_HOME;
    monitor_event_exception():
        253  switch (report_bug(regs->psw.addr - (regs->int_code >> 16), regs)) {
    __do_pgm_check():
        366  __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
        372  if (!irqs_disabled_flags(regs->psw.mask))
        374  __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);

ptrace.c
    update_cr_regs():
        112  regs->psw.mask &= ~PSW_MASK_PER;
        115  regs->psw.mask |= PSW_MASK_PER;
    __peek_user():
        211  tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
        212  if (addr == offsetof(struct user, regs.psw.mask)) {
    __poke_user():
        335  if (addr == offsetof(struct user, regs.psw.mask)) {
        356  *(addr_t *)((addr_t) &regs->psw + addr) = data;
    __peek_user_compat():
        575  if (addr == offsetof(struct compat_user, regs.psw.mask)) {
        577  tmp = (__u32)(regs->psw.mask >> 32);
        580  } else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
        582  tmp = (__u32) regs->psw.addr |
    [all …]

relocate_kernel.S
    63   la   %r4,load_psw-.base(%r13)  # load psw-address into the register
    64   o    %r3,4(%r4)                # or load address into psw
    66   mvc  0(8,%r0),0(%r4)           # copy psw to absolute address 0

perf_regs.c
    perf_reg_value():
        27   return regs->psw.mask;
        29   return regs->psw.addr;

unwind_bc.c
    is_final_pt_regs():
        49   READ_ONCE_NOCHECK(regs->psw.mask) & PSW_MASK_PSTATE;
    unwind_next_frame():
        94   ip = READ_ONCE_NOCHECK(regs->psw.addr);
    __unwind_start():
        144  ip = regs->psw.addr;

syscall.c
    do_syscall():
        98   regs->psw.addr = current->restart_block.arch_data;
    __do_syscall():
        127  regs->psw = get_lowcore()->svc_old_psw;

/linux/arch/sh/boards/mach-highlander/

psw.c
    psw_irq_handler():
        19   struct push_switch *psw = platform_get_drvdata(pdev);
        27   if (psw->state) {
        35   psw->state = !!(mask & (1 << psw_info->bit));
        36   if (psw->state) /* debounce */
        37   mod_timer(&psw->debounce, jiffies + 50);

/linux/arch/s390/mm/

extable.c
    ex_handler_fixup():
        26   regs->psw.addr = extable_fixup(ex);
    ex_handler_ua_fault():
        35   regs->psw.addr = extable_fixup(ex);
    ex_handler_ua_load_reg():
        49   regs->psw.addr = extable_fixup(ex);
    ex_handler_zeropad():
        65   regs->psw.addr = extable_fixup(ex);
    ex_handler_fpc():
        72   regs->psw.addr = extable_fixup(ex);

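Every handler above resolves a fault the same way: the PSW address is rewritten to the fixup target so execution resumes past the faulting access. A minimal sketch of such a handler, assuming the s390 exception-table types from the excerpts; ex_handler_demo() is a hypothetical name.

static bool ex_handler_demo(const struct exception_table_entry *ex,
			    struct pt_regs *regs)
{
	regs->psw.addr = extable_fixup(ex);	/* resume at the fixup target */
	return true;				/* fault handled */
}
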
/linux/arch/sh/boards/mach-landisk/

psw.c
    psw_irq_handler():
        20   struct push_switch *psw = platform_get_drvdata(pdev);
        28   if (psw->state) {
        35   psw->state = 1;
        36   mod_timer(&psw->debounce, jiffies + 50);

/linux/arch/s390/include/asm/

ftrace.h
    ftrace_regs_set_instruction_pointer():
        71   arch_ftrace_regs(fregs)->regs.psw.addr = ip;
    88   (_regs)->psw.mask = 0; \
    89   (_regs)->psw.addr = arch_ftrace_regs(fregs)->regs.psw.addr; \

perf_event.h
    52   (regs)->psw.mask = 0; \
    53   (regs)->psw.addr = (__ip); \

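Both headers above fabricate a minimal pt_regs for tracing and profiling, carrying only the instruction address in psw.addr with a zeroed psw.mask. A sketch of that shared pattern; demo_fill_trace_regs() is a hypothetical helper.

static inline void demo_fill_trace_regs(struct pt_regs *regs, unsigned long ip)
{
	regs->psw.mask = 0;	/* no state bits are needed for a sample */
	regs->psw.addr = ip;	/* the sampled/traced instruction address */
}
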
/linux/drivers/gpu/drm/nouveau/nvkm/engine/sw/

nv10.c
    nv10_sw_new():
        65   nv10_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
        67   return nvkm_sw_new_(&nv10_sw, device, type, inst, psw);

nv04.c
    nv04_sw_new():
        136  nv04_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
        138  return nvkm_sw_new_(&nv04_sw, device, type, inst, psw);

base.c
    nvkm_sw_new_():
        99   enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
        103  if (!(sw = *psw = kzalloc(sizeof(*sw), GFP_KERNEL)))

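Here psw is simply the out parameter of the nouveau software-engine constructor, unrelated to the s390 program status word. A sketch of the allocate-and-return-through-out-parameter idiom used by nvkm_sw_new_() above; demo_sw_new_() is a hypothetical name and only the parameters shown in the excerpts are used.

static int demo_sw_new_(struct nvkm_device *device, enum nvkm_subdev_type type,
			int inst, struct nvkm_sw **psw)
{
	struct nvkm_sw *sw;

	/* allocate the object and hand it back through the out parameter */
	if (!(sw = *psw = kzalloc(sizeof(*sw), GFP_KERNEL)))
		return -ENOMEM;

	/* ...engine initialization using device/type/inst would follow... */
	return 0;
}
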
/linux/arch/s390/kvm/

gaccess.c
    get_vcpu_asce():
        404  struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
        406  if (!psw.dat) {
        412  if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
        413  psw.as = PSW_BITS_AS_PRIMARY;
        415  switch (psw.as) {
    low_address_protection_enabled():
        634  psw_t *psw = &vcpu->arch.sie_block->gpsw;
        638  if (psw_bits(*psw).dat && asce.p)
    fetch_prot_override_applicable():
        675  psw_t *psw = &vcpu->arch.sie_block->gpsw;
        683  override = override && !(psw_bits(*psw).dat && asce.p);
    guest_range_to_gpas():
        785  psw_t *psw = &vcpu->arch.sie_block->gpsw;
    [all …]

gaccess.h
    _kvm_s390_logical_to_effective():
        63   static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
        66   if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
        68   if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)

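The truncated helper above branches on the guest PSW's extended addressing mode. A hedged sketch of how such a helper plausibly completes, assuming the standard s390 behaviour of passing 64-bit addresses through and masking 31-bit and 24-bit effective addresses (the untruncated kernel source is authoritative); demo_logical_to_effective() is a hypothetical name.

static inline unsigned long demo_logical_to_effective(psw_t *psw, unsigned long ga)
{
	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
		return ga;				/* full 64-bit effective address */
	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
		return ga & ((1UL << 31) - 1);		/* 31-bit addressing mode */
	return ga & ((1UL << 24) - 1);			/* 24-bit addressing mode */
}
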
/linux/arch/s390/include/uapi/asm/

ptrace.h
    281  psw_t psw;    (member)
    293  psw_t psw;    (member)
    439  psw_t psw;    (member)

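The uapi structures above embed the PSW as a psw_t. For reference, a minimal sketch of that layout, pairing the mask with the instruction address (the real uapi definition additionally specifies 8-byte alignment); psw_demo_t is a hypothetical name.

typedef struct {
	unsigned long mask;	/* control and state bits (addressing mode, AS, CC, PM, ...) */
	unsigned long addr;	/* instruction address */
} psw_demo_t;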