/linux/drivers/gpu/drm/nouveau/nvkm/subdev/fault/

base.c
    29  struct nvkm_fault *fault = container_of(event, typeof(*fault), event);  in nvkm_fault_ntfy_fini() local
    30  fault->func->buffer.intr(fault->buffer[index], false);  in nvkm_fault_ntfy_fini()
    36  struct nvkm_fault *fault = container_of(event, typeof(*fault), event);  in nvkm_fault_ntfy_init() local
    37  fault->func->buffer.intr(fault->buffer[index], true);  in nvkm_fault_ntfy_init()
    49  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_intr() local
    50  return fault->func->intr(fault);  in nvkm_fault_intr()
    56  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_fini() local
    57  if (fault->func->fini)  in nvkm_fault_fini()
    58  fault->func->fini(fault);  in nvkm_fault_fini()
    65  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_init() local
    [all …]
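The base.c hits above are the kernel's container_of() idiom: the notify callbacks receive only the embedded nvkm_event and recover the enclosing nvkm_fault to reach the per-buffer interrupt control. A minimal standalone sketch of the idiom follows; the struct layout and the fault_ntfy_fini() name are simplified stand-ins, not the real nouveau definitions (the kernel also uses typeof(*fault) where plain C needs the explicit type):

    #include <stddef.h>
    #include <stdio.h>

    /* Same pointer arithmetic as the kernel macro. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct nvkm_event { int dummy; };

    struct nvkm_fault {
            int id;
            struct nvkm_event event;   /* embedded, as in the real struct */
    };

    /* Callback receives only the embedded event; recover the parent. */
    static void fault_ntfy_fini(struct nvkm_event *event)
    {
            struct nvkm_fault *fault =
                    container_of(event, struct nvkm_fault, event);
            printf("disable intr for fault unit %d\n", fault->id);
    }

    int main(void)
    {
            struct nvkm_fault fault = { .id = 0 };
            fault_ntfy_fini(&fault.event);  /* "disable intr for fault unit 0" */
            return 0;
    }
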
gv100.c
    33  struct nvkm_fault *fault = container_of(work, typeof(*fault), nrpfb_work);  in gv100_fault_buffer_process() local
    34  struct nvkm_fault_buffer *buffer = fault->buffer[0];  in gv100_fault_buffer_process()
    35  struct nvkm_device *device = fault->subdev.device;  in gv100_fault_buffer_process()
    44  const u32 base = get * buffer->fault->func->buffer.entry_size;  in gv100_fault_buffer_process()
    78  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_intr()
    89  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_fini()
    97  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_init()
    109  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_info()
    122  struct nvkm_fault *fault = container_of(ntfy, typeof(*fault), nrpfb);  in gv100_fault_ntfy_nrpfb() local
    124  schedule_work(&fault->nrpfb_work);  in gv100_fault_ntfy_nrpfb()
    [all …]
tu102.c
    38  nvkm_event_ntfy(&buffer->fault->event, buffer->id, NVKM_FAULT_BUFFER_EVENT_PENDING);  in tu102_fault_buffer_notify()
    54  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_fini()
    63  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_init()
    75  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_info()
    88  struct nvkm_fault *fault = container_of(inth, typeof(*fault), info_fault);  in tu102_fault_info_fault() local
    89  struct nvkm_subdev *subdev = &fault->subdev;  in tu102_fault_info_fault()
    116  tu102_fault_fini(struct nvkm_fault *fault)  in tu102_fault_fini() argument
    118  nvkm_event_ntfy_block(&fault->nrpfb);  in tu102_fault_fini()
    119  flush_work(&fault->nrpfb_work);  in tu102_fault_fini()
    121  if (fault->buffer[0])  in tu102_fault_fini()
    [all …]
Kbuild
    2  nvkm-y += nvkm/subdev/fault/base.o
    3  nvkm-y += nvkm/subdev/fault/user.o
    4  nvkm-y += nvkm/subdev/fault/gp100.o
    5  nvkm-y += nvkm/subdev/fault/gp10b.o
    6  nvkm-y += nvkm/subdev/fault/gv100.o
    7  nvkm-y += nvkm/subdev/fault/tu102.o
/linux/drivers/infiniband/hw/hfi1/

fault.c
    69  if (!ibd->fault->n_rxfaults[i] && !ibd->fault->n_txfaults[i])  in _fault_stats_seq_show()
    74  (unsigned long long)ibd->fault->n_rxfaults[i],  in _fault_stats_seq_show()
    75  (unsigned long long)ibd->fault->n_txfaults[i]);  in _fault_stats_seq_show()
    96  struct fault *fault = file->private_data;  in fault_opcodes_write() local
    135  bitmap_zero(fault->opcodes, sizeof(fault->opcodes) *  in fault_opcodes_write()
    145  clear_bit(i, fault->opcodes);  in fault_opcodes_write()
    147  set_bit(i, fault->opcodes);  in fault_opcodes_write()
    166  struct fault *fault = file->private_data;  in fault_opcodes_read() local
    167  size_t bitsize = sizeof(fault->opcodes) * BITS_PER_BYTE;  in fault_opcodes_read()
    172  bit = find_first_bit(fault->opcodes, bitsize);  in fault_opcodes_read()
    [all …]
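fault_opcodes_write() and fault_opcodes_read() above keep a bitmap of opcodes selected for fault injection, toggled with set_bit()/clear_bit() and scanned with find_first_bit(). A hedged userspace sketch of the same bitmap bookkeeping, with a fixed-size array and helper names of my own standing in for the kernel bitops:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_OPCODES   256
    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define BITMAP_WORDS  ((NUM_OPCODES + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static unsigned long opcodes[BITMAP_WORDS];  /* stands in for fault->opcodes */

    static void set_bit_(unsigned n)
    {
            opcodes[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
    }

    static void clear_bit_(unsigned n)
    {
            opcodes[n / BITS_PER_LONG] &= ~(1UL << (n % BITS_PER_LONG));
    }

    static bool test_bit_(unsigned n)
    {
            return (opcodes[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
            set_bit_(0x64);    /* fault-inject opcode 0x64 (hypothetical value) */
            set_bit_(0x0a);
            clear_bit_(0x0a);  /* a "-opcode" token in the write syntax removes it */

            for (unsigned i = 0; i < NUM_OPCODES; i++)
                    if (test_bit_(i))
                            printf("faulting opcode 0x%x\n", i);
            return 0;
    }
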
/linux/arch/x86/kvm/mmu/

paging_tmpl.h
    92  struct x86_exception fault;  member
    249  ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);  in FNAME()
    380  nested_access, &walker->fault);  in FNAME()
    448  real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);  in FNAME()
    481  walker->fault.vector = PF_VECTOR;  in FNAME()
    482  walker->fault.error_code_valid = true;  in FNAME()
    483  walker->fault.error_code = errcode;  in FNAME()
    500  walker->fault.exit_qualification = 0;  in FNAME()
    503  walker->fault.exit_qualification |= EPT_VIOLATION_ACC_WRITE;  in FNAME()
    505  walker->fault.exit_qualification |= EPT_VIOLATION_ACC_READ;  in FNAME()
    [all …]
mmu_internal.h
    303  int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
    343  struct kvm_page_fault *fault)  in kvm_mmu_prepare_memory_fault_exit() argument
    345  kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,  in kvm_mmu_prepare_memory_fault_exit()
    346  PAGE_SIZE, fault->write, fault->exec,  in kvm_mmu_prepare_memory_fault_exit()
    347  fault->is_private);  in kvm_mmu_prepare_memory_fault_exit()
    354  struct kvm_page_fault fault = {  in kvm_mmu_do_page_fault() local
    382  fault.gfn = gpa_to_gfn(fault.addr) & ~kvm_gfn_direct_bits(vcpu->kvm);  in kvm_mmu_do_page_fault()
    383  fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);  in kvm_mmu_do_page_fault()
    390  if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp)  in kvm_mmu_do_page_fault()
    391  r = kvm_tdp_page_fault(vcpu, &fault);  in kvm_mmu_do_page_fault()
    [all …]
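kvm_mmu_do_page_fault() builds a struct kvm_page_fault on the stack, derives the gfn (and memslot) from the faulting address, and dispatches to the TDP handler; paging_tmpl.h fills walker->fault with the vector and error code when a guest walk fails. A compact sketch of that describe-then-dispatch shape; the types and the tdp_page_fault()/do_page_fault() helpers here are invented minimal stand-ins, not KVM's real ones:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Minimal stand-in for struct kvm_page_fault; the real one carries
     * much more state (slot, hugepage level, mmu_seq, ...). */
    struct kvm_page_fault {
            uint64_t addr;   /* faulting guest-physical address */
            uint64_t gfn;    /* derived guest frame number */
            bool     write;
            bool     exec;
    };

    static int tdp_page_fault(struct kvm_page_fault *fault)
    {
            printf("map gfn 0x%llx (%s)\n",
                   (unsigned long long)fault->gfn,
                   fault->write ? "write" : "read");
            return 0;
    }

    static int do_page_fault(uint64_t gpa, bool write)
    {
            struct kvm_page_fault fault = {
                    .addr  = gpa,
                    .write = write,
            };
            fault.gfn = fault.addr >> PAGE_SHIFT;  /* gpa_to_gfn() */
            return tdp_page_fault(&fault);
    }

    int main(void)
    {
            return do_page_fault(0x1234567000ULL, true);
    }
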
/linux/arch/mips/kernel/

unaligned.c
    175  goto fault;  in emulate_load_store_insn()
    184  goto fault;  in emulate_load_store_insn()
    193  goto fault;  in emulate_load_store_insn()
    213  goto fault;  in emulate_load_store_insn()
    222  goto fault;  in emulate_load_store_insn()
    243  goto fault;  in emulate_load_store_insn()
    252  goto fault;  in emulate_load_store_insn()
    261  goto fault;  in emulate_load_store_insn()
    272  goto fault;  in emulate_load_store_insn()
    281  goto fault;  in emulate_load_store_insn()
    [all …]
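Every failed user access in emulate_load_store_insn() jumps to one shared fault: label, the centralized error exit typical of emulation code (the loongson64 cop2-ex.c entry below is the same pattern). A runnable sketch of the shape, with pretend_get_user() standing in for the exception-table-backed accessors:

    #include <stdio.h>

    /* Pretend user-memory accessor: nonzero on a bad address.  In the
     * kernel this is the exception-table machinery behind the load/store
     * helpers. */
    static int pretend_get_user(unsigned *val, const unsigned *addr)
    {
            if (!addr)
                    return -1;
            *val = *addr;
            return 0;
    }

    static int emulate_load(const unsigned *uaddr)
    {
            unsigned v;

            if (pretend_get_user(&v, uaddr))
                    goto fault;              /* every failed access jumps here */
            printf("loaded %u\n", v);
            return 0;

    fault:
            /* one place to deliver the signal / fix up state */
            fprintf(stderr, "address error\n");
            return -1;
    }

    int main(void)
    {
            unsigned word = 42;

            emulate_load(&word);  /* ok */
            emulate_load(NULL);   /* takes the fault path */
            return 0;
    }
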
/linux/arch/nios2/kernel/

misaligned.c
    72  unsigned int fault;  in handle_unaligned_c() local
    85  fault = 0;  in handle_unaligned_c()
    98  fault |= __get_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
    99  fault |= __get_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
    111  fault |= __put_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
    112  fault |= __put_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
    116  fault |= __get_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
    117  fault |= __get_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
    133  fault |= __put_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
    134  fault |= __put_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
    [all …]
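handle_unaligned_c() assembles an unaligned access one byte at a time and OR-accumulates the accessor return codes, so a single test catches a failure in any step. A hedged sketch with get_user_u8() standing in for __get_user():

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for __get_user(): 0 on success, nonzero on a bad address. */
    static int get_user_u8(uint8_t *out, const uint8_t *addr)
    {
            if (!addr)
                    return -14;  /* -EFAULT-ish */
            *out = *addr;
            return 0;
    }

    static int load_unaligned_u16(uint16_t *out, const uint8_t *addr)
    {
            uint8_t d0 = 0, d1 = 0;
            int fault = 0;

            /* accumulate errors instead of checking each access */
            fault |= get_user_u8(&d0, addr + 0);
            fault |= get_user_u8(&d1, addr + 1);
            if (fault)
                    return fault;  /* any failed byte trips this once */

            *out = (uint16_t)(d0 | (uint16_t)d1 << 8);  /* little-endian */
            return 0;
    }

    int main(void)
    {
            uint8_t buf[2] = { 0x34, 0x12 };
            uint16_t v;

            if (!load_unaligned_u16(&v, buf))
                    printf("0x%04x\n", v);  /* 0x1234 */
            return 0;
    }
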
/linux/drivers/iommu/

io-pgfault.c
    47  if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))  in __iopf_free_group()
    64  struct iommu_fault *fault)  in report_partial_fault() argument
    72  iopf->fault = *fault;  in report_partial_fault()
    98  group->last_fault.fault = evt->fault;  in iopf_group_alloc()
    106  if (iopf->fault.prm.grpid == evt->fault.prm.grpid)  in iopf_group_alloc()
    121  struct iommu_fault *fault = &evt->fault;  in find_fault_handler() local
    124  if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {  in find_fault_handler()
    126  fault->prm.pasid, 0);  in find_fault_handler()
    161  struct iommu_fault *fault = &evt->fault;  in iopf_error_response() local
    163  .pasid = fault->prm.pasid,  in iopf_error_response()
    [all …]
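iopf_group_alloc() collects partial page-request faults that share a group ID until one arrives flagged LAST_PAGE, then hands the assembled group to the fault handler. A toy sketch of that accumulate-until-last grouping; the iopf struct and list handling here are deliberately simplified, not the driver's real data structures:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy page-request record; the real struct carries a full PRM. */
    struct iopf {
            unsigned grpid;
            struct iopf *next;
    };

    static struct iopf *partial;  /* faults waiting for their group's last entry */

    static void handle_fault(unsigned grpid, bool last)
    {
            if (!last) {
                    /* stash the partial fault until the group completes */
                    struct iopf *f = malloc(sizeof(*f));

                    if (!f)
                            return;
                    f->grpid = grpid;
                    f->next = partial;
                    partial = f;
                    return;
            }

            /* last fault of the group: collect everything with the same grpid */
            printf("group %u:", grpid);
            for (struct iopf **p = &partial; *p; ) {
                    if ((*p)->grpid == grpid) {
                            struct iopf *f = *p;

                            *p = f->next;
                            printf(" partial");
                            free(f);
                    } else {
                            p = &(*p)->next;
                    }
            }
            printf(" last -> dispatch to handler\n");
    }

    int main(void)
    {
            handle_fault(7, false);
            handle_fault(7, false);
            handle_fault(7, true);  /* "group 7: partial partial last -> ..." */
            return 0;
    }
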
/linux/arch/x86/kvm/svm/

svm_ops.h
    12  _ASM_EXTABLE(1b, %l[fault]) \
    13  ::: clobber : fault); \
    15  fault: \
    22  _ASM_EXTABLE(1b, %l[fault]) \
    23  :: op1 : clobber : fault); \
    25  fault: \
    32  _ASM_EXTABLE(1b, %l[fault]) \
    33  :: op1, op2 : clobber : fault); \
    35  fault: \
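These macros wrap a privileged instruction in asm goto and register its address with _ASM_EXTABLE, so a fault during execution resumes at the local fault: label instead of oopsing. A hedged, runnable x86-64 sketch of the same control-flow shape, faking the fault with a conditional branch rather than a real exception-table fixup (requires GCC/Clang asm goto support):

    #include <stdio.h>

    /* Shape of the kernel pattern: run an instruction; if it faults, the
     * exception-table fixup redirects control to the asm-goto label.  Here
     * a plain conditional jump stands in for the fixup so the sketch runs
     * anywhere. */
    static int try_op(int should_fault)
    {
            asm goto("test %0, %0\n\t"
                     "jnz %l[fault]"      /* stands in for the extable fixup */
                     : /* no outputs allowed in plain asm goto */
                     : "r"(should_fault)
                     : "cc"
                     : fault);
            return 0;                     /* instruction "succeeded" */
    fault:
            return -1;                    /* the recovery path */
    }

    int main(void)
    {
            printf("%d\n", try_op(0));    /* 0  */
            printf("%d\n", try_op(1));    /* -1 */
            return 0;
    }
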
/linux/arch/mips/loongson64/

cop2-ex.c
    76  goto fault;  in loongson_cu2_call()
    80  goto fault;  in loongson_cu2_call()
    92  goto fault;  in loongson_cu2_call()
    96  goto fault;  in loongson_cu2_call()
    118  goto fault;  in loongson_cu2_call()
    123  goto fault;  in loongson_cu2_call()
    135  goto fault;  in loongson_cu2_call()
    141  goto fault;  in loongson_cu2_call()
    165  goto fault;  in loongson_cu2_call()
    176  goto fault;  in loongson_cu2_call()
    [all …]
/linux/arch/powerpc/lib/

checksum_32.S
    109  EX_TABLE(8 ## n ## 0b, fault); \
    110  EX_TABLE(8 ## n ## 1b, fault); \
    111  EX_TABLE(8 ## n ## 2b, fault); \
    112  EX_TABLE(8 ## n ## 3b, fault); \
    113  EX_TABLE(8 ## n ## 4b, fault); \
    114  EX_TABLE(8 ## n ## 5b, fault); \
    115  EX_TABLE(8 ## n ## 6b, fault); \
    116  EX_TABLE(8 ## n ## 7b, fault);
    240  fault:  label
    244  EX_TABLE(70b, fault);
    [all …]
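The EX_TABLE() invocations pair each user-access instruction in the unrolled checksum loop with the common fault: label. Conceptually the table is a list of (faulting instruction, fixup) pairs that the trap handler searches. A hedged C sketch of that lookup; the addresses are made up for the demo, and the real powerpc entries store relative offsets rather than absolute addresses:

    #include <stdio.h>

    /* One exception-table entry: if the trap hit 'insn', resume at 'fixup'. */
    struct exception_table_entry {
            unsigned long insn;
            unsigned long fixup;
    };

    /* Hypothetical addresses standing in for the 8##n##0b..8##n##7b labels,
     * all fixed up to the same fault: handler at 0x2000. */
    static const struct exception_table_entry extable[] = {
            { 0x1000, 0x2000 },
            { 0x1004, 0x2000 },
            { 0x1008, 0x2000 },
    };

    static unsigned long search_extable(unsigned long faulting_pc)
    {
            for (unsigned i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
                    if (extable[i].insn == faulting_pc)
                            return extable[i].fixup;
            return 0;  /* no fixup: a genuine kernel bug, would oops */
    }

    int main(void)
    {
            printf("fixup for 0x1004: 0x%lx\n", search_extable(0x1004));
            return 0;
    }
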
/linux/arch/m68k/mm/

fault.c
    75  vm_fault_t fault;  in do_page_fault() local
    141  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    142  pr_debug("handle_mm_fault returns %x\n", fault);  in do_page_fault()
    144  if (fault_signal_pending(fault, regs)) {  in do_page_fault()
    151  if (fault & VM_FAULT_COMPLETED)  in do_page_fault()
    154  if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
    155  if (fault & VM_FAULT_OOM)  in do_page_fault()
    157  else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
    159  else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
    164  if (fault & VM_FAULT_RETRY) {  in do_page_fault()
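This is the dispatch ladder nearly every architecture repeats after handle_mm_fault(): bail early on a pending signal or VM_FAULT_COMPLETED, map VM_FAULT_OOM/SIGSEGV/SIGBUS to the matching outcome, and loop on VM_FAULT_RETRY. The hexagon, alpha, parisc, nios2, microblaze, csky, openrisc, and sh entries below are variations on the same shape. A condensed standalone sketch, with flag values invented for the demo (the real vm_fault_t bits live in linux/mm_types.h):

    #include <stdio.h>

    /* Invented bit values, for illustration only. */
    #define VM_FAULT_OOM       0x0001
    #define VM_FAULT_SIGBUS    0x0002
    #define VM_FAULT_SIGSEGV   0x0004
    #define VM_FAULT_RETRY     0x0008
    #define VM_FAULT_COMPLETED 0x0010
    #define VM_FAULT_ERROR     (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)

    static const char *dispatch(unsigned fault)
    {
            if (fault & VM_FAULT_COMPLETED)
                    return "done (mmap lock already dropped)";
            if (fault & VM_FAULT_ERROR) {
                    if (fault & VM_FAULT_OOM)
                            return "out_of_memory";
                    if (fault & VM_FAULT_SIGSEGV)
                            return "force SIGSEGV";
                    return "force SIGBUS";
            }
            if (fault & VM_FAULT_RETRY)
                    return "retry with mmap lock reacquired";
            return "success";
    }

    int main(void)
    {
            printf("%s\n", dispatch(VM_FAULT_RETRY));
            printf("%s\n", dispatch(VM_FAULT_OOM));
            return 0;
    }
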
/linux/arch/riscv/mm/

fault.c
    117  static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)  in mm_fault_error() argument
    124  if (fault & VM_FAULT_OOM) {  in mm_fault_error()
    131  } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {  in mm_fault_error()
    135  } else if (fault & VM_FAULT_SIGSEGV) {  in mm_fault_error()
    286  vm_fault_t fault;  in handle_page_fault() local
    362  fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);  in handle_page_fault()
    363  if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))  in handle_page_fault()
    366  if (!(fault & VM_FAULT_RETRY)) {  in handle_page_fault()
    371  if (fault & VM_FAULT_MAJOR)  in handle_page_fault()
    374  if (fault_signal_pending(fault, regs)) {  in handle_page_fault()
    [all …]
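riscv (and s390 below) add the newer per-VMA-lock fast path: first call handle_mm_fault() with FAULT_FLAG_VMA_LOCK, and only fall back to the full mmap lock when that attempt returns VM_FAULT_RETRY. A hedged sketch of the two-attempt flow with both locking modes stubbed out:

    #include <stdio.h>

    #define VM_FAULT_RETRY     0x1
    #define VM_FAULT_COMPLETED 0x2

    /* Stubs for the two locking modes; the real code calls
     * handle_mm_fault() with and without FAULT_FLAG_VMA_LOCK. */
    static unsigned fault_under_vma_lock(unsigned long addr)
    {
            (void)addr;
            return VM_FAULT_RETRY;  /* pretend the fast path could not finish */
    }

    static unsigned fault_under_mmap_lock(unsigned long addr)
    {
            (void)addr;
            return VM_FAULT_COMPLETED;
    }

    static void handle_page_fault(unsigned long addr)
    {
            unsigned fault = fault_under_vma_lock(addr);

            if (!(fault & VM_FAULT_RETRY)) {
                    printf("resolved under per-VMA lock (0x%x)\n", fault);
                    return;
            }
            /* fall back: take the mmap read lock and retry the fault */
            fault = fault_under_mmap_lock(addr);
            printf("resolved under mmap lock (0x%x)\n", fault);
    }

    int main(void)
    {
            handle_page_fault(0xdeadb000UL);
            return 0;
    }
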
/linux/arch/hexagon/mm/

vm_fault.c
    43  vm_fault_t fault;  in do_page_fault() local
    84  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    86  if (fault_signal_pending(fault, regs)) {  in do_page_fault()
    93  if (fault & VM_FAULT_COMPLETED)  in do_page_fault()
    97  if (likely(!(fault & VM_FAULT_ERROR))) {  in do_page_fault()
    98  if (fault & VM_FAULT_RETRY) {  in do_page_fault()
    113  if (fault & VM_FAULT_OOM) {  in do_page_fault()
    121  if (fault & VM_FAULT_SIGBUS) {  in do_page_fault()
/linux/arch/alpha/mm/

fault.c
    92  vm_fault_t fault;  in do_page_fault() local
    145  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    147  if (fault_signal_pending(fault, regs)) {  in do_page_fault()
    154  if (fault & VM_FAULT_COMPLETED)  in do_page_fault()
    157  if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
    158  if (fault & VM_FAULT_OOM)  in do_page_fault()
    160  else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
    162  else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
    167  if (fault & VM_FAULT_RETRY) {  in do_page_fault()
/linux/arch/parisc/mm/

fault.c
    274  vm_fault_t fault = 0;  in do_page_fault() local
    318  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    320  if (fault_signal_pending(fault, regs)) {  in do_page_fault()
    329  if (fault & VM_FAULT_COMPLETED)  in do_page_fault()
    332  if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
    338  if (fault & VM_FAULT_OOM)  in do_page_fault()
    340  else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
    342  else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|  in do_page_fault()
    347  if (fault & VM_FAULT_RETRY) {  in do_page_fault()
    405  if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {  in do_page_fault()
    [all …]
/linux/arch/nios2/mm/

fault.c
    50  vm_fault_t fault;  in do_page_fault() local
    124  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    126  if (fault_signal_pending(fault, regs)) {  in do_page_fault()
    133  if (fault & VM_FAULT_COMPLETED)  in do_page_fault()
    136  if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
    137  if (fault & VM_FAULT_OOM)  in do_page_fault()
    139  else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
    141  else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
    146  if (fault & VM_FAULT_RETRY) {  in do_page_fault()
/linux/arch/microblaze/mm/

fault.c
    93  vm_fault_t fault;  in do_page_fault() local
    221  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    223  if (fault_signal_pending(fault, regs)) {  in do_page_fault()
    230  if (fault & VM_FAULT_COMPLETED)  in do_page_fault()
    233  if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
    234  if (fault & VM_FAULT_OOM)  in do_page_fault()
    236  else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
    238  else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
    243  if (fault & VM_FAULT_RETRY) {  in do_page_fault()
/linux/arch/csky/mm/

fault.c
    73  static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)  in mm_fault_error() argument
    77  if (fault & VM_FAULT_OOM) {  in mm_fault_error()
    88  } else if (fault & VM_FAULT_SIGBUS) {  in mm_fault_error()
    195  vm_fault_t fault;  in do_page_fault() local
    263  fault = handle_mm_fault(vma, addr, flags, regs);  in do_page_fault()
    270  if (fault_signal_pending(fault, regs)) {  in do_page_fault()
    277  if (fault & VM_FAULT_COMPLETED)  in do_page_fault()
    280  if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {  in do_page_fault()
    293  if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
    294  mm_fault_error(regs, addr, fault);  in do_page_fault()
/linux/arch/s390/mm/

fault.c
    264  vm_fault_t fault;  in do_exception() local
    297  fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);  in do_exception()
    298  if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))  in do_exception()
    300  if (!(fault & VM_FAULT_RETRY)) {  in do_exception()
    305  if (fault & VM_FAULT_MAJOR)  in do_exception()
    308  if (fault_signal_pending(fault, regs)) {  in do_exception()
    320  fault = handle_mm_fault(vma, address, flags, regs);  in do_exception()
    321  if (fault_signal_pending(fault, regs)) {  in do_exception()
    327  if (fault & VM_FAULT_COMPLETED)  in do_exception()
    329  if (fault & VM_FAULT_RETRY) {  in do_exception()
    [all …]
/linux/drivers/media/i2c/

adp1653.c
    80  int fault;  in adp1653_get_fault() local
    83  fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);  in adp1653_get_fault()
    84  if (fault < 0)  in adp1653_get_fault()
    85  return fault;  in adp1653_get_fault()
    87  flash->fault |= fault;  in adp1653_get_fault()
    89  if (!flash->fault)  in adp1653_get_fault()
    103  return flash->fault;  in adp1653_get_fault()
    152  if (flash->fault & ADP1653_REG_FAULT_FLT_SCP)  in adp1653_get_ctrl()
    154  if (flash->fault & ADP1653_REG_FAULT_FLT_OT)  in adp1653_get_ctrl()
    156  if (flash->fault & ADP1653_REG_FAULT_FLT_TMR)  in adp1653_get_ctrl()
    [all …]
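adp1653_get_fault() reads the chip's fault register over SMBus and ORs fresh bits into a software latch (flash->fault), so transient faults stay visible until explicitly cleared; adp1653_get_ctrl() then translates the latched bits into V4L2 fault flags. A sketch of the latch with the I2C read stubbed out; the macro names follow the driver, but the bit values here are illustrative assumptions:

    #include <stdio.h>

    /* Illustrative bit values; check the driver/datasheet for the real ones. */
    #define ADP1653_REG_FAULT_FLT_SCP 0x08  /* short-circuit protection */
    #define ADP1653_REG_FAULT_FLT_OT  0x04  /* over-temperature */
    #define ADP1653_REG_FAULT_FLT_TMR 0x02  /* timeout */

    struct flash { int fault; };  /* software fault latch */

    /* Stub for i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT):
     * returns the register value, or a negative errno on I2C failure. */
    static int read_fault_reg(void)
    {
            return ADP1653_REG_FAULT_FLT_OT;  /* pretend the chip reports OT */
    }

    static int get_fault(struct flash *flash)
    {
            int fault = read_fault_reg();

            if (fault < 0)
                    return fault;       /* propagate the I2C error */

            flash->fault |= fault;      /* latch: bits accumulate across reads */
            return flash->fault;
    }

    int main(void)
    {
            struct flash flash = { 0 };

            get_fault(&flash);
            if (flash.fault & ADP1653_REG_FAULT_FLT_OT)
                    printf("over-temperature fault latched\n");
            return 0;
    }
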
/linux/arch/openrisc/mm/

fault.c
    53  vm_fault_t fault;  in do_page_fault() local
    166  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    168  if (fault_signal_pending(fault, regs)) {  in do_page_fault()
    175  if (fault & VM_FAULT_COMPLETED)  in do_page_fault()
    178  if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
    179  if (fault & VM_FAULT_OOM)  in do_page_fault()
    181  else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
    183  else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
    189  if (fault & VM_FAULT_RETRY) {  in do_page_fault()
/linux/arch/sh/mm/

fault.c
    314  unsigned long address, vm_fault_t fault)  in mm_fault_error() argument
    320  if (fault_signal_pending(fault, regs)) {  in mm_fault_error()
    327  if (!(fault & VM_FAULT_RETRY))  in mm_fault_error()
    330  if (!(fault & VM_FAULT_ERROR))  in mm_fault_error()
    333  if (fault & VM_FAULT_OOM) {  in mm_fault_error()
    347  if (fault & VM_FAULT_SIGBUS)  in mm_fault_error()
    349  else if (fault & VM_FAULT_SIGSEGV)  in mm_fault_error()
    397  vm_fault_t fault;  in do_page_fault() local
    469  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    471  if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))  in do_page_fault()
    [all …]