/linux/drivers/gpu/drm/nouveau/nvkm/subdev/fault/

  base.c
    29  struct nvkm_fault *fault = container_of(event, typeof(*fault), event);  [in nvkm_fault_ntfy_fini(), local]
    30  fault->func->buffer.intr(fault->buffer[index], false);  [in nvkm_fault_ntfy_fini()]
    36  struct nvkm_fault *fault = container_of(event, typeof(*fault), event);  [in nvkm_fault_ntfy_init(), local]
    37  fault->func->buffer.intr(fault->buffer[index], true);  [in nvkm_fault_ntfy_init()]
    49  struct nvkm_fault *fault = nvkm_fault(subdev);  [in nvkm_fault_intr(), local]
    50  return fault->func->intr(fault);  [in nvkm_fault_intr()]
    56  struct nvkm_fault *fault = nvkm_fault(subdev);  [in nvkm_fault_fini(), local]
    57  if (fault->func->fini)  [in nvkm_fault_fini()]
    58  fault->func->fini(fault);  [in nvkm_fault_fini()]
    65  struct nvkm_fault *fault = nvkm_fault(subdev);  [in nvkm_fault_init(), local]
    [all …]
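Several of these matches rely on the same idiom: a callback only receives a pointer to an embedded member (an event or work item) and recovers the enclosing nvkm_fault with container_of(). A minimal stand-alone sketch of that idiom follows; the struct names and the simplified container_of() macro here are illustrative stand-ins, not the nouveau definitions.

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified version of the kernel's container_of(): given a pointer to a
     * member, step back to the start of the structure embedding it. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct event {                  /* stands in for the embedded event object */
            int index;
    };

    struct fault_dev {              /* hypothetical stand-in for struct nvkm_fault */
            const char *name;
            struct event event;     /* embedded member handed to callbacks */
    };

    /* Callback that only sees the embedded member, as in nvkm_fault_ntfy_init(). */
    static void ntfy_init(struct event *ev)
    {
            struct fault_dev *fault = container_of(ev, struct fault_dev, event);

            printf("enabling fault buffer interrupts on %s (index %d)\n",
                   fault->name, ev->index);
    }

    int main(void)
    {
            struct fault_dev dev = { .name = "gv100-fault", .event = { .index = 0 } };

            ntfy_init(&dev.event);  /* recovers &dev from &dev.event */
            return 0;
    }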
  gv100.c
    33  struct nvkm_fault *fault = container_of(work, typeof(*fault), nrpfb_work);  [in gv100_fault_buffer_process(), local]
    34  struct nvkm_fault_buffer *buffer = fault->buffer[0];  [in gv100_fault_buffer_process()]
    35  struct nvkm_device *device = fault->subdev.device;  [in gv100_fault_buffer_process()]
    44  const u32 base = get * buffer->fault->func->buffer.entry_size;  [in gv100_fault_buffer_process()]
    78  struct nvkm_device *device = buffer->fault->subdev.device;  [in gv100_fault_buffer_intr()]
    89  struct nvkm_device *device = buffer->fault->subdev.device;  [in gv100_fault_buffer_fini()]
    97  struct nvkm_device *device = buffer->fault->subdev.device;  [in gv100_fault_buffer_init()]
    109  struct nvkm_device *device = buffer->fault->subdev.device;  [in gv100_fault_buffer_info()]
    122  struct nvkm_fault *fault = container_of(ntfy, typeof(*fault), nrpfb);  [in gv100_fault_ntfy_nrpfb(), local]
    124  schedule_work(&fault->nrpfb_work);  [in gv100_fault_ntfy_nrpfb()]
    [all …]
  tu102.c
    38  nvkm_event_ntfy(&buffer->fault->event, buffer->id, NVKM_FAULT_BUFFER_EVENT_PENDING);  [in tu102_fault_buffer_notify()]
    54  struct nvkm_device *device = buffer->fault->subdev.device;  [in tu102_fault_buffer_fini()]
    63  struct nvkm_device *device = buffer->fault->subdev.device;  [in tu102_fault_buffer_init()]
    75  struct nvkm_device *device = buffer->fault->subdev.device;  [in tu102_fault_buffer_info()]
    88  struct nvkm_fault *fault = container_of(inth, typeof(*fault), info_fault);  [in tu102_fault_info_fault(), local]
    89  struct nvkm_subdev *subdev = &fault->subdev;  [in tu102_fault_info_fault()]
    116  tu102_fault_fini(struct nvkm_fault *fault)  [in tu102_fault_fini(), argument]
    118  nvkm_event_ntfy_block(&fault->nrpfb);  [in tu102_fault_fini()]
    119  flush_work(&fault->nrpfb_work);  [in tu102_fault_fini()]
    121  if (fault->buffer[0])  [in tu102_fault_fini()]
    [all …]
/linux/drivers/infiniband/hw/hfi1/

  fault.c
    13  #include "fault.h"
    69  if (!ibd->fault->n_rxfaults[i] && !ibd->fault->n_txfaults[i])  [in _fault_stats_seq_show()]
    74  (unsigned long long)ibd->fault->n_rxfaults[i],  [in _fault_stats_seq_show()]
    75  (unsigned long long)ibd->fault->n_txfaults[i]);  [in _fault_stats_seq_show()]
    96  struct fault *fault = file->private_data;  [in fault_opcodes_write(), local]
    135  bitmap_zero(fault->opcodes, sizeof(fault->opcodes) *  [in fault_opcodes_write()]
    145  clear_bit(i, fault->opcodes);  [in fault_opcodes_write()]
    147  set_bit(i, fault->opcodes);  [in fault_opcodes_write()]
    166  struct fault *fault = file->private_data;  [in fault_opcodes_read(), local]
    167  size_t bitsize = sizeof(fault->opcodes) * BITS_PER_BYTE;  [in fault_opcodes_read()]
    [all …]
/linux/drivers/iommu/iommufd/

  eventq.c
    22  struct iommufd_fault *fault = hwpt->fault;  [in iommufd_auto_response_faults(), local]
    27  if (!fault || !handle)  [in iommufd_auto_response_faults()]
    31  mutex_lock(&fault->mutex);  [in iommufd_auto_response_faults()]
    32  spin_lock(&fault->common.lock);  [in iommufd_auto_response_faults()]
    33  list_for_each_entry_safe(group, next, &fault->common.deliver, node) {  [in iommufd_auto_response_faults()]
    38  spin_unlock(&fault->common.lock);  [in iommufd_auto_response_faults()]
    46  xa_for_each(&fault->response, index, group) {  [in iommufd_auto_response_faults()]
    49  xa_erase(&fault->response, index);  [in iommufd_auto_response_faults()]
    53  mutex_unlock(&fault->mutex);  [in iommufd_auto_response_faults()]
    60  struct iommufd_fault *fault = eventq_to_fault(eventq);  [in iommufd_fault_destroy(), local]
    [all …]
/linux/drivers/iommu/

  io-pgfault.c
    17  * Return the fault parameter of a device if it exists. Otherwise, return NULL.
    35  /* Caller must hold a reference of the fault parameter. */
    47  if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))  [in __iopf_free_group()]
    64  struct iommu_fault *fault)  [in report_partial_fault(), argument]
    72  iopf->fault = *fault;  [in report_partial_fault()]
    98  group->last_fault.fault = evt->fault;  [in iopf_group_alloc()]
    106  if (iopf->fault.prm.grpid == evt->fault.prm.grpid)  [in iopf_group_alloc()]
    107  /* Insert *before* the last fault */  [in iopf_group_alloc()]
    121  struct iommu_fault *fault = &evt->fault;  [in find_fault_handler(), local]
    124  if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {  [in find_fault_handler()]
    [all …]
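The io-pgfault.c matches show the page-request grouping model: partial faults are queued per device until a request flagged IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE arrives, at which point every queued fault with the same grpid is collected into one group. A rough, self-contained sketch of that collection step follows; the struct layout, queue, and helpers are simplified assumptions, not the iommu/iommufd data structures.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_PARTIAL 16

    struct page_request {
            unsigned int grpid;     /* group id carried by the page request */
            bool last;              /* LAST_PAGE: closes the group */
            unsigned long addr;
    };

    /* Pending partial faults for one device (stand-in for the per-device
     * fault parameter mentioned in the comments above). */
    static struct page_request partial[MAX_PARTIAL];
    static int npartial;

    /* On a LAST_PAGE request, gather all queued requests with the same grpid
     * plus the closing one into 'group'.  Returns the number of faults grouped. */
    static int collect_group(const struct page_request *last,
                             struct page_request *group, int max)
    {
            int i, n = 0;

            for (i = 0; i < npartial && n < max - 1; ) {
                    if (partial[i].grpid == last->grpid) {
                            group[n++] = partial[i];
                            /* remove from the partial queue by swapping in the tail */
                            partial[i] = partial[--npartial];
                    } else {
                            i++;
                    }
            }
            group[n++] = *last;     /* the closing fault goes last in the group */
            return n;
    }

    static void report(const struct page_request *req)
    {
            if (!req->last) {
                    if (npartial < MAX_PARTIAL)
                            partial[npartial++] = *req;  /* queue until LAST_PAGE arrives */
                    return;
            }
            struct page_request group[MAX_PARTIAL];
            int n = collect_group(req, group, MAX_PARTIAL);
            printf("dispatching group %u with %d fault(s)\n", req->grpid, n);
    }

    int main(void)
    {
            report(&(struct page_request){ .grpid = 7, .addr = 0x1000 });
            report(&(struct page_request){ .grpid = 7, .addr = 0x2000, .last = true });
            return 0;
    }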
/linux/arch/powerpc/platforms/powernv/

  vas-fault.c
    3  * VAS Fault handling.
    21  * The maximum FIFO size for fault window can be 8MB
    23  * instance will be having fault window.
    35  pr_err("Fault fifo size %d, Max crbs %d\n", vinst->fault_fifo_size,  [in dump_fifo()]
    39  pr_err("Fault FIFO Dump:\n");  [in dump_fifo()]
    47  * Process valid CRBs in fault FIFO.
    50  * request buffers, raises interrupt on the CPU to handle the fault.
    51  * It takes credit on fault window, updates nx_fault_stamp in CRB with
    52  * the following information and pastes CRB in fault FIFO.
    55  * fault_storage_addr - fault address
    [all …]
/linux/arch/x86/kvm/mmu/

  paging_tmpl.h
    92  struct x86_exception fault;  [member]
    249  ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);  [in FNAME()]
    352  * Queue a page fault for injection if this assertion fails, as callers  [in FNAME()]
    353  * assume that walker.fault contains sane info on a walk failure. I.e.  [in FNAME()]
    380  nested_access, &walker->fault);  [in FNAME()]
    384  * instruction) triggers a nested page fault. The exit  [in FNAME()]
    386  * "guest page access" as the nested page fault's cause,  [in FNAME()]
    448  real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);  [in FNAME()]
    458  * On a write fault, fold the dirty bit into accessed_dirty.  [in FNAME()]
    481  walker->fault.vector = PF_VECTOR;  [in FNAME()]
    [all …]
  mmu_internal.h
    261  * Maximum page size that can be created for this fault; input to
    303  int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
    309  * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
    310  * RET_PF_RETRY: let CPU fault again on the address.
    311  * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
    314  * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
    343  struct kvm_page_fault *fault)  [in kvm_mmu_prepare_memory_fault_exit(), argument]
    345  kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,  [in kvm_mmu_prepare_memory_fault_exit()]
    346  PAGE_SIZE, fault->write, fault->exec,  [in kvm_mmu_prepare_memory_fault_exit()]
    347  fault->is_private);  [in kvm_mmu_prepare_memory_fault_exit()]
    [all …]
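The mmu_internal.h comment enumerates the return codes a KVM page-fault handler can produce. A hedged sketch of how a caller might dispatch on such a result is shown below; the enum values and the completion helper are illustrative stand-ins, not KVM's actual RET_PF_* definitions or control flow.

    #include <stdio.h>

    /* Illustrative counterparts of the RET_PF_* codes described above. */
    enum pf_result {
            PF_CONTINUE,    /* keep handling the fault */
            PF_RETRY,       /* let the CPU fault again on the address */
            PF_EMULATE,     /* MMIO fault: emulate the instruction */
            PF_FIXED,       /* mapping installed, fault resolved */
            PF_INVALID,     /* stale entry, fall back to the slow path */
    };

    /* Hypothetical completion step: decide what the caller does next. */
    static const char *complete_fault(enum pf_result r)
    {
            switch (r) {
            case PF_RETRY:
                    return "return to the guest and re-execute the faulting access";
            case PF_EMULATE:
                    return "emulate the instruction directly";
            case PF_FIXED:
                    return "resume the guest, mapping is in place";
            case PF_INVALID:
                    return "retry through the full page-fault path";
            case PF_CONTINUE:
            default:
                    return "continue with the next handling stage";
            }
    }

    int main(void)
    {
            printf("%s\n", complete_fault(PF_EMULATE));
            return 0;
    }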
/linux/arch/microblaze/mm/

  fault.c
    2  * arch/microblaze/mm/fault.c
    6  * Derived from "arch/ppc/mm/fault.c"
    9  * Derived from "arch/i386/mm/fault.c"
    71  /* Are we prepared to handle this fault? */  [in bad_page_fault()]
    83  * The error_code parameter is ESR for a data fault,
    84  * 0 for an instruction fault.
    93  vm_fault_t fault;  [in do_page_fault(), local]
    115  pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",  [in do_page_fault()]
    119  die("Weird page fault", regs, SIGSEGV);  [in do_page_fault()]
    130  * erroneous fault occurring in a code path which already holds mmap_lock  [in do_page_fault()]
    [all …]
/linux/arch/m68k/mm/

  fault.c
    3  * linux/arch/m68k/mm/fault.c
    20  #include "fault.h"
    64  * bit 0 == 0 means no page found, 1 means protection fault
    75  vm_fault_t fault;  [in do_page_fault(), local]
    78  pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",  [in do_page_fault()]
    83  * context, we must not take the fault..  [in do_page_fault()]
    136  * If for any reason at all we couldn't handle the fault,  [in do_page_fault()]
    138  * the fault.  [in do_page_fault()]
    141  fault = handle_mm_fault(vma, address, flags, regs);  [in do_page_fault()]
    142  pr_debug("handle_mm_fault returns %x\n", fault);  [in do_page_fault()]
    [all …]
/linux/arch/parisc/mm/

  fault.c
    46  * the instruction has generated some sort of a memory access fault).
    106  * Data TLB miss fault/data page fault  [in parisc_acctyp()]
    204  [6] = "Instruction TLB miss fault",
    213  [15] = "Data TLB miss fault",
    214  [16] = "Non-access ITLB miss fault",
    215  [17] = "Non-access DTLB miss fault",
    274  vm_fault_t fault = 0;  [in do_page_fault(), local]
    281  msg = "Page fault: no context";  [in do_page_fault()]
    313  * If for any reason at all we couldn't handle the fault, make  [in do_page_fault()]
    315  * fault.  [in do_page_fault()]
    [all …]
/linux/arch/arm/mm/

  fsr-3level.c
    7  { do_bad, SIGBUS, 0, "reserved translation fault" },
    8  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
    9  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
    10  { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
    11  { do_bad, SIGBUS, 0, "reserved access flag fault" },
    12  { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
    13  { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
    14  { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
    15  { do_bad, SIGBUS, 0, "reserved permission fault" },
    16  { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
    [all …]
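Both fsr-*level.c files are tables indexed by the fault status encoding, pairing a handler with the signal and si_code to deliver when the handler cannot resolve the fault. A compact stand-alone sketch of that dispatch-table shape follows; the handler names, signal values, and status encoding below are made up for illustration and do not match the ARM definitions.

    #include <stdio.h>

    /* Values standing in for SIGSEGV/SIGBUS and SEGV_MAPERR/SEGV_ACCERR. */
    enum { SIG_SEGV = 11, SIG_BUS = 7 };
    enum { CODE_NONE = 0, CODE_MAPERR = 1, CODE_ACCERR = 2 };

    struct fsr_info {
            int (*fn)(unsigned long addr);  /* handler to try first */
            int sig;                        /* signal if the handler cannot fix it */
            int code;                       /* si_code to report */
            const char *name;
    };

    static int do_bad(unsigned long addr)             { (void)addr; return 1; }  /* always fails */
    static int do_translation_flt(unsigned long addr) { (void)addr; return 0; }
    static int do_page_flt(unsigned long addr)        { (void)addr; return 0; }

    /* Table indexed by the low bits of a made-up fault status register. */
    static const struct fsr_info fsr_info[] = {
            { do_bad,             SIG_BUS,  CODE_NONE,   "reserved translation fault" },
            { do_translation_flt, SIG_SEGV, CODE_MAPERR, "level 1 translation fault"  },
            { do_page_flt,        SIG_SEGV, CODE_MAPERR, "level 3 translation fault"  },
            { do_page_flt,        SIG_SEGV, CODE_ACCERR, "level 3 permission fault"   },
    };

    static void handle_fault(unsigned int fsr, unsigned long addr)
    {
            const struct fsr_info *inf = &fsr_info[fsr & 3];

            if (inf->fn(addr) == 0)
                    return;         /* handler resolved the fault */
            printf("unhandled %s at %#lx: signal %d, code %d\n",
                   inf->name, addr, inf->sig, inf->code);
    }

    int main(void)
    {
            handle_fault(0, 0x1000);        /* hits the "reserved" entry, reports */
            handle_fault(2, 0x2000);        /* resolved by do_page_flt */
            return 0;
    }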
  fsr-2level.c
    12  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
    14  { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
    16  { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
    18  { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
    20  { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
    22  { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
    50  { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
    52  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
    53  { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
    54  { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
    [all …]
  fault.c
    3  * linux/arch/arm/mm/fault.c
    26  #include "fault.h"
    155  * Are we prepared to handle this kernel fault?  [in __do_kernel_fault()]
    193  pr_err("%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",  [in __do_user_fault()]
    219  * have no context to handle this fault with.  [in do_bad_area()]
    268  vm_fault_t fault;  [in do_page_fault(), local]
    282  * context, we must not take the fault..  [in do_page_fault()]
    307  * routed via the translation fault mechanism. Check whether uaccess  [in do_page_fault()]
    323  fault = 0;  [in do_page_fault()]
    327  fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);  [in do_page_fault()]
    [all …]
/linux/arch/hexagon/mm/

  vm_fault.c
    3  * Memory fault handling for Hexagon
    9  * Page fault handling for the Hexagon Virtual Machine.
    35  * Canonical page fault handler
    43  vm_fault_t fault;  [in do_page_fault(), local]
    49  * then must not take the fault.  [in do_page_fault()]
    84  fault = handle_mm_fault(vma, address, flags, regs);  [in do_page_fault()]
    86  if (fault_signal_pending(fault, regs)) {  [in do_page_fault()]
    92  /* The fault is fully completed (including releasing mmap lock) */  [in do_page_fault()]
    93  if (fault & VM_FAULT_COMPLETED)  [in do_page_fault()]
    97  if (likely(!(fault & VM_FAULT_ERROR))) {  [in do_page_fault()]
    [all …]
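The Hexagon handler, like most of the other do_page_fault() matches in this listing, follows the common shape: call handle_mm_fault(), bail out early if a fatal signal is pending, treat VM_FAULT_COMPLETED as done, and loop on VM_FAULT_RETRY. A user-space sketch of that control flow is shown below; the constants and stub helpers are simplified stand-ins, not the kernel's vm_fault_t API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the vm_fault_t result bits and fault flags. */
    #define FAULT_ERROR      0x1
    #define FAULT_RETRY      0x2
    #define FAULT_COMPLETED  0x4
    #define FLAG_TRIED       0x10

    static unsigned int stub_handle_mm_fault(unsigned long addr, unsigned int flags)
    {
            (void)addr;
            /* Pretend the first attempt had to drop the lock and retry. */
            return (flags & FLAG_TRIED) ? 0 : FAULT_RETRY;
    }

    static bool stub_fatal_signal_pending(void) { return false; }

    static int fault_loop(unsigned long addr)
    {
            unsigned int flags = 0, fault;

    retry:
            fault = stub_handle_mm_fault(addr, flags);

            if (stub_fatal_signal_pending())
                    return -1;              /* caller delivers the signal / kills the task */

            if (fault & FAULT_COMPLETED)    /* lock already released, nothing left to do */
                    return 0;

            if (fault & FAULT_ERROR)
                    return -1;              /* OOM, SIGBUS, etc. handled by the caller */

            if (fault & FAULT_RETRY) {
                    flags |= FLAG_TRIED;    /* second attempt is allowed to block */
                    goto retry;
            }
            return 0;
    }

    int main(void)
    {
            printf("fault_loop -> %d\n", fault_loop(0x1000));
            return 0;
    }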
/linux/arch/alpha/mm/

  fault.c
    3  * linux/arch/alpha/mm/fault.c
    65  * 2 = fault-on-read
    66  * 3 = fault-on-execute
    67  * 4 = fault-on-write
    92  vm_fault_t fault;  [in do_page_fault(), local]
    110  we must not take the fault. */  [in do_page_fault()]
    142  /* If for any reason at all we couldn't handle the fault,  [in do_page_fault()]
    144  the fault. */  [in do_page_fault()]
    145  fault = handle_mm_fault(vma, address, flags, regs);  [in do_page_fault()]
    147  if (fault_signal_pending(fault, regs)) {  [in do_page_fault()]
    [all …]
/linux/arch/powerpc/lib/

  checksum_32.S
    109  EX_TABLE(8 ## n ## 0b, fault); \
    110  EX_TABLE(8 ## n ## 1b, fault); \
    111  EX_TABLE(8 ## n ## 2b, fault); \
    112  EX_TABLE(8 ## n ## 3b, fault); \
    113  EX_TABLE(8 ## n ## 4b, fault); \
    114  EX_TABLE(8 ## n ## 5b, fault); \
    115  EX_TABLE(8 ## n ## 6b, fault); \
    116  EX_TABLE(8 ## n ## 7b, fault);
    240  fault:  [label]
    244  EX_TABLE(70b, fault);
    [all …]
/linux/arch/nios2/mm/

  fault.c
    5  * based on arch/mips/mm/fault.c which is:
    50  vm_fault_t fault;  [in do_page_fault(), local]
    59  * We fault-in kernel-space virtual memory on-demand. The  [in do_page_fault()]
    79  * context, we must not take the fault..  [in do_page_fault()]
    120  * If for any reason at all we couldn't handle the fault,  [in do_page_fault()]
    122  * the fault.  [in do_page_fault()]
    124  fault = handle_mm_fault(vma, address, flags, regs);  [in do_page_fault()]
    126  if (fault_signal_pending(fault, regs)) {  [in do_page_fault()]
    132  /* The fault is fully completed (including releasing mmap lock) */  [in do_page_fault()]
    133  if (fault & VM_FAULT_COMPLETED)  [in do_page_fault()]
    [all …]
/linux/arch/mips/kernel/

  unaligned.c
    175  goto fault;  [in emulate_load_store_insn()]
    184  goto fault;  [in emulate_load_store_insn()]
    193  goto fault;  [in emulate_load_store_insn()]
    213  goto fault;  [in emulate_load_store_insn()]
    222  goto fault;  [in emulate_load_store_insn()]
    243  goto fault;  [in emulate_load_store_insn()]
    252  goto fault;  [in emulate_load_store_insn()]
    261  goto fault;  [in emulate_load_store_insn()]
    272  goto fault;  [in emulate_load_store_insn()]
    281  goto fault;  [in emulate_load_store_insn()]
    [all …]
/linux/arch/sh/mm/

  fault.c
    2  * Page fault handler for SH with an MMU.
    7  * Based on linux/arch/i386/mm/fault.c:
    157  * be another reason for the fault. Return NULL here to  [in vmalloc_sync_one()]
    158  * signal that we have not taken care of the fault.  [in vmalloc_sync_one()]
    174  * Handle a fault on the vmalloc or module mapping area
    225  /* Are we prepared to handle this kernel fault? */  [in no_context()]
    314  unsigned long address, vm_fault_t fault)  [in mm_fault_error(), argument]
    320  if (fault_signal_pending(fault, regs)) {  [in mm_fault_error()]
    327  if (!(fault & VM_FAULT_RETRY))  [in mm_fault_error()]
    330  if (!(fault & VM_FAULT_ERROR))  [in mm_fault_error()]
    [all …]
/linux/arch/nios2/kernel/

  misaligned.c
    72  unsigned int fault;  [in handle_unaligned_c(), local]
    85  fault = 0;  [in handle_unaligned_c()]
    98  fault |= __get_user(d0, (u8 *)(addr+0));  [in handle_unaligned_c()]
    99  fault |= __get_user(d1, (u8 *)(addr+1));  [in handle_unaligned_c()]
    111  fault |= __put_user(d0, (u8 *)(addr+0));  [in handle_unaligned_c()]
    112  fault |= __put_user(d1, (u8 *)(addr+1));  [in handle_unaligned_c()]
    116  fault |= __get_user(d0, (u8 *)(addr+0));  [in handle_unaligned_c()]
    117  fault |= __get_user(d1, (u8 *)(addr+1));  [in handle_unaligned_c()]
    133  fault |= __put_user(d0, (u8 *)(addr+0));  [in handle_unaligned_c()]
    134  fault |= __put_user(d1, (u8 *)(addr+1));  [in handle_unaligned_c()]
    [all …]
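The misaligned.c matches show the fix-up strategy: an unaligned halfword is transferred one byte at a time with __get_user()/__put_user(), and the (0 or -EFAULT) results are OR-ed into a single fault flag that is checked once at the end. A stand-alone sketch of the same accumulation idea follows; the byte accessor is a stub, not the kernel's uaccess API.

    #include <stdint.h>
    #include <stdio.h>

    #define EFAULT 14

    /* Stub byte accessor: returns 0 on success, -EFAULT on a bad address. */
    static int stub_get_user(uint8_t *dst, const uint8_t *addr)
    {
            if (!addr)
                    return -EFAULT;
            *dst = *addr;
            return 0;
    }

    /* Emulate an unaligned 16-bit little-endian load, byte by byte. */
    static int load16_unaligned(const uint8_t *addr, uint16_t *out)
    {
            uint8_t d0, d1;
            int fault = 0;

            fault |= stub_get_user(&d0, addr + 0);
            fault |= stub_get_user(&d1, addr + 1);
            if (fault)
                    return -EFAULT; /* any failed byte poisons the whole access */

            *out = (uint16_t)(d0 | (d1 << 8));
            return 0;
    }

    int main(void)
    {
            uint8_t buf[3] = { 0x34, 0x12, 0x00 };
            uint16_t v;

            if (!load16_unaligned(buf + 0, &v))
                    printf("value = 0x%04x\n", v);  /* prints 0x1234 */
            return 0;
    }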
/linux/arch/x86/mm/

  fault.c
    43  * Returns 0 if mmiotrace is disabled, or if the fault is not
    134  * If it was a exec (instruction fetch) fault on NX page, then  [in is_prefetch()]
    135  * do not ignore the fault:  [in is_prefetch()]
    219  * Handle a fault on the vmalloc or module mapping area
    230  * unhandled page-fault when they are accessed.
    413  * The OS sees this as a page fault with the upper 32bits of RIP cleared.
    450  * We catch this in the page fault handler because these addresses
    539  pr_alert("BUG: unable to handle page fault for address: %px\n",  [in show_fault_oops()]
    563  * contributory exception from user code and gets a page fault  [in show_fault_oops()]
    564  * during delivery, the page fault can be delivered as though  [in show_fault_oops()]
    [all …]
/linux/arch/loongarch/mm/

  fault.c
    80  /* Are we prepared to handle this kernel fault? */  [in no_context()]
    105  * (which will retry the fault, or kill us if we got oom-killed).  [in do_out_of_memory()]
    183  vm_fault_t fault;  [in __do_page_fault(), local]
    189  * We fault-in kernel-space virtual memory on-demand. The  [in __do_page_fault()]
    207  * context, we must not take the fault..  [in __do_page_fault()]
    249  fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);  [in __do_page_fault()]
    250  if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))  [in __do_page_fault()]
    253  if (!(fault & VM_FAULT_RETRY)) {  [in __do_page_fault()]
    259  if (fault & VM_FAULT_MAJOR)  [in __do_page_fault()]
    263  if (fault_signal_pending(fault, regs)) {  [in __do_page_fault()]
    [all …]
/linux/arch/arc/mm/

  fault.c
    2  /* Page Fault Handling for ARC (TLB Miss / ProtV)
    80  vm_fault_t fault = VM_FAULT_SIGSEGV; /* handle_mm_fault() output */  [in do_page_fault(), local]
    98  * context, we must not take the fault..  [in do_page_fault()]
    135  fault = handle_mm_fault(vma, address, flags, regs);  [in do_page_fault()]
    138  if (fault_signal_pending(fault, regs)) {  [in do_page_fault()]
    144  /* The fault is fully completed (including releasing mmap lock) */  [in do_page_fault()]
    145  if (fault & VM_FAULT_COMPLETED)  [in do_page_fault()]
    149  * Fault retry nuances, mmap_lock already relinquished by core mm  [in do_page_fault()]
    151  if (unlikely(fault & VM_FAULT_RETRY)) {  [in do_page_fault()]
    161  * Major/minor page fault accounting  [in do_page_fault()]
    [all …]