/linux-6.14.4/drivers/gpu/drm/nouveau/nvkm/subdev/fault/ |
D | base.c |
   29  struct nvkm_fault *fault = container_of(event, typeof(*fault), event);  in nvkm_fault_ntfy_fini() local
   30  fault->func->buffer.intr(fault->buffer[index], false);  in nvkm_fault_ntfy_fini()
   36  struct nvkm_fault *fault = container_of(event, typeof(*fault), event);  in nvkm_fault_ntfy_init() local
   37  fault->func->buffer.intr(fault->buffer[index], true);  in nvkm_fault_ntfy_init()
   49  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_intr() local
   50  return fault->func->intr(fault);  in nvkm_fault_intr()
   56  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_fini() local
   57  if (fault->func->fini)  in nvkm_fault_fini()
   58  fault->func->fini(fault);  in nvkm_fault_fini()
   65  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_init() local
   [all …]
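The base.c hits all revolve around one idiom: container_of() recovers the enclosing nvkm_fault from a pointer to its embedded event member, so the notify callbacks can flip the buffer interrupt without a back-pointer. A minimal, self-contained C sketch of that macro and the recovery — the struct names are illustrative stand-ins, not the nouveau definitions:

#include <stddef.h>
#include <stdio.h>

/* Same arithmetic the kernel macro uses: subtract the member offset
 * from the member pointer to recover the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct event { int index; };

struct fault_dev {
	int enabled;
	struct event event;   /* embedded, as nvkm_fault embeds its event */
};

static void ntfy_init(struct event *ev)
{
	struct fault_dev *fault = container_of(ev, struct fault_dev, event);
	fault->enabled = 1;   /* stands in for fault->func->buffer.intr(..., true) */
}

int main(void)
{
	struct fault_dev dev = { .enabled = 0 };

	ntfy_init(&dev.event);
	printf("enabled=%d\n", dev.enabled);   /* prints enabled=1 */
	return 0;
}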
|
D | gv100.c |
   33  struct nvkm_fault *fault = container_of(work, typeof(*fault), nrpfb_work);  in gv100_fault_buffer_process() local
   34  struct nvkm_fault_buffer *buffer = fault->buffer[0];  in gv100_fault_buffer_process()
   35  struct nvkm_device *device = fault->subdev.device;  in gv100_fault_buffer_process()
   44  const u32 base = get * buffer->fault->func->buffer.entry_size;  in gv100_fault_buffer_process()
   78  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_intr()
   89  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_fini()
   97  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_init()
  109  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_info()
  122  struct nvkm_fault *fault = container_of(ntfy, typeof(*fault), nrpfb);  in gv100_fault_ntfy_nrpfb() local
  124  schedule_work(&fault->nrpfb_work);  in gv100_fault_ntfy_nrpfb()
   [all …]
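gv100_fault_buffer_process() consumes records between the buffer's GET and PUT pointers, addressing each one as get * entry_size (line 44 above). A hedged sketch of that consumer loop; the entry layout, ring depth, and wrap behavior are assumptions for illustration, not the GV100 record format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical record layout; the real GV100 fault record differs. */
struct fault_entry { uint64_t addr; uint32_t engine; uint32_t pad; };

#define ENTRIES 8

static struct fault_entry ring[ENTRIES];

/* Walk records between 'get' and 'put'; each record sits at
 * get * entry_size, as in gv100_fault_buffer_process(). */
static uint32_t process(uint32_t get, uint32_t put)
{
	while (get != put) {
		const size_t base = get * sizeof(struct fault_entry);
		struct fault_entry e;

		memcpy(&e, (char *)ring + base, sizeof(e));
		printf("fault at %#llx (engine %u)\n",
		       (unsigned long long)e.addr, e.engine);
		get = (get + 1) % ENTRIES;   /* wrap like a hardware ring */
	}
	return get;   /* written back as the new GET pointer */
}

int main(void)
{
	ring[0] = (struct fault_entry){ 0x1000, 3, 0 };
	ring[1] = (struct fault_entry){ 0x2000, 5, 0 };
	process(0, 2);
	return 0;
}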
|
D | tu102.c |
   38  nvkm_event_ntfy(&buffer->fault->event, buffer->id, NVKM_FAULT_BUFFER_EVENT_PENDING);  in tu102_fault_buffer_notify()
   54  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_fini()
   63  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_init()
   75  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_info()
   88  struct nvkm_fault *fault = container_of(inth, typeof(*fault), info_fault);  in tu102_fault_info_fault() local
   89  struct nvkm_subdev *subdev = &fault->subdev;  in tu102_fault_info_fault()
  116  tu102_fault_fini(struct nvkm_fault *fault)  in tu102_fault_fini() argument
  118  nvkm_event_ntfy_block(&fault->nrpfb);  in tu102_fault_fini()
  119  flush_work(&fault->nrpfb_work);  in tu102_fault_fini()
  121  if (fault->buffer[0])  in tu102_fault_fini()
   [all …]
|
/linux-6.14.4/drivers/iommu/iommufd/ |
D | fault.c |
   87  if (!hwpt->fault)  in iommufd_fault_domain_attach_dev()
  104  struct iommufd_fault *fault = hwpt->fault;  in iommufd_auto_response_faults() local
  109  if (!fault)  in iommufd_auto_response_faults()
  113  mutex_lock(&fault->mutex);  in iommufd_auto_response_faults()
  114  spin_lock(&fault->lock);  in iommufd_auto_response_faults()
  115  list_for_each_entry_safe(group, next, &fault->deliver, node) {  in iommufd_auto_response_faults()
  120  spin_unlock(&fault->lock);  in iommufd_auto_response_faults()
  128  xa_for_each(&fault->response, index, group) {  in iommufd_auto_response_faults()
  131  xa_erase(&fault->response, index);  in iommufd_auto_response_faults()
  135  mutex_unlock(&fault->mutex);  in iommufd_auto_response_faults()
   [all …]
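iommufd_auto_response_faults() drains two containers under fault->mutex: the 'deliver' list of fault groups not yet read by userspace, and the 'response' xarray of groups awaiting replies. A simplified userspace sketch of the first drain for a detaching device — the locking, the xarray, and the iommufd types are elided, and the dev_id field is a hypothetical stand-in:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the fault->deliver list of pending groups. */
struct group { int dev_id; struct group *next; };

static struct group *deliver;

/* Auto-respond to every pending group for a detaching device, in the
 * spirit of iommufd_auto_response_faults(); the real code walks the
 * list with list_for_each_entry_safe() under fault->lock. */
static void auto_respond(int dev_id)
{
	struct group **pp = &deliver;

	while (*pp) {
		struct group *g = *pp;

		if (g->dev_id != dev_id) {
			pp = &g->next;
			continue;
		}
		*pp = g->next;           /* unlink before responding */
		printf("responding to group for dev %d\n", g->dev_id);
		free(g);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct group *g = malloc(sizeof(*g));

		g->dev_id = i % 2;
		g->next = deliver;
		deliver = g;
	}
	auto_respond(0);   /* drains the two dev-0 groups, keeps dev-1 */
	return 0;
}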
|
/linux-6.14.4/drivers/infiniband/hw/hfi1/ |
D | fault.c |
   13  #include "fault.h"
   69  if (!ibd->fault->n_rxfaults[i] && !ibd->fault->n_txfaults[i])  in _fault_stats_seq_show()
   74  (unsigned long long)ibd->fault->n_rxfaults[i],  in _fault_stats_seq_show()
   75  (unsigned long long)ibd->fault->n_txfaults[i]);  in _fault_stats_seq_show()
   96  struct fault *fault = file->private_data;  in fault_opcodes_write() local
  138  bitmap_zero(fault->opcodes, sizeof(fault->opcodes) *  in fault_opcodes_write()
  148  clear_bit(i, fault->opcodes);  in fault_opcodes_write()
  150  set_bit(i, fault->opcodes);  in fault_opcodes_write()
  170  struct fault *fault = file->private_data;  in fault_opcodes_read() local
  171  size_t bitsize = sizeof(fault->opcodes) * BITS_PER_BYTE;  in fault_opcodes_read()
   [all …]
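fault_opcodes_write() and fault_opcodes_read() treat fault->opcodes as one bit per opcode, with the size in bits computed as sizeof(...) * BITS_PER_BYTE (line 171). A self-contained sketch of that bitmap bookkeeping with local set_bit/clear_bit/test_bit helpers — the kernel's bitops differ in signature and atomicity:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
#define NUM_OPCODES 256

/* One bit per opcode, like fault->opcodes in the hfi1 snippet. */
static uint64_t opcodes[NUM_OPCODES / 64];

static void set_bit(unsigned int i)   { opcodes[i / 64] |=  (1ULL << (i % 64)); }
static void clear_bit(unsigned int i) { opcodes[i / 64] &= ~(1ULL << (i % 64)); }
static int  test_bit(unsigned int i)  { return (opcodes[i / 64] >> (i % 64)) & 1; }

int main(void)
{
	/* sized in bits, as in fault_opcodes_read() */
	size_t bitsize = sizeof(opcodes) * BITS_PER_BYTE;

	set_bit(0x64);
	printf("bits=%zu, opcode 0x64 faulting: %d\n", bitsize, test_bit(0x64));
	clear_bit(0x64);
	printf("after clear: %d\n", test_bit(0x64));
	return 0;
}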
|
/linux-6.14.4/drivers/gpu/drm/ci/xfails/ |
D | msm-sm8350-hdk-skips.txt |
   23  # [ 200.895243] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=PERMISS…
   24  # [ 200.906885] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
   25  # [ 200.917625] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
   26  # [ 200.928353] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
   27  # [ 200.939084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
   28  # [ 200.949815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
   30  # [ 200.960467] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
   31  # [ 200.960500] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
   32  # [ 200.995966] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
   33  # [ 201.006702] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
   [all …]
|
/linux-6.14.4/arch/powerpc/mm/ |
D | fault.c |
    6  * Derived from "arch/i386/mm/fault.c"
  109  * 5. T1 : enters fault handler, takes mmap_lock, etc...  in bad_access_pkey()
  140  vm_fault_t fault)  in do_sigbus() argument
  147  if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {  in do_sigbus()
  150  pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",  in do_sigbus()
  153  if (fault & VM_FAULT_HWPOISON_LARGE)  in do_sigbus()
  154  lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));  in do_sigbus()
  155  if (fault & VM_FAULT_HWPOISON)  in do_sigbus()
  168  vm_fault_t fault)  in mm_fault_error() argument
  171  * Kernel page fault interrupted by SIGKILL. We have no reason to  in mm_fault_error()
   [all …]
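Lines 153–155 show do_sigbus() picking the SIGBUS si_addr_lsb from the poison granularity: the huge-page shift for VM_FAULT_HWPOISON_LARGE, otherwise PAGE_SHIFT for a base page. A sketch of that selection; the flag values and the huge_shift parameter are illustrative, not the kernel's VM_FAULT_* encoding:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Illustrative flag values only. */
#define FAULT_HWPOISON        0x1
#define FAULT_HWPOISON_LARGE  0x2

/* Pick si_addr_lsb the way do_sigbus() does: the huge-page shift for
 * a large poisoned mapping, else the base page shift. */
static unsigned int poison_lsb(unsigned int fault, unsigned int huge_shift)
{
	unsigned int lsb = 0;

	if (fault & FAULT_HWPOISON_LARGE)
		lsb = huge_shift;          /* e.g. 21 for a 2MB page */
	else if (fault & FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	return lsb;
}

int main(void)
{
	printf("base page lsb: %u\n", poison_lsb(FAULT_HWPOISON, 0));
	printf("huge page lsb: %u\n", poison_lsb(FAULT_HWPOISON_LARGE, 21));
	return 0;
}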
|
/linux-6.14.4/drivers/iommu/ |
D | io-pgfault.c |
   17  * Return the fault parameter of a device if it exists. Otherwise, return NULL.
   35  /* Caller must hold a reference of the fault parameter. */
   47  if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))  in __iopf_free_group()
   64  struct iommu_fault *fault)  in report_partial_fault() argument
   72  iopf->fault = *fault;  in report_partial_fault()
   98  group->last_fault.fault = evt->fault;  in iopf_group_alloc()
  106  if (iopf->fault.prm.grpid == evt->fault.prm.grpid)  in iopf_group_alloc()
  107  /* Insert *before* the last fault */  in iopf_group_alloc()
  121  struct iommu_fault *fault = &evt->fault;  in find_fault_handler() local
  124  if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {  in find_fault_handler()
   [all …]
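iopf_group_alloc() gathers earlier partial faults that share the grpid of the request carrying IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE into one group (line 106). A minimal sketch of that grouping step, with a flat array standing in for the device's partial-fault list:

#include <stdio.h>

#define PRM_LAST_PAGE 0x1   /* illustrative flag value */

struct iopf { unsigned int grpid; unsigned int flags; };

/* Count the partial faults that share a group id, as iopf_group_alloc()
 * collects them when the request carrying LAST_PAGE arrives. */
static int collect_group(const struct iopf *partial, int n,
			 unsigned int grpid)
{
	int matched = 0;

	for (int i = 0; i < n; i++)
		if (partial[i].grpid == grpid)
			matched++;
	return matched;
}

int main(void)
{
	struct iopf pending[] = {
		{ .grpid = 7, .flags = 0 },
		{ .grpid = 9, .flags = 0 },
		{ .grpid = 7, .flags = 0 },
	};
	struct iopf last = { .grpid = 7, .flags = PRM_LAST_PAGE };

	if (last.flags & PRM_LAST_PAGE)
		printf("group 7: %d partial faults + last\n",
		       collect_group(pending, 3, last.grpid));
	return 0;
}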
|
/linux-6.14.4/arch/arm64/mm/ |
D | fault.c |
    3  * Based on arch/arm/mm/fault.c
  291  * If we now have a valid translation, treat the translation fault as  in is_spurious_el1_translation_fault()
  298  * If we got a different type of fault from the AT instruction,  in is_spurious_el1_translation_fault()
  299  * treat the translation fault as spurious.  in is_spurious_el1_translation_fault()
  349  * tag fault.  in do_tag_recovery()
  375  * Are we prepared to handle this kernel fault?  in __do_kernel_fault()
  382  "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))  in __do_kernel_fault()
  423  * an alignment fault not caused by the memory type would take  in set_thread_esr()
  424  * precedence over translation fault for a real access to empty  in set_thread_esr()
  425  * space. Unfortunately we can't easily distinguish "alignment fault  in set_thread_esr()
   [all …]
|
/linux-6.14.4/arch/powerpc/platforms/powernv/ |
D | vas-fault.c |
    3  * VAS Fault handling.
   21  * The maximum FIFO size for fault window can be 8MB
   23  * instance will be having fault window.
   35  pr_err("Fault fifo size %d, Max crbs %d\n", vinst->fault_fifo_size,  in dump_fifo()
   39  pr_err("Fault FIFO Dump:\n");  in dump_fifo()
   47  * Process valid CRBs in fault FIFO.
   50  * request buffers, raises interrupt on the CPU to handle the fault.
   51  * It takes credit on fault window, updates nx_fault_stamp in CRB with
   52  * the following information and pastes CRB in fault FIFO.
   55  * fault_storage_addr - fault address
   [all …]
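The comments at lines 47–55 describe the protocol: on a translation fault, NX stamps the CRB (nx_fault_stamp, fault_storage_addr) and pastes it into the fault FIFO before raising an interrupt on a CPU. A toy sketch of that paste step — the CRB layout and FIFO depth here are invented for illustration; the real CRB is a fixed-size coprocessor request block:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical CRB layout, reduced to the fields the comment names. */
struct crb {
	uint64_t fault_storage_addr;   /* the faulting address */
	uint8_t  valid;
};

#define FIFO_CRBS 4
static struct crb fifo[FIFO_CRBS];
static unsigned int fifo_in;

/* Stamp and paste a CRB into the fault FIFO the way the comment
 * describes NX doing it on a translation fault. */
static void paste_fault_crb(uint64_t fault_addr)
{
	struct crb *slot = &fifo[fifo_in % FIFO_CRBS];

	memset(slot, 0, sizeof(*slot));
	slot->fault_storage_addr = fault_addr;
	slot->valid = 1;
	fifo_in++;
	/* real hardware would now raise the fault interrupt on a CPU */
}

int main(void)
{
	paste_fault_crb(0xdeadbeef000ULL);
	printf("slot 0 valid=%u addr=%#llx\n", fifo[0].valid,
	       (unsigned long long)fifo[0].fault_storage_addr);
	return 0;
}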
|
/linux-6.14.4/drivers/leds/ |
D | leds-lp8864.c |
   32  "Vin under-voltage fault",
   33  "Vin over-voltage fault",
   34  "Vdd under-voltage fault",
   35  "Vin over-current fault",
   36  "Missing charge pump fault",
   37  "Charge pump fault",
   38  "Missing boost sync fault",
   39  "CRC error fault ",
   44  "Boost OVP low fault",
   45  "Boost OVP high fault",
   [all …]
|
/linux-6.14.4/drivers/gpu/drm/nouveau/ |
D | nouveau_svm.c |
   66  u8 fault;  member
   68  } **fault;  member
  160  * page fault) and maybe some other commands.  in nouveau_svmm_bind()
  379  /* Issue fault replay for GPU to retry accesses that faulted previously. */
  390  /* Cancel a replayable fault that could not be handled.
  392  * Cancelling the fault will trigger recovery to reset the engine
  412  struct nouveau_svm_fault *fault)  in nouveau_svm_fault_cancel_fault() argument
  414  nouveau_svm_fault_cancel(svm, fault->inst,  in nouveau_svm_fault_cancel_fault()
  415  fault->hub,  in nouveau_svm_fault_cancel_fault()
  416  fault->gpc,  in nouveau_svm_fault_cancel_fault()
   [all …]
|
/linux-6.14.4/arch/x86/kvm/mmu/ |
D | paging_tmpl.h |
   92  struct x86_exception fault;  member
  249  ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);  in FNAME()
  352  * Queue a page fault for injection if this assertion fails, as callers  in FNAME()
  353  * assume that walker.fault contains sane info on a walk failure. I.e.  in FNAME()
  380  nested_access, &walker->fault);  in FNAME()
  384  * instruction) triggers a nested page fault. The exit  in FNAME()
  386  * "guest page access" as the nested page fault's cause,  in FNAME()
  448  real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);  in FNAME()
  458  * On a write fault, fold the dirty bit into accessed_dirty.  in FNAME()
  481  walker->fault.vector = PF_VECTOR;  in FNAME()
   [all …]
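Line 458's comment describes a real trick in the guest page-table walker: accessed_dirty stays nonzero only while every visited PTE already has the Accessed bit set, and on a write fault the leaf's Dirty bit is additionally required, by shifting Dirty down into the Accessed position and masking. If the result is zero, the walker must go set A/D bits. A sketch of that fold, assuming the usual x86 PTE layout (Accessed = bit 5, Dirty = bit 6):

#include <stdint.h>
#include <stdio.h>

#define PT_ACCESSED_SHIFT 5
#define PT_DIRTY_SHIFT    6

/* Fold the leaf's Dirty bit into accessed_dirty: nonzero afterwards
 * means A (and, for a write, D) were already set, so no update is
 * needed; zero means the walker must update the bits. */
static uint64_t fold_dirty(uint64_t accessed_dirty, uint64_t pte)
{
	return accessed_dirty & (pte >> (PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT));
}

int main(void)
{
	uint64_t pte_dirty = 1ULL << PT_DIRTY_SHIFT;
	uint64_t pte_clean = 0;
	uint64_t accessed  = 1ULL << PT_ACCESSED_SHIFT;

	/* Dirty already set: result nonzero, nothing to update. */
	printf("%#llx\n", (unsigned long long)fold_dirty(accessed, pte_dirty));
	/* Dirty clear: result zero, an A/D update is still needed. */
	printf("%#llx\n", (unsigned long long)fold_dirty(accessed, pte_clean));
	return 0;
}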
|
D | mmu_internal.h |
  257  * Maximum page size that can be created for this fault; input to
  299  int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
  305  * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
  306  * RET_PF_RETRY: let CPU fault again on the address.
  307  * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
  310  * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
  339  struct kvm_page_fault *fault)  in kvm_mmu_prepare_memory_fault_exit() argument
  341  kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,  in kvm_mmu_prepare_memory_fault_exit()
  342  PAGE_SIZE, fault->write, fault->exec,  in kvm_mmu_prepare_memory_fault_exit()
  343  fault->is_private);  in kvm_mmu_prepare_memory_fault_exit()
   [all …]
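Lines 341–343 show the fault being packaged for a userspace exit: the guest frame number becomes a byte address via << PAGE_SHIFT, reported together with the access bits. A sketch of that packaging — the struct and the printf are stand-ins for kvm_page_fault and the actual exit record:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Minimal stand-in for kvm_page_fault: guest frame number plus the
 * access bits the exit needs to report. */
struct page_fault {
	uint64_t gfn;
	bool write, exec, is_private;
};

/* Mirror kvm_mmu_prepare_memory_fault_exit(): the exit reports a byte
 * address, so the gfn is shifted up by PAGE_SHIFT. */
static void prepare_memory_fault_exit(const struct page_fault *fault)
{
	uint64_t gpa = fault->gfn << PAGE_SHIFT;

	printf("memory fault exit: gpa=%#llx len=%lu write=%d exec=%d private=%d\n",
	       (unsigned long long)gpa, PAGE_SIZE,
	       fault->write, fault->exec, fault->is_private);
}

int main(void)
{
	struct page_fault f = { .gfn = 0x1234, .write = true };

	prepare_memory_fault_exit(&f);
	return 0;
}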
|
/linux-6.14.4/arch/microblaze/mm/ |
D | fault.c |
    2  * arch/microblaze/mm/fault.c
    6  * Derived from "arch/ppc/mm/fault.c"
    9  * Derived from "arch/i386/mm/fault.c"
   71  /* Are we prepared to handle this fault? */  in bad_page_fault()
   83  * The error_code parameter is ESR for a data fault,
   84  * 0 for an instruction fault.
   93  vm_fault_t fault;  in do_page_fault() local
  115  pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",  in do_page_fault()
  119  die("Weird page fault", regs, SIGSEGV);  in do_page_fault()
  130  * erroneous fault occurring in a code path which already holds mmap_lock  in do_page_fault()
   [all …]
|
/linux-6.14.4/arch/m68k/mm/ |
D | fault.c |
    3  * linux/arch/m68k/mm/fault.c
   20  #include "fault.h"
   64  * bit 0 == 0 means no page found, 1 means protection fault
   75  vm_fault_t fault;  in do_page_fault() local
   78  pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",  in do_page_fault()
   83  * context, we must not take the fault..  in do_page_fault()
  136  * If for any reason at all we couldn't handle the fault,  in do_page_fault()
  138  * the fault.  in do_page_fault()
  141  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
  142  pr_debug("handle_mm_fault returns %x\n", fault);  in do_page_fault()
   [all …]
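The comment at line 64 documents the error-code convention the handler decodes. A trivial sketch of just that decode (bit 0 only; the real handler carries more detail in other bits):

#include <stdio.h>

/* Decode the convention the m68k comment documents:
 * bit 0 clear = no page found, bit 0 set = protection fault. */
static const char *decode(unsigned long error_code)
{
	return (error_code & 1) ? "protection fault" : "no page found";
}

int main(void)
{
	for (unsigned long code = 0; code < 2; code++)
		printf("code %lu: %s\n", code, decode(code));
	return 0;
}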
|
/linux-6.14.4/arch/arm/mm/ |
D | fsr-3level.c |
    7  { do_bad, SIGBUS, 0, "reserved translation fault" },
    8  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
    9  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
   10  { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
   11  { do_bad, SIGBUS, 0, "reserved access flag fault" },
   12  { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
   13  { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
   14  { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
   15  { do_bad, SIGBUS, 0, "reserved permission fault" },
   16  { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
   [all …]
|
D | fsr-2level.c |
   12  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
   14  { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
   16  { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
   18  { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
   20  { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
   22  { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
   50  { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
   52  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
   53  { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
   54  { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
   [all …]
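Both fsr-*level.c files share one shape: an array indexed by decoded fault status, each entry carrying a handler, a signal, an si_code, and a human-readable name. A condensed, self-contained sketch of that dispatch-table pattern; the entries and the fsr value are illustrative:

#include <stdio.h>
#include <signal.h>

struct fsr_info {
	int (*fn)(unsigned long addr);   /* returns nonzero if unhandled */
	int sig;
	int code;
	const char *name;
};

static int do_bad(unsigned long addr)        { (void)addr; return 1; }
static int do_page_fault(unsigned long addr) { (void)addr; return 0; }

static const struct fsr_info fsr_info[] = {
	[0] = { do_bad,        SIGBUS,  0,           "reserved translation fault" },
	[3] = { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
};

int main(void)
{
	unsigned int fsr = 3;   /* pretend status decoded from the FSR register */
	const struct fsr_info *inf = &fsr_info[fsr];

	if (inf->fn && inf->fn(0x1000))
		printf("unhandled: %s (signal %d)\n", inf->name, inf->sig);
	else
		printf("handled: %s\n", inf->name);
	return 0;
}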
|
/linux-6.14.4/arch/parisc/mm/ |
D | fault.c |
   46  * the instruction has generated some sort of a memory access fault).
  106  * Data TLB miss fault/data page fault  in parisc_acctyp()
  204  [6] = "Instruction TLB miss fault",
  213  [15] = "Data TLB miss fault",
  214  [16] = "Non-access ITLB miss fault",
  215  [17] = "Non-access DTLB miss fault",
  274  vm_fault_t fault = 0;  in do_page_fault() local
  281  msg = "Page fault: no context";  in do_page_fault()
  313  * If for any reason at all we couldn't handle the fault, make  in do_page_fault()
  315  * fault.  in do_page_fault()
   [all …]
|
/linux-6.14.4/arch/hexagon/mm/ |
D | vm_fault.c |
    3  * Memory fault handling for Hexagon
    9  * Page fault handling for the Hexagon Virtual Machine.
   35  * Canonical page fault handler
   43  vm_fault_t fault;  in do_page_fault() local
   49  * then must not take the fault.  in do_page_fault()
   84  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
   86  if (fault_signal_pending(fault, regs)) {  in do_page_fault()
   92  /* The fault is fully completed (including releasing mmap lock) */  in do_page_fault()
   93  if (fault & VM_FAULT_COMPLETED)  in do_page_fault()
   97  if (likely(!(fault & VM_FAULT_ERROR))) {  in do_page_fault()
   [all …]
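The "canonical page fault handler" the comment at line 35 refers to is the retry loop around handle_mm_fault(): stop on VM_FAULT_COMPLETED (the lock is already dropped), bail on VM_FAULT_ERROR, loop on VM_FAULT_RETRY. A userspace simulation of that control flow, with made-up flag values and a fake fault routine that asks for one retry:

#include <stdio.h>

/* Illustrative stand-ins for the VM_FAULT_* result bits. */
#define FAULT_COMPLETED 0x1
#define FAULT_ERROR     0x2
#define FAULT_RETRY     0x4

static int attempts;

/* Fake handle_mm_fault(): ask for one retry, then succeed. */
static int handle_fault(void)
{
	return attempts++ ? 0 : FAULT_RETRY;
}

int main(void)
{
	int fault;

	for (;;) {
		fault = handle_fault();
		if (fault & FAULT_COMPLETED)   /* lock already released */
			break;
		if (fault & FAULT_ERROR) {     /* OOM / SIGBUS paths */
			printf("error\n");
			return 1;
		}
		if (fault & FAULT_RETRY) {     /* retake the lock and retry */
			printf("retrying\n");
			continue;
		}
		break;                         /* success */
	}
	printf("done after %d attempt(s)\n", attempts);
	return 0;
}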
|
/linux-6.14.4/Documentation/fault-injection/ |
D | fault-injection.rst |
    2  Fault injection capabilities infrastructure
    8  Available fault injection capabilities
   25  injects futex deadlock and uaddr fault errors.
   56  When the fault is injected and the reallocation is triggered, cached pointers
   61  By creating these controlled fault scenarios, the system can catch instances
   70  The effectiveness of this fault detection is enhanced when KASAN is
   74  - NVMe fault injection
   81  - Null test block driver fault injection
   90  Configure fault-injection capabilities behavior
   96  fault-inject-debugfs kernel module provides some debugfs entries for runtime
   [all …]
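The runtime knobs this document goes on to describe (probability, interval, times, ...) gate each potential failure point. A userspace sketch in the spirit of the kernel's should_fail(), with the knobs as plain struct fields rather than debugfs files — a simplification, not the kernel's exact logic:

#include <stdio.h>
#include <stdlib.h>

struct fault_attr {
	int probability;      /* percent chance of failing */
	int interval;         /* only consider every Nth call */
	long times;           /* remaining failures, -1 = unlimited */
	unsigned long count;  /* calls seen so far */
};

/* Decide whether to inject a fault at this call site. */
static int should_fail(struct fault_attr *attr)
{
	attr->count++;
	if (attr->interval > 1 && attr->count % attr->interval)
		return 0;
	if (attr->probability < rand() % 100 + 1)
		return 0;
	if (attr->times == 0)
		return 0;
	if (attr->times > 0)
		attr->times--;
	return 1;
}

int main(void)
{
	struct fault_attr attr = { .probability = 100, .interval = 2, .times = 3 };

	for (int i = 0; i < 10; i++)
		printf("call %d: %s\n", i, should_fail(&attr) ? "FAIL" : "ok");
	return 0;
}

With these settings every second call fails until the three-failure budget is exhausted, mirroring how the debugfs knobs compose.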
|
/linux-6.14.4/arch/alpha/mm/ |
D | fault.c |
    3  * linux/arch/alpha/mm/fault.c
   65  * 2 = fault-on-read
   66  * 3 = fault-on-execute
   67  * 4 = fault-on-write
   92  vm_fault_t fault;  in do_page_fault() local
  110  we must not take the fault. */  in do_page_fault()
  142  /* If for any reason at all we couldn't handle the fault,  in do_page_fault()
  144  the fault. */  in do_page_fault()
  145  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
  147  if (fault_signal_pending(fault, regs)) {  in do_page_fault()
   [all …]
|
/linux-6.14.4/arch/powerpc/lib/ |
D | checksum_32.S |
  109  EX_TABLE(8 ## n ## 0b, fault); \
  110  EX_TABLE(8 ## n ## 1b, fault); \
  111  EX_TABLE(8 ## n ## 2b, fault); \
  112  EX_TABLE(8 ## n ## 3b, fault); \
  113  EX_TABLE(8 ## n ## 4b, fault); \
  114  EX_TABLE(8 ## n ## 5b, fault); \
  115  EX_TABLE(8 ## n ## 6b, fault); \
  116  EX_TABLE(8 ## n ## 7b, fault);
  240  fault:  label
  244  EX_TABLE(70b, fault);
   [all …]
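Each EX_TABLE(insn, fault) line records a (faulting-instruction, fixup) pair; when a user access faults, the trap handler searches the table and resumes at the fixup label instead of oopsing. A sketch of that lookup with plain addresses standing in for the assembler labels:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct exception_entry {
	uintptr_t insn;    /* address of the instruction that may fault */
	uintptr_t fixup;   /* where to resume if it does */
};

static const struct exception_entry ex_table[] = {
	{ 0x800, 0x900 },   /* hypothetical: user-copy insn -> 'fault' label */
	{ 0x804, 0x900 },
};

/* Linear search for clarity; the kernel keeps its table sorted and
 * uses a binary search. */
static uintptr_t search_exception_table(uintptr_t pc)
{
	for (size_t i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
		if (ex_table[i].insn == pc)
			return ex_table[i].fixup;
	return 0;   /* no fixup: a genuine kernel fault */
}

int main(void)
{
	printf("fixup for 0x804: %#lx\n",
	       (unsigned long)search_exception_table(0x804));
	printf("fixup for 0x123: %#lx\n",
	       (unsigned long)search_exception_table(0x123));
	return 0;
}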
|
/linux-6.14.4/arch/nios2/mm/ |
D | fault.c |
    5  * based on arch/mips/mm/fault.c which is:
   50  vm_fault_t fault;  in do_page_fault() local
   59  * We fault-in kernel-space virtual memory on-demand. The  in do_page_fault()
   79  * context, we must not take the fault..  in do_page_fault()
  120  * If for any reason at all we couldn't handle the fault,  in do_page_fault()
  122  * the fault.  in do_page_fault()
  124  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
  126  if (fault_signal_pending(fault, regs)) {  in do_page_fault()
  132  /* The fault is fully completed (including releasing mmap lock) */  in do_page_fault()
  133  if (fault & VM_FAULT_COMPLETED)  in do_page_fault()
   [all …]
|
/linux-6.14.4/arch/sh/mm/ |
D | fault.c |
    2  * Page fault handler for SH with an MMU.
    7  * Based on linux/arch/i386/mm/fault.c:
  157  * be another reason for the fault. Return NULL here to  in vmalloc_sync_one()
  158  * signal that we have not taken care of the fault.  in vmalloc_sync_one()
  174  * Handle a fault on the vmalloc or module mapping area
  225  /* Are we prepared to handle this kernel fault? */  in no_context()
  314  unsigned long address, vm_fault_t fault)  in mm_fault_error() argument
  320  if (fault_signal_pending(fault, regs)) {  in mm_fault_error()
  327  if (!(fault & VM_FAULT_RETRY))  in mm_fault_error()
  330  if (!(fault & VM_FAULT_ERROR))  in mm_fault_error()
   [all …]
|