Lines Matching +full:cpu +full:- +full:read
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/cpu.h>
ack_bad_irq():
	 * holds up an irq slot - in excessive cases (when multiple unexpected
	 * vectors occur) that might lock up the APIC completely.
	 * But only ack when the APIC is enabled -AK
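/*
 * For context, a minimal sketch of the surrounding handler based on
 * upstream ack_bad_irq(); the apic_eoi()/X86_FEATURE_APIC spelling is an
 * assumption (older trees use ack_APIC_irq()):
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Ack so the stuck vector cannot hold up an APIC irq slot, but
	 * only when an APIC is actually enabled.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC))
		apic_eoi();
}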
arch_show_interrupts():
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");

		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);

		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);

		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);

		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);

		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");

		seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);

		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);

		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);

		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);

		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);

		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);

		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);

		seq_printf(p, "%10u ",
			   irq_stats(j)->irq_hv_callback_count);

		seq_printf(p, "%10u ",
			   irq_stats(j)->irq_hv_reenlightenment_count);
	seq_puts(p, "  Hyper-V reenlightenment interrupts\n");

		seq_printf(p, "%10u ",
			   irq_stats(j)->hyperv_stimer0_count);
	seq_puts(p, "  Hyper-V stimer0 interrupts\n");

		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_nested_ipis);
	seq_puts(p, "  Nested posted-interrupt event\n");

		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");

		seq_printf(p, "%10u ",
			   irq_stats(j)->posted_msi_notification_count);
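/*
 * Every row above follows one repeated pattern; a sketch of the NMI row
 * with the surrounding for_each_online_cpu() loop that the match filter
 * stripped out:
 */
	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");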
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;

	sum += irq_stats(cpu)->x86_platform_ipis;

	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;

	sum += irq_stats(cpu)->irq_thermal_count;

	sum += irq_stats(cpu)->irq_threshold_count;

	sum += irq_stats(cpu)->irq_hv_callback_count;

	sum += irq_stats(cpu)->irq_hv_reenlightenment_count;
	sum += irq_stats(cpu)->hyperv_stimer0_count;

	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);

	return sum;
}
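/*
 * The blank groups above sit under config gates (CONFIG_SMP, CONFIG_X86_MCE,
 * Hyper-V, ...) in the full source. Consumer side, condensed from
 * fs/proc/stat.c: each CPU's arch total is folded into the "intr" line of
 * /proc/stat.
 */
	u64 sum = 0;
	int i;

	for_each_possible_cpu(i)
		sum += arch_irq_stat_cpu(i);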
call_irq_handler():
		ret = -EINVAL;

/*
 * common_interrupt() handles all normal device IRQ's (the special SMP
 * cross-CPU interrupts have their own entry points).
 */
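/*
 * A sketch of the dispatch helper that common_interrupt() goes through,
 * modeled on upstream call_irq_handler(); the ratelimited warning for
 * spurious vectors is elided:
 */
static __always_inline int call_irq_handler(int vector, struct pt_regs *regs)
{
	struct irq_desc *desc;
	int ret = 0;

	desc = __this_cpu_read(vector_irq[vector]);
	if (likely(!IS_ERR_OR_NULL(desc))) {
		handle_irq(desc, regs);
	} else {
		/* No handler mapped to this vector on this CPU */
		ret = -EINVAL;
	}

	return ret;
}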
intel_posted_msi_init():
	 * APIC destination ID is stored in bit 8:15 while in XAPIC mode.
	 * VT-d spec. CH 9.11
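/*
 * A sketch of the NDST programming that the VT-d reference above governs,
 * assuming the upstream per-CPU posted_msi_pi_desc descriptor: in xAPIC
 * mode the destination APIC ID occupies bits 8:15 of NDST, in x2APIC mode
 * the 32-bit APIC ID is used as-is.
 */
	u32 apic_id = this_cpu_read(x86_cpu_to_apicid);
	u32 destination = x2apic_enabled() ? apic_id : apic_id << 8;

	this_cpu_write(posted_msi_pi_desc.ndst, destination);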
 * De-multiplexing posted interrupts is on the performance path, the code
 * below is written to optimize the cache performance based on the following
 * considerations:
 * 1.Posted interrupt descriptor (PID) fits in a cache line that is frequently
 *   accessed by both CPU and IOMMU.
 * 2.During posted MSI processing, the CPU needs to do 64-bit read and xchg
 *   for checking and clearing posted interrupt request (PIR) bits.
 *
 * 4.The CPU can access the cache line a magnitude faster than the IOMMU.
 *
 *        CPU              IOMMU              PID Cache line state
 * ---------------------------------------------------------------
 *  (per-operation rows elided by the match filter)
 * ---------------------------------------------------------------
 *
 * The PIR bits are read and xchg'ed in batches; for a full scan
 * (assuming non-zero PIR bits are present in the entire PIR), it does:
 * read, read, read, read, xchg, xchg, xchg, xchg
 * instead of:
 * read, xchg, read, xchg, read, xchg, read, xchg
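/*
 * A sketch of the batched scan described above, modeled on the upstream
 * handle_pending_pir(); treat it as an illustration of the read-then-xchg
 * ordering rather than the exact implementation:
 */
static __always_inline bool handle_pending_pir(u64 *pir, struct pt_regs *regs)
{
	int i, vec = FIRST_EXTERNAL_VECTOR;
	unsigned long pir_copy[4];
	bool handled = false;

	/* Pass 1: plain 64-bit reads snapshot all four PIR chunks */
	for (i = 0; i < 4; i++)
		pir_copy[i] = pir[i];

	/* Pass 2: atomically claim only the chunks that had bits set */
	for (i = 0; i < 4; i++) {
		if (!pir_copy[i])
			continue;

		pir_copy[i] = arch_xchg(&pir[i], 0);
		handled = true;
	}

	/* Dispatch every claimed vector */
	if (handled) {
		for_each_set_bit_from(vec, pir_copy, FIRST_SYSTEM_VECTOR)
			call_irq_handler(vec, regs);
	}

	return handled;
}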
 * For MSIs that are delivered as posted interrupts, the CPU notifications
 * can be coalesced if the MSIs arrive in high frequency bursts.

DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification):
	 * MAX_POSTED_MSI_COALESCING_LOOP - 1 loops are executed here.
		if (!handle_pending_pir(pid->pir64, regs))
			break;

		handle_pending_pir(pid->pir64, regs);
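/*
 * How the two calls above fit together: a condensed sketch of the upstream
 * sysvec_posted_msi_notification() flow (irq accounting and EOI elided):
 */
	struct pi_desc *pid = this_cpu_ptr(&posted_msi_pi_desc);
	int i = 0;

	while (++i < MAX_POSTED_MSI_COALESCING_LOOP) {
		if (!handle_pending_pir(pid->pir64, regs))
			break;
	}

	/* Re-allow notifications; done last to maximize coalescing */
	pi_clear_on(pid);

	/* Scan once more: a notification may have raced the ON-bit clear */
	handle_pending_pir(pid->pir64, regs);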
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
fixup_irqs():
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...

	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.

		raw_spin_lock(&desc->lock);
		if (chip->irq_retrigger) {
			chip->irq_retrigger(data);
			__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
		}
		raw_spin_unlock(&desc->lock);
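/*
 * Context for the locking above: a condensed sketch of the vector-table
 * walk as done in older upstream versions of fixup_irqs(), reading the
 * APIC IRR to spot interrupts that fired mid-migration:
 */
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			/* Re-inject an interrupt pending on the dead CPU */
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
	}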
smp_thermal_vector():
		pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
		       smp_processor_id());
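/*
 * A sketch of the dispatch around that message, assuming the
 * x86_thermal_enabled()/intel_thermal_interrupt() split used by newer
 * kernels:
 */
static void smp_thermal_vector(void)
{
	if (x86_thermal_enabled())
		intel_thermal_interrupt();
	else
		pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
		       smp_processor_id());
}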