/linux-6.14.4/tools/testing/selftests/kvm/x86/

D | hyperv_features.c

static bool is_write_only_msr(uint32_t msr)
{
        return msr == HV_X64_MSR_EOI;
}

static void guest_msr(struct msr_data *msr)
{
        …
        GUEST_ASSERT(msr->idx);

        if (msr->write)
                vector = wrmsr_safe(msr->idx, msr->write_val);

        if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
                vector = rdmsr_safe(msr->idx, &msr_val);

        if (msr->fault_expected)
                …
                msr->write ? "WR" : "RD", msr->idx, vector);
[all …]
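The probe-and-assert pattern above generalizes; a minimal sketch, assuming the KVM selftest helpers wrmsr_safe()/rdmsr_safe() (which return the faulting exception vector, or 0 on success) and GP_VECTOR from the selftest headers. check_msr_access() itself is a hypothetical helper, not part of the test:

static void check_msr_access(uint32_t idx, uint64_t val, bool fault_expected)
{
        uint64_t readback;
        uint8_t vector;

        /* Write first; skip the readback for write-only MSRs. */
        vector = wrmsr_safe(idx, val);
        if (!vector && !is_write_only_msr(idx))
                vector = rdmsr_safe(idx, &readback);

        /* A filtered/unsupported MSR must raise #GP; a legal one must not. */
        if (fault_expected)
                GUEST_ASSERT(vector == GP_VECTOR);
        else
                GUEST_ASSERT(!vector);
}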
D | feature_msrs_test.c

static bool is_kvm_controlled_msr(uint32_t msr)
{
        return msr == MSR_IA32_VMX_CR0_FIXED1 || msr == MSR_IA32_VMX_CR4_FIXED1;
}

/*
 * …
 * MSR, and doesn't allow setting the hidden version.
 */
static bool is_hidden_vmx_msr(uint32_t msr)
{
        switch (msr) {
        …
}

static bool is_quirked_msr(uint32_t msr)
{
        return msr != MSR_AMD64_DE_CFG;
}

static void test_feature_msr(uint32_t msr)
{
        const uint64_t supported_mask = kvm_get_feature_msr(msr);
        uint64_t reset_value = is_quirked_msr(msr) ? supported_mask : 0;
[all …]
D | userspace_msr_exit_test.c

/* Test an MSR the kernel knows about. */
…
/* Test an MSR the kernel doesn't know about. */
…
/* Test a fabricated MSR that no one knows about. */
…
static void deny_msr(uint8_t *bitmap, u32 msr)
{
        u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);
…
static noinline uint64_t test_rdmsr(uint32_t msr)
{
        …
                "=a"(a), "=d"(d) : "c"(msr) : "memory");
…
static noinline void test_wrmsr(uint32_t msr, uint64_t value)
{
        …
                "a"(a), "d"(d), "c"(msr) : "memory");
…
static noinline uint64_t test_em_rdmsr(uint32_t msr)
[all …]
/linux-6.14.4/arch/x86/lib/

D | msr.c

#include <asm/msr.h>
…
#include <asm/msr-trace.h>

struct msr __percpu *msrs_alloc(void)
{
        struct msr __percpu *msrs = NULL;

        msrs = alloc_percpu(struct msr);
        …
}

void msrs_free(struct msr __percpu *msrs)
…

/*
 * msr_read - Read an MSR with error handling
 * @msr: MSR to read
 * …
 */
static int msr_read(u32 msr, struct msr *m)
{
        …
        err = rdmsrl_safe(msr, &val);
[all …]
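msr_read() is static to this file; a hedged sketch of the same defensive pattern for other kernel code, assuming only rdmsrl_safe() from <asm/msr.h>. MSR_IA32_PLATFORM_ID and the helper name are purely illustrative:

#include <asm/msr.h>

static int read_platform_id(u64 *val)
{
        /* rdmsrl_safe() catches the #GP a missing MSR would raise. */
        if (rdmsrl_safe(MSR_IA32_PLATFORM_ID, val))
                return -EIO;

        return 0;
}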
D | msr-smp.c

#include <asm/msr.h>
…
        struct msr *reg;        /* in __rdmsr_on_cpu() */
…
        struct msr *reg;        /* in __wrmsr_on_cpu() */
…
                            struct msr __percpu *msrs,  /* in __rwmsr_on_cpus() */
…
/*
 * …
 * @msr_no: which MSR
 * @msrs: array of MSR values
 * …
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
…
/*
 * …
 * @msr_no: which MSR
 * @msrs: array of MSR values
 * …
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
[all …]
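Together with msrs_alloc()/msrs_free() from msr.c above, the cross-CPU helpers are used roughly as in this sketch (assumptions: process context, and MSR_IA32_TSC is only an example register; the function name is hypothetical):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <asm/msr.h>

static void dump_tsc_on_all_cpus(void)
{
        struct msr __percpu *msrs;
        int cpu;

        msrs = msrs_alloc();
        if (!msrs)
                return;

        /* One IPI-driven rdmsr per online CPU; results land per-cpu. */
        rdmsr_on_cpus(cpu_online_mask, MSR_IA32_TSC, msrs);

        for_each_online_cpu(cpu)
                pr_info("cpu%d: tsc=%llx\n", cpu, per_cpu_ptr(msrs, cpu)->q);

        msrs_free(msrs);
}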
/linux-6.14.4/arch/x86/kernel/cpu/

D | perfctr-watchdog.c

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
        …
                if (msr >= MSR_F15H_PERF_CTR)
                        return (msr - MSR_F15H_PERF_CTR) >> 1;
                return msr - MSR_K7_PERFCTR0;
        …
                return msr - MSR_ARCH_PERFMON_PERFCTR0;
        …
                return msr - MSR_P6_PERFCTR0;
        …
                return msr - MSR_KNC_PERFCTR0;
        …
                return msr - MSR_P4_BPU_PERFCTR0;
        …
        return msr - MSR_ARCH_PERFMON_PERFCTR0;
[all …]
D | feat_ctl.c

#include <asm/msr-index.h>
…
/* init_vmx_capabilities() */
        /* All 64 bits of tertiary controls MSR are allowed-1 settings. */
        …
         * MSR, low for EPT, high for VPID.
…
/* init_ia32_feat_ctl() */
        u64 msr;

        if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) {
        …
        if (msr & FEAT_CTL_LOCKED)
        …
        /*
         * Ignore whatever value BIOS left in the MSR to avoid enabling random
         * …
         */
        msr = FEAT_CTL_LOCKED;
        …
                msr |= FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
        …
                msr |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX;
[all …]
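Condensed into one place, the flow init_ia32_feat_ctl() implements looks roughly like the sketch below. This is not the kernel's exact code: it omits the SMX-capability and SGX plumbing, and the helper name is hypothetical:

static void enable_and_lock_vmx(void)
{
        u64 msr;

        if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr))
                return;         /* MSR not implemented */

        if (msr & FEAT_CTL_LOCKED)
                return;         /* firmware already made the choice */

        /* Start from a clean value rather than trusting BIOS leftovers,
         * then lock so later writes #GP until reset. */
        msr = FEAT_CTL_LOCKED | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
        wrmsrl(MSR_IA32_FEAT_CTL, msr);
}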
/linux-6.14.4/arch/x86/include/asm/

D | msr.h

#include "msr-index.h"
…
#include <uapi/asm/msr.h>
#include <asm/shared/msr.h>
…
        struct msr reg;
        struct msr __percpu *msrs;
…
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
…
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
[all …]
D | msr-trace.h

#define TRACE_SYSTEM msr
…
#define TRACE_INCLUDE_FILE msr-trace
…
        TP_PROTO(unsigned msr, u64 val, int failed),
        TP_ARGS(msr, val, failed),
        …
                __field( unsigned, msr )
        …
                __entry->msr = msr;
        …
                  __entry->msr,
        …
        TP_PROTO(unsigned msr, u64 val, int failed),
        TP_ARGS(msr, val, failed)
…
        TP_PROTO(unsigned msr, u64 val, int failed),
[all …]
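These tracepoints are fired from the MSR accessors themselves. A sketch of the hook pattern, modeled on msr.h's native_read_msr() (tracepoint_enabled() is the static-key test from <linux/tracepoint-defs.h>; the wrapper name here is illustrative):

static __always_inline u64 traced_rdmsr(unsigned int msr)
{
        u64 val = __rdmsr(msr);

        /* Costs one patched-out branch while the event is disabled. */
        if (tracepoint_enabled(read_msr))
                do_trace_read_msr(msr, val, 0);

        return val;
}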
/linux-6.14.4/arch/powerpc/kvm/

D | book3s_hv_tm.c

/* emulate_tx_failure() */
        u64 msr = vcpu->arch.shregs.msr;
        …
        if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
        …
        if (msr & MSR_PR) {
…
/* kvmhv_p9_tm_emulation() */
        u64 msr = vcpu->arch.shregs.msr;
        …
                WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
        …
                vcpu->arch.shregs.msr = newmsr;
        …
                if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
        …
                if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
        …
                WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
        …
                msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
[all …]
D | book3s_hv_tm_builtin.c

/*
 * …
 * (MSR[TS] = S and the fake-suspend flag is not set).
 */
…
/* kvmhv_p9_tm_emulation_early() */
        u64 newmsr, msr, bescr;
        …
        vcpu->arch.shregs.msr = newmsr;
        …
        msr = vcpu->arch.shregs.msr;
        if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
        …
            ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))
        …
        msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
        vcpu->arch.shregs.msr = msr;
        …
        msr = vcpu->arch.shregs.msr;
        …
        newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
[all …]
/linux-6.14.4/drivers/powercap/

D | intel_rapl_msr.c

/*
 * Intel Running Average Power Limit (RAPL) Driver via MSR interface
 * …
 */
…
/* private data for RAPL MSR Interface */
…
        .reg_unit.msr = MSR_RAPL_POWER_UNIT,
        .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PKG_POWER_LIMIT,
        .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr = MSR_PKG_ENERGY_STATUS,
        .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PERF].msr = MSR_PKG_PERF_STATUS,
        .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_INFO].msr = MSR_PKG_POWER_INFO,
        .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PP0_POWER_LIMIT,
        .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr = MSR_PP0_ENERGY_STATUS,
        .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_POLICY].msr = MSR_PP0_POLICY,
[all …]
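From userspace, the registers this table names can be read through the msr character device. A sketch, assuming root and a loaded msr module; 0x606 and 0x611 are MSR_RAPL_POWER_UNIT and MSR_PKG_ENERGY_STATUS:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t unit, status;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0)
                return 1;

        pread(fd, &unit, sizeof(unit), 0x606);          /* MSR_RAPL_POWER_UNIT */
        pread(fd, &status, sizeof(status), 0x611);      /* MSR_PKG_ENERGY_STATUS */
        close(fd);

        /* Energy unit is 1 / 2^ESU joules; ESU lives in bits 12:8. */
        printf("pkg energy: %.6f J\n",
               (double)(uint32_t)status / (double)(1u << ((unit >> 8) & 0x1f)));
        return 0;
}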
/linux-6.14.4/arch/x86/kvm/

D | mtrr.c

static u64 *find_mtrr(struct kvm_vcpu *vcpu, unsigned int msr)
{
        …
        switch (msr) {
        …
                index = msr - MTRRphysBase_MSR(0);
        …
                index = msr - MSR_MTRRfix16K_80000;
        …
                index = msr - MSR_MTRRfix4K_C0000;
        …
}

static bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        …
        if (msr == MSR_MTRRdefType) {
        …
        } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
        …
        if (WARN_ON_ONCE(!(msr >= MTRRphysBase_MSR(0) &&
                           msr <= MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1))))
[all …]
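The index arithmetic works because variable-range MTRR MSRs interleave: MTRRphysBase_MSR(n) is 0x200 + 2n and MTRRphysMask_MSR(n) is the odd number right after it, so base/mask pairs flatten into one array. A small sketch of the decoding, assuming those macros from msr-index.h (the helper names are illustrative):

/* 0x200/0x201 -> pair 0, 0x202/0x203 -> pair 1, ... */
static inline unsigned int var_mtrr_pair(unsigned int msr)
{
        return (msr - MTRRphysBase_MSR(0)) / 2;
}

static inline bool var_mtrr_is_mask(unsigned int msr)
{
        return msr & 1;         /* odd MSR numbers are the mask half */
}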
/linux-6.14.4/arch/x86/events/

D | probe.c

/*
 * Accepts msr[] array with non populated entries as long as either
 * msr[i].msr is 0 or msr[i].grp is NULL. Note that the default sysfs
 * …
 */
perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data)
{
        …
                if (!msr[bit].no_check) {
                        struct attribute_group *grp = msr[bit].grp;
        …
                if (!msr[bit].msr)
        …
                if (msr[bit].test && !msr[bit].test(bit, data))
        …
                /* Virt sucks; you cannot tell if a R/O MSR is present :/ */
                if (rdmsrl_safe(msr[bit].msr, &val))
        …
                mask = msr[bit].mask;
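A caller hands perf_msr_probe() a table like the hedged sketch below, modeled loosely on the RAPL events. Every name here is illustrative rather than the kernel's; only the struct perf_msr fields (.msr, .grp, .mask) and the probe signature come from the snippet above:

enum { DEMO_PKG_ENERGY, DEMO_MSR_MAX };

static struct attribute_group demo_events_group;        /* sysfs group, assumed populated elsewhere */

static struct perf_msr demo_msrs[] = {
        [DEMO_PKG_ENERGY] = {
                .msr  = MSR_PKG_ENERGY_STATUS,
                .grp  = &demo_events_group,
                .mask = 0xffffffff,     /* RAPL energy counters are 32 bits */
        },
};

/* Returns a bitmask of entries whose MSR actually reads back here: */
/* avail = perf_msr_probe(demo_msrs, DEMO_MSR_MAX, false, NULL);    */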
/linux-6.14.4/arch/powerpc/kernel/

D | signal_64.c

/* __unsafe_setup_sigcontext() */
        unsigned long msr = regs->msr;
        …
                /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
                 * …
                 */
                msr |= MSR_VEC;
        …
                 * Clear the MSR VSX bit to indicate there is no valid state attached
                 * …
                msr &= ~MSR_VSX;
        …
                /* set MSR_VSX in the MSR value in the frame to
                 * …
                 */
                msr |= MSR_VSX;
        …
        unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out);
…
/* setup_tm_sigcontexts() */
                          unsigned long msr)
        …
        BUG_ON(!MSR_TM_ACTIVE(msr));
[all …]
D | signal_32.c

/* __unsafe_restore_general_regs() */
        /* copy up to but not including MSR */
        …
        /* copy from orig_r3 (the word after the MSR) up to the end */
…
/* __unsafe_save_user_regs() */
        unsigned long msr = regs->msr;
        …
                /* set MSR_VEC in the saved MSR value to indicate that
                 * …
                 */
                msr |= MSR_VEC;
        …
        /* else assert((regs->msr & MSR_VEC) == 0) */
        …
         * Clear the MSR VSX bit to indicate there is no valid state attached
         * …
                msr &= ~MSR_VSX;
        …
         * the saved MSR value to indicate that frame->mc_vregs
         * …
                msr |= MSR_VSX;
[all …]
D | cpu_setup_power.c

/* Disable CPU_FTR_HVMODE and return false if MSR:HV is not set */
/* init_hvmode_206() */
        u64 msr;

        msr = mfmsr();
        if (msr & MSR_HV)
…
/* __restore_cpu_power7() */
        u64 msr;

        msr = mfmsr();
        if (!(msr & MSR_HV))
…
/* __restore_cpu_power8() */
        u64 msr;
        …
        msr = mfmsr();
        if (!(msr & MSR_HV))
[all …]
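All three routines share the same gate; a minimal sketch of it, using mfmsr() and the powerpc MSR_HV bit (the helper name is hypothetical):

static bool cpu_in_hv_mode(void)
{
        /* MSR[HV] set means the CPU is running in hypervisor state. */
        return !!(mfmsr() & MSR_HV);
}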
/linux-6.14.4/Documentation/trace/

D | events-msr.rst

MSR Trace Events
================

The x86 kernel supports tracing most MSR (Model Specific Register) accesses.
…
    /sys/kernel/tracing/events/msr/

Trace MSR reads:
…
    - msr: MSR number
…
Trace MSR writes:
…
    - msr: MSR number
…
    cat /sys/kernel/tracing/trace | decode_msr.py /usr/src/linux/include/asm/msr-index.h

to add symbolic MSR names.
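For completeness, a small C sketch of switching the read_msr event on and streaming the results, using only the tracefs paths the document already names (error handling trimmed for brevity):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd;

        /* Enable the msr:read_msr event... */
        fd = open("/sys/kernel/tracing/events/msr/read_msr/enable", O_WRONLY);
        if (fd < 0)
                return 1;
        write(fd, "1", 1);
        close(fd);

        /* ...then tail the live trace buffer. */
        fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        return 0;
}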
/linux-6.14.4/arch/x86/xen/

D | pmu.c

/* Macro for computing address of a PMU MSR bank */
…
static inline bool is_amd_pmu_msr(unsigned int msr)
{
        …
        if ((msr >= MSR_F15H_PERF_CTL &&
             msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||
            (msr >= MSR_K7_EVNTSEL0 &&
             msr < MSR_K7_PERFCTR0 + amd_num_counters))
…
static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,
…
        switch (msr) {
        …
        if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL)
…
static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
[all …]
/linux-6.14.4/arch/x86/kvm/svm/

D | pmu.c

static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
…
        switch (msr) {
        …
                idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
                if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
        …
                idx = msr - MSR_K7_EVNTSEL0;
        …
                idx = msr - MSR_K7_PERFCTR0;
        …

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
        …
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        …
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
[all …]
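The divide-by-two and the parity test fall out of the F15h layout: from MSR_F15H_PERF_CTL0 (0xc0010200) upward, event selects and counters alternate. A worked sketch of the decoding (helper names are illustrative):

/* 0xc0010200 EVNTSEL0, 0xc0010201 CTR0, 0xc0010202 EVNTSEL1, ... */
static inline unsigned int f15h_msr_to_counter_idx(u32 msr)
{
        return (msr - MSR_F15H_PERF_CTL0) / 2;
}

static inline bool f15h_msr_is_counter(u32 msr)
{
        return msr & 0x1;       /* odd offsets are the counter half */
}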
/linux-6.14.4/arch/microblaze/kernel/

D | process.c

/* show_regs() */
        pr_info(" msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n",
                regs->msr, regs->ear, regs->esr, regs->fsr);
…
/* copy_thread() */
        local_save_flags(childregs->msr);
        ti->cpu_context.msr = childregs->msr & ~MSR_IE;
        …
        childregs->msr |= MSR_UMS;
        …
         * before enabling VM. This MSR will be restored in switch_to and
         * …
         * compose the right MSR for RETURN(). It will work for switch_to also
         * …
         * right now MSR is a copy of parent one */
        childregs->msr &= ~MSR_EIP;
        childregs->msr |= MSR_IE;
[all …]
/linux-6.14.4/arch/arm64/kvm/hyp/nvhe/

D | hyp-init.S

        msr     mair_el2, x1
        …
        msr     hcr_el2, x1
        …
        msr     tpidr_el2, x0
        …
        msr     tpidr_el2, x1
        …
        msr     vttbr_el2, x1
        …
        msr     vtcr_el2, x1
        …
        msr     ttbr0_el2, x2
        …
        msr     tcr_el2, x0
        …
        msr     sctlr_el2, x0
        …
        msr     vbar_el2, x0
[all …]
/linux-6.14.4/arch/m68k/bvme6000/

D | config.c

/* bvme6000_timer_int() */
        unsigned char msr;
        …
        msr = rtc->msr & 0xc0;
        rtc->msr = msr | 0x20;  /* Ack the interrupt */
…
/* bvme6000_sched_init() */
        unsigned char msr = rtc->msr & 0xc0;

        rtc->msr = 0;           /* Ensure timer registers accessible */
        …
        rtc->msr = 0x40;        /* Access int.cntrl, etc */
        …
        rtc->msr = 0;           /* Access timer 1 control */
        …
        rtc->msr = msr;
…
/* bvme6000_read_clk() */
        unsigned char msr, msb;
        …
        msr = rtc->msr & 0xc0;
[all …]
/linux-6.14.4/tools/power/x86/turbostat/

D | turbostat.c

int get_msr(int cpu, off_t offset, unsigned long long *msr);
…
/* slm_bclk() */
        unsigned long long msr = 3;
        …
        if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
        …
        i = msr & 0xf;
…
        unsigned long long msr[NUM_RAPL_COUNTERS];      /* struct member */
…
        unsigned long long msr;                         /* struct member */
        …
        double *platform_rapl_msr_scale;        /* Scale applied to values read by MSR (platform dependent, fille… */
…
        .msr = MSR_PKG_ENERGY_STATUS,
…
        .msr = MSR_PKG_ENERGY_STAT,
…
        .msr = MSR_PP0_ENERGY_STATUS,
[all …]
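slm_bclk() decodes MSR_FSB_FREQ through a table; a sketch of that pattern using the get_msr() declaration above. The table values are illustrative Silvermont bus clocks, not a verified copy of turbostat's own table, and the function name is hypothetical:

static const double slm_bclk_mhz[] = { 83.3, 100.0, 133.3, 116.7, 80.0 };

static double read_slm_bclk(int cpu)
{
        unsigned long long msr;
        unsigned int i;

        if (get_msr(cpu, MSR_FSB_FREQ, &msr))
                return 0.0;

        i = msr & 0xf;  /* low nibble indexes the bus-clock table */
        if (i >= sizeof(slm_bclk_mhz) / sizeof(slm_bclk_mhz[0]))
                return 0.0;

        return slm_bclk_mhz[i];
}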
/linux-6.14.4/arch/arm64/mm/

D | proc.S

        msr     tpidr_el0, x2
        msr     tpidrro_el0, x3
        msr     contextidr_el1, x4
        msr     cpacr_el1, x6
        …
        msr     tcr_el1, x8
        msr     vbar_el1, x9
        msr     mdscr_el1, x10
        …
        msr     sctlr_el1, x12
        …
        msr     sp_el0, x14
        …
        msr     osdlr_el1, x5
[all …]