Lines Matching +full:0 +full:xfb000000
67 * iff the exit map is 0 without taking a lock.
138 #define VSID_REAL 0x07ffffffffc00000ULL
139 #define VSID_BAT 0x07ffffffffb00000ULL
140 #define VSID_64K 0x0800000000000000ULL
141 #define VSID_1T 0x1000000000000000ULL
142 #define VSID_REAL_DR 0x2000000000000000ULL
143 #define VSID_REAL_IR 0x4000000000000000ULL
144 #define VSID_PR 0x8000000000000000ULL
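The VSID_* values above are tag bits that sit above the architected VSID field; Book3S KVM ORs them into the shadow VSID so that mappings created under different MSR[IR]/MSR[DR] combinations and privilege levels never alias each other. The following is an illustrative sketch only, loosely modelled on the esid-to-vsid tagging in the Book3S MMU code; the helper name is mine, and MSR_DR/MSR_IR/MSR_PR are the usual powerpc MSR bit definitions:

/* Illustrative only: tag a shadow VSID by translation regime. */
static u64 tag_shadow_vsid(u64 vsid, unsigned long msr)
{
	switch (msr & (MSR_DR | MSR_IR)) {
	case 0:
		vsid |= VSID_REAL;	/* neither relocation bit set */
		break;
	case MSR_IR:
		vsid |= VSID_REAL_IR;	/* instruction relocation only */
		break;
	case MSR_DR:
		vsid |= VSID_REAL_DR;	/* data relocation only */
		break;
	default:
		break;			/* both set: the guest segment VSID is used as-is */
	}
	if (msr & MSR_PR)
		vsid |= VSID_PR;	/* keep problem-state mappings separate */
	return vsid;
}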
361 return 0; in kvmhv_nestedv2_reload_ptregs()
368 return 0; in kvmhv_nestedv2_mark_dirty_ptregs()
375 return 0; in kvmhv_nestedv2_mark_dirty()
382 return 0; in kvmhv_nestedv2_cached_reload()
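The four "return 0" hits above come from thin wrappers around the nested-v2 (PAPR guest-state buffer) API: when the VM is not running as a nestedv2 guest there is no remote state to synchronise, so the wrappers are no-ops. A sketch of that shape; the guard predicate and the double-underscore backend name are my assumptions, not verified against the exact header:

/* Sketch of the no-op-unless-nestedv2 wrapper pattern (assumed names). */
static inline int nestedv2_cached_reload_sketch(struct kvm_vcpu *vcpu, u16 iden)
{
	if (!kvmhv_is_nestedv2())
		return 0;	/* state is already local: nothing to pull */
	return __kvmhv_nestedv2_cached_reload(vcpu, iden);	/* assumed backend */
}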
409 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0); in kvmppc_get_gpr()
421 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0); in kvmppc_get_cr()
433 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0); in kvmppc_get_xer()
445 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0); in kvmppc_get_ctr()
457 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0); in kvmppc_get_lr()
469 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0); in kvmppc_get_pc()
486 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0); in kvmppc_get_fpr()
498 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0); in kvmppc_get_fpscr()
511 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0); in kvmppc_get_vsx_fpr()
525 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0); in kvmppc_get_vsx_vr()
538 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0); in kvmppc_get_vscr()
560 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
587 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
609 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0); in kvmppc_get_dec_expires()
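Every getter in the run above follows the same two-step pattern: first ask kvmhv_nestedv2_cached_reload() to refresh the identified register from the L0 hypervisor's guest-state buffer if the cached copy is stale (the WARN_ON only fires if that refresh fails), then return the locally cached value. A sketch of one such accessor, modelled on kvmppc_get_gpr() in kvm_book3s.h; the exact backing field is incidental to the pattern:

/* Sketch of the reload-then-read accessor pattern used by the getters above. */
static inline unsigned long get_gpr_sketch(struct kvm_vcpu *vcpu, int num)
{
	/* Pull the register from the guest-state buffer if it is stale. */
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
	/* Read the (now current) cached register image. */
	return vcpu->arch.regs.gpr[num];
}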
642 #define OSI_SC_MAGIC_R3 0x113724FA
643 #define OSI_SC_MAGIC_R4 0x77810F9B
645 #define INS_DCBZ 0x7c0007ec
647 #define INS_TW 0x7fe00008
649 #define SPLIT_HACK_MASK 0xff000000
650 #define SPLIT_HACK_OFFS 0xfb000000
653 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
654 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
658 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
659 * 0) unchanged: if the guest is filling each VCORE completely then it will be
686 const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7}; in kvmppc_pack_vcpu_id()
692 return 0; in kvmppc_pack_vcpu_id()
695 return 0; in kvmppc_pack_vcpu_id()
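The block_offsets table interleaves the packed IDs so that VCPU ID blocks above KVM_MAX_VCPUS land in thread offsets that a reduced-stride guest leaves unused, while block 0 is kept unchanged as the comment above says. A sketch of the arithmetic follows; variable names and the error handling are simplified, and MAX_SMT_THREADS is taken as 8 purely for illustration:

/* Sketch of the VCPU ID packing arithmetic; not the exact kernel code. */
static u32 pack_vcpu_id_sketch(u32 id, int emul_smt_mode)
{
	static const int block_offsets[8] = { 0, 4, 2, 6, 1, 5, 3, 7 };
	/* Which KVM_MAX_VCPUS-sized block the ID falls in, scaled by the stride. */
	int block = (id / KVM_MAX_VCPUS) * (8 / emul_smt_mode);
	u32 packed;

	if (block >= 8)
		return 0;			/* too large to pack */
	/* Fold the block into a thread offset the guest's stride leaves free. */
	packed = (id % KVM_MAX_VCPUS) + block_offsets[block];
	return packed < KVM_MAX_VCPUS ? packed : 0;
}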