@@ -78,6 +78,7 @@ struct kvmppc_vcpu_book3s {
 		u64 vsid;
 	} slb_shadow[64];
 	u8 slb_shadow_max;
+	u8 shadow_vcpu_paca;
 	struct kvmppc_sr sr[16];
 	struct kvmppc_bat ibat[8];
 	struct kvmppc_bat dbat[8];
@@ -151,9 +151,12 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-	if ( num < 14 )
-		return get_paca()->shadow_vcpu.gpr[num];
-	else
+	if ( num < 14 ) {
+		if (to_book3s(vcpu)->shadow_vcpu_paca)
+			return get_paca()->shadow_vcpu.gpr[num];
+		else
+			return to_book3s(vcpu)->shadow_vcpu.gpr[num];
+	} else
 		return vcpu->arch.gpr[num];
 }
 
@@ -165,7 +168,10 @@ static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-	return get_paca()->shadow_vcpu.cr;
+	if (to_book3s(vcpu)->shadow_vcpu_paca)
+		return get_paca()->shadow_vcpu.cr;
+	else
+		return to_book3s(vcpu)->shadow_vcpu.cr;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
@@ -176,7 +182,10 @@ static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
 
 static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-	return get_paca()->shadow_vcpu.xer;
+	if (to_book3s(vcpu)->shadow_vcpu_paca)
+		return get_paca()->shadow_vcpu.xer;
+	else
+		return to_book3s(vcpu)->shadow_vcpu.xer;
 }
 
 #else
@@ -73,10 +73,12 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
 	get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
+	to_book3s(vcpu)->shadow_vcpu_paca = true;
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	to_book3s(vcpu)->shadow_vcpu_paca = false;
 	memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
 	memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));