Message ID | 20220506092403.47406-2-pmorel@linux.ibm.com (mailing list archive) |
---|---|
State | New, archived |
Series | s390x: KVM: CPU Topology |
On 06.05.22 11:24, Pierre Morel wrote:
> The former check to choose between SIIF or not SIIF can be done
> using the sclp.has_siif instead of accessing per vCPU structures
>
> When accessing the SCA, ipte lock and ipte_unlock do not need
> to access any vcpu structures but only the KVM structure.
>
> Let's simplify the ipte handling.
>
> Signed-off-by: Pierre Morel <pmorel@linux.ibm.com>

Much better

Reviewed-by: David Hildenbrand <david@redhat.com>
On 5/6/22 11:24, Pierre Morel wrote:
> The former check to choose between SIIF or not SIIF can be done
> using the sclp.has_siif instead of accessing per vCPU structures

Maybe replace this paragraph with:

We can check if SIIF is enabled by testing the sclp_info struct instead
of testing the sie control block eca variable. sclp.has_siif is the only
requirement to set ECA_SII anyway so we can go straight to the source
for that.

Reviewed-by: Janosch Frank <frankja@linux.ibm.com>

> When accessing the SCA, ipte lock and ipte_unlock do not need
> to access any vcpu structures but only the KVM structure.
>
> Let's simplify the ipte handling.
>
> Signed-off-by: Pierre Morel <pmorel@linux.ibm.com>
> ---
>  arch/s390/kvm/gaccess.c | 96 ++++++++++++++++++++---------------------
>  arch/s390/kvm/gaccess.h |  6 +--
>  arch/s390/kvm/priv.c    |  6 +--
>  3 files changed, 54 insertions(+), 54 deletions(-)
>
> [...]
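Janosch's point that sclp.has_siif is the only requirement for ECA_SII can be seen in the vcpu setup path: the bit is derived directly from the SCLP facility info, so testing sclp.has_siif reads the same information at its source and needs no vcpu. A simplified sketch of that relationship (the wrapper function name is invented for illustration; only the guarded assignment reflects the kvm-s390 setup convention):

/*
 * Sketch: during vcpu creation, ECA_SII is set if and only if the
 * machine reports the SIIF facility via SCLP. Nothing else ever
 * sets it, so sclp.has_siif and (eca & ECA_SII) agree.
 */
static void setup_siif_sketch(struct kvm_vcpu *vcpu)
{
        if (sclp.has_siif)
                vcpu->arch.sie_block->eca |= ECA_SII;
}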
On 5/12/22 13:32, Janosch Frank wrote:
> On 5/6/22 11:24, Pierre Morel wrote:
>> The former check to choose between SIIF or not SIIF can be done
>> using the sclp.has_siif instead of accessing per vCPU structures
>
> Maybe replace this paragraph with:
> We can check if SIIF is enabled by testing the sclp_info struct instead
> of testing the sie control block eca variable. sclp.has_siif is the only
> requirement to set ECA_SII anyway so we can go straight to the source
> for that.
>
> Reviewed-by: Janosch Frank <frankja@linux.ibm.com>

OK, thanks,

Regards,
Pierre

>
>> [...]
On 5/12/22 11:08, David Hildenbrand wrote:
> On 06.05.22 11:24, Pierre Morel wrote:
>> The former check to choose between SIIF or not SIIF can be done
>> using the sclp.has_siif instead of accessing per vCPU structures
>>
>> When accessing the SCA, ipte lock and ipte_unlock do not need
>> to access any vcpu structures but only the KVM structure.
>>
>> Let's simplify the ipte handling.
>>
>> Signed-off-by: Pierre Morel <pmorel@linux.ibm.com>
>
> Much better
>
> Reviewed-by: David Hildenbrand <david@redhat.com>

Thanks,

Regards,
Pierre
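The practical payoff of the new signatures is that a code path holding only a struct kvm pointer can serialize against the guest's IPTE interlock without picking an arbitrary vcpu. A hypothetical caller under the post-patch API (walk_guest_tables_sketch is an invented name, not an upstream function):

/* Hypothetical: take the IPTE lock from a VM-wide context. */
static int walk_guest_tables_sketch(struct kvm *kvm)
{
        int rc = 0;

        ipte_lock(kvm);         /* pre-patch this required a vcpu */
        /* ... safely read guest DAT tables here ... */
        ipte_unlock(kvm);
        return rc;
}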
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index d53a183c2005..0e1f6dd31882 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -262,77 +262,77 @@ struct aste {
         /* .. more fields there */
 };
 
-int ipte_lock_held(struct kvm_vcpu *vcpu)
+int ipte_lock_held(struct kvm *kvm)
 {
-        if (vcpu->arch.sie_block->eca & ECA_SII) {
+        if (sclp.has_siif) {
                 int rc;
 
-                read_lock(&vcpu->kvm->arch.sca_lock);
-                rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
-                read_unlock(&vcpu->kvm->arch.sca_lock);
+                read_lock(&kvm->arch.sca_lock);
+                rc = kvm_s390_get_ipte_control(kvm)->kh != 0;
+                read_unlock(&kvm->arch.sca_lock);
                 return rc;
         }
-        return vcpu->kvm->arch.ipte_lock_count != 0;
+        return kvm->arch.ipte_lock_count != 0;
 }
 
-static void ipte_lock_simple(struct kvm_vcpu *vcpu)
+static void ipte_lock_simple(struct kvm *kvm)
 {
         union ipte_control old, new, *ic;
 
-        mutex_lock(&vcpu->kvm->arch.ipte_mutex);
-        vcpu->kvm->arch.ipte_lock_count++;
-        if (vcpu->kvm->arch.ipte_lock_count > 1)
+        mutex_lock(&kvm->arch.ipte_mutex);
+        kvm->arch.ipte_lock_count++;
+        if (kvm->arch.ipte_lock_count > 1)
                 goto out;
 retry:
-        read_lock(&vcpu->kvm->arch.sca_lock);
-        ic = kvm_s390_get_ipte_control(vcpu->kvm);
+        read_lock(&kvm->arch.sca_lock);
+        ic = kvm_s390_get_ipte_control(kvm);
         do {
                 old = READ_ONCE(*ic);
                 if (old.k) {
-                        read_unlock(&vcpu->kvm->arch.sca_lock);
+                        read_unlock(&kvm->arch.sca_lock);
                         cond_resched();
                         goto retry;
                 }
                 new = old;
                 new.k = 1;
         } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
-        read_unlock(&vcpu->kvm->arch.sca_lock);
+        read_unlock(&kvm->arch.sca_lock);
 out:
-        mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
+        mutex_unlock(&kvm->arch.ipte_mutex);
 }
 
-static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
+static void ipte_unlock_simple(struct kvm *kvm)
 {
         union ipte_control old, new, *ic;
 
-        mutex_lock(&vcpu->kvm->arch.ipte_mutex);
-        vcpu->kvm->arch.ipte_lock_count--;
-        if (vcpu->kvm->arch.ipte_lock_count)
+        mutex_lock(&kvm->arch.ipte_mutex);
+        kvm->arch.ipte_lock_count--;
+        if (kvm->arch.ipte_lock_count)
                 goto out;
-        read_lock(&vcpu->kvm->arch.sca_lock);
-        ic = kvm_s390_get_ipte_control(vcpu->kvm);
+        read_lock(&kvm->arch.sca_lock);
+        ic = kvm_s390_get_ipte_control(kvm);
         do {
                 old = READ_ONCE(*ic);
                 new = old;
                 new.k = 0;
         } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
-        read_unlock(&vcpu->kvm->arch.sca_lock);
-        wake_up(&vcpu->kvm->arch.ipte_wq);
+        read_unlock(&kvm->arch.sca_lock);
+        wake_up(&kvm->arch.ipte_wq);
 out:
-        mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
+        mutex_unlock(&kvm->arch.ipte_mutex);
 }
 
-static void ipte_lock_siif(struct kvm_vcpu *vcpu)
+static void ipte_lock_siif(struct kvm *kvm)
 {
         union ipte_control old, new, *ic;
 
 retry:
-        read_lock(&vcpu->kvm->arch.sca_lock);
-        ic = kvm_s390_get_ipte_control(vcpu->kvm);
+        read_lock(&kvm->arch.sca_lock);
+        ic = kvm_s390_get_ipte_control(kvm);
         do {
                 old = READ_ONCE(*ic);
                 if (old.kg) {
-                        read_unlock(&vcpu->kvm->arch.sca_lock);
+                        read_unlock(&kvm->arch.sca_lock);
                         cond_resched();
                         goto retry;
                 }
@@ -340,15 +340,15 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
                 new.k = 1;
                 new.kh++;
         } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
-        read_unlock(&vcpu->kvm->arch.sca_lock);
+        read_unlock(&kvm->arch.sca_lock);
 }
 
-static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
+static void ipte_unlock_siif(struct kvm *kvm)
 {
         union ipte_control old, new, *ic;
 
-        read_lock(&vcpu->kvm->arch.sca_lock);
-        ic = kvm_s390_get_ipte_control(vcpu->kvm);
+        read_lock(&kvm->arch.sca_lock);
+        ic = kvm_s390_get_ipte_control(kvm);
         do {
                 old = READ_ONCE(*ic);
                 new = old;
@@ -356,25 +356,25 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
                 if (!new.kh)
                         new.k = 0;
         } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
-        read_unlock(&vcpu->kvm->arch.sca_lock);
+        read_unlock(&kvm->arch.sca_lock);
         if (!new.kh)
-                wake_up(&vcpu->kvm->arch.ipte_wq);
+                wake_up(&kvm->arch.ipte_wq);
 }
 
-void ipte_lock(struct kvm_vcpu *vcpu)
+void ipte_lock(struct kvm *kvm)
 {
-        if (vcpu->arch.sie_block->eca & ECA_SII)
-                ipte_lock_siif(vcpu);
+        if (sclp.has_siif)
+                ipte_lock_siif(kvm);
         else
-                ipte_lock_simple(vcpu);
+                ipte_lock_simple(kvm);
 }
 
-void ipte_unlock(struct kvm_vcpu *vcpu)
+void ipte_unlock(struct kvm *kvm)
 {
-        if (vcpu->arch.sie_block->eca & ECA_SII)
-                ipte_unlock_siif(vcpu);
+        if (sclp.has_siif)
+                ipte_unlock_siif(kvm);
         else
-                ipte_unlock_simple(vcpu);
+                ipte_unlock_simple(kvm);
 }
 
 static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
@@ -1075,7 +1075,7 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
         try_storage_prot_override = storage_prot_override_applicable(vcpu);
         need_ipte_lock = psw_bits(*psw).dat && !asce.r;
         if (need_ipte_lock)
-                ipte_lock(vcpu);
+                ipte_lock(vcpu->kvm);
         /*
          * Since we do the access further down ultimately via a move instruction
          * that does key checking and returns an error in case of a protection
@@ -1113,7 +1113,7 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
                 rc = trans_exc(vcpu, rc, ga, ar, mode, prot);
 out_unlock:
         if (need_ipte_lock)
-                ipte_unlock(vcpu);
+                ipte_unlock(vcpu->kvm);
         if (nr_pages > ARRAY_SIZE(gpa_array))
                 vfree(gpas);
         return rc;
@@ -1185,10 +1185,10 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
         rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
         if (rc)
                 return rc;
-        ipte_lock(vcpu);
+        ipte_lock(vcpu->kvm);
         rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode,
                                  access_key);
-        ipte_unlock(vcpu);
+        ipte_unlock(vcpu->kvm);
 
         return rc;
 }
@@ -1451,7 +1451,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
          * tables/pointers we read stay valid - unshadowing is however
          * always possible - only guest_table_lock protects us.
          */
-        ipte_lock(vcpu);
+        ipte_lock(vcpu->kvm);
 
         rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
         if (rc)
@@ -1485,7 +1485,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
                 pte.p |= dat_protection;
         if (!rc)
                 rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
-        ipte_unlock(vcpu);
+        ipte_unlock(vcpu->kvm);
         mmap_read_unlock(sg->mm);
         return rc;
 }
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 1124ff282012..9408d6cc8e2c 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -440,9 +440,9 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
         return access_guest_real(vcpu, gra, data, len, 0);
 }
 
-void ipte_lock(struct kvm_vcpu *vcpu);
-void ipte_unlock(struct kvm_vcpu *vcpu);
-int ipte_lock_held(struct kvm_vcpu *vcpu);
+void ipte_lock(struct kvm *kvm);
+void ipte_unlock(struct kvm *kvm);
+int ipte_lock_held(struct kvm *kvm);
 int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
 
 /* MVPG PEI indication bits */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 5beb7a4a11b3..0e8603acc105 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -443,7 +443,7 @@ static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
         vcpu->stat.instruction_ipte_interlock++;
         if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
-        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
+        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu->kvm));
         kvm_s390_retry_instr(vcpu);
         VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
         return 0;
@@ -1472,7 +1472,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
         access_key = (operand2 & 0xf0) >> 4;
 
         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
-                ipte_lock(vcpu);
+                ipte_lock(vcpu->kvm);
 
         ret = guest_translate_address_with_key(vcpu, address, ar, &gpa,
                                                GACC_STORE, access_key);
@@ -1509,7 +1509,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
         }
 
         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
-                ipte_unlock(vcpu);
+                ipte_unlock(vcpu->kvm);
         return ret;
 }
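Both lock flavors in the diff use the same lock-free retry pattern: snapshot the control word, compute the updated value, and publish it with a compare-and-swap, retrying if another CPU raced in between. A minimal, self-contained userspace analogue of the ipte_lock_siif() loop, with the field layout deliberately simplified (the real union ipte_control is an architected format and the kernel uses cmpxchg() rather than GCC atomics):

#include <stdint.h>

union ipte_control_sketch {
        uint64_t val;
        struct {
                uint64_t k  : 1;        /* lock bit */
                uint64_t kh : 31;       /* hold count (layout simplified) */
        };
};

static void ipte_lock_sketch(union ipte_control_sketch *ic)
{
        union ipte_control_sketch old, new;

        do {
                old.val = __atomic_load_n(&ic->val, __ATOMIC_RELAXED);
                new = old;
                new.k = 1;              /* mark locked */
                new.kh++;               /* one more holder */
        } while (!__atomic_compare_exchange_n(&ic->val, &old.val, new.val,
                                              0, __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST));
}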
The former check to choose between SIIF or not SIIF can be done
using sclp.has_siif instead of accessing per vCPU structures.

When accessing the SCA, ipte_lock and ipte_unlock do not need
to access any vcpu structures but only the KVM structure.

Let's simplify the ipte handling.

Signed-off-by: Pierre Morel <pmorel@linux.ibm.com>
---
 arch/s390/kvm/gaccess.c | 96 ++++++++++++++++++++---------------------
 arch/s390/kvm/gaccess.h |  6 +--
 arch/s390/kvm/priv.c    |  6 +--
 3 files changed, 54 insertions(+), 54 deletions(-)
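Condensed from the diff above, the heart of the simplification is the dispatch check, which no longer needs any per-vCPU state:

/* Before: a vcpu was needed just to inspect the ECA bits. */
if (vcpu->arch.sie_block->eca & ECA_SII)
        ipte_lock_siif(vcpu);

/* After: the facility test goes straight to the SCLP info. */
if (sclp.has_siif)
        ipte_lock_siif(kvm);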