
[1/2] KVM: s390: add vcpu stat counters for many instructions

Message ID 20180124113235.31485-2-borntraeger@de.ibm.com (mailing list archive)
State New, archived

Commit Message

Christian Borntraeger Jan. 24, 2018, 11:32 a.m. UTC
The overall instruction counter is larger than the sum of the
single counters. We should try to catch all instruction handlers
to make this match the summary counter.
Let us add sck, tb, sske, iske, rrbe, tpi, tsch, lpsw, lpswe, ...

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
---
 arch/s390/include/asm/kvm_host.h | 18 ++++++++++++++++--
 arch/s390/kvm/kvm-s390.c         | 18 ++++++++++++++++--
 arch/s390/kvm/priv.c             | 33 +++++++++++++++++++++++++++++++--
 3 files changed, 63 insertions(+), 6 deletions(-)
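
[Editor's note: a quick way to check whether the per-instruction counters now
add up to the overall counter is to sum the debugfs statistics that these
VCPU_STAT entries export. The userspace sketch below is not part of the patch;
it assumes debugfs is mounted at /sys/kernel/debug, that the overall counter is
exposed as exit_instruction, and that all per-instruction counters carry the
instruction_ prefix used in debugfs_entries[].]

#include <glob.h>
#include <stdio.h>

static unsigned long long read_counter(const char *path)
{
	unsigned long long val = 0;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%llu", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	unsigned long long sum = 0;
	glob_t g;
	size_t i;

	/* every per-instruction counter exported via VCPU_STAT() */
	if (glob("/sys/kernel/debug/kvm/instruction_*", 0, NULL, &g))
		return 1;
	for (i = 0; i < g.gl_pathc; i++)
		sum += read_counter(g.gl_pathv[i]);
	globfree(&g);

	printf("sum of instruction_* counters: %llu\n", sum);
	printf("overall exit_instruction:      %llu\n",
	       read_counter("/sys/kernel/debug/kvm/exit_instruction"));
	return 0;
}

[Compile with gcc and run as root on the KVM host; if the handlers are fully
covered, the two numbers should track each other, modulo counters that are
bumped outside of instruction intercepts.]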

Comments

Janosch Frank Jan. 24, 2018, 12:27 p.m. UTC | #1
On 24.01.2018 12:32, Christian Borntraeger wrote:
> The overall instruction counter is larger than the sum of the
> single counters. We should try to catch all instruction handlers
> to make this match the summary counter.
> Let us add sck,tb,sske,iske,rrbe,tb,tpi,tsch,lpsw,pswe.....
> 
> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>

Acked-by: Janosch Frank <frankja@linux.vnet.ibm.com>

[...]
> 
>  static int handle_io_inst(struct kvm_vcpu *vcpu)
>  {
> +
>  	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

Whitespace damage

> 
>  	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
> @@ -527,6 +545,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
>  		if (vcpu->arch.sie_block->ipa == 0xb235)
>  			return handle_tsch(vcpu);
>  		/* Handle in userspace. */
> +		vcpu->stat.instruction_io++;
>  		return -EOPNOTSUPP;
>  	} else {
>  		/*
Christian Borntraeger Jan. 24, 2018, 12:31 p.m. UTC | #2
On 01/24/2018 01:27 PM, Janosch Frank wrote:
> On 24.01.2018 12:32, Christian Borntraeger wrote:
>> The overall instruction counter is larger than the sum of the
>> single counters. We should try to catch all instruction handlers
>> to make this match the summary counter.
>> Let us add sck,tb,sske,iske,rrbe,tb,tpi,tsch,lpsw,pswe.....
>>
>> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
> 
> Acked-by: Janosch Frank <frankja@linux.vnet.ibm.com>

Thanks
> 
> [...]
>>
>>  static int handle_io_inst(struct kvm_vcpu *vcpu)
>>  {
>> +
>>  	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
> 
> Whitespace damage

fixed.

> 
>>
>>  	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
>> @@ -527,6 +545,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
>>  		if (vcpu->arch.sie_block->ipa == 0xb235)
>>  			return handle_tsch(vcpu);
>>  		/* Handle in userspace. */
>> +		vcpu->stat.instruction_io++;
>>  		return -EOPNOTSUPP;
>>  	} else {
>>  		/*
>
David Hildenbrand Jan. 24, 2018, 12:41 p.m. UTC | #3
On 24.01.2018 12:32, Christian Borntraeger wrote:
> The overall instruction counter is larger than the sum of the
> single counters. We should try to catch all instruction handlers
> to make this match the summary counter.
> Let us add sck,tb,sske,iske,rrbe,tb,tpi,tsch,lpsw,pswe.....
> 
> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
> ---
>  arch/s390/include/asm/kvm_host.h | 18 ++++++++++++++++--
>  arch/s390/kvm/kvm-s390.c         | 18 ++++++++++++++++--
>  arch/s390/kvm/priv.c             | 33 +++++++++++++++++++++++++++++++--
>  3 files changed, 63 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
> index 2aee050..5d47991 100644
> --- a/arch/s390/include/asm/kvm_host.h
> +++ b/arch/s390/include/asm/kvm_host.h
> @@ -1,7 +1,7 @@
>  /*
>   * definition for kernel virtual machines on s390
>   *
> - * Copyright IBM Corp. 2008, 2009
> + * Copyright IBM Corp. 2008, 2018
>   *
>   * This program is free software; you can redistribute it and/or modify
>   * it under the terms of the GNU General Public License (version 2 only)
> @@ -323,18 +323,32 @@ struct kvm_vcpu_stat {
>  	u64 deliver_program_int;
>  	u64 deliver_io_int;
>  	u64 exit_wait_state;
> +	u64 instruction_epsw;
> +	u64 instruction_gs;
> +	u64 instruction_io;
> +	u64 instruction_lpsw;
> +	u64 instruction_lpswe;
>  	u64 instruction_pfmf;
> +	u64 instruction_ptff;
> +	u64 instruction_sck;
> +	u64 instruction_sckpf;
>  	u64 instruction_stidp;
>  	u64 instruction_spx;
>  	u64 instruction_stpx;
>  	u64 instruction_stap;
> -	u64 instruction_storage_key;
> +	u64 instruction_iske;
> +	u64 instruction_ri;
> +	u64 instruction_rrbe;
> +	u64 instruction_sske;
>  	u64 instruction_ipte_interlock;
>  	u64 instruction_stsch;
>  	u64 instruction_chsc;
>  	u64 instruction_stsi;
>  	u64 instruction_stfl;
> +	u64 instruction_tb;
> +	u64 instruction_tpi;
>  	u64 instruction_tprot;
> +	u64 instruction_tsch;
>  	u64 instruction_sie;
>  	u64 instruction_essa;
>  	u64 instruction_sthyi;
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 7926c3c..3abe177 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -2,7 +2,7 @@
>  /*
>   * hosting IBM Z kernel virtual machines (s390x)
>   *
> - * Copyright IBM Corp. 2008, 2017
> + * Copyright IBM Corp. 2008, 2018
>   *
>   *    Author(s): Carsten Otte <cotte@de.ibm.com>
>   *               Christian Borntraeger <borntraeger@de.ibm.com>
> @@ -87,19 +87,33 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
>  	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
>  	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
>  	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
> +	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
> +	{ "instruction_gs", VCPU_STAT(instruction_gs) },
> +	{ "instruction_io", VCPU_STAT(instruction_io) },
> +	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
> +	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
>  	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
> +	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
>  	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
> +	{ "instruction_sck", VCPU_STAT(instruction_sck) },
> +	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
>  	{ "instruction_spx", VCPU_STAT(instruction_spx) },
>  	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
>  	{ "instruction_stap", VCPU_STAT(instruction_stap) },
> -	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
> +	{ "instruction_iske", VCPU_STAT(instruction_iske) },
> +	{ "instruction_ri", VCPU_STAT(instruction_ri) },
> +	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
> +	{ "instruction_sske", VCPU_STAT(instruction_sske) },
>  	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
>  	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
>  	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
>  	{ "instruction_essa", VCPU_STAT(instruction_essa) },
>  	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
>  	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
> +	{ "instruction_tb", VCPU_STAT(instruction_tb) },
> +	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
>  	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
> +	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
>  	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
>  	{ "instruction_sie", VCPU_STAT(instruction_sie) },
>  	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
> diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
> index 572496c..7523bc8 100644
> --- a/arch/s390/kvm/priv.c
> +++ b/arch/s390/kvm/priv.c
> @@ -2,7 +2,7 @@
>  /*
>   * handling privileged instructions
>   *
> - * Copyright IBM Corp. 2008, 2013
> + * Copyright IBM Corp. 2008, 2018
>   *
>   *    Author(s): Carsten Otte <cotte@de.ibm.com>
>   *               Christian Borntraeger <borntraeger@de.ibm.com>
> @@ -34,6 +34,8 @@
>  
>  static int handle_ri(struct kvm_vcpu *vcpu)
>  {
> +	vcpu->stat.instruction_ri++;
> +
>  	if (test_kvm_facility(vcpu->kvm, 64)) {
>  		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
>  		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
> @@ -53,6 +55,8 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
>  
>  static int handle_gs(struct kvm_vcpu *vcpu)
>  {
> +	vcpu->stat.instruction_gs++;
> +
>  	if (test_kvm_facility(vcpu->kvm, 133)) {
>  		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
>  		preempt_disable();
> @@ -85,6 +89,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
>  	u8 ar;
>  	u64 op2, val;
>  
> +	vcpu->stat.instruction_sck++;
> +
>  	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
>  		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
>  
> @@ -222,7 +228,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
>  {
>  	int rc;
>  
> -	vcpu->stat.instruction_storage_key++;
>  	rc = kvm_s390_skey_check_enable(vcpu);
>  	if (rc)
>  		return rc;
> @@ -242,6 +247,8 @@ static int handle_iske(struct kvm_vcpu *vcpu)
>  	int reg1, reg2;
>  	int rc;
>  
> +	vcpu->stat.instruction_iske++;
> +
>  	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
>  		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
>  
> @@ -274,6 +281,8 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
>  	int reg1, reg2;
>  	int rc;
>  
> +	vcpu->stat.instruction_rrbe++;
> +
>  	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
>  		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
>  
> @@ -312,6 +321,8 @@ static int handle_sske(struct kvm_vcpu *vcpu)
>  	int reg1, reg2;
>  	int rc;
>  
> +	vcpu->stat.instruction_sske++;
> +
>  	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
>  		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
>  
> @@ -392,6 +403,8 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
>  	gpa_t addr;
>  	int reg2;
>  
> +	vcpu->stat.instruction_tb++;
> +
>  	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
>  		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
>  
> @@ -424,6 +437,8 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
>  	u64 addr;
>  	u8 ar;
>  
> +	vcpu->stat.instruction_tpi++;
> +
>  	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
>  	if (addr & 3)
>  		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
> @@ -484,6 +499,8 @@ static int handle_tsch(struct kvm_vcpu *vcpu)
>  	struct kvm_s390_interrupt_info *inti = NULL;
>  	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
>  
> +	vcpu->stat.instruction_tsch++;
> +
>  	/* a valid schid has at least one bit set */
>  	if (vcpu->run->s.regs.gprs[1])
>  		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
> @@ -512,6 +529,7 @@ static int handle_tsch(struct kvm_vcpu *vcpu)
>  
>  static int handle_io_inst(struct kvm_vcpu *vcpu)
>  {
> +
>  	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
>  
>  	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
> @@ -527,6 +545,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
>  		if (vcpu->arch.sie_block->ipa == 0xb235)
>  			return handle_tsch(vcpu);
>  		/* Handle in userspace. */
> +		vcpu->stat.instruction_io++;
>  		return -EOPNOTSUPP;
>  	} else {
>  		/*
> @@ -592,6 +611,8 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
>  	int rc;
>  	u8 ar;
>  
> +	vcpu->stat.instruction_lpsw++;
> +
>  	if (gpsw->mask & PSW_MASK_PSTATE)
>  		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
>  
> @@ -619,6 +640,8 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
>  	int rc;
>  	u8 ar;
>  
> +	vcpu->stat.instruction_lpswe++;
> +
>  	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
>  		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
>  
> @@ -828,6 +851,8 @@ static int handle_epsw(struct kvm_vcpu *vcpu)
>  {
>  	int reg1, reg2;
>  
> +	vcpu->stat.instruction_epsw++;
> +
>  	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
>  
>  	/* This basically extracts the mask half of the psw. */
> @@ -1332,6 +1357,8 @@ static int handle_sckpf(struct kvm_vcpu *vcpu)
>  {
>  	u32 value;
>  
> +	vcpu->stat.instruction_sckpf++;
> +
>  	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
>  		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
>  
> @@ -1347,6 +1374,8 @@ static int handle_sckpf(struct kvm_vcpu *vcpu)
>  
>  static int handle_ptff(struct kvm_vcpu *vcpu)
>  {
> +	vcpu->stat.instruction_ptff++;
> +
>  	/* we don't emulate any control instructions yet */
>  	kvm_s390_set_psw_cc(vcpu, 3);
>  	return 0;
> 

Reviewed-by: David Hildenbrand <david@redhat.com>
Cornelia Huck Jan. 24, 2018, 2:45 p.m. UTC | #4
On Wed, 24 Jan 2018 12:32:34 +0100
Christian Borntraeger <borntraeger@de.ibm.com> wrote:

> The overall instruction counter is larger than the sum of the
> single counters. We should try to catch all instruction handlers
> to make this match the summary counter.
> Let us add sck,tb,sske,iske,rrbe,tb,tpi,tsch,lpsw,pswe.....
> 
> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
> ---
>  arch/s390/include/asm/kvm_host.h | 18 ++++++++++++++++--
>  arch/s390/kvm/kvm-s390.c         | 18 ++++++++++++++++--
>  arch/s390/kvm/priv.c             | 33 +++++++++++++++++++++++++++++++--
>  3 files changed, 63 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
> index 2aee050..5d47991 100644
> --- a/arch/s390/include/asm/kvm_host.h
> +++ b/arch/s390/include/asm/kvm_host.h
> @@ -1,7 +1,7 @@
>  /*
>   * definition for kernel virtual machines on s390
>   *
> - * Copyright IBM Corp. 2008, 2009
> + * Copyright IBM Corp. 2008, 2018
>   *
>   * This program is free software; you can redistribute it and/or modify
>   * it under the terms of the GNU General Public License (version 2 only)
> @@ -323,18 +323,32 @@ struct kvm_vcpu_stat {
>  	u64 deliver_program_int;
>  	u64 deliver_io_int;
>  	u64 exit_wait_state;
> +	u64 instruction_epsw;
> +	u64 instruction_gs;
> +	u64 instruction_io;

I find the naming a bit confusing (tpi/tsch are I/O instructions, too)
-- call this instruction_io_other?

> +	u64 instruction_lpsw;
> +	u64 instruction_lpswe;
>  	u64 instruction_pfmf;
> +	u64 instruction_ptff;
> +	u64 instruction_sck;
> +	u64 instruction_sckpf;
>  	u64 instruction_stidp;
>  	u64 instruction_spx;
>  	u64 instruction_stpx;
>  	u64 instruction_stap;
> -	u64 instruction_storage_key;
> +	u64 instruction_iske;
> +	u64 instruction_ri;
> +	u64 instruction_rrbe;
> +	u64 instruction_sske;
>  	u64 instruction_ipte_interlock;
>  	u64 instruction_stsch;
>  	u64 instruction_chsc;

The stsch and chsc counters are dead (probably have been for quite some
time).

>  	u64 instruction_stsi;
>  	u64 instruction_stfl;
> +	u64 instruction_tb;
> +	u64 instruction_tpi;
>  	u64 instruction_tprot;
> +	u64 instruction_tsch;
>  	u64 instruction_sie;
>  	u64 instruction_essa;
>  	u64 instruction_sthyi;

If your goal is to catch all instructions, shouldn't you add a counter
for diagnose functions that don't have a kernel handler as well?

I've never used the counters much, but that change looks fine in
general.

Patch

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 2aee050..5d47991 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1,7 +1,7 @@ 
 /*
  * definition for kernel virtual machines on s390
  *
- * Copyright IBM Corp. 2008, 2009
+ * Copyright IBM Corp. 2008, 2018
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -323,18 +323,32 @@  struct kvm_vcpu_stat {
 	u64 deliver_program_int;
 	u64 deliver_io_int;
 	u64 exit_wait_state;
+	u64 instruction_epsw;
+	u64 instruction_gs;
+	u64 instruction_io;
+	u64 instruction_lpsw;
+	u64 instruction_lpswe;
 	u64 instruction_pfmf;
+	u64 instruction_ptff;
+	u64 instruction_sck;
+	u64 instruction_sckpf;
 	u64 instruction_stidp;
 	u64 instruction_spx;
 	u64 instruction_stpx;
 	u64 instruction_stap;
-	u64 instruction_storage_key;
+	u64 instruction_iske;
+	u64 instruction_ri;
+	u64 instruction_rrbe;
+	u64 instruction_sske;
 	u64 instruction_ipte_interlock;
 	u64 instruction_stsch;
 	u64 instruction_chsc;
 	u64 instruction_stsi;
 	u64 instruction_stfl;
+	u64 instruction_tb;
+	u64 instruction_tpi;
 	u64 instruction_tprot;
+	u64 instruction_tsch;
 	u64 instruction_sie;
 	u64 instruction_essa;
 	u64 instruction_sthyi;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 7926c3c..3abe177 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2,7 +2,7 @@ 
 /*
  * hosting IBM Z kernel virtual machines (s390x)
  *
- * Copyright IBM Corp. 2008, 2017
+ * Copyright IBM Corp. 2008, 2018
  *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
@@ -87,19 +87,33 @@  struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
+	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
+	{ "instruction_gs", VCPU_STAT(instruction_gs) },
+	{ "instruction_io", VCPU_STAT(instruction_io) },
+	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
+	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
+	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
+	{ "instruction_sck", VCPU_STAT(instruction_sck) },
+	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
-	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
+	{ "instruction_iske", VCPU_STAT(instruction_iske) },
+	{ "instruction_ri", VCPU_STAT(instruction_ri) },
+	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
+	{ "instruction_sske", VCPU_STAT(instruction_sske) },
 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
+	{ "instruction_tb", VCPU_STAT(instruction_tb) },
+	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
+	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
 	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
 	{ "instruction_sie", VCPU_STAT(instruction_sie) },
 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 572496c..7523bc8 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -2,7 +2,7 @@ 
 /*
  * handling privileged instructions
  *
- * Copyright IBM Corp. 2008, 2013
+ * Copyright IBM Corp. 2008, 2018
  *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
@@ -34,6 +34,8 @@ 
 
 static int handle_ri(struct kvm_vcpu *vcpu)
 {
+	vcpu->stat.instruction_ri++;
+
 	if (test_kvm_facility(vcpu->kvm, 64)) {
 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
@@ -53,6 +55,8 @@  int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
 
 static int handle_gs(struct kvm_vcpu *vcpu)
 {
+	vcpu->stat.instruction_gs++;
+
 	if (test_kvm_facility(vcpu->kvm, 133)) {
 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
 		preempt_disable();
@@ -85,6 +89,8 @@  static int handle_set_clock(struct kvm_vcpu *vcpu)
 	u8 ar;
 	u64 op2, val;
 
+	vcpu->stat.instruction_sck++;
+
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
@@ -222,7 +228,6 @@  static int try_handle_skey(struct kvm_vcpu *vcpu)
 {
 	int rc;
 
-	vcpu->stat.instruction_storage_key++;
 	rc = kvm_s390_skey_check_enable(vcpu);
 	if (rc)
 		return rc;
@@ -242,6 +247,8 @@  static int handle_iske(struct kvm_vcpu *vcpu)
 	int reg1, reg2;
 	int rc;
 
+	vcpu->stat.instruction_iske++;
+
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
@@ -274,6 +281,8 @@  static int handle_rrbe(struct kvm_vcpu *vcpu)
 	int reg1, reg2;
 	int rc;
 
+	vcpu->stat.instruction_rrbe++;
+
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
@@ -312,6 +321,8 @@  static int handle_sske(struct kvm_vcpu *vcpu)
 	int reg1, reg2;
 	int rc;
 
+	vcpu->stat.instruction_sske++;
+
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
@@ -392,6 +403,8 @@  static int handle_test_block(struct kvm_vcpu *vcpu)
 	gpa_t addr;
 	int reg2;
 
+	vcpu->stat.instruction_tb++;
+
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
@@ -424,6 +437,8 @@  static int handle_tpi(struct kvm_vcpu *vcpu)
 	u64 addr;
 	u8 ar;
 
+	vcpu->stat.instruction_tpi++;
+
 	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (addr & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -484,6 +499,8 @@  static int handle_tsch(struct kvm_vcpu *vcpu)
 	struct kvm_s390_interrupt_info *inti = NULL;
 	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
 
+	vcpu->stat.instruction_tsch++;
+
 	/* a valid schid has at least one bit set */
 	if (vcpu->run->s.regs.gprs[1])
 		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
@@ -512,6 +529,7 @@  static int handle_tsch(struct kvm_vcpu *vcpu)
 
 static int handle_io_inst(struct kvm_vcpu *vcpu)
 {
+
 	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -527,6 +545,7 @@  static int handle_io_inst(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.sie_block->ipa == 0xb235)
 			return handle_tsch(vcpu);
 		/* Handle in userspace. */
+		vcpu->stat.instruction_io++;
 		return -EOPNOTSUPP;
 	} else {
 		/*
@@ -592,6 +611,8 @@  int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 	int rc;
 	u8 ar;
 
+	vcpu->stat.instruction_lpsw++;
+
 	if (gpsw->mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
@@ -619,6 +640,8 @@  static int handle_lpswe(struct kvm_vcpu *vcpu)
 	int rc;
 	u8 ar;
 
+	vcpu->stat.instruction_lpswe++;
+
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
@@ -828,6 +851,8 @@  static int handle_epsw(struct kvm_vcpu *vcpu)
 {
 	int reg1, reg2;
 
+	vcpu->stat.instruction_epsw++;
+
 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
 
 	/* This basically extracts the mask half of the psw. */
@@ -1332,6 +1357,8 @@  static int handle_sckpf(struct kvm_vcpu *vcpu)
 {
 	u32 value;
 
+	vcpu->stat.instruction_sckpf++;
+
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
@@ -1347,6 +1374,8 @@  static int handle_sckpf(struct kvm_vcpu *vcpu)
 
 static int handle_ptff(struct kvm_vcpu *vcpu)
 {
+	vcpu->stat.instruction_ptff++;
+
 	/* we don't emulate any control instructions yet */
 	kvm_s390_set_psw_cc(vcpu, 3);
 	return 0;