@@ -206,8 +206,14 @@ struct kvm_cpu_context {
 	struct kvm_vcpu *__hyp_running_vcpu;
 };
 
+struct kvm_pmu_events {
+	u32	events_host;
+	u32	events_guest;
+};
+
 struct kvm_host_data {
 	struct kvm_cpu_context host_ctxt;
+	struct kvm_pmu_events pmu_events;
 };
 
 typedef struct kvm_host_data kvm_host_data_t;
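
The hunk above hangs a pair of per-CPU bitmasks off kvm_host_data, next to the saved host context: events_host for PMU events that should only count while the host runs, and events_guest for events that should only count while a guest runs. Each bit presumably corresponds to one hardware PMU counter index; that reading comes from how the accessors in the next hunk are used elsewhere in the series, not from this hunk alone. The standalone sketch below models just that layout in plain C. It is not kernel code: the host context is omitted, and the per-CPU variable is replaced by an array indexed by an illustration-only cpu id.

/* Standalone model of the bookkeeping added above; not kernel code. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

struct kvm_pmu_events {
	u32 events_host;	/* counters that must only run in the host */
	u32 events_guest;	/* counters that must only run in the guest */
};

struct kvm_host_data {
	/* host_ctxt omitted: only the PMU bookkeeping is modelled */
	struct kvm_pmu_events pmu_events;
};

/* stand-in for the real per-CPU kvm_host_data; NR_CPUS_MODEL is made up */
#define NR_CPUS_MODEL 4
static struct kvm_host_data host_data[NR_CPUS_MODEL];

int main(void)
{
	int cpu = 0, idx = 3;	/* pretend counter 3 counts a host-only event */

	host_data[cpu].pmu_events.events_host |= 1u << idx;
	printf("cpu%d host=%#x guest=%#x\n", cpu,
	       host_data[cpu].pmu_events.events_host,
	       host_data[cpu].pmu_events.events_guest);
	return 0;
}
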
@@ -468,11 +474,33 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
 
+#define KVM_PMU_EVENTS_HOST	1
+#define KVM_PMU_EVENTS_GUEST	2
+
 #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
 static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 {
 	return kvm_arch_vcpu_run_map_fp(vcpu);
 }
+static inline void kvm_set_pmu_events(u32 set, int flags)
+{
+	struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+
+	if (flags & KVM_PMU_EVENTS_HOST)
+		ctx->pmu_events.events_host |= set;
+	if (flags & KVM_PMU_EVENTS_GUEST)
+		ctx->pmu_events.events_guest |= set;
+}
+static inline void kvm_clr_pmu_events(u32 clr)
+{
+	struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+
+	ctx->pmu_events.events_host &= ~clr;
+	ctx->pmu_events.events_guest &= ~clr;
+}
+#else
+static inline void kvm_set_pmu_events(u32 set, int flags) {}
+static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
 static inline void kvm_arm_vhe_guest_enter(void)
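
kvm_set_pmu_events() ORs a mask into either or both per-CPU bitmaps depending on the KVM_PMU_EVENTS_HOST/GUEST flags, kvm_clr_pmu_events() clears the mask from both unconditionally, and the !CONFIG_KVM stubs keep callers buildable when KVM is compiled out. The natural consumer is the world-switch path, which can disable host-only counters and enable guest-only ones on guest entry and do the reverse on exit, e.g. via PMCNTENCLR_EL0/PMCNTENSET_EL0; that code is not part of this excerpt. The standalone sketch below models both halves in plain C under those assumptions: pmu_switch_to_guest(), pmu_switch_to_host() and the pmcnten variable standing in for the counter-enable register are illustration-only names, and the per-CPU pointer is reduced to a single global.

/* Standalone model of the accessors and a hypothetical consumer; not kernel code. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define KVM_PMU_EVENTS_HOST	1
#define KVM_PMU_EVENTS_GUEST	2

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

/* single-CPU stand-in for this_cpu_ptr(&kvm_host_data)->pmu_events */
static struct kvm_pmu_events pmu_events;

/* stand-in for the PMU counter-enable state (PMCNTENSET/PMCNTENCLR effect) */
static u32 pmcnten;

static void kvm_set_pmu_events(u32 set, int flags)
{
	if (flags & KVM_PMU_EVENTS_HOST)
		pmu_events.events_host |= set;
	if (flags & KVM_PMU_EVENTS_GUEST)
		pmu_events.events_guest |= set;
}

static void kvm_clr_pmu_events(u32 clr)
{
	pmu_events.events_host &= ~clr;
	pmu_events.events_guest &= ~clr;
}

/* on guest entry: stop host-only counters, start guest-only counters */
static void pmu_switch_to_guest(void)
{
	pmcnten &= ~pmu_events.events_host;
	pmcnten |= pmu_events.events_guest;
}

/* on guest exit: the mirror image */
static void pmu_switch_to_host(void)
{
	pmcnten &= ~pmu_events.events_guest;
	pmcnten |= pmu_events.events_host;
}

int main(void)
{
	/* counter 0 counts only in the host, counter 1 only in the guest */
	kvm_set_pmu_events(1u << 0, KVM_PMU_EVENTS_HOST);
	kvm_set_pmu_events(1u << 1, KVM_PMU_EVENTS_GUEST);
	pmcnten = 1u << 0;	/* host-only counter currently running */

	pmu_switch_to_guest();
	printf("in guest: pmcnten=%#x\n", pmcnten);	/* 0x2 */

	pmu_switch_to_host();
	printf("back in host: pmcnten=%#x\n", pmcnten);	/* 0x1 */

	/* event teardown: both masks are cleared unconditionally */
	kvm_clr_pmu_events((1u << 0) | (1u << 1));
	return 0;
}
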