[RFC,v8,7/7] KVM: Adding tracepoints for guest page hinting
diff mbox series

Message ID 20190204201854.2328-8-nitesh@redhat.com
State New
Headers show
Series
  • KVM: Guest Free Page Hinting
Related show

Commit Message

Nitesh Narayan Lal Feb. 4, 2019, 8:18 p.m. UTC
This patch enables tracking of the pages freed by the guest and
of the pages isolated by the page hinting code through kernel
tracepoints.

Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
---
 include/trace/events/kmem.h | 40 +++++++++++++++++++++++++++++++++++++
 virt/kvm/page_hinting.c     | 10 ++++++++++
 2 files changed, 50 insertions(+)

Patch
diff mbox series

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index eb57e3037deb..69f6da9ff939 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -315,6 +315,46 @@  TRACE_EVENT(mm_page_alloc_extfrag,
 		__entry->change_ownership)
 );
 
+TRACE_EVENT(guest_free_page,
+	TP_PROTO(struct page *page, unsigned int order),
+	/* Fired when the guest frees a page of the given buddy order. */
+	TP_ARGS(page, order),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pfn)
+		__field(unsigned int, order)
+	),
+
+	TP_fast_assign(
+		__entry->pfn            = page_to_pfn(page);
+		__entry->order          = order;
+	),
+
+	TP_printk("page=%p pfn=%lu number of pages=%d",
+		  pfn_to_page(__entry->pfn),
+		  __entry->pfn,
+		  (1 << __entry->order))
+);
+
+TRACE_EVENT(guest_isolated_pfn,
+	TP_PROTO(unsigned long pfn, unsigned int pages),
+	/* Fired when the hinting code isolates @pages pages starting at @pfn. */
+	TP_ARGS(pfn, pages),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pfn)
+		__field(unsigned int, pages)
+	),
+
+	TP_fast_assign(
+		__entry->pfn            = pfn;
+		__entry->pages          = pages;
+	),
+
+	TP_printk("pfn=%lu number of pages=%u",
+		  __entry->pfn,
+		  __entry->pages)
+);
 #endif /* _TRACE_KMEM_H */
 
 /* This part must be outside protection */
diff --git a/virt/kvm/page_hinting.c b/virt/kvm/page_hinting.c
index 315099fcda43..395d94e52c74 100644
--- a/virt/kvm/page_hinting.c
+++ b/virt/kvm/page_hinting.c
@@ -4,6 +4,7 @@ 
 #include <linux/kvm_host.h>
 #include <linux/sort.h>
 #include <linux/kernel.h>
+#include <trace/events/kmem.h>
 
 /*
  * struct kvm_free_pages - Tracks the pages which are freed by the guest.
@@ -140,7 +141,11 @@  static void hinting_fn(unsigned int cpu)
 					int l_idx = page_hinting_obj->hyp_idx;
 					struct hypervisor_pages *l_obj =
 					page_hinting_obj->hypervisor_pagelist;
+					unsigned int buddy_pages =
+						1 << buddy_order;
 
+					trace_guest_isolated_pfn(pfn,
+								 buddy_pages);
 					l_obj[l_idx].pfn = pfn;
 					l_obj[l_idx].order = buddy_order;
 					page_hinting_obj->hyp_idx += 1;
@@ -163,7 +168,11 @@  static void hinting_fn(unsigned int cpu)
 					page_hinting_obj->hypervisor_pagelist;
 					unsigned long buddy_pfn =
 						page_to_pfn(buddy_page);
+					unsigned int buddy_pages =
+						1 << buddy_order;
 
+					trace_guest_isolated_pfn(pfn,
+								 buddy_pages);
 					l_obj[l_idx].pfn = buddy_pfn;
 					l_obj[l_idx].order = buddy_order;
 					page_hinting_obj->hyp_idx += 1;
@@ -294,6 +303,7 @@  void guest_free_page(struct page *page, int order)
 	local_irq_save(flags);
 	if (page_hinting_obj->kvm_pt_idx != MAX_FGPT_ENTRIES) {
 		disable_page_poisoning();
+		trace_guest_free_page(page, order);
 		page_hinting_obj->kvm_pt[page_hinting_obj->kvm_pt_idx].pfn =
 							page_to_pfn(page);
 		page_hinting_obj->kvm_pt[page_hinting_obj->kvm_pt_idx].zonenum =