diff mbox

[intel-sgx-kernel-dev,RFC,11/12] intel_sgx: add stats and events interfaces to EPC cgroup controller

Message ID 1497461858-20309-12-git-send-email-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Sean Christopherson June 14, 2017, 5:37 p.m. UTC
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 drivers/platform/x86/intel_sgx/sgx_epc_cgroup.c | 84 +++++++++++++++++++++++--
 drivers/platform/x86/intel_sgx/sgx_epc_cgroup.h | 83 ++++++++++++++++++++++++
 drivers/platform/x86/intel_sgx/sgx_page_cache.c |  9 +++
 3 files changed, 172 insertions(+), 4 deletions(-)
diff mbox

Patch

diff --git a/drivers/platform/x86/intel_sgx/sgx_epc_cgroup.c b/drivers/platform/x86/intel_sgx/sgx_epc_cgroup.c
index 273555c..6b1d299 100644
--- a/drivers/platform/x86/intel_sgx/sgx_epc_cgroup.c
+++ b/drivers/platform/x86/intel_sgx/sgx_epc_cgroup.c
@@ -240,8 +240,11 @@  static bool sgx_epc_cgroup_lru_empty(struct sgx_epc_cgroup *root)
 
 static unsigned long sgx_epc_cgroup_swap_pages(unsigned long nr_pages,
 					       unsigned int flags,
-					       struct sgx_epc_cgroup *epc_cg)
+					       struct sgx_epc_cgroup *epc_cg,
+					       enum sgx_epc_cgroup_counter c)
 {
+	sgx_epc_cgroup_event(epc_cg, c, 1);
+
 	/*
 	 * Ensure sgx_swap_pages is called with a minimum and maximum
 	 * number of pages.  Attempting to swap only a few pages will
@@ -258,14 +261,16 @@  static inline unsigned long sgx_epc_cgroup_swap_max(unsigned long nr_pages,
 						    unsigned int flags,
 						    struct sgx_epc_cgroup *epc_cg)
 {
-	return sgx_epc_cgroup_swap_pages(nr_pages, flags, epc_cg);
+	return sgx_epc_cgroup_swap_pages(nr_pages, flags, epc_cg,
+					 SGX_EPC_CGROUP_MAX);
 }
 
 static inline unsigned long sgx_epc_cgroup_swap_high(unsigned long nr_pages,
 						     unsigned int flags,
 						     struct sgx_epc_cgroup *epc_cg)
 {
-	return sgx_epc_cgroup_swap_pages(nr_pages, flags, epc_cg);
+	return sgx_epc_cgroup_swap_pages(nr_pages, flags, epc_cg,
+					 SGX_EPC_CGROUP_HIGH);
 }
 
 static void sgx_epc_cgroup_reclaim_high(struct sgx_epc_cgroup *epc_cg)
@@ -436,8 +441,10 @@  int sgx_epc_cgroup_try_charge(struct mm_struct *mm,
 	ret = __sgx_epc_cgroup_try_charge(epc_cg, alloc_flags, nr_pages);
 	css_put(&epc_cg->css);
 
-	if (!ret)
+	if (!ret) {
 		*epc_cg_ptr = epc_cg;
+		sgx_epc_cgroup_cnt_add(epc_cg, SGX_EPC_CGROUP_PAGES, nr_pages);
+	}
 	return ret;
 }
 
@@ -456,6 +463,7 @@  void sgx_epc_cgroup_uncharge(struct sgx_epc_cgroup *epc_cg,
 		return;
 
 	page_counter_uncharge(&epc_cg->pc, nr_pages);
+	sgx_epc_cgroup_cnt_sub(epc_cg, SGX_EPC_CGROUP_PAGES, nr_pages);
 
 	if (epc_cg != root_epc_cgroup)
 		css_put_many(&epc_cg->css, nr_pages);
@@ -592,6 +600,62 @@  static u64 sgx_epc_current_read(struct cgroup_subsys_state *css,
 	return (u64)page_counter_read(&epc_cg->pc) * PAGE_SIZE;
 }
 
+static int sgx_epc_stats_show(struct seq_file *m, void *v)
+{
+	struct sgx_epc_cgroup *epc_cg = sgx_epc_cgroup_from_css(seq_css(m));
+
+	unsigned long cur, dir, rec, recs;
+	cur = page_counter_read(&epc_cg->pc);
+	dir = sgx_epc_cgroup_cnt_read(epc_cg, SGX_EPC_CGROUP_PAGES);
+	rec = sgx_epc_cgroup_cnt_read(epc_cg, SGX_EPC_CGROUP_RECLAIMED);
+	recs = sgx_epc_cgroup_cnt_read(epc_cg, SGX_EPC_CGROUP_RECLAMATIONS);
+
+	seq_printf(m, "pages            %lu\n", cur);
+	seq_printf(m, "direct           %lu\n", dir);
+	seq_printf(m, "indirect         %lu\n", cur - dir);
+	seq_printf(m, "reclaimed        %lu\n", rec);
+	seq_printf(m, "reclamations     %lu\n", recs);
+
+	return 0;
+}
+
+static ssize_t sgx_epc_stats_reset(struct kernfs_open_file *of,
+				   char *buf, size_t nbytes, loff_t off)
+{
+	struct sgx_epc_cgroup *epc_cg = sgx_epc_cgroup_from_css(of_css(of));
+	sgx_epc_cgroup_cnt_reset(epc_cg, SGX_EPC_CGROUP_RECLAIMED);
+	sgx_epc_cgroup_cnt_reset(epc_cg, SGX_EPC_CGROUP_RECLAMATIONS);
+	return nbytes;
+}
+
+
+static int sgx_epc_events_show(struct seq_file *m, void *v)
+{
+	struct sgx_epc_cgroup *epc_cg = sgx_epc_cgroup_from_css(seq_css(m));
+
+	unsigned long low, high, max;
+	low  = sgx_epc_cgroup_cnt_read(epc_cg, SGX_EPC_CGROUP_LOW);
+	high = sgx_epc_cgroup_cnt_read(epc_cg, SGX_EPC_CGROUP_HIGH);
+	max  = sgx_epc_cgroup_cnt_read(epc_cg, SGX_EPC_CGROUP_MAX);
+
+	seq_printf(m, "low      %lu\n", low);
+	seq_printf(m, "high     %lu\n", high);
+	seq_printf(m, "max      %lu\n", max);
+
+	return 0;
+}
+
+static ssize_t sgx_epc_events_reset(struct kernfs_open_file *of,
+				    char *buf, size_t nbytes, loff_t off)
+{
+	struct sgx_epc_cgroup *epc_cg = sgx_epc_cgroup_from_css(of_css(of));
+	sgx_epc_cgroup_cnt_reset(epc_cg, SGX_EPC_CGROUP_LOW);
+	sgx_epc_cgroup_cnt_reset(epc_cg, SGX_EPC_CGROUP_HIGH);
+	sgx_epc_cgroup_cnt_reset(epc_cg, SGX_EPC_CGROUP_MAX);
+	return nbytes;
+}
+
+
 static int sgx_epc_low_show(struct seq_file *m, void *v)
 {
 	struct sgx_epc_cgroup *epc_cg = sgx_epc_cgroup_from_css(seq_css(m));
@@ -729,6 +793,18 @@  static struct cftype sgx_epc_cgroup_files[] = {
 		.read_u64 = sgx_epc_current_read,
 	},
 	{
+		.name = "stats",
+		.seq_show = sgx_epc_stats_show,
+		.write = sgx_epc_stats_reset,
+	},
+	{
+		.name = "events",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.file_offset = offsetof(struct sgx_epc_cgroup, events_file),
+		.seq_show = sgx_epc_events_show,
+		.write = sgx_epc_events_reset,
+	},
+	{
 		.name = "low",
 		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = sgx_epc_low_show,
diff --git a/drivers/platform/x86/intel_sgx/sgx_epc_cgroup.h b/drivers/platform/x86/intel_sgx/sgx_epc_cgroup.h
index 40ba7fc..ca495ab 100644
--- a/drivers/platform/x86/intel_sgx/sgx_epc_cgroup.h
+++ b/drivers/platform/x86/intel_sgx/sgx_epc_cgroup.h
@@ -25,6 +25,17 @@ 
 
 #ifdef CONFIG_CGROUP_SGX_EPC
 
+enum sgx_epc_cgroup_counter {
+	SGX_EPC_CGROUP_PAGES,
+	SGX_EPC_CGROUP_RECLAIMED,
+	SGX_EPC_CGROUP_RECLAMATIONS,
+	SGX_EPC_CGROUP_LOW,
+	SGX_EPC_CGROUP_HIGH,
+	SGX_EPC_CGROUP_MAX,
+	SGX_EPC_CGROUP_NR_COUNTERS,
+};
+
+
 struct sgx_epc_cgroup {
 	struct cgroup_subsys_state	css;
 
@@ -36,6 +47,10 @@  struct sgx_epc_cgroup {
 	struct sgx_epc_cgroup	*reclaim_iter;
 	struct work_struct	reclaim_work;
 	unsigned int		epoch;
+
+	atomic_long_t		cnt[SGX_EPC_CGROUP_NR_COUNTERS];
+
+	struct cgroup_file	events_file;
 };
 
 struct sgx_epc_reclaim {
@@ -57,6 +72,74 @@  bool sgx_epc_cgroup_is_low(struct sgx_epc_cgroup *root,
 			   struct sgx_epc_cgroup *epc_cg);
 bool sgx_epc_cgroup_all_in_use_are_low(struct sgx_epc_cgroup *root);
 
+
+/**
+ * sgx_epc_cgroup_cnt_read - read an EPC cgroup counter
+ * @epc_cg:	the EPC cgroup
+ * @i:		the counter index
+ */
+static inline unsigned long sgx_epc_cgroup_cnt_read(struct sgx_epc_cgroup *epc_cg,
+						    enum sgx_epc_cgroup_counter i)
+{
+	return atomic_long_read(&epc_cg->cnt[i]);
+}
+
+/**
+ * sgx_epc_cgroup_cnt_reset - reset an EPC cgroup counter
+ * @epc_cg:	the EPC cgroup
+ * @i:		the counter index
+ */
+static inline void sgx_epc_cgroup_cnt_reset(struct sgx_epc_cgroup *epc_cg,
+					    enum sgx_epc_cgroup_counter i)
+{
+	atomic_long_set(&epc_cg->cnt[i], 0);
+}
+
+/**
+ * sgx_epc_cgroup_cnt_add - increment an EPC cgroup counter
+ * @epc_cg:	the EPC cgroup
+ * @i:		the counter index
+ * @cnt:	the number of counts to add
+ */
+static inline void sgx_epc_cgroup_cnt_add(struct sgx_epc_cgroup *epc_cg,
+					  enum sgx_epc_cgroup_counter i,
+					  unsigned long cnt)
+{
+	atomic_long_add(cnt, &epc_cg->cnt[i]);
+}
+
+/**
+ * sgx_epc_cgroup_event - log an EPC cgroup event
+ * @epc_cg:	the EPC cgroup
+ * @i:		the counter index
+ * @cnt:	the number of event counts to add
+ */
+static inline void sgx_epc_cgroup_event(struct sgx_epc_cgroup *epc_cg,
+					enum sgx_epc_cgroup_counter i,
+					unsigned long cnt)
+{
+	sgx_epc_cgroup_cnt_add(epc_cg, i, cnt);
+
+	if (i == SGX_EPC_CGROUP_LOW ||
+	    i == SGX_EPC_CGROUP_HIGH ||
+	    i == SGX_EPC_CGROUP_MAX)
+		cgroup_file_notify(&epc_cg->events_file);
+}
+
+/**
+ * sgx_epc_cgroup_cnt_sub - decrement an EPC cgroup counter
+ * @epc_cg:	the EPC cgroup
+ * @i:		the counter index
+ * @cnt:	the number of counts to subtract
+ */
+static inline void sgx_epc_cgroup_cnt_sub(struct sgx_epc_cgroup *epc_cg,
+					  enum sgx_epc_cgroup_counter i,
+					  unsigned long cnt)
+{
+	atomic_long_sub(cnt, &epc_cg->cnt[i]);
+}
+
+
 #else
 
 struct sgx_epc_cgroup;
diff --git a/drivers/platform/x86/intel_sgx/sgx_page_cache.c b/drivers/platform/x86/intel_sgx/sgx_page_cache.c
index c5e7210..426d02e 100644
--- a/drivers/platform/x86/intel_sgx/sgx_page_cache.c
+++ b/drivers/platform/x86/intel_sgx/sgx_page_cache.c
@@ -213,7 +213,9 @@  static void sgx_isolate_pages(struct list_head *dst,
 			 */
 			if (!sgx_epc_cgroup_all_in_use_are_low(root))
 				continue;
+			sgx_epc_cgroup_event(epc_cg, SGX_EPC_CGROUP_LOW, 1);
 		}
+		sgx_epc_cgroup_event(epc_cg, SGX_EPC_CGROUP_RECLAMATIONS, 1);
 
 		nr_scanned += __sgx_isolate_pages(dst, nr_to_scan - nr_scanned, &epc_cg->lru);
 		if (nr_scanned >= nr_to_scan) {
@@ -302,6 +304,13 @@  static void sgx_evict_page(struct sgx_encl_page *entry,
 			   struct sgx_encl *encl)
 {
 	sgx_ewb(encl, entry);
+
+#ifdef CONFIG_CGROUP_SGX_EPC
+	if (entry->epc_page->epc_cg)
+		sgx_epc_cgroup_event(entry->epc_page->epc_cg,
+				     SGX_EPC_CGROUP_RECLAIMED, 1);
+#endif
+
 	sgx_free_page(entry->epc_page, encl);
 	entry->epc_page = NULL;
 	entry->flags &= ~SGX_ENCL_PAGE_RESERVED;