[v4,02/14] xen/x86: fix return value of *_set_allocation functions

Message ID 20161130164950.43543-3-roger.pau@citrix.com (mailing list archive)
State New, archived

Commit Message

Roger Pau Monne Nov. 30, 2016, 4:49 p.m. UTC
The return value of the *_set_allocation functions should be an int: they return 0 on success and a negative error code on failure, so declare them accordingly and let the callers propagate that value instead of hard-coding -ENOMEM.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Tim Deegan <tim@xen.org>
---
Changes since v2:
 - Also fix the callers to treat the return value as an int.
 - Don't convert the pages parameter to unsigned long.
---
 xen/arch/x86/mm/hap/hap.c       |  8 +++-----
 xen/arch/x86/mm/shadow/common.c | 12 +++++-------
 2 files changed, 8 insertions(+), 12 deletions(-)
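For context, a minimal standalone sketch of the pattern this patch moves to: the allocation helper returns an int error code directly, and the caller stores and propagates it rather than remapping any nonzero result to -ENOMEM. This is not the actual Xen code; set_allocation() and enable() are simplified placeholders standing in for hap_set_allocation()/sh_set_allocation() and their callers.

#include <errno.h>
#include <stdio.h>

/* Stand-in for hap_set_allocation()/sh_set_allocation(): returns 0 on
 * success or a negative errno value (e.g. -ENOMEM) on failure.  The real
 * functions were previously declared "unsigned int", which cannot carry
 * a negative error code cleanly. */
static int set_allocation(unsigned int pages)
{
    if ( pages > 1024 )      /* pretend the pool cannot grow this far */
        return -ENOMEM;
    return 0;
}

/* Stand-in for hap_enable()/shadow_enable(): the caller now assigns the
 * helper's result to its own int rv and returns it unchanged, instead of
 * testing a local unsigned int and substituting -ENOMEM itself. */
static int enable(void)
{
    int rv = set_allocation(256);

    if ( rv != 0 )
    {
        set_allocation(0);   /* roll back, mirroring the patch */
        return rv;           /* propagate the error code directly */
    }
    return 0;
}

int main(void)
{
    printf("enable() returned %d\n", enable());
    return 0;
}
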
Patch

diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 3218fa2..f099e94 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -334,7 +334,7 @@  hap_get_allocation(struct domain *d)
 
 /* Set the pool of pages to the required number of pages.
  * Returns 0 for success, non-zero for failure. */
-static unsigned int
+static int
 hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
 {
     struct page_info *pg;
@@ -468,14 +468,12 @@  int hap_enable(struct domain *d, u32 mode)
     old_pages = d->arch.paging.hap.total_pages;
     if ( old_pages == 0 )
     {
-        unsigned int r;
         paging_lock(d);
-        r = hap_set_allocation(d, 256, NULL);
-        if ( r != 0 )
+        rv = hap_set_allocation(d, 256, NULL);
+        if ( rv != 0 )
         {
             hap_set_allocation(d, 0, NULL);
             paging_unlock(d);
-            rv = -ENOMEM;
             goto out;
         }
         paging_unlock(d);
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index ced2313..756c276 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1615,9 +1615,9 @@  shadow_free_p2m_page(struct domain *d, struct page_info *pg)
  * Input will be rounded up to at least shadow_min_acceptable_pages(),
  * plus space for the p2m table.
  * Returns 0 for success, non-zero for failure. */
-static unsigned int sh_set_allocation(struct domain *d,
-                                      unsigned int pages,
-                                      int *preempted)
+static int sh_set_allocation(struct domain *d,
+                             unsigned int pages,
+                             int *preempted)
 {
     struct page_info *sp;
     unsigned int lower_bound;
@@ -3153,13 +3153,11 @@  int shadow_enable(struct domain *d, u32 mode)
     old_pages = d->arch.paging.shadow.total_pages;
     if ( old_pages == 0 )
     {
-        unsigned int r;
         paging_lock(d);
-        r = sh_set_allocation(d, 1024, NULL); /* Use at least 4MB */
-        if ( r != 0 )
+        rv = sh_set_allocation(d, 1024, NULL); /* Use at least 4MB */
+        if ( rv != 0 )
         {
             sh_set_allocation(d, 0, NULL);
-            rv = -ENOMEM;
             goto out_locked;
         }
         paging_unlock(d);