@@ -181,7 +181,7 @@ bool handle_hvm_io_completion(struct vcpu *v)
return true;
}
-static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
+static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
{
struct domain *d = s->domain;
unsigned int i;
@@ -192,18 +192,17 @@ static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
{
if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
{
- return d->arch.hvm_domain.ioreq_gfn.base + i;
+ return _gfn(d->arch.hvm_domain.ioreq_gfn.base + i);
}
}
- return gfn_x(INVALID_GFN);
+ return INVALID_GFN;
}
-static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s,
- unsigned long gfn)
+static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
{
struct domain *d = s->domain;
- unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
+ unsigned int i = gfn_x(gfn) - d->arch.hvm_domain.ioreq_gfn.base;
ASSERT(!s->is_default);
@@ -214,7 +213,7 @@ static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
- if ( iorp->gfn == gfn_x(INVALID_GFN) )
+ if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return;
destroy_ring_for_helper(&iorp->va, iorp->page);
@@ -223,7 +222,7 @@ static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
if ( !s->is_default )
hvm_free_ioreq_gfn(s, iorp->gfn);
- iorp->gfn = gfn_x(INVALID_GFN);
+ iorp->gfn = INVALID_GFN;
}
static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
@@ -236,16 +235,17 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
return -EINVAL;
if ( s->is_default )
- iorp->gfn = buf ?
- d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
- d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+ iorp->gfn = _gfn(buf ?
+ d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
+ d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN]);
else
iorp->gfn = hvm_alloc_ioreq_gfn(s);
- if ( iorp->gfn == gfn_x(INVALID_GFN) )
+ if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return -ENOMEM;
- rc = prepare_ring_for_helper(d, iorp->gfn, &iorp->page, &iorp->va);
+ rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page,
+ &iorp->va);
if ( rc )
hvm_unmap_ioreq_gfn(s, buf);
@@ -282,10 +282,10 @@ static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
struct domain *d = s->domain;
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
- if ( s->is_default || iorp->gfn == gfn_x(INVALID_GFN) )
+ if ( s->is_default || gfn_eq(iorp->gfn, INVALID_GFN) )
return;
- if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
+ if ( guest_physmap_remove_page(d, iorp->gfn,
_mfn(page_to_mfn(iorp->page)), 0) )
domain_crash(d);
clear_page(iorp->va);
@@ -297,12 +297,12 @@ static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
int rc;
- if ( s->is_default || iorp->gfn == gfn_x(INVALID_GFN) )
+ if ( s->is_default || gfn_eq(iorp->gfn, INVALID_GFN) )
return 0;
clear_page(iorp->va);
- rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
+ rc = guest_physmap_add_page(d, iorp->gfn,
_mfn(page_to_mfn(iorp->page)), 0);
if ( rc == 0 )
paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
@@ -561,8 +561,8 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
INIT_LIST_HEAD(&s->ioreq_vcpu_list);
spin_lock_init(&s->bufioreq_lock);
- s->ioreq.gfn = gfn_x(INVALID_GFN);
- s->bufioreq.gfn = gfn_x(INVALID_GFN);
+ s->ioreq.gfn = INVALID_GFN;
+ s->bufioreq.gfn = INVALID_GFN;
rc = hvm_ioreq_server_alloc_rangesets(s);
if ( rc )
@@ -747,11 +747,11 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
if ( s->id != id )
continue;
- *ioreq_gfn = s->ioreq.gfn;
+ *ioreq_gfn = gfn_x(s->ioreq.gfn);
if ( s->bufioreq.va != NULL )
{
- *bufioreq_gfn = s->bufioreq.gfn;
+ *bufioreq_gfn = gfn_x(s->bufioreq.gfn);
*bufioreq_port = s->bufioreq_evtchn;
}
@@ -36,7 +36,7 @@
#include <public/hvm/dm_op.h>
struct hvm_ioreq_page {
- unsigned long gfn;
+ gfn_t gfn;
struct page_info *page;
void *va;
};
This patch adjusts the IOREQ server code to use type-safe gfn_t values
where possible.

No functional change.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/hvm/ioreq.c         | 42 ++++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/domain.h |  2 +-
 2 files changed, 22 insertions(+), 22 deletions(-)
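
For readers unfamiliar with the pattern: gfn_t is Xen's typesafe wrapper
around an unsigned long frame number, so conversions must go through
_gfn()/gfn_x() and comparisons through gfn_eq(), as the hunks above do.
Below is a minimal stand-alone sketch of that pattern, simplified for
illustration; the real definitions live in Xen's typesafe machinery and
differ in detail (e.g. they are macro-generated and may compile away in
release builds), so treat this as an approximation rather than the
actual header contents.

    #include <stdbool.h>
    #include <stdio.h>

    /* Wrapper type: a raw unsigned long cannot be assigned or compared
     * to a gfn_t by accident; the compiler rejects it. */
    typedef struct { unsigned long g; } gfn_t;

    static inline gfn_t _gfn(unsigned long g) { return (gfn_t){ g }; }
    static inline unsigned long gfn_x(gfn_t g) { return g.g; }
    static inline bool gfn_eq(gfn_t a, gfn_t b) { return gfn_x(a) == gfn_x(b); }

    #define INVALID_GFN _gfn(~0UL)

    int main(void)
    {
        gfn_t gfn = INVALID_GFN;

        /* A raw "gfn == INVALID_GFN" no longer compiles; the explicit
         * gfn_eq()/gfn_x() accessors are the only way in or out, which
         * is exactly the class of mix-up the patch is guarding against. */
        if ( gfn_eq(gfn, INVALID_GFN) )
            printf("gfn is unset (raw value %#lx)\n", gfn_x(gfn));

        gfn = _gfn(0x1000);
        printf("gfn = %#lx\n", gfn_x(gfn));

        return 0;
    }

This is why hvm_get_ioreq_server_info() above still unwraps with
gfn_x() at the hypercall boundary: the external interface keeps plain
integers, while everything internal carries the typed value.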