@@ -37,7 +37,7 @@ static struct page_info *translate_get_page(copy_info_t info, uint64_t addr,
return get_page_from_gva(info.gva.v, addr,
write ? GV2M_WRITE : GV2M_READ);
- page = get_page_from_gfn(info.gpa.d, paddr_to_pfn(addr), &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(info.gpa.d, gaddr_to_gfn(addr), &p2mt, P2M_ALLOC);
if ( !page )
return NULL;
@@ -1446,7 +1446,7 @@ int xenmem_add_to_physmap_one(
/* Take reference to the foreign domain page.
* Reference will be released in XENMEM_remove_from_physmap */
- page = get_page_from_gfn(od, idx, &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(od, _gfn(idx), &p2mt, P2M_ALLOC);
if ( !page )
{
put_pg_owner(od);
@@ -617,7 +617,7 @@ static int pvpmu_init(struct domain *d, xen_pmu_params_t *params)
struct vcpu *v;
struct vpmu_struct *vpmu;
struct page_info *page;
- uint64_t gfn = params->val;
+ gfn_t gfn = _gfn(params->val);
if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
return -EINVAL;
@@ -391,7 +391,7 @@ long arch_do_domctl(
break;
}
- page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(gfn), &t, P2M_ALLOC);
if ( unlikely(!page) ||
unlikely(is_xen_heap_page(page)) )
@@ -461,11 +461,11 @@ long arch_do_domctl(
case XEN_DOMCTL_hypercall_init:
{
- unsigned long gmfn = domctl->u.hypercall_init.gmfn;
+ gfn_t gfn = _gfn(domctl->u.hypercall_init.gmfn);
struct page_info *page;
void *hypercall_page;
- page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( !page || !get_page_type(page, PGT_writable_page) )
{
@@ -188,7 +188,7 @@ static int modified_memory(struct domain *d,
{
struct page_info *page;
- page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
+ page = get_page_from_gfn(d, _gfn(pfn), NULL, P2M_UNSHARE);
if ( page )
{
paging_mark_pfn_dirty(d, _pfn(pfn));
@@ -296,8 +296,10 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
- struct page_info *page = get_page_from_gfn(v->domain,
- v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
+ struct page_info *page;
+
+ page = get_page_from_gfn(v->domain,
+ gaddr_to_gfn(v->arch.hvm.guest_cr[3]),
NULL, P2M_ALLOC);
if ( !page )
{
@@ -2216,7 +2216,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
{
struct vcpu *v = current;
struct domain *d = v->domain;
- unsigned long gfn, old_value = v->arch.hvm.guest_cr[0];
+ unsigned long old_value = v->arch.hvm.guest_cr[0];
struct page_info *page;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
@@ -2271,7 +2271,8 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
if ( !paging_mode_hap(d) )
{
/* The guest CR3 must be pointing to the guest physical. */
- gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
+ gfn_t gfn = gaddr_to_gfn(v->arch.hvm.guest_cr[3]);
+
page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( !page )
{
@@ -2363,7 +2364,7 @@ int hvm_set_cr3(unsigned long value, bool may_defer)
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
- page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT,
+ page = get_page_from_gfn(v->domain, cr3_to_gfn(value),
NULL, P2M_ALLOC);
if ( !page )
goto bad_cr3;
@@ -3191,7 +3192,7 @@ enum hvm_translation_result hvm_translate_get_page(
&& hvm_mmio_internal(gfn_to_gaddr(gfn)) )
return HVMTRANS_bad_gfn_to_mfn;
- page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE);
+ page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
if ( !page )
return HVMTRANS_bad_gfn_to_mfn;
@@ -299,7 +299,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
{
if ( c->cr0 & X86_CR0_PG )
{
- page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT,
+ page = get_page_from_gfn(v->domain, cr3_to_gfn(c->cr3),
NULL, P2M_ALLOC);
if ( !page )
{
@@ -2230,9 +2230,9 @@ nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr)
return NULL;
/* Need to translate L1-GPA to MPA */
- page = get_page_from_gfn(v->domain,
- nv->nv_vvmcxaddr >> PAGE_SHIFT,
- &p2mt, P2M_ALLOC | P2M_UNSHARE);
+ page = get_page_from_gfn(v->domain,
+ gaddr_to_gfn(nv->nv_vvmcxaddr),
+ &p2mt, P2M_ALLOC | P2M_UNSHARE);
if ( !page )
return NULL;
@@ -242,16 +242,16 @@ static void dump_hypercall(const struct domain *d)
static void enable_hypercall_page(struct domain *d)
{
- unsigned long gmfn = d->arch.hvm.viridian->hypercall_gpa.pfn;
- struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+ gfn_t gfn = _gfn(d->arch.hvm.viridian->hypercall_gpa.pfn);
+ struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
uint8_t *p;
if ( !page || !get_page_type(page, PGT_writable_page) )
{
if ( page )
put_page(page);
- gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
- gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
+ gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+ gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
return;
}
@@ -719,13 +719,13 @@ void viridian_dump_guest_page(const struct vcpu *v, const char *name,
void viridian_map_guest_page(struct domain *d, struct viridian_page *vp)
{
- unsigned long gmfn = vp->msr.pfn;
+ gfn_t gfn = _gfn(vp->msr.pfn);
struct page_info *page;
if ( vp->ptr )
return;
- page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( !page )
goto fail;
@@ -746,8 +746,8 @@ void viridian_map_guest_page(struct domain *d, struct viridian_page *vp)
return;
fail:
- gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
- gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
+ gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+ gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
}
void viridian_unmap_guest_page(struct viridian_page *vp)
@@ -681,7 +681,7 @@ static int vmx_restore_cr0_cr3(
{
if ( cr0 & X86_CR0_PG )
{
- page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT,
+ page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3),
NULL, P2M_ALLOC);
if ( !page )
{
@@ -1321,7 +1321,7 @@ static void vmx_load_pdptrs(struct vcpu *v)
if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
goto crash;
- page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
+ page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3), &p2mt, P2M_UNSHARE);
if ( !page )
{
/* Ideally you don't want to crash but rather go into a wait
@@ -718,11 +718,11 @@ static void nvmx_update_apic_access_address(struct vcpu *v)
if ( ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES )
{
p2m_type_t p2mt;
- unsigned long apic_gpfn;
+ gfn_t apic_gfn;
struct page_info *apic_pg;
- apic_gpfn = get_vvmcs(v, APIC_ACCESS_ADDR) >> PAGE_SHIFT;
- apic_pg = get_page_from_gfn(v->domain, apic_gpfn, &p2mt, P2M_ALLOC);
+ apic_gfn = gaddr_to_gfn(get_vvmcs(v, APIC_ACCESS_ADDR));
+ apic_pg = get_page_from_gfn(v->domain, apic_gfn, &p2mt, P2M_ALLOC);
ASSERT(apic_pg && !p2m_is_paging(p2mt));
__vmwrite(APIC_ACCESS_ADDR, page_to_maddr(apic_pg));
put_page(apic_pg);
@@ -739,11 +739,11 @@ static void nvmx_update_virtual_apic_address(struct vcpu *v)
if ( ctrl & CPU_BASED_TPR_SHADOW )
{
p2m_type_t p2mt;
- unsigned long vapic_gpfn;
+ gfn_t vapic_gfn;
struct page_info *vapic_pg;
- vapic_gpfn = get_vvmcs(v, VIRTUAL_APIC_PAGE_ADDR) >> PAGE_SHIFT;
- vapic_pg = get_page_from_gfn(v->domain, vapic_gpfn, &p2mt, P2M_ALLOC);
+ vapic_gfn = gaddr_to_gfn(get_vvmcs(v, VIRTUAL_APIC_PAGE_ADDR));
+ vapic_pg = get_page_from_gfn(v->domain, vapic_gfn, &p2mt, P2M_ALLOC);
ASSERT(vapic_pg && !p2m_is_paging(p2mt));
__vmwrite(VIRTUAL_APIC_PAGE_ADDR, page_to_maddr(vapic_pg));
put_page(vapic_pg);
@@ -2150,7 +2150,7 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
p2m_query_t q = l1e_get_flags(nl1e) & _PAGE_RW ?
P2M_ALLOC | P2M_UNSHARE : P2M_ALLOC;
- page = get_page_from_gfn(pg_dom, l1e_get_pfn(nl1e), &p2mt, q);
+ page = get_page_from_gfn(pg_dom, _gfn(l1e_get_pfn(nl1e)), &p2mt, q);
if ( p2m_is_paged(p2mt) )
{
@@ -3433,7 +3433,8 @@ long do_mmuext_op(
if ( paging_mode_refcounts(pg_owner) )
break;
- page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), NULL,
+ P2M_ALLOC);
if ( unlikely(!page) )
{
rc = -EINVAL;
@@ -3499,7 +3500,8 @@ long do_mmuext_op(
if ( paging_mode_refcounts(pg_owner) )
break;
- page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), NULL,
+ P2M_ALLOC);
if ( unlikely(!page) )
{
gdprintk(XENLOG_WARNING,
@@ -3724,7 +3726,8 @@ long do_mmuext_op(
}
case MMUEXT_CLEAR_PAGE:
- page = get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), &p2mt,
+ P2M_ALLOC);
if ( unlikely(p2mt != p2m_ram_rw) && page )
{
put_page(page);
@@ -3752,7 +3755,7 @@ long do_mmuext_op(
{
struct page_info *src_page, *dst_page;
- src_page = get_page_from_gfn(pg_owner, op.arg2.src_mfn, &p2mt,
+ src_page = get_page_from_gfn(pg_owner, _gfn(op.arg2.src_mfn), &p2mt,
P2M_ALLOC);
if ( unlikely(p2mt != p2m_ram_rw) && src_page )
{
@@ -3768,7 +3771,7 @@ long do_mmuext_op(
break;
}
- dst_page = get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt,
+ dst_page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), &p2mt,
P2M_ALLOC);
if ( unlikely(p2mt != p2m_ram_rw) && dst_page )
{
@@ -3856,7 +3859,8 @@ long do_mmu_update(
{
struct mmu_update req;
void *va = NULL;
- unsigned long gpfn, gmfn;
+ unsigned long gpfn;
+ gfn_t gfn;
struct page_info *page;
unsigned int cmd, i = 0, done = 0, pt_dom;
struct vcpu *curr = current, *v = curr;
@@ -3969,8 +3973,8 @@ long do_mmu_update(
rc = -EINVAL;
req.ptr -= cmd;
- gmfn = req.ptr >> PAGE_SHIFT;
- page = get_page_from_gfn(pt_owner, gmfn, &p2mt, P2M_ALLOC);
+ gfn = gaddr_to_gfn(req.ptr);
+ page = get_page_from_gfn(pt_owner, gfn, &p2mt, P2M_ALLOC);
if ( unlikely(!page) || p2mt != p2m_ram_rw )
{
@@ -3978,7 +3982,7 @@ long do_mmu_update(
put_page(page);
if ( p2m_is_paged(p2mt) )
{
- p2m_mem_paging_populate(pt_owner, gmfn);
+ p2m_mem_paging_populate(pt_owner, gfn_x(gfn));
rc = -ENOENT;
}
else
@@ -2967,7 +2967,7 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
* Take a refcnt on the mfn. NB: following supported for foreign mapping:
* ram_rw | ram_logdirty | ram_ro | paging_out.
*/
- page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(fdom, _gfn(fgfn), &p2mt, P2M_ALLOC);
if ( !page ||
!p2m_is_ram(p2mt) || p2m_is_shared(p2mt) || p2m_is_hole(p2mt) )
{
@@ -398,15 +398,15 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
static mfn_t emulate_gva_to_mfn(struct vcpu *v, unsigned long vaddr,
struct sh_emulate_ctxt *sh_ctxt)
{
- unsigned long gfn;
+ gfn_t gfn;
struct page_info *page;
mfn_t mfn;
p2m_type_t p2mt;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
/* Translate the VA to a GFN. */
- gfn = paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec);
- if ( gfn == gfn_x(INVALID_GFN) )
+ gfn = _gfn(paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec));
+ if ( gfn_eq(gfn, INVALID_GFN) )
{
x86_emul_pagefault(pfec, vaddr, &sh_ctxt->ctxt);
@@ -229,7 +229,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
ret = -EINVAL;
- page = get_page_from_gfn(current->domain, info.gmfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(current->domain, _gfn(info.gmfn),
+ NULL, P2M_ALLOC);
if ( !page )
break;
if ( !get_page_type(page, PGT_writable_page) )
@@ -112,7 +112,7 @@ long pv_set_gdt(struct vcpu *v, unsigned long *frames, unsigned int entries)
{
struct page_info *page;
- page = get_page_from_gfn(d, frames[i], NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(frames[i]), NULL, P2M_ALLOC);
if ( !page )
goto fail;
if ( !get_page_type(page, PGT_seg_desc_page) )
@@ -219,7 +219,7 @@ long do_update_descriptor(uint64_t gaddr, seg_desc_t d)
if ( !IS_ALIGNED(gaddr, sizeof(d)) || !check_descriptor(currd, &d) )
return -EINVAL;
- page = get_page_from_gfn(currd, gfn_x(gfn), NULL, P2M_ALLOC);
+ page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
if ( !page )
return -EINVAL;
@@ -756,12 +756,12 @@ static int write_cr(unsigned int reg, unsigned long val,
case 3: /* Write CR3 */
{
struct domain *currd = curr->domain;
- unsigned long gfn;
+ gfn_t gfn;
struct page_info *page;
int rc;
- gfn = !is_pv_32bit_domain(currd)
- ? xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val);
+ gfn = _gfn(!is_pv_32bit_domain(currd)
+ ? xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val));
page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
if ( !page )
break;
@@ -110,7 +110,7 @@ bool pv_map_ldt_shadow_page(unsigned int offset)
if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
return false;
- page = get_page_from_gfn(currd, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
+ page = get_page_from_gfn(currd, _gfn(l1e_get_pfn(gl1e)), NULL, P2M_ALLOC);
if ( unlikely(!page) )
return false;
@@ -826,7 +826,7 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
case 0: /* Write hypercall page */
{
void *hypercall_page;
- unsigned long gmfn = val >> PAGE_SHIFT;
+ gfn_t gfn = gaddr_to_gfn(val);
unsigned int page_index = val & (PAGE_SIZE - 1);
struct page_info *page;
p2m_type_t t;
@@ -839,7 +839,7 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
return X86EMUL_EXCEPTION;
}
- page = get_page_from_gfn(d, gmfn, &t, P2M_ALLOC);
+ page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
if ( !page || !get_page_type(page, PGT_writable_page) )
{
@@ -848,13 +848,14 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
if ( p2m_is_paging(t) )
{
- p2m_mem_paging_populate(d, gmfn);
+ p2m_mem_paging_populate(d, gfn_x(gfn));
return X86EMUL_RETRY;
}
gdprintk(XENLOG_WARNING,
- "Bad GMFN %lx (MFN %#"PRI_mfn") to MSR %08x\n",
- gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN), base);
+                 "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn") to MSR %08x\n",
+                 gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN),
+                 base);
return X86EMUL_EXCEPTION;
}
@@ -1237,7 +1237,7 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
if ( (v != current) && !(v->pause_flags & VPF_down) )
return -EINVAL;
- page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(gfn), NULL, P2M_ALLOC);
if ( !page )
return -EINVAL;
@@ -361,7 +361,7 @@ static const struct evtchn_port_ops evtchn_port_ops_fifo =
.print_state = evtchn_fifo_print_state,
};
-static int map_guest_page(struct domain *d, uint64_t gfn, void **virt)
+static int map_guest_page(struct domain *d, gfn_t gfn, void **virt)
{
struct page_info *p;
@@ -422,7 +422,7 @@ static int setup_control_block(struct vcpu *v)
return 0;
}
-static int map_control_block(struct vcpu *v, uint64_t gfn, uint32_t offset)
+static int map_control_block(struct vcpu *v, gfn_t gfn, uint32_t offset)
{
void *virt;
unsigned int i;
@@ -508,7 +508,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
{
struct domain *d = current->domain;
uint32_t vcpu_id;
- uint64_t gfn;
+ gfn_t gfn;
uint32_t offset;
struct vcpu *v;
int rc;
@@ -516,7 +516,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
init_control->link_bits = EVTCHN_FIFO_LINK_BITS;
vcpu_id = init_control->vcpu;
- gfn = init_control->control_gfn;
+ gfn = _gfn(init_control->control_gfn);
offset = init_control->offset;
if ( (v = domain_vcpu(d, vcpu_id)) == NULL )
@@ -578,7 +578,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
return rc;
}
-static int add_page_to_event_array(struct domain *d, unsigned long gfn)
+static int add_page_to_event_array(struct domain *d, gfn_t gfn)
{
void *virt;
unsigned int slot;
@@ -628,7 +628,7 @@ int evtchn_fifo_expand_array(const struct evtchn_expand_array *expand_array)
return -EOPNOTSUPP;
spin_lock(&d->event_lock);
- rc = add_page_to_event_array(d, expand_array->array_gfn);
+ rc = add_page_to_event_array(d, _gfn(expand_array->array_gfn));
spin_unlock(&d->event_lock);
return rc;
@@ -1388,7 +1388,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
return rc;
}
- page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(xrfp.gpfn), NULL, P2M_ALLOC);
if ( page )
{
rc = guest_physmap_remove_page(d, _gfn(xrfp.gpfn),
@@ -1659,7 +1659,7 @@ int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly,
p2m_type_t p2mt;
struct page_info *page;
- page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, q);
+ page = get_page_from_gfn(d, gfn, &p2mt, q);
#ifdef CONFIG_HAS_MEM_PAGING
if ( p2m_is_paging(p2mt) )
@@ -304,7 +304,7 @@ struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn,
p2m_type_t *t);
static inline struct page_info *get_page_from_gfn(
- struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+ struct domain *d, gfn_t gfn, p2m_type_t *t, p2m_query_t q)
{
mfn_t mfn;
p2m_type_t _t;
@@ -315,7 +315,7 @@ static inline struct page_info *get_page_from_gfn(
* not auto-translated.
*/
if ( likely(d != dom_xen) )
- return p2m_get_page_from_gfn(d, _gfn(gfn), t);
+ return p2m_get_page_from_gfn(d, gfn, t);
if ( !t )
t = &_t;
@@ -326,7 +326,7 @@ static inline struct page_info *get_page_from_gfn(
* DOMID_XEN sees 1-1 RAM. The p2m_type is based on the type of the
* page.
*/
- mfn = _mfn(gfn);
+ mfn = _mfn(gfn_x(gfn));
page = mfn_to_page(mfn);
if ( !mfn_valid(mfn) || !get_page(page, d) )
@@ -487,18 +487,22 @@ struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
p2m_query_t q);
static inline struct page_info *get_page_from_gfn(
- struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+ struct domain *d, gfn_t gfn, p2m_type_t *t, p2m_query_t q)
{
struct page_info *page;
+ mfn_t mfn;
if ( paging_mode_translate(d) )
- return p2m_get_page_from_gfn(p2m_get_hostp2m(d), _gfn(gfn), t, NULL, q);
+ return p2m_get_page_from_gfn(p2m_get_hostp2m(d), gfn, t, NULL, q);
/* Non-translated guests see 1-1 RAM / MMIO mappings everywhere */
if ( t )
*t = likely(d != dom_io) ? p2m_ram_rw : p2m_mmio_direct;
- page = mfn_to_page(_mfn(gfn));
- return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;
+
+ mfn = _mfn(gfn_x(gfn));
+
+ page = mfn_to_page(mfn);
+ return mfn_valid(mfn) && get_page(page, d) ? page : NULL;
}
/* General conversion function from mfn to gfn */