@@ -688,7 +688,7 @@ int arch_sanitise_domain_config(struct xen_domctl_createdomain *config)
return -EINVAL;
}
- if ( config->altp2m.opts )
+ if ( config->altp2m.opts || config->altp2m.nr )
{
dprintk(XENLOG_INFO, "Altp2m not supported\n");
return -EINVAL;
@@ -724,16 +724,42 @@ int arch_sanitise_domain_config(struct xen_domctl_createdomain *config)
return -EINVAL;
}
- if ( altp2m_mode && nested_virt )
+ if ( altp2m_mode )
{
- dprintk(XENLOG_INFO,
- "Nested virt and altp2m are not supported together\n");
- return -EINVAL;
- }
+ if ( nested_virt )
+ {
+ dprintk(XENLOG_INFO,
+ "Nested virt and altp2m are not supported together\n");
+ return -EINVAL;
+ }
+
+ if ( !hap )
+ {
+ dprintk(XENLOG_INFO, "altp2m is only supported with HAP\n");
+ return -EINVAL;
+ }
+
+ if ( !hvm_altp2m_supported() )
+ {
+ dprintk(XENLOG_INFO, "altp2m is not supported\n");
+ return -EINVAL;
+ }
+
+ if ( !config->altp2m.nr )
+ {
+            /* Default to the historical number of views (10) when unspecified. */
+ config->altp2m.nr = 10;
+ }
- if ( altp2m_mode && !hap )
+ if ( config->altp2m.nr > MAX_NR_ALTP2M )
+ {
+            dprintk(XENLOG_INFO, "altp2m.nr must be <= %zu\n", MAX_NR_ALTP2M);
+ return -EINVAL;
+ }
+ }
+ else if ( config->altp2m.nr )
{
- dprintk(XENLOG_INFO, "altp2m is only supported with HAP\n");
+ dprintk(XENLOG_INFO, "altp2m.nr must be zero when altp2m is off\n");
return -EINVAL;
}
@@ -4657,6 +4657,12 @@ static int do_altp2m_op(
goto out;
}
+ if ( d->nr_altp2m == 0 )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
+
if ( (rc = xsm_hvm_altp2mhvm_op(XSM_OTHER, d, mode, a.cmd)) )
goto out;
@@ -5245,7 +5251,7 @@ void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx)
if ( !hvm_is_singlestep_supported() )
return;
- if ( p2midx >= MAX_ALTP2M )
+ if ( p2midx >= v->domain->nr_altp2m )
return;
v->arch.hvm.single_step = true;
@@ -4885,7 +4885,7 @@ bool asmlinkage vmx_vmenter_helper(const struct cpu_user_regs *regs)
{
unsigned int i;
- for ( i = 0; i < MAX_ALTP2M; ++i )
+ for ( i = 0; i < currd->nr_altp2m; ++i )
{
if ( currd->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
continue;
@@ -258,11 +258,12 @@ struct paging_vcpu {
struct shadow_vcpu shadow;
};
-#define MAX_NESTEDP2M 10
+#define MAX_EPTP (PAGE_SIZE / sizeof(uint64_t))
+#define MAX_NR_ALTP2M MAX_EPTP
+#define MAX_NESTEDP2M 10
-#define MAX_ALTP2M 10 /* arbitrary */
#define INVALID_ALTP2M 0xffff
-#define MAX_EPTP (PAGE_SIZE / sizeof(uint64_t))
+
struct p2m_domain;
struct time_scale {
int shift;
@@ -353,7 +354,7 @@ struct arch_domain
/* altp2m: allow multiple copies of host p2m */
bool altp2m_active;
- struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
+ struct p2m_domain **altp2m_p2m;
mm_lock_t altp2m_list_lock;
uint64_t *altp2m_eptp;
uint64_t *altp2m_visible_eptp;
@@ -828,6 +828,11 @@ static inline bool hvm_hap_supported(void)
return false;
}
+static inline bool hvm_altp2m_supported(void)
+{
+ return false;
+}
+
static inline bool hvm_nested_virt_supported(void)
{
return false;
@@ -886,7 +886,7 @@ static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
if ( index == INVALID_ALTP2M )
return NULL;
- BUG_ON(index >= MAX_ALTP2M);
+ BUG_ON(index >= v->domain->nr_altp2m);
return v->domain->arch.altp2m_p2m[index];
}
@@ -896,7 +896,7 @@ static inline bool p2m_set_altp2m(struct vcpu *v, unsigned int idx)
{
struct p2m_domain *orig;
- BUG_ON(idx >= MAX_ALTP2M);
+ BUG_ON(idx >= v->domain->nr_altp2m);
if ( idx == vcpu_altp2m(v).p2midx )
return false;
@@ -15,6 +15,9 @@
void
altp2m_vcpu_initialise(struct vcpu *v)
{
+ if ( v->domain->nr_altp2m == 0 )
+ return;
+
if ( v != current )
vcpu_pause(v);
@@ -32,6 +35,9 @@ altp2m_vcpu_destroy(struct vcpu *v)
{
struct p2m_domain *p2m;
+ if ( v->domain->nr_altp2m == 0 )
+ return;
+
if ( v != current )
vcpu_pause(v);
@@ -122,7 +128,12 @@ int p2m_init_altp2m(struct domain *d)
struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
mm_lock_init(&d->arch.altp2m_list_lock);
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ d->arch.altp2m_p2m = xzalloc_array(struct p2m_domain *, d->nr_altp2m);
+
+ if ( !d->arch.altp2m_p2m )
+ return -ENOMEM;
+
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
if ( p2m == NULL )
@@ -143,7 +154,10 @@ void p2m_teardown_altp2m(struct domain *d)
unsigned int i;
struct p2m_domain *p2m;
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ if ( !d->arch.altp2m_p2m )
+ return;
+
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
if ( !d->arch.altp2m_p2m[i] )
continue;
@@ -151,6 +165,8 @@ void p2m_teardown_altp2m(struct domain *d)
d->arch.altp2m_p2m[i] = NULL;
p2m_free_one(p2m);
}
+
+ XFREE(d->arch.altp2m_p2m);
}
int altp2m_get_effective_entry(struct p2m_domain *ap2m, gfn_t gfn, mfn_t *mfn,
@@ -200,7 +216,7 @@ bool p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
struct domain *d = v->domain;
bool rc = false;
- if ( idx >= MAX_ALTP2M )
+ if ( idx >= d->nr_altp2m )
return rc;
altp2m_list_lock(d);
@@ -306,8 +322,8 @@ static void p2m_reset_altp2m(struct domain *d, unsigned int idx,
{
struct p2m_domain *p2m;
- ASSERT(idx < MAX_ALTP2M);
- p2m = array_access_nospec(d->arch.altp2m_p2m, idx);
+ ASSERT(idx < d->nr_altp2m);
+ p2m = d->arch.altp2m_p2m[array_index_nospec(idx, d->nr_altp2m)];
p2m_lock(p2m);
@@ -332,7 +348,7 @@ void p2m_flush_altp2m(struct domain *d)
altp2m_list_lock(d);
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
p2m_reset_altp2m(d, i, ALTP2M_DEACTIVATE);
d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
@@ -348,9 +364,9 @@ static int p2m_activate_altp2m(struct domain *d, unsigned int idx,
struct p2m_domain *hostp2m, *p2m;
int rc;
- ASSERT(idx < MAX_ALTP2M);
+ ASSERT(idx < d->nr_altp2m);
- p2m = array_access_nospec(d->arch.altp2m_p2m, idx);
+ p2m = d->arch.altp2m_p2m[array_index_nospec(idx, d->nr_altp2m)];
hostp2m = p2m_get_hostp2m(d);
p2m_lock(p2m);
@@ -388,7 +404,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
int rc = -EINVAL;
struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
- if ( idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) )
+ if ( idx >= d->nr_altp2m )
return rc;
altp2m_list_lock(d);
@@ -415,7 +431,7 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx,
altp2m_list_lock(d);
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
continue;
@@ -437,7 +453,7 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
struct p2m_domain *p2m;
int rc = -EBUSY;
- if ( !idx || idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) )
+ if ( !idx || idx >= d->nr_altp2m )
return rc;
rc = domain_pause_except_self(d);
@@ -450,7 +466,7 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
if ( d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] !=
mfn_x(INVALID_MFN) )
{
- p2m = array_access_nospec(d->arch.altp2m_p2m, idx);
+ p2m = d->arch.altp2m_p2m[array_index_nospec(idx, d->nr_altp2m)];
if ( !_atomic_read(p2m->active_vcpus) )
{
@@ -475,7 +491,7 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
struct vcpu *v;
int rc = -EINVAL;
- if ( idx >= MAX_ALTP2M )
+ if ( idx >= d->nr_altp2m )
return rc;
rc = domain_pause_except_self(d);
@@ -510,13 +526,13 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
mfn_t mfn;
int rc = -EINVAL;
- if ( idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
+ if ( idx >= d->nr_altp2m ||
d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] ==
mfn_x(INVALID_MFN) )
return rc;
hp2m = p2m_get_hostp2m(d);
- ap2m = array_access_nospec(d->arch.altp2m_p2m, idx);
+ ap2m = d->arch.altp2m_p2m[array_index_nospec(idx, d->nr_altp2m)];
p2m_lock(hp2m);
p2m_lock(ap2m);
@@ -572,7 +588,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
altp2m_list_lock(d);
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
p2m_type_t t;
p2m_access_t a;
@@ -595,7 +611,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
else
{
/* At least 2 altp2m's impacted, so reset everything */
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
if ( i == last_reset_idx ||
d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
@@ -659,12 +675,13 @@ int p2m_set_suppress_ve_multi(struct domain *d,
if ( sve->view > 0 )
{
- if ( sve->view >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
+ if ( sve->view >= d->nr_altp2m ||
d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_EPTP)] ==
mfn_x(INVALID_MFN) )
return -EINVAL;
- p2m = ap2m = array_access_nospec(d->arch.altp2m_p2m, sve->view);
+ p2m = ap2m =
+ d->arch.altp2m_p2m[array_index_nospec(sve->view, d->nr_altp2m)];
}
p2m_lock(host_p2m);
@@ -727,12 +744,13 @@ int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
if ( altp2m_idx > 0 )
{
- if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
+ if ( altp2m_idx >= d->nr_altp2m ||
d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
mfn_x(INVALID_MFN) )
return -EINVAL;
- p2m = ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
+ p2m = ap2m =
+ d->arch.altp2m_p2m[array_index_nospec(altp2m_idx, d->nr_altp2m)];
}
else
p2m = host_p2m;
@@ -763,9 +781,9 @@ int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int altp2m_idx,
/*
* Eptp index is correlated with altp2m index and should not exceed
- * min(MAX_ALTP2M, MAX_EPTP).
+ * d->nr_altp2m.
*/
- if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
+ if ( altp2m_idx >= d->nr_altp2m ||
d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
mfn_x(INVALID_MFN) )
rc = -EINVAL;
@@ -515,7 +515,7 @@ int hap_enable(struct domain *d, u32 mode)
d->arch.altp2m_visible_eptp[i] = mfn_x(INVALID_MFN);
}
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
rv = p2m_alloc_table(d->arch.altp2m_p2m[i]);
if ( rv != 0 )
@@ -538,7 +538,7 @@ void hap_final_teardown(struct domain *d)
unsigned int i;
if ( hvm_altp2m_supported() )
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
p2m_teardown(d->arch.altp2m_p2m[i], true, NULL);
/* Destroy nestedp2m's first */
@@ -590,7 +590,7 @@ void hap_teardown(struct domain *d, bool *preempted)
FREE_XENHEAP_PAGE(d->arch.altp2m_eptp);
FREE_XENHEAP_PAGE(d->arch.altp2m_visible_eptp);
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
p2m_teardown(d->arch.altp2m_p2m[i], false, preempted);
if ( preempted && *preempted )
@@ -347,12 +347,12 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
/* altp2m view 0 is treated as the hostp2m */
if ( altp2m_idx )
{
- if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
+ if ( altp2m_idx >= d->nr_altp2m ||
d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
mfn_x(INVALID_MFN) )
return -EINVAL;
- ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
+ ap2m = d->arch.altp2m_p2m[array_index_nospec(altp2m_idx, d->nr_altp2m)];
}
if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
@@ -403,12 +403,12 @@ long p2m_set_mem_access_multi(struct domain *d,
/* altp2m view 0 is treated as the hostp2m */
if ( altp2m_idx )
{
- if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
+ if ( altp2m_idx >= d->nr_altp2m ||
d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
mfn_x(INVALID_MFN) )
return -EINVAL;
- ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
+ ap2m = d->arch.altp2m_p2m[array_index_nospec(altp2m_idx, d->nr_altp2m)];
}
p2m_lock(p2m);
@@ -466,12 +466,12 @@ int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
}
else if ( altp2m_idx ) /* altp2m view 0 is treated as the hostp2m */
{
- if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
+ if ( altp2m_idx >= d->nr_altp2m ||
d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
mfn_x(INVALID_MFN) )
return -EINVAL;
- p2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
+ p2m = d->arch.altp2m_p2m[array_index_nospec(altp2m_idx, d->nr_altp2m)];
}
return _p2m_get_mem_access(p2m, gfn, access);
@@ -486,7 +486,7 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
if ( altp2m_active(d) )
{
unsigned int i;
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
@@ -912,7 +912,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
altp2m_list_lock(d);
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
ap2m = d->arch.altp2m_p2m[i];
if ( !ap2m )
@@ -1293,7 +1293,7 @@ static void ept_set_ad_sync(struct domain *d, bool value)
{
unsigned int i;
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
struct p2m_domain *p2m;
@@ -1500,7 +1500,8 @@ void setup_ept_dump(void)
void p2m_init_altp2m_ept(struct domain *d, unsigned int i)
{
- struct p2m_domain *p2m = array_access_nospec(d->arch.altp2m_p2m, i);
+ struct p2m_domain *p2m =
+ d->arch.altp2m_p2m[array_index_nospec(i, d->nr_altp2m)];
struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
struct ept_data *ept;
@@ -1519,7 +1520,7 @@ unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
altp2m_list_lock(d);
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
continue;
@@ -105,7 +105,7 @@ void p2m_change_entry_type_global(struct domain *d,
{
unsigned int i;
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
{
@@ -140,7 +140,7 @@ void p2m_memory_type_changed(struct domain *d)
{
unsigned int i;
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
{
@@ -913,7 +913,7 @@ void p2m_change_type_range(struct domain *d,
{
unsigned int i;
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
{
@@ -986,7 +986,7 @@ int p2m_finish_type_change(struct domain *d,
{
unsigned int i;
- for ( i = 0; i < MAX_ALTP2M; i++ )
+ for ( i = 0; i < d->nr_altp2m; i++ )
{
if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
{
@@ -610,6 +610,7 @@ struct domain *domain_create(domid_t domid,
if ( config )
{
d->options = config->flags;
+ d->nr_altp2m = config->altp2m.nr;
d->vmtrace_size = config->vmtrace_size;
}
@@ -103,7 +103,10 @@ struct xen_domctl_createdomain {
/* Altp2m mode signaling uses bits [0, 1]. */
#define XEN_DOMCTL_ALTP2M_mode_mask (0x3U)
#define XEN_DOMCTL_ALTP2M_mode(m) ((m) & XEN_DOMCTL_ALTP2M_mode_mask)
- uint32_t opts;
+ uint16_t opts;
+
+        /* Number of altp2m views to allocate (0 selects the default when
+           altp2m is enabled; must be 0 when altp2m is disabled). */
} altp2m;
/* Per-vCPU buffer size in bytes. 0 to disable. */
@@ -610,6 +610,8 @@ struct domain
unsigned int guest_request_sync : 1;
} monitor;
+    unsigned int nr_altp2m;  /* Number of altp2m tables; 0 if altp2m is off */
+
unsigned int vmtrace_size; /* Buffer size in bytes, or 0 to disable. */
#ifdef CONFIG_ARGO