Message ID | 20220706210454.30096-15-dpsmith@apertussolutions.com (mailing list archive)
---|---
State | New, archived
Series | Hyperlaunch
On 06.07.2022 23:04, Daniel P. Smith wrote:
> Here, the vcpu initialization code for dom0 creation is generalized for use for
> other domains.

Yet with "other domains" still only ones created during boot, aiui.
Imo such details want spelling out. The title also is too generic /
imprecise.

> --- a/xen/arch/x86/domain_builder.c
> +++ b/xen/arch/x86/domain_builder.c
> @@ -28,6 +28,18 @@ static unsigned int __init dom_max_vcpus(struct boot_domain *bd)
>          return bd->ncpus;
>  }
>  
> +struct vcpu *__init alloc_dom_vcpu0(struct boot_domain *bd)

domain_alloc_vcpu0()?

> +{
> +    if ( bd->functions & BUILD_FUNCTION_INITIAL_DOM )
> +        return alloc_dom0_vcpu0(bd->domain);
> +
> +    bd->domain->node_affinity = node_online_map;
> +    bd->domain->auto_node_affinity = true;

I can spot neither consumers of nor code being replaced by this.

> +    return vcpu_create(bd->domain, 0);
> +}
> +
> +
>  void __init arch_create_dom(

No double blank lines please.

> --- a/xen/common/sched/core.c
> +++ b/xen/common/sched/core.c
> @@ -14,6 +14,8 @@
>   */
>  
>  #ifndef COMPAT
> +#include <xen/bootdomain.h>
> +#include <xen/domain_builder.h>
>  #include <xen/init.h>
>  #include <xen/lib.h>
>  #include <xen/param.h>
> @@ -3399,13 +3401,13 @@ void wait(void)
>  }
>  
>  #ifdef CONFIG_X86
> -void __init sched_setup_dom0_vcpus(struct domain *d)
> +void __init sched_setup_dom_vcpus(struct boot_domain *bd)

Perhaps simply drop the original _dom0 infix?

>  {
>      unsigned int i;
>      struct sched_unit *unit;
>  
> -    for ( i = 1; i < d->max_vcpus; i++ )
> -        vcpu_create(d, i);
> +    for ( i = 1; i < bd->domain->max_vcpus; i++ )
> +        vcpu_create(bd->domain, i);

Seeing the further uses below, perhaps better introduce a local
variable "d", like you do elsewhere?

> @@ -3413,19 +3415,24 @@ void __init sched_setup_dom0_vcpus(struct domain *d)
>       * onlining them. This avoids pinning a vcpu to a not yet online cpu here.
>       */
>      if ( pv_shim )
> -        sched_set_affinity(d->vcpu[0]->sched_unit,
> +        sched_set_affinity(bd->domain->vcpu[0]->sched_unit,
>                             cpumask_of(0), cpumask_of(0));
>      else
>      {
> -        for_each_sched_unit ( d, unit )
> +        for_each_sched_unit ( bd->domain, unit )
>          {
> -            if ( !opt_dom0_vcpus_pin && !dom0_affinity_relaxed )
> -                sched_set_affinity(unit, &dom0_cpus, NULL);
> -            sched_set_affinity(unit, NULL, &dom0_cpus);
> +            if ( builder_is_initdom(bd) )
> +            {
> +                if ( !opt_dom0_vcpus_pin && !dom0_affinity_relaxed )
> +                    sched_set_affinity(unit, &dom0_cpus, NULL);
> +                sched_set_affinity(unit, NULL, &dom0_cpus);
> +            }
> +            else
> +                sched_set_affinity(unit, NULL, cpupool_valid_cpus(cpupool0));

Hard-coded cpupool0?

> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -2,6 +2,7 @@
>  #ifndef __SCHED_H__
>  #define __SCHED_H__
>  
> +#include <xen/bootdomain.h>

Please don't - this header has already too many dependencies. All you
really need ...

> @@ -1003,7 +1004,7 @@ static inline bool sched_has_urgent_vcpu(void)
>  }
>  
>  void vcpu_set_periodic_timer(struct vcpu *v, s_time_t value);
> -void sched_setup_dom0_vcpus(struct domain *d);
> +void sched_setup_dom_vcpus(struct boot_domain *d);

... for this is a forward declaration of struct boot_domain.

Jan
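[To make the naming and stray-affinity remarks concrete, one possible shape
for the helper is sketched below. It is purely illustrative: it uses Jan's
suggested domain_alloc_vcpu0() name, and it assumes the
node_affinity/auto_node_affinity assignments really have no consumer and can
simply be dropped, which only the patch author can confirm.]

/*
 * Sketch only, not part of the posted patch.  Dropping the two
 * affinity assignments is an assumption pending clarification of
 * their purpose.
 */
struct vcpu *__init domain_alloc_vcpu0(struct boot_domain *bd)
{
    if ( bd->functions & BUILD_FUNCTION_INITIAL_DOM )
        return alloc_dom0_vcpu0(bd->domain);

    return vcpu_create(bd->domain, 0);
}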
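[Likewise, the local-variable and cpupool remarks taken together might look
roughly like the sketch below. builder_is_initdom() and cpupool_valid_cpus()
come from the series itself; routing the affinity through the domain's own
d->cpupool instead of the hard-coded cpupool0 is an assumption about the
intended fix, not code from the patch.]

void __init sched_setup_dom_vcpus(struct boot_domain *bd)
{
    struct domain *d = bd->domain;   /* local "d", as Jan suggests */
    unsigned int i;
    struct sched_unit *unit;

    for ( i = 1; i < d->max_vcpus; i++ )
        vcpu_create(d, i);

    /* PV-shim: vcpus are pinned 1:1, see the comment in the patch. */
    if ( pv_shim )
        sched_set_affinity(d->vcpu[0]->sched_unit,
                           cpumask_of(0), cpumask_of(0));
    else
    {
        for_each_sched_unit ( d, unit )
        {
            if ( builder_is_initdom(bd) )
            {
                if ( !opt_dom0_vcpus_pin && !dom0_affinity_relaxed )
                    sched_set_affinity(unit, &dom0_cpus, NULL);
                sched_set_affinity(unit, NULL, &dom0_cpus);
            }
            else
                /*
                 * Use the pool the domain was placed in rather than
                 * hard-coding cpupool0 (assumption about the fix).
                 */
                sched_set_affinity(unit, NULL,
                                   cpupool_valid_cpus(d->cpupool));
        }
    }

    domain_update_node_affinity(d);
}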
diff --git a/xen/arch/x86/domain_builder.c b/xen/arch/x86/domain_builder.c
index 308e1a1c67..1a4a6b1ca7 100644
--- a/xen/arch/x86/domain_builder.c
+++ b/xen/arch/x86/domain_builder.c
@@ -28,6 +28,18 @@ static unsigned int __init dom_max_vcpus(struct boot_domain *bd)
     return bd->ncpus;
 }
 
+struct vcpu *__init alloc_dom_vcpu0(struct boot_domain *bd)
+{
+    if ( bd->functions & BUILD_FUNCTION_INITIAL_DOM )
+        return alloc_dom0_vcpu0(bd->domain);
+
+    bd->domain->node_affinity = node_online_map;
+    bd->domain->auto_node_affinity = true;
+
+    return vcpu_create(bd->domain, 0);
+}
+
+
 void __init arch_create_dom(
     const struct boot_info *bi, struct boot_domain *bd)
 {
@@ -83,7 +95,7 @@ void __init arch_create_dom(
 
     init_dom0_cpuid_policy(bd->domain);
 
-    if ( alloc_dom0_vcpu0(bd->domain) == NULL )
+    if ( alloc_dom_vcpu0(bd) == NULL )
         panic("Error creating d%uv0\n", bd->domid);
 
     /* Grab the DOM0 command line. */
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 2fee2ed926..ae3ffc614d 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -696,9 +696,10 @@ static int __init pvh_load_kernel(
     return 0;
 }
 
-static int __init pvh_setup_cpus(struct domain *d, paddr_t entry,
+static int __init pvh_setup_cpus(struct boot_domain *bd, paddr_t entry,
                                  paddr_t start_info)
 {
+    struct domain *d = bd->domain;
     struct vcpu *v = d->vcpu[0];
     int rc;
     /*
@@ -722,7 +723,7 @@ static int __init pvh_setup_cpus(struct domain *d, paddr_t entry,
         .cpu_regs.x86_32.tr_ar = 0x8b,
     };
 
-    sched_setup_dom0_vcpus(d);
+    sched_setup_dom_vcpus(bd);
 
     rc = arch_set_info_hvm_guest(v, &cpu_ctx);
     if ( rc )
@@ -1257,7 +1258,7 @@ int __init dom0_construct_pvh(struct boot_domain *bd)
         return rc;
     }
 
-    rc = pvh_setup_cpus(d, entry, start_info);
+    rc = pvh_setup_cpus(bd, entry, start_info);
     if ( rc )
     {
         printk("Failed to setup Dom0 CPUs: %d\n", rc);
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index f1ea0575f0..9d1c9fb8b0 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -729,7 +729,7 @@ int __init dom0_construct_pv(struct boot_domain *bd)
 
     printk("Dom%u has maximum %u VCPUs\n", d->domain_id, d->max_vcpus);
 
-    sched_setup_dom0_vcpus(d);
+    sched_setup_dom_vcpus(bd);
 
     d->arch.paging.mode = 0;
 
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 250207038e..029f5ea24e 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -14,6 +14,8 @@
  */
 
 #ifndef COMPAT
+#include <xen/bootdomain.h>
+#include <xen/domain_builder.h>
 #include <xen/init.h>
 #include <xen/lib.h>
 #include <xen/param.h>
@@ -3399,13 +3401,13 @@ void wait(void)
 }
 
 #ifdef CONFIG_X86
-void __init sched_setup_dom0_vcpus(struct domain *d)
+void __init sched_setup_dom_vcpus(struct boot_domain *bd)
 {
     unsigned int i;
     struct sched_unit *unit;
 
-    for ( i = 1; i < d->max_vcpus; i++ )
-        vcpu_create(d, i);
+    for ( i = 1; i < bd->domain->max_vcpus; i++ )
+        vcpu_create(bd->domain, i);
 
     /*
      * PV-shim: vcpus are pinned 1:1.
@@ -3413,19 +3415,24 @@ void __init sched_setup_dom0_vcpus(struct domain *d)
      * onlining them. This avoids pinning a vcpu to a not yet online cpu here.
      */
     if ( pv_shim )
-        sched_set_affinity(d->vcpu[0]->sched_unit,
+        sched_set_affinity(bd->domain->vcpu[0]->sched_unit,
                            cpumask_of(0), cpumask_of(0));
     else
     {
-        for_each_sched_unit ( d, unit )
+        for_each_sched_unit ( bd->domain, unit )
         {
-            if ( !opt_dom0_vcpus_pin && !dom0_affinity_relaxed )
-                sched_set_affinity(unit, &dom0_cpus, NULL);
-            sched_set_affinity(unit, NULL, &dom0_cpus);
+            if ( builder_is_initdom(bd) )
+            {
+                if ( !opt_dom0_vcpus_pin && !dom0_affinity_relaxed )
+                    sched_set_affinity(unit, &dom0_cpus, NULL);
+                sched_set_affinity(unit, NULL, &dom0_cpus);
+            }
+            else
+                sched_set_affinity(unit, NULL, cpupool_valid_cpus(cpupool0));
         }
     }
 
-    domain_update_node_affinity(d);
+    domain_update_node_affinity(bd->domain);
 }
 #endif
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index b9515eb497..6ab7d69cbd 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -2,6 +2,7 @@
 #ifndef __SCHED_H__
 #define __SCHED_H__
 
+#include <xen/bootdomain.h>
 #include <xen/types.h>
 #include <xen/spinlock.h>
 #include <xen/rwlock.h>
@@ -1003,7 +1004,7 @@ static inline bool sched_has_urgent_vcpu(void)
 }
 
 void vcpu_set_periodic_timer(struct vcpu *v, s_time_t value);
-void sched_setup_dom0_vcpus(struct domain *d);
+void sched_setup_dom_vcpus(struct boot_domain *d);
 int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason);
 int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity);
 void restore_vcpu_affinity(struct domain *d);
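[For completeness, the include-free alternative Jan points at for sched.h
amounts to no more than the sketch below. Since the header only passes a
pointer, an incomplete type suffices and no #include is needed.]

/* In xen/include/xen/sched.h -- no #include <xen/bootdomain.h> needed:
 * a forward declaration is enough for a prototype taking a pointer. */
struct boot_domain;

void sched_setup_dom_vcpus(struct boot_domain *bd);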