@@ -376,6 +376,47 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp)
address_space_unmap(CPU(cpu)->as, regs, len, len, true);
}
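+/*
+ * Look up the nested guest state registered for @lpid; returns NULL if no
+ * guest with that ID has been created.
+ */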
+static
+SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr,
+ target_ulong lpid)
+{
+ SpaprMachineStateNestedGuest *guest;
+
+ guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(lpid));
+ return guest;
+}
+
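+/*
+ * Check that @vcpuid names an enabled vCPU of @guest.  With @inoutbuf set,
+ * also require that the vCPU's run input/output buffers are registered.
+ */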
+static bool vcpu_check(SpaprMachineStateNestedGuest *guest,
+ target_ulong vcpuid,
+ bool inoutbuf)
+{
+ struct SpaprMachineStateNestedGuestVcpu *vcpu;
+
+ if (vcpuid >= NESTED_GUEST_VCPU_MAX) {
+ return false;
+ }
+
+    if (vcpuid >= guest->vcpus) {
+        return false;
+    }
+
+ vcpu = &guest->vcpu[vcpuid];
+ if (!vcpu->enabled) {
+ return false;
+ }
+
+ if (!inoutbuf) {
+ return true;
+ }
+
+ /* Check to see if the in/out buffers are registered */
+ if (vcpu->runbufin.addr && vcpu->runbufout.addr) {
+ return true;
+ }
+
+ return false;
+}
+
static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu,
SpaprMachineState *spapr,
target_ulong opcode,
@@ -448,6 +489,11 @@ static void
destroy_guest_helper(gpointer value)
{
struct SpaprMachineStateNestedGuest *guest = value;
+    int i;
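+
+    /* Free the per-vCPU timebase state set up in h_guest_create_vcpu() */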
+ for (i = 0; i < guest->vcpus; i++) {
+ cpu_ppc_tb_free(&guest->vcpu[i].env);
+ }
+ g_free(guest->vcpu);
g_free(guest);
}
@@ -518,6 +564,69 @@ static target_ulong h_guest_create(PowerPCCPU *cpu,
return H_SUCCESS;
}
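+/*
+ * H_GUEST_CREATE_VCPU: create the vCPU numbered @vcpuid for the nested
+ * guest identified by @lpid.  vCPU IDs must be allocated linearly,
+ * starting from 0; the backing array grows by one entry per call.
+ */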
+static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ CPUPPCState *env = &cpu->env, *l2env;
+ target_ulong flags = args[0];
+ target_ulong lpid = args[1];
+ target_ulong vcpuid = args[2];
+ SpaprMachineStateNestedGuest *guest;
+
+ if (flags) { /* don't handle any flags for now */
+ return H_UNSUPPORTED_FLAG;
+ }
+
+ guest = spapr_get_nested_guest(spapr, lpid);
+ if (!guest) {
+ return H_P2;
+ }
+
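+    /* vcpuid allocation is linear; IDs below the current count exist */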
+ if (vcpuid < guest->vcpus) {
+ return H_IN_USE;
+ }
+
+    if (vcpuid >= NESTED_GUEST_VCPU_MAX) {
+ return H_P3;
+ }
+
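+    /* Grow the vCPU array by one entry, or allocate it on first use */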
+ if (guest->vcpus) {
+ struct SpaprMachineStateNestedGuestVcpu *vcpus;
+ vcpus = g_try_renew(struct SpaprMachineStateNestedGuestVcpu,
+ guest->vcpu,
+ guest->vcpus + 1);
+ if (!vcpus) {
+ return H_NO_MEM;
+ }
+ memset(&vcpus[guest->vcpus], 0,
+ sizeof(struct SpaprMachineStateNestedGuestVcpu));
+ guest->vcpu = vcpus;
+ l2env = &vcpus[guest->vcpus].env;
+ } else {
+ guest->vcpu = g_try_new0(struct SpaprMachineStateNestedGuestVcpu, 1);
+ if (guest->vcpu == NULL) {
+ return H_NO_MEM;
+ }
+ l2env = &guest->vcpu->env;
+ }
+    /* Zero the env so no stale L1 state leaks into the new L2 vCPU */
+ memset(l2env, 0, sizeof(CPUPPCState));
+ /* Copy L1 PVR to L2 */
+ l2env->spr[SPR_PVR] = env->spr[SPR_PVR];
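+    /* Give the L2 vCPU its own timebase; freed in destroy_guest_helper() */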
+ cpu_ppc_tb_init(l2env, SPAPR_TIMEBASE_FREQ);
+
+ guest->vcpus++;
+ assert(vcpuid < guest->vcpus); /* linear vcpuid allocation only */
+ guest->vcpu[vcpuid].enabled = true;
+
+ if (!vcpu_check(guest, vcpuid, false)) {
+ return H_PARAMETER;
+ }
+ return H_SUCCESS;
+}
+
void spapr_register_nested(void)
{
spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl);
@@ -531,6 +640,7 @@ void spapr_register_nested_phyp(void)
spapr_register_hypercall(H_GUEST_GET_CAPABILITIES, h_guest_get_capabilities);
spapr_register_hypercall(H_GUEST_SET_CAPABILITIES, h_guest_set_capabilities);
spapr_register_hypercall(H_GUEST_CREATE , h_guest_create);
+    spapr_register_hypercall(H_GUEST_CREATE_VCPU, h_guest_create_vcpu);
}
#else
@@ -371,6 +371,7 @@ struct SpaprMachineState {
#define H_UNSUPPORTED -67
#define H_OVERLAP -68
#define H_STATE -75
+#define H_IN_USE -77
#define H_INVALID_ELEMENT_ID -79
#define H_INVALID_ELEMENT_SIZE -80
#define H_INVALID_ELEMENT_VALUE -81
@@ -199,6 +199,7 @@
/* Nested PAPR API macros */
#define NESTED_GUEST_MAX 4096
+#define NESTED_GUEST_VCPU_MAX 2048
typedef struct SpaprMachineStateNestedGuest {
unsigned long vcpus;