
[v5,04/11] xen/arm: introduce allocate_domheap_memory and guest_physmap_memory

Message ID 20231206090623.1932275-5-Penny.Zheng@arm.com (mailing list archive)
State New, archived
Series Follow-up static shared memory PART I

Commit Message

Penny Zheng Dec. 6, 2023, 9:06 a.m. UTC
We split the code of allocate_bank_memory into two parts:
allocate_domheap_memory and guest_physmap_memory.

The first allocates guest RAM from the heap; it can later be reused to
allocate static shared memory from the heap when no host address is
provided. The second builds up the guest P2M mapping.

We also define a set of MACRO helpers to access common fields in data
structures of "meminfo" type, e.g. "struct meminfo", and later the new
"struct shm_meminfo".
Such structures must have the following characteristics:
- an array of "struct membank"
- a member called "nr_banks" indicating the current array size
- a field indicating the maximum array size
When introducing a new data structure of this kind, a corresponding
callback of function type "retrieve_fn" shall be defined so that the
MACRO helpers can be used.
This commit defines the callback "retrieve_meminfo" for
"struct meminfo".

Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
v1 -> v2:
- define a set of MACRO helpers to access common fields in data structures of
"meminfo" type. "struct meminfo" is one of them, and the corresponding
callback "retrieve_meminfo" is also introduced here.
- fix typo: change 1ULL to 1UL
---
v2 -> v3:
- rebase and no changes
---
v3 -> v4:
- rebase and no changes
---
v4 -> v5:
- rebase and no changes
---
 xen/arch/arm/domain_build.c      | 119 +++++++++++++++++++++++++------
 xen/arch/arm/include/asm/setup.h |  33 +++++++++
 2 files changed, 129 insertions(+), 23 deletions(-)

Comments

Michal Orzel Dec. 7, 2023, 9:38 a.m. UTC | #1
Hi Penny,

On 06/12/2023 10:06, Penny Zheng wrote:
> 
> 
> We split the code of allocate_bank_memory into two parts,
> allocate_domheap_memory and guest_physmap_memory.
> 
> One is about allocating guest RAM from heap, which could be re-used later for
> allocating static shared memory from heap when host address is not provided.
> The other is building up guest P2M mapping.
> 
> We also define a set of MACRO helpers to access common fields in data
> structure of "meminfo" type, e.g. "struct meminfo" is one of them, and
> later new "struct shm_meminfo" is also one of them.
> This kind of structures must have the following characteristics:
> - an array of "struct membank"
> - a member called "nr_banks" indicating current array size
> - a field indicating the maximum array size
> When introducing a new data structure, according callbacks with function type
> "retrieve_fn" shall be defined for using MACRO helpers.
> This commit defines callback "retrieve_meminfo" for data structure
> "struct meminfo".
This patch introduces quite a bit of complexity with all these helpers, so adding a rationale is crucial.
AFAIU, all of this is because we don't want to reuse struct meminfo where NR_MEM_BANKS is defined as 256,
which is a lot more than we need for shmem. Am I right?

I would like others to share the opinion here as well.

If we decide to go with this approach, what about static inline helpers
instead of macros, for type checking and overall protection, e.g. when there
is no retriever function defined (at the moment that would most probably
result in a trap)?
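
Something along these lines (rough, untested sketch; the helper names are
placeholders, and it assumes retrievers[] is visible wherever these end up):

static inline unsigned int get_max_mem_banks(enum meminfo_type type)
{
    unsigned int max_banks = 0;

    /* Protect against a type without a registered retriever. */
    if ( type < MAX_MEMINFO_TYPE && retrievers[type] )
        retrievers[type](NULL, &max_banks, NULL, NULL);

    return max_banks;
}

static inline unsigned int *get_nr_banks(void *mem, enum meminfo_type type)
{
    unsigned int *nr_banks = NULL;

    if ( mem && type < MAX_MEMINFO_TYPE && retrievers[type] )
        retrievers[type](mem, NULL, NULL, &nr_banks);

    return nr_banks;
}

static inline struct membank *get_membank(void *mem, enum meminfo_type type,
                                          unsigned int index)
{
    struct membank *bank = NULL;

    if ( mem && type < MAX_MEMINFO_TYPE && retrievers[type] )
        retrievers[type](mem, NULL, &bank, NULL);

    return bank ? &bank[index] : NULL;
}

Callers would of course still need to handle a NULL/0 result.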

> 
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> ---
> v1 -> v2:
> -  define a set of MACRO helpers to access common fields in data structure of
> "meminfo" type. "struct meminfo" is one of them, and according callback
> "retrieve_meminfo" is also introduced here.
> - typo of changing 1ULL to 1UL
> ---
> v2 -> v3
> - rebase and no changes
> ---
> v3 -> v4:
> rebase and no change
> ---
> v4 -> v5:
> rebase and no change
> ---
>  xen/arch/arm/domain_build.c      | 119 +++++++++++++++++++++++++------
>  xen/arch/arm/include/asm/setup.h |  33 +++++++++
>  2 files changed, 129 insertions(+), 23 deletions(-)
> 
> diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> index 64ae944431..a8bc78baa5 100644
> --- a/xen/arch/arm/domain_build.c
> +++ b/xen/arch/arm/domain_build.c
> @@ -51,6 +51,28 @@ boolean_param("ext_regions", opt_ext_regions);
>  static u64 __initdata dom0_mem;
>  static bool __initdata dom0_mem_set;
> 
> +#ifdef CONFIG_DOM0LESS_BOOT
> +static void __init retrieve_meminfo(void *mem, unsigned int *max_mem_banks,
const void *mem

> +                                    struct membank **bank,
> +                                    unsigned int **nr_banks)
> +{
> +    struct meminfo *meminfo = (struct meminfo *)mem;
> +
> +    if ( max_mem_banks )
> +        *max_mem_banks = NR_MEM_BANKS;
> +
> +    if ( nr_banks )
> +        *nr_banks = &(meminfo->nr_banks);
> +
> +    if ( bank )
> +        *bank = meminfo->bank;
> +}
> +
> +retrieve_fn __initdata retrievers[MAX_MEMINFO_TYPE] = {
meminfo_retrievers

> +    [NORMAL_MEMINFO] = retrieve_meminfo,
> +};
> +#endif
> +
>  static int __init parse_dom0_mem(const char *s)
>  {
>      dom0_mem_set = true;
> @@ -415,32 +437,20 @@ static void __init allocate_memory_11(struct domain *d,
>  }
> 
>  #ifdef CONFIG_DOM0LESS_BOOT
> -bool __init allocate_bank_memory(struct domain *d, struct kernel_info *kinfo,
> -                                 gfn_t sgfn, paddr_t tot_size)
> +static bool __init allocate_domheap_memory(struct domain *d,
> +                                           paddr_t tot_size,
> +                                           void *mem, enum meminfo_type type)
>  {
> -    int res;
>      struct page_info *pg;
> -    struct membank *bank;
>      unsigned int max_order = ~0;
> -
> -    /*
> -     * allocate_bank_memory can be called with a tot_size of zero for
> -     * the second memory bank. It is not an error and we can safely
> -     * avoid creating a zero-size memory bank.
> -     */
> -    if ( tot_size == 0 )
> -        return true;
> -
> -    bank = &kinfo->mem.bank[kinfo->mem.nr_banks];
> -    bank->start = gfn_to_gaddr(sgfn);
> -    bank->size = tot_size;
> +    unsigned int *nr_banks = GET_NR_BANKS(mem, type);
what if mem is NULL? You would end up dereferencing a NULL pointer

> 
>      while ( tot_size > 0 )
>      {
>          unsigned int order = get_allocation_size(tot_size);
> +        struct membank *membank;
> 
>          order = min(max_order, order);
> -
>          pg = alloc_domheap_pages(d, order, 0);
>          if ( !pg )
>          {
> @@ -460,15 +470,78 @@ bool __init allocate_bank_memory(struct domain *d, struct kernel_info *kinfo,
>              continue;
>          }
> 
> -        res = guest_physmap_add_page(d, sgfn, page_to_mfn(pg), order);
> -        if ( res )
> -        {
> -            dprintk(XENLOG_ERR, "Failed map pages to DOMU: %d", res);
> +        if ( *nr_banks == MAX_MEM_BANKS(type) )
shouldn't you move this check to the beginning of the loop, before the allocation?
What if you enter this function with this condition already being true?

>              return false;
> -        }
> +
The name of the function does not reflect that, apart from allocating, you will
also register the allocated memory regions.

> +        membank = GET_MEMBANK(mem, type, *nr_banks);
> +        membank->start = mfn_to_maddr(page_to_mfn(pg));
page_to_maddr

> +        membank->size = 1ULL << (PAGE_SHIFT + order);
> +        (*nr_banks)++;
> +        tot_size -= membank->size;
> +    }
> +
> +    return true;
> +}
> +
> +static int __init guest_physmap_memory(struct domain *d,
> +                                       void *mem, enum meminfo_type type,
> +                                       gfn_t sgfn)
> +{
> +    unsigned int i;
> +    int res;
> +    unsigned int *nr_banks = GET_NR_BANKS(mem, type);
what if mem is NULL? You would end up dereferencing a NULL pointer

> +
> +    for ( i = 0; i < *nr_banks; i++ )
> +    {
> +        struct membank *membank = GET_MEMBANK(mem, type, i);
> +        paddr_t start = membank->start;
> +        paddr_t size = membank->size;
> +        unsigned int order = get_order_from_bytes(size);
> +
> +        /* Size must be power of two */
> +        BUG_ON(!size || (size & (size - 1)));
> +        res = guest_physmap_add_page(d, sgfn, maddr_to_mfn(start), order);
> +        if ( res )
> +            return res;
Here, you return a rc, but ...

> 
>          sgfn = gfn_add(sgfn, 1UL << order);
> -        tot_size -= (1ULL << (PAGE_SHIFT + order));
> +    }
> +
> +    return 0;
> +}
> +
> +bool __init allocate_bank_memory(struct domain *d,
> +                                 struct kernel_info *kinfo,
> +                                 gfn_t sgfn,
> +                                 paddr_t total_size)
> +{
> +    struct membank *bank;
> +    struct meminfo host = { 0 };
> +
> +    /*
> +     * allocate_bank_memory can be called with a total_size of zero for
> +     * the second memory bank. It is not an error and we can safely
> +     * avoid creating a zero-size memory bank.
> +     */
> +    if ( total_size == 0 )
> +        return true;
> +
> +    bank = &kinfo->mem.bank[kinfo->mem.nr_banks];
> +    bank->start = gfn_to_gaddr(sgfn);
> +    bank->size = total_size;
> +
> +    if ( !allocate_domheap_memory(d, total_size, (void *)&host, NORMAL_MEMINFO) )
> +    {
> +        printk(XENLOG_ERR "Failed to allocate (%"PRIpaddr"MB) pages to %pd\n",
> +               total_size >> 20, d);
> +        return false;
> +    }
> +
> +    if ( guest_physmap_memory(d, (void *)&host, NORMAL_MEMINFO, sgfn) )
... here you are not making use of it (to print the error code).

> +    {
> +        printk(XENLOG_ERR "Failed to map (%"PRIpaddr"MB) pages to %pd\n",
> +               total_size >> 20, d);
> +        return false;
>      }
> 
>      kinfo->mem.nr_banks++;
> diff --git a/xen/arch/arm/include/asm/setup.h b/xen/arch/arm/include/asm/setup.h
> index 3a2b35ea46..bc5f08be97 100644
> --- a/xen/arch/arm/include/asm/setup.h
> +++ b/xen/arch/arm/include/asm/setup.h
> @@ -57,6 +57,39 @@ struct meminfo {
>      struct membank bank[NR_MEM_BANKS];
>  };
> 
A comment with the meaning of each value would be handy
> +enum meminfo_type {
> +    NORMAL_MEMINFO,
> +    MAX_MEMINFO_TYPE,
do you have a need for checking the max? If so, I would try to name the values in a similar, more informative way,
e.g. MEMINFO_TYPE_NORMAL

> +};
> +
> +/*
> + * Define a set of MACRO helpers to access meminfo_type, like "struct meminfo"
> + * as type of NORMAL_MEMINFO, etc.
> + * This kind of structure must have a array of "struct membank",
> + * a member called nr_banks indicating the current array size, and also a field
> + * indicating the maximum array size.
field? Looking at struct meminfo and then shm_meminfo, you just have a macro indicating the max number.

> + */
> +typedef void (*retrieve_fn)(void *, unsigned int *, struct membank **,
I think MISRA requires that parameters be named.
The void * can be const, since no helper modifies the passed structure (here and in the macros when casting)

> +                            unsigned int **);
> +
> +#define MAX_MEM_BANKS(type) ({                              \
> +    unsigned int _max_mem_banks;                            \
> +    retrievers[type](NULL, &_max_mem_banks, NULL, NULL);    \
> +    _max_mem_banks;                                         \
> +})
> +
> +#define GET_MEMBANK(mem, type, index) ({                    \
> +    struct membank *_bank;                                  \
> +    retrievers[type]((void *)(mem), NULL, &_bank, NULL);    \
> +    &(_bank[index]);                                        \
> +})
> +
> +#define GET_NR_BANKS(mem, type) ({                          \
> +    unsigned int *_nr_banks;                                \
> +    retrievers[type]((void *)mem, NULL, NULL, &_nr_banks);  \
> +    _nr_banks;                                              \
> +})
> +
>  /*
>   * The domU flag is set for kernels and ramdisks of "xen,domain" nodes.
>   * The purpose of the domU flag is to avoid getting confused in
> --
> 2.25.1
> 

~Michal
Julien Grall Dec. 8, 2023, 3:09 p.m. UTC | #2
Hi,

On 07/12/2023 09:38, Michal Orzel wrote:
> Hi Penny,
> 
> On 06/12/2023 10:06, Penny Zheng wrote:
>>
>>
>> We split the code of allocate_bank_memory into two parts,
>> allocate_domheap_memory and guest_physmap_memory.
>>
>> One is about allocating guest RAM from heap, which could be re-used later for
>> allocating static shared memory from heap when host address is not provided.
>> The other is building up guest P2M mapping.
>>
>> We also define a set of MACRO helpers to access common fields in data
>> structure of "meminfo" type, e.g. "struct meminfo" is one of them, and
>> later new "struct shm_meminfo" is also one of them.
>> This kind of structures must have the following characteristics:
>> - an array of "struct membank"
>> - a member called "nr_banks" indicating current array size
>> - a field indicating the maximum array size
>> When introducing a new data structure, according callbacks with function type
>> "retrieve_fn" shall be defined for using MACRO helpers.
>> This commit defines callback "retrieve_meminfo" for data structure
>> "struct meminfo".
> This patch introduces quite a bit of complexity with all these helpers, so adding a rationale is crucial.
> AFAIU, all of this is because we don't want to reuse struct meminfo where NR_MEM_BANKS is defined as 256,
> which is a lot more than we need for shmem. Am I right?

+1.

> 
> I would like others to share the opinion here as well.

If possible, I'd like to reduce the footprint of the shared memory. But 
this should be balanced with the complexity of the code.

Briefly looking at the patch series, we have two structures:

struct meminfo {
     unsigned int nr_banks;
     struct membank bank[NR_MEM_BANKS];
};

struct shm_meminfo {
     unsigned int nr_banks;
     struct membank bank[NR_SHM_BANKS];
     paddr_t tot_size;
};

IIUC, the logic is mostly to be able to know the maximum size of the 
array and also the number of slots already used.

So we could have the following common structure:

struct membanks {
    unsigned int nr_banks;
    unsigned int max_banks;
    struct membank *banks;
}

Then the definition for the two other structures could be:

struct meminfo {
     struct membank holders[NR_MEM_BANKS];

     struct membanks banks;
}

struct shm_meminfo {
     struct membank holders[NR_SHM_BANKS];

     struct membanks banks;

     paddr_t tot_size;
}

And then 'banks.banks' would point to the 'holders'. And 'max_banks' 
would be set to the maximum.
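
For instance, at the point where the structure is set up (sketch):

    kinfo->mem.banks.nr_banks = 0;
    kinfo->mem.banks.max_banks = NR_MEM_BANKS;
    kinfo->mem.banks.banks = kinfo->mem.holders;

allocate_domheap_memory() and friends would then only take a
"struct membanks *".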

There might be other ways to make the structure nicer. Like (untested):

struct membanks {
     unsigned int nr_banks;
     unsigned int max_banks;
     struct membank[];
}

struct meminfo {
     struct membanks common;
     // We should ensure there are no padding
     struct membank[NR_MEM_BANKS];
}

We would then pass &meminfo.common to allocate_domheap_memory().

With that there should be no need for extra helpers.
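
To make that a bit more concrete (still untested; it relies on the compiler
accepting the embedded flexible array member and on there being no padding
between 'common' and the trailing array; append_bank() is only a made-up
example of a user):

struct membanks {
    unsigned int nr_banks;
    unsigned int max_banks;
    struct membank bank[];
};

struct meminfo {
    struct membanks common;
    struct membank bank[NR_MEM_BANKS];
};

/* Any helper only needs the common view, no retriever/macro involved. */
static bool __init append_bank(struct membanks *mem, paddr_t start,
                               paddr_t size)
{
    if ( mem->nr_banks >= mem->max_banks )
        return false;

    mem->bank[mem->nr_banks].start = start;
    mem->bank[mem->nr_banks].size = size;
    mem->nr_banks++;

    return true;
}

/* Caller side, e.g. in allocate_bank_memory(): */
struct meminfo host = { .common.max_banks = NR_MEM_BANKS };

if ( !append_bank(&host.common, gfn_to_gaddr(sgfn), total_size) )
    return false;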

What do you think?

Cheers,
Michal Orzel Dec. 11, 2023, 8:31 a.m. UTC | #3
Hi Julien,

On 08/12/2023 16:09, Julien Grall wrote:
> 
> 
> Hi,
> 
> On 07/12/2023 09:38, Michal Orzel wrote:
>> Hi Penny,
>>
>> On 06/12/2023 10:06, Penny Zheng wrote:
>>>
>>>
>>> We split the code of allocate_bank_memory into two parts,
>>> allocate_domheap_memory and guest_physmap_memory.
>>>
>>> One is about allocating guest RAM from heap, which could be re-used later for
>>> allocating static shared memory from heap when host address is not provided.
>>> The other is building up guest P2M mapping.
>>>
>>> We also define a set of MACRO helpers to access common fields in data
>>> structure of "meminfo" type, e.g. "struct meminfo" is one of them, and
>>> later new "struct shm_meminfo" is also one of them.
>>> This kind of structures must have the following characteristics:
>>> - an array of "struct membank"
>>> - a member called "nr_banks" indicating current array size
>>> - a field indicating the maximum array size
>>> When introducing a new data structure, according callbacks with function type
>>> "retrieve_fn" shall be defined for using MACRO helpers.
>>> This commit defines callback "retrieve_meminfo" for data structure
>>> "struct meminfo".
>> This patch introduces quite a bit of complexity with all these helpers, so adding a rationale is crucial.
>> AFAIU, all of this is because we don't want to reuse struct meminfo where NR_MEM_BANKS is defined as 256,
>> which is a lot more than we need for shmem. Am I right?
> 
> +1.
> 
>>
>> I would like others to share the opinion here as well.
> 
> If possible, I'd like to reduce the footprint of the shared memory. But
> this should be balanced with the complexity of the code.
> 
> Briefly looking at the patch series, we have two structures:
> 
> struct meminfo {
>      unsigned int nr_banks;
>      struct membank bank[NR_MEM_BANKS];
> };
> 
> struct shm_meminfo {
>      unsigned int nr_banks;
>      struct membank bank[NR_SHM_BANKS];
>      paddr_t tot_size;
> };
> 
> IIUC, the logic is mostly to be able to know the maximum size of the
> array and also the number of slots already used.
> 
> So we could have the following common structure:
> 
> struct membanks {
>     unsigned int nr_banks;
>     unsigned int max_banks;
>     struct membank *banks;
> }
> 
> Then the definition for the two other structures could be:
> 
> struct meminfo {
>      struct membank holders[NR_MEM_BANKS];
> 
>      struct membanks banks;
> }
> 
> struct shm_meminfo {
>      struct membank holders[NR_SHM_BANKS];
> 
>      struct membanks banks;
> 
>      paddr_t tot_size;
> }
> 
> And then 'banks.banks' would point to the 'holders'. And 'max_banks'
> would be set to the maximum.
> 
> There might be other way to make the structure more nicer. Like (untested):
> 
> struct membanks {
>      unsigned int nr_banks;
>      unsigned int max_banks;
>      struct membank[];
> }
> 
> struct meminfo {
>      struct membanks common;
>      // We should ensure there are no padding
>      struct membank[NR_MEM_BANKS];
> }
> 
> We would then pass &meminfo.common to allocate_domainheap_memory().
> 
> With that there should be no need for extra helpers.
> 
> What do you think?
I would go for the flexible array member solution, which looks much nicer and,
as far as I can tell, would solve the issue with the extra helpers.

The only problem is that there are quite a lot of places where we reference nr_banks of meminfo e.g. mem.nr_banks
which we would need to modify to mem.common.nr_banks. Maybe we could have *nr_banks in membanks that would point
to nr_banks in meminfo/shm_meminfo? At some point we still need to set common.max_banks to e.g. NR_MEM_BANKS.
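
i.e. something like this (sketch; a new 'common' member inside struct meminfo
is assumed here):

struct membanks {
    unsigned int *nr_banks;   /* points at the meminfo/shm_meminfo nr_banks */
    unsigned int max_banks;
    struct membank *bank;
};

/* Wired up once per instance, so existing mem.nr_banks users stay untouched: */
struct meminfo *mem = &kinfo->mem;

mem->common.nr_banks = &mem->nr_banks;
mem->common.max_banks = NR_MEM_BANKS;
mem->common.bank = mem->bank;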

~Michal
Julien Grall Dec. 11, 2023, 10:01 a.m. UTC | #4
Hi Michal,

On 11/12/2023 08:31, Michal Orzel wrote:
> On 08/12/2023 16:09, Julien Grall wrote:
>>
>>
>> Hi,
>>
>> On 07/12/2023 09:38, Michal Orzel wrote:
>>> Hi Penny,
>>>
>>> On 06/12/2023 10:06, Penny Zheng wrote:
>>>>
>>>>
>>>> We split the code of allocate_bank_memory into two parts,
>>>> allocate_domheap_memory and guest_physmap_memory.
>>>>
>>>> One is about allocating guest RAM from heap, which could be re-used later for
>>>> allocating static shared memory from heap when host address is not provided.
>>>> The other is building up guest P2M mapping.
>>>>
>>>> We also define a set of MACRO helpers to access common fields in data
>>>> structure of "meminfo" type, e.g. "struct meminfo" is one of them, and
>>>> later new "struct shm_meminfo" is also one of them.
>>>> This kind of structures must have the following characteristics:
>>>> - an array of "struct membank"
>>>> - a member called "nr_banks" indicating current array size
>>>> - a field indicating the maximum array size
>>>> When introducing a new data structure, according callbacks with function type
>>>> "retrieve_fn" shall be defined for using MACRO helpers.
>>>> This commit defines callback "retrieve_meminfo" for data structure
>>>> "struct meminfo".
>>> This patch introduces quite a bit of complexity with all these helpers, so adding a rationale is crucial.
>>> AFAIU, all of this is because we don't want to reuse struct meminfo where NR_MEM_BANKS is defined as 256,
>>> which is a lot more than we need for shmem. Am I right?
>>
>> +1.
>>
>>>
>>> I would like others to share the opinion here as well.
>>
>> If possible, I'd like to reduce the footprint of the shared memory. But
>> this should be balanced with the complexity of the code.
>>
>> Briefly looking at the patch series, we have two structures:
>>
>> struct meminfo {
>>       unsigned int nr_banks;
>>       struct membank bank[NR_MEM_BANKS];
>> };
>>
>> struct shm_meminfo {
>>       unsigned int nr_banks;
>>       struct membank bank[NR_SHM_BANKS];
>>       paddr_t tot_size;
>> };
>>
>> IIUC, the logic is mostly to be able to know the maximum size of the
>> array and also the number of slots already used.
>>
>> So we could have the following common structure:
>>
>> struct membanks {
>>      unsigned int nr_banks;
>>      unsigned int max_banks;
>>      struct membank *banks;
>> }
>>
>> Then the definition for the two other structures could be:
>>
>> struct meminfo {
>>       struct membank holders[NR_MEM_BANKS];
>>
>>       struct membanks banks;
>> }
>>
>> struct shm_meminfo {
>>       struct membank holders[NR_SHM_BANKS];
>>
>>       struct membanks banks;
>>
>>       paddr_t tot_size;
>> }
>>
>> And then 'banks.banks' would point to the 'holders'. And 'max_banks'
>> would be set to the maximum.
>>
>> There might be other way to make the structure more nicer. Like (untested):
>>
>> struct membanks {
>>       unsigned int nr_banks;
>>       unsigned int max_banks;
>>       struct membank[];
>> }
>>
>> struct meminfo {
>>       struct membanks common;
>>       // We should ensure there are no padding
>>       struct membank[NR_MEM_BANKS];
>> }
>>
>> We would then pass &meminfo.common to allocate_domainheap_memory().
>>
>> With that there should be no need for extra helpers.
>>
>> What do you think?
> I would go for flexible array member solution which looks much nicer and as far as I can tell
> would solve the issue with extra helpers.
> 
> The only problem is that there are quite a lot of places where we reference nr_banks of meminfo e.g. mem.nr_banks
> which we would need to modify to mem.common.nr_banks. 

Possibly yes. But it is not clear what's the problem here. Are you 
concerned about the churn? Or is it just a long name?

At least in the case of meminfo, we could possibly replace all the uses with a
pointer to the common "struct membanks". This would reduce the amount of churn
and the expression length.
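
E.g. (sketch, assuming a 'common' member as in the earlier proposal;
do_something() just stands for whatever a call site does today):

    const struct membanks *mem = &kinfo->mem.common;
    unsigned int i;

    for ( i = 0; i < mem->nr_banks; i++ )
        do_something(mem->bank[i].start, mem->bank[i].size);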

Cheers,
Michal Orzel Dec. 11, 2023, 10:12 a.m. UTC | #5
On 11/12/2023 11:01, Julien Grall wrote:
> 
> 
> Hi Michal,
> 
> On 11/12/2023 08:31, Michal Orzel wrote:
>> On 08/12/2023 16:09, Julien Grall wrote:
>>>
>>>
>>> Hi,
>>>
>>> On 07/12/2023 09:38, Michal Orzel wrote:
>>>> Hi Penny,
>>>>
>>>> On 06/12/2023 10:06, Penny Zheng wrote:
>>>>>
>>>>>
>>>>> We split the code of allocate_bank_memory into two parts,
>>>>> allocate_domheap_memory and guest_physmap_memory.
>>>>>
>>>>> One is about allocating guest RAM from heap, which could be re-used later for
>>>>> allocating static shared memory from heap when host address is not provided.
>>>>> The other is building up guest P2M mapping.
>>>>>
>>>>> We also define a set of MACRO helpers to access common fields in data
>>>>> structure of "meminfo" type, e.g. "struct meminfo" is one of them, and
>>>>> later new "struct shm_meminfo" is also one of them.
>>>>> This kind of structures must have the following characteristics:
>>>>> - an array of "struct membank"
>>>>> - a member called "nr_banks" indicating current array size
>>>>> - a field indicating the maximum array size
>>>>> When introducing a new data structure, according callbacks with function type
>>>>> "retrieve_fn" shall be defined for using MACRO helpers.
>>>>> This commit defines callback "retrieve_meminfo" for data structure
>>>>> "struct meminfo".
>>>> This patch introduces quite a bit of complexity with all these helpers, so adding a rationale is crucial.
>>>> AFAIU, all of this is because we don't want to reuse struct meminfo where NR_MEM_BANKS is defined as 256,
>>>> which is a lot more than we need for shmem. Am I right?
>>>
>>> +1.
>>>
>>>>
>>>> I would like others to share the opinion here as well.
>>>
>>> If possible, I'd like to reduce the footprint of the shared memory. But
>>> this should be balanced with the complexity of the code.
>>>
>>> Briefly looking at the patch series, we have two structures:
>>>
>>> struct meminfo {
>>>       unsigned int nr_banks;
>>>       struct membank bank[NR_MEM_BANKS];
>>> };
>>>
>>> struct shm_meminfo {
>>>       unsigned int nr_banks;
>>>       struct membank bank[NR_SHM_BANKS];
>>>       paddr_t tot_size;
>>> };
>>>
>>> IIUC, the logic is mostly to be able to know the maximum size of the
>>> array and also the number of slots already used.
>>>
>>> So we could have the following common structure:
>>>
>>> struct membanks {
>>>      unsigned int nr_banks;
>>>      unsigned int max_banks;
>>>      struct membank *banks;
>>> }
>>>
>>> Then the definition for the two other structures could be:
>>>
>>> struct meminfo {
>>>       struct membank holders[NR_MEM_BANKS];
>>>
>>>       struct membanks banks;
>>> }
>>>
>>> struct shm_meminfo {
>>>       struct membank holders[NR_SHM_BANKS];
>>>
>>>       struct membanks banks;
>>>
>>>       paddr_t tot_size;
>>> }
>>>
>>> And then 'banks.banks' would point to the 'holders'. And 'max_banks'
>>> would be set to the maximum.
>>>
>>> There might be other way to make the structure more nicer. Like (untested):
>>>
>>> struct membanks {
>>>       unsigned int nr_banks;
>>>       unsigned int max_banks;
>>>       struct membank[];
>>> }
>>>
>>> struct meminfo {
>>>       struct membanks common;
>>>       // We should ensure there are no padding
>>>       struct membank[NR_MEM_BANKS];
>>> }
>>>
>>> We would then pass &meminfo.common to allocate_domainheap_memory().
>>>
>>> With that there should be no need for extra helpers.
>>>
>>> What do you think?
>> I would go for flexible array member solution which looks much nicer and as far as I can tell
>> would solve the issue with extra helpers.
>>
>> The only problem is that there are quite a lot of places where we reference nr_banks of meminfo e.g. mem.nr_banks
>> which we would need to modify to mem.common.nr_banks.
> 
> Possibly yes. But it is not clear what's the problem here. Are you
> concerned about the churn? Or is it just a long name?
I am concerned about the churn. I did a grep and we have almost 100 instances of e.g. mem{.,->}nr_banks,
which in our solution would need to be replaced with mem{.,->}common.nr_banks. That said ...

> 
> At least in the case of meminfo. We could possibly replace all the use
> with a pointer to "struct membank common". This would reduce the amount
> of churn and the expression length.
this could help to limit the overall churn.

~Michal

Patch

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 64ae944431..a8bc78baa5 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -51,6 +51,28 @@  boolean_param("ext_regions", opt_ext_regions);
 static u64 __initdata dom0_mem;
 static bool __initdata dom0_mem_set;
 
+#ifdef CONFIG_DOM0LESS_BOOT
+static void __init retrieve_meminfo(void *mem, unsigned int *max_mem_banks,
+                                    struct membank **bank,
+                                    unsigned int **nr_banks)
+{
+    struct meminfo *meminfo = (struct meminfo *)mem;
+
+    if ( max_mem_banks )
+        *max_mem_banks = NR_MEM_BANKS;
+
+    if ( nr_banks )
+        *nr_banks = &(meminfo->nr_banks);
+
+    if ( bank )
+        *bank = meminfo->bank;
+}
+
+retrieve_fn __initdata retrievers[MAX_MEMINFO_TYPE] = {
+    [NORMAL_MEMINFO] = retrieve_meminfo,
+};
+#endif
+
 static int __init parse_dom0_mem(const char *s)
 {
     dom0_mem_set = true;
@@ -415,32 +437,20 @@  static void __init allocate_memory_11(struct domain *d,
 }
 
 #ifdef CONFIG_DOM0LESS_BOOT
-bool __init allocate_bank_memory(struct domain *d, struct kernel_info *kinfo,
-                                 gfn_t sgfn, paddr_t tot_size)
+static bool __init allocate_domheap_memory(struct domain *d,
+                                           paddr_t tot_size,
+                                           void *mem, enum meminfo_type type)
 {
-    int res;
     struct page_info *pg;
-    struct membank *bank;
     unsigned int max_order = ~0;
-
-    /*
-     * allocate_bank_memory can be called with a tot_size of zero for
-     * the second memory bank. It is not an error and we can safely
-     * avoid creating a zero-size memory bank.
-     */
-    if ( tot_size == 0 )
-        return true;
-
-    bank = &kinfo->mem.bank[kinfo->mem.nr_banks];
-    bank->start = gfn_to_gaddr(sgfn);
-    bank->size = tot_size;
+    unsigned int *nr_banks = GET_NR_BANKS(mem, type);
 
     while ( tot_size > 0 )
     {
         unsigned int order = get_allocation_size(tot_size);
+        struct membank *membank;
 
         order = min(max_order, order);
-
         pg = alloc_domheap_pages(d, order, 0);
         if ( !pg )
         {
@@ -460,15 +470,78 @@  bool __init allocate_bank_memory(struct domain *d, struct kernel_info *kinfo,
             continue;
         }
 
-        res = guest_physmap_add_page(d, sgfn, page_to_mfn(pg), order);
-        if ( res )
-        {
-            dprintk(XENLOG_ERR, "Failed map pages to DOMU: %d", res);
+        if ( *nr_banks == MAX_MEM_BANKS(type) )
             return false;
-        }
+
+        membank = GET_MEMBANK(mem, type, *nr_banks);
+        membank->start = mfn_to_maddr(page_to_mfn(pg));
+        membank->size = 1ULL << (PAGE_SHIFT + order);
+        (*nr_banks)++;
+        tot_size -= membank->size;
+    }
+
+    return true;
+}
+
+static int __init guest_physmap_memory(struct domain *d,
+                                       void *mem, enum meminfo_type type,
+                                       gfn_t sgfn)
+{
+    unsigned int i;
+    int res;
+    unsigned int *nr_banks = GET_NR_BANKS(mem, type);
+
+    for ( i = 0; i < *nr_banks; i++ )
+    {
+        struct membank *membank = GET_MEMBANK(mem, type, i);
+        paddr_t start = membank->start;
+        paddr_t size = membank->size;
+        unsigned int order = get_order_from_bytes(size);
+
+        /* Size must be power of two */
+        BUG_ON(!size || (size & (size - 1)));
+        res = guest_physmap_add_page(d, sgfn, maddr_to_mfn(start), order);
+        if ( res )
+            return res;
 
         sgfn = gfn_add(sgfn, 1UL << order);
-        tot_size -= (1ULL << (PAGE_SHIFT + order));
+    }
+
+    return 0;
+}
+
+bool __init allocate_bank_memory(struct domain *d,
+                                 struct kernel_info *kinfo,
+                                 gfn_t sgfn,
+                                 paddr_t total_size)
+{
+    struct membank *bank;
+    struct meminfo host = { 0 };
+
+    /*
+     * allocate_bank_memory can be called with a total_size of zero for
+     * the second memory bank. It is not an error and we can safely
+     * avoid creating a zero-size memory bank.
+     */
+    if ( total_size == 0 )
+        return true;
+
+    bank = &kinfo->mem.bank[kinfo->mem.nr_banks];
+    bank->start = gfn_to_gaddr(sgfn);
+    bank->size = total_size;
+
+    if ( !allocate_domheap_memory(d, total_size, (void *)&host, NORMAL_MEMINFO) )
+    {
+        printk(XENLOG_ERR "Failed to allocate (%"PRIpaddr"MB) pages to %pd\n",
+               total_size >> 20, d);
+        return false;
+    }
+
+    if ( guest_physmap_memory(d, (void *)&host, NORMAL_MEMINFO, sgfn) )
+    {
+        printk(XENLOG_ERR "Failed to map (%"PRIpaddr"MB) pages to %pd\n",
+               total_size >> 20, d);
+        return false;
     }
 
     kinfo->mem.nr_banks++;
diff --git a/xen/arch/arm/include/asm/setup.h b/xen/arch/arm/include/asm/setup.h
index 3a2b35ea46..bc5f08be97 100644
--- a/xen/arch/arm/include/asm/setup.h
+++ b/xen/arch/arm/include/asm/setup.h
@@ -57,6 +57,39 @@  struct meminfo {
     struct membank bank[NR_MEM_BANKS];
 };
 
+enum meminfo_type {
+    NORMAL_MEMINFO,
+    MAX_MEMINFO_TYPE,
+};
+
+/*
+ * Define a set of MACRO helpers to access meminfo_type, like "struct meminfo"
+ * as type of NORMAL_MEMINFO, etc.
+ * This kind of structure must have a array of "struct membank",
+ * a member called nr_banks indicating the current array size, and also a field
+ * indicating the maximum array size.
+ */
+typedef void (*retrieve_fn)(void *, unsigned int *, struct membank **,
+                            unsigned int **);
+
+#define MAX_MEM_BANKS(type) ({                              \
+    unsigned int _max_mem_banks;                            \
+    retrievers[type](NULL, &_max_mem_banks, NULL, NULL);    \
+    _max_mem_banks;                                         \
+})
+
+#define GET_MEMBANK(mem, type, index) ({                    \
+    struct membank *_bank;                                  \
+    retrievers[type]((void *)(mem), NULL, &_bank, NULL);    \
+    &(_bank[index]);                                        \
+})
+
+#define GET_NR_BANKS(mem, type) ({                          \
+    unsigned int *_nr_banks;                                \
+    retrievers[type]((void *)mem, NULL, NULL, &_nr_banks);  \
+    _nr_banks;                                              \
+})
+
 /*
  * The domU flag is set for kernels and ramdisks of "xen,domain" nodes.
  * The purpose of the domU flag is to avoid getting confused in