Message ID | 20230904080451.424731-7-eric.auger@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Series | VIRTIO-IOMMU/VFIO: Don't assume 64b IOVA space |
On 4/9/23 10:03, Eric Auger wrote:
> This helper reverses an array of regions, turning original
> regions into holes and original holes into actual regions,
> covering the whole UINT64_MAX span.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
> ---
>  include/qemu/range.h |  3 +++
>  util/range.c         | 35 +++++++++++++++++++++++++++++++++++
>  2 files changed, 38 insertions(+)
>
> diff --git a/include/qemu/range.h b/include/qemu/range.h
> index 7e2b1cc447..fc1d3dabe6 100644
> --- a/include/qemu/range.h
> +++ b/include/qemu/range.h
> @@ -219,4 +219,7 @@ static inline int ranges_overlap(uint64_t first1, uint64_t len1,
>
>  GList *range_list_insert(GList *list, Range *data);
>
> +void range_inverse_array(uint32_t nr_ranges, Range *ranges,
> +                         uint32_t *nr_inv_ranges, Range **inv_ranges);
> +
>  #endif
> diff --git a/util/range.c b/util/range.c
> index 098d9d2dc0..11c4ff0b78 100644
> --- a/util/range.c
> +++ b/util/range.c
> @@ -70,3 +70,38 @@ GList *range_list_insert(GList *list, Range *data)
>
>      return list;
>  }
> +
> +/*
> + * Inverse an array of sorted ranges over the UINT64_MAX span, ie.
> + * original ranges becomes holes in the newly allocated inv_ranges
> + */

Most of the functions are described in the header; could you move this
description with the declaration?

> +void range_inverse_array(uint32_t nr_ranges, Range *ranges,
> +                         uint32_t *nr_inv_ranges, Range **inv_ranges)
> +{
> +    Range *resv;
> +    int i = 0, j = 0;
> +
> +    resv = g_malloc0_n(nr_ranges + 1, sizeof(Range));
> +
> +    /* first range lob is greater than 0, insert a first range */
> +    if (range_lob(&ranges[0]) > 0) {
> +        range_set_bounds(&resv[i++], 0,
> +                         range_lob(&ranges[0]) - 1);
> +    }
> +
> +    /* insert a range inbetween each original range */
> +    for (; j < nr_ranges - 1; j++) {
> +        if (range_compare(&ranges[j], &ranges[j + 1])) {
> +            range_set_bounds(&resv[i++], range_upb(&ranges[j]) + 1,
> +                             range_lob(&ranges[j + 1]) - 1);
> +        }
> +    }
> +    /* last range upb is less than UINT64_MAX, insert a last range */

In order to use this new function with variable range sizes,
can we pass UINT64_MAX as an 'inv_range_upb' argument?

> +    if (range_upb(&ranges[nr_ranges - 1]) < UINT64_MAX) {
> +        range_set_bounds(&resv[i++],
> +                         range_upb(&ranges[nr_ranges - 1]) + 1, UINT64_MAX);
> +    }
> +    *nr_inv_ranges = i;
> +    resv = g_realloc(resv, i * sizeof(Range));
> +    *inv_ranges = resv;
> +}
Hi Philippe,

On 9/4/23 10:18, Philippe Mathieu-Daudé wrote:
> On 4/9/23 10:03, Eric Auger wrote:
>> This helper reverses an array of regions, turning original
>> regions into holes and original holes into actual regions,
>> covering the whole UINT64_MAX span.
>>
>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>> ---
>>  include/qemu/range.h |  3 +++
>>  util/range.c         | 35 +++++++++++++++++++++++++++++++++++
>>  2 files changed, 38 insertions(+)
>>
>> diff --git a/include/qemu/range.h b/include/qemu/range.h
>> index 7e2b1cc447..fc1d3dabe6 100644
>> --- a/include/qemu/range.h
>> +++ b/include/qemu/range.h
>> @@ -219,4 +219,7 @@ static inline int ranges_overlap(uint64_t first1, uint64_t len1,
>>
>>  GList *range_list_insert(GList *list, Range *data);
>>
>> +void range_inverse_array(uint32_t nr_ranges, Range *ranges,
>> +                         uint32_t *nr_inv_ranges, Range **inv_ranges);
>> +
>>  #endif
>> diff --git a/util/range.c b/util/range.c
>> index 098d9d2dc0..11c4ff0b78 100644
>> --- a/util/range.c
>> +++ b/util/range.c
>> @@ -70,3 +70,38 @@ GList *range_list_insert(GList *list, Range *data)
>>
>>      return list;
>>  }
>> +
>> +/*
>> + * Inverse an array of sorted ranges over the UINT64_MAX span, ie.
>> + * original ranges becomes holes in the newly allocated inv_ranges
>> + */
>
> Most of the functions are described in the header; could you move this
> description with the declaration?

This is the case for all the static inline primitives but not for
range_list_insert(), hence that choice. Now I don't have a strong
opinion.

>
>> +void range_inverse_array(uint32_t nr_ranges, Range *ranges,
>> +                         uint32_t *nr_inv_ranges, Range **inv_ranges)
>> +{
>> +    Range *resv;
>> +    int i = 0, j = 0;
>> +
>> +    resv = g_malloc0_n(nr_ranges + 1, sizeof(Range));
>> +
>> +    /* first range lob is greater than 0, insert a first range */
>> +    if (range_lob(&ranges[0]) > 0) {
>> +        range_set_bounds(&resv[i++], 0,
>> +                         range_lob(&ranges[0]) - 1);
>> +    }
>> +
>> +    /* insert a range inbetween each original range */
>> +    for (; j < nr_ranges - 1; j++) {
>> +        if (range_compare(&ranges[j], &ranges[j + 1])) {
>> +            range_set_bounds(&resv[i++], range_upb(&ranges[j]) + 1,
>> +                             range_lob(&ranges[j + 1]) - 1);
>> +        }
>> +    }
>> +    /* last range upb is less than UINT64_MAX, insert a last range */
>
> In order to use this new function with variable range sizes,
> can we pass UINT64_MAX as an 'inv_range_upb' argument?

Indeed, I hesitated to add that extra convenience by letting the caller
pass the upper bound and allow values less than UINT64_MAX, but I was
afraid this would complicate the implementation, hence the current
choice. I will have a look and see.

Thanks

Eric

>
>> +    if (range_upb(&ranges[nr_ranges - 1]) < UINT64_MAX) {
>> +        range_set_bounds(&resv[i++],
>> +                         range_upb(&ranges[nr_ranges - 1]) + 1, UINT64_MAX);
>> +    }
>> +    *nr_inv_ranges = i;
>> +    resv = g_realloc(resv, i * sizeof(Range));
>> +    *inv_ranges = resv;
>> +}
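For what it is worth, the variant discussed above might look like the
sketch below. It is purely illustrative, not part of the posted series:
only the parameter name 'inv_range_upb' comes from the review comment,
and the function name and bounds checks are hypothetical.

```c
/*
 * Hypothetical sketch (not the posted implementation): invert "ranges"
 * over [0, inv_range_upb] instead of a fixed UINT64_MAX span. Assumes
 * nr_ranges >= 1 and a sorted, non-overlapping input array fully
 * contained in [0, inv_range_upb].
 */
void range_inverse_array_upb(uint32_t nr_ranges, Range *ranges,
                             uint32_t *nr_inv_ranges, Range **inv_ranges,
                             uint64_t inv_range_upb)
{
    Range *resv = g_malloc0_n(nr_ranges + 1, sizeof(Range));
    uint32_t i = 0, j;

    /* hole before the first range, if any */
    if (range_lob(&ranges[0]) > 0) {
        range_set_bounds(&resv[i++], 0, range_lob(&ranges[0]) - 1);
    }

    /* one hole between each pair of consecutive, non-adjacent ranges */
    for (j = 0; j + 1 < nr_ranges; j++) {
        if (range_upb(&ranges[j]) + 1 < range_lob(&ranges[j + 1])) {
            range_set_bounds(&resv[i++], range_upb(&ranges[j]) + 1,
                             range_lob(&ranges[j + 1]) - 1);
        }
    }

    /* hole after the last range, bounded by the caller-provided limit */
    if (range_upb(&ranges[nr_ranges - 1]) < inv_range_upb) {
        range_set_bounds(&resv[i++],
                         range_upb(&ranges[nr_ranges - 1]) + 1,
                         inv_range_upb);
    }

    *nr_inv_ranges = i;
    *inv_ranges = g_realloc(resv, i * sizeof(Range));
}
```

The only real change against the posted code is that the two UINT64_MAX
literals become the caller's upper bound, which suggests the extra
parameter need not complicate the implementation much.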
diff --git a/include/qemu/range.h b/include/qemu/range.h
index 7e2b1cc447..fc1d3dabe6 100644
--- a/include/qemu/range.h
+++ b/include/qemu/range.h
@@ -219,4 +219,7 @@ static inline int ranges_overlap(uint64_t first1, uint64_t len1,
 
 GList *range_list_insert(GList *list, Range *data);
 
+void range_inverse_array(uint32_t nr_ranges, Range *ranges,
+                         uint32_t *nr_inv_ranges, Range **inv_ranges);
+
 #endif
diff --git a/util/range.c b/util/range.c
index 098d9d2dc0..11c4ff0b78 100644
--- a/util/range.c
+++ b/util/range.c
@@ -70,3 +70,38 @@ GList *range_list_insert(GList *list, Range *data)
 
     return list;
 }
+
+/*
+ * Inverse an array of sorted ranges over the UINT64_MAX span, ie.
+ * original ranges becomes holes in the newly allocated inv_ranges
+ */
+void range_inverse_array(uint32_t nr_ranges, Range *ranges,
+                         uint32_t *nr_inv_ranges, Range **inv_ranges)
+{
+    Range *resv;
+    int i = 0, j = 0;
+
+    resv = g_malloc0_n(nr_ranges + 1, sizeof(Range));
+
+    /* first range lob is greater than 0, insert a first range */
+    if (range_lob(&ranges[0]) > 0) {
+        range_set_bounds(&resv[i++], 0,
+                         range_lob(&ranges[0]) - 1);
+    }
+
+    /* insert a range inbetween each original range */
+    for (; j < nr_ranges - 1; j++) {
+        if (range_compare(&ranges[j], &ranges[j + 1])) {
+            range_set_bounds(&resv[i++], range_upb(&ranges[j]) + 1,
+                             range_lob(&ranges[j + 1]) - 1);
+        }
+    }
+    /* last range upb is less than UINT64_MAX, insert a last range */
+    if (range_upb(&ranges[nr_ranges - 1]) < UINT64_MAX) {
+        range_set_bounds(&resv[i++],
+                         range_upb(&ranges[nr_ranges - 1]) + 1, UINT64_MAX);
+    }
+    *nr_inv_ranges = i;
+    resv = g_realloc(resv, i * sizeof(Range));
+    *inv_ranges = resv;
+}
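For context, a minimal caller of the new helper might look like the
sketch below. The two reserved regions and their bounds are invented for
illustration; only range_inverse_array() and the Range accessors come
from the patch.

```c
#include "qemu/osdep.h"
#include "qemu/range.h"

/*
 * Illustrative caller, not taken from the series: invert two made-up
 * reserved regions and print the resulting usable ranges.
 */
static void example_invert(void)
{
    Range reserved[2];
    Range *usable;
    uint32_t nr_usable, i;

    /* sorted, non-overlapping input, as the helper expects */
    range_set_bounds(&reserved[0], 0x1000, 0x1fff);
    range_set_bounds(&reserved[1], 0xfee00000, 0xfeefffff);

    range_inverse_array(2, reserved, &nr_usable, &usable);

    for (i = 0; i < nr_usable; i++) {
        printf("usable: [0x%" PRIx64 ", 0x%" PRIx64 "]\n",
               range_lob(&usable[i]), range_upb(&usable[i]));
    }
    g_free(usable); /* the helper allocates the array; the caller frees it */
}
```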
This helper reverses an array of regions, turning original
regions into holes and original holes into actual regions,
covering the whole UINT64_MAX span.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
 include/qemu/range.h |  3 +++
 util/range.c         | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 38 insertions(+)
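As a concrete example of the transformation, with values invented for
illustration: inverting the sorted input ranges [0x1000, 0x1fff] and
[0x5000, 0x5fff] yields the three inverse ranges [0x0, 0xfff],
[0x2000, 0x4fff] and [0x6000, UINT64_MAX].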