| Message ID | 20241114160131.48616-13-richard.henderson@linaro.org (mailing list archive) |
|---|---|
State | New |
Headers | show |
Series | accel/tcg: Convert victim tlb to IntervalTree | expand |
On 11/14/24 08:00, Richard Henderson wrote: > Flush a masked range of pages from the IntervalTree cache. > When the mask is not used there is a redundant comparison, > but that is better than duplicating code at this point. > > Signed-off-by: Richard Henderson <richard.henderson@linaro.org> > --- > accel/tcg/cputlb.c | 25 +++++++++++++++++++++++++ > 1 file changed, 25 insertions(+) > > diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c > index d532d69083..e2c855f147 100644 > --- a/accel/tcg/cputlb.c > +++ b/accel/tcg/cputlb.c > @@ -311,6 +311,13 @@ static CPUTLBEntryTree *tlbtree_lookup_range(CPUTLBDesc *desc, vaddr s, vaddr l) > return i ? container_of(i, CPUTLBEntryTree, itree) : NULL; > } > > +static CPUTLBEntryTree *tlbtree_lookup_range_next(CPUTLBEntryTree *prev, > + vaddr s, vaddr l) > +{ > + IntervalTreeNode *i = interval_tree_iter_next(&prev->itree, s, l); > + return i ? container_of(i, CPUTLBEntryTree, itree) : NULL; > +} > + > static CPUTLBEntryTree *tlbtree_lookup_addr(CPUTLBDesc *desc, vaddr addr) > { > return tlbtree_lookup_range(desc, addr, addr); > @@ -739,6 +746,8 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx, > CPUTLBDesc *d = &cpu->neg.tlb.d[midx]; > CPUTLBDescFast *f = &cpu->neg.tlb.f[midx]; > vaddr mask = MAKE_64BIT_MASK(0, bits); > + CPUTLBEntryTree *node; > + vaddr addr_mask, last_mask, last_imask; > > /* > * Check if we need to flush due to large pages. 
> @@ -759,6 +768,22 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx, > vaddr page = addr + i; > tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask); > } > + > + addr_mask = addr & mask; > + last_mask = addr_mask + len - 1; > + last_imask = last_mask | ~mask; > + node = tlbtree_lookup_range(d, addr_mask, last_imask); > + while (node) { > + CPUTLBEntryTree *next = > + tlbtree_lookup_range_next(node, addr_mask, last_imask); > + vaddr page_mask = node->itree.start & mask; > + > + if (page_mask >= addr_mask && page_mask < last_mask) { > + interval_tree_remove(&node->itree, &d->iroot); > + g_free(node); > + } > + node = next; > + } > } > > typedef struct { Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index d532d69083..e2c855f147 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -311,6 +311,13 @@ static CPUTLBEntryTree *tlbtree_lookup_range(CPUTLBDesc *desc, vaddr s, vaddr l) return i ? container_of(i, CPUTLBEntryTree, itree) : NULL; } +static CPUTLBEntryTree *tlbtree_lookup_range_next(CPUTLBEntryTree *prev, + vaddr s, vaddr l) +{ + IntervalTreeNode *i = interval_tree_iter_next(&prev->itree, s, l); + return i ? container_of(i, CPUTLBEntryTree, itree) : NULL; +} + static CPUTLBEntryTree *tlbtree_lookup_addr(CPUTLBDesc *desc, vaddr addr) { return tlbtree_lookup_range(desc, addr, addr); @@ -739,6 +746,8 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx, CPUTLBDesc *d = &cpu->neg.tlb.d[midx]; CPUTLBDescFast *f = &cpu->neg.tlb.f[midx]; vaddr mask = MAKE_64BIT_MASK(0, bits); + CPUTLBEntryTree *node; + vaddr addr_mask, last_mask, last_imask; /* * Check if we need to flush due to large pages. @@ -759,6 +768,22 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx, vaddr page = addr + i; tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask); } + + addr_mask = addr & mask; + last_mask = addr_mask + len - 1; + last_imask = last_mask | ~mask; + node = tlbtree_lookup_range(d, addr_mask, last_imask); + while (node) { + CPUTLBEntryTree *next = + tlbtree_lookup_range_next(node, addr_mask, last_imask); + vaddr page_mask = node->itree.start & mask; + + if (page_mask >= addr_mask && page_mask < last_mask) { + interval_tree_remove(&node->itree, &d->iroot); + g_free(node); + } + node = next; + } } typedef struct {
Flush a masked range of pages from the IntervalTree cache. When the mask is not used there is a redundant comparison, but that is better than duplicating code at this point. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> --- accel/tcg/cputlb.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+)