
[v2,11/54] accel/tcg: Remove IntervalTree entry in tlb_flush_page_locked

Message ID: 20241114160131.48616-12-richard.henderson@linaro.org
State: New
Series: accel/tcg: Convert victim tlb to IntervalTree

Commit Message

Richard Henderson Nov. 14, 2024, 4 p.m. UTC
Flush a page from the IntervalTree cache.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)
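
For context: tlbtree_lookup_addr() is introduced by an earlier patch in this
series. A minimal sketch of such a point lookup, built on QEMU's
interval_tree_iter_first() from include/qemu/interval-tree.h, might look like
this (the actual implementation in the series may differ):

    /* Sketch only: CPUTLBDesc and CPUTLBEntryTree are from accel/tcg/cputlb.c
     * in this series.  CPUTLBEntryTree embeds an IntervalTreeNode as 'itree',
     * keyed by the inclusive [start, last] range of the guest page it caches. */
    static CPUTLBEntryTree *tlbtree_lookup_addr(CPUTLBDesc *desc, vaddr addr)
    {
        IntervalTreeNode *i = interval_tree_iter_first(&desc->iroot, addr, addr);

        return i ? container_of(i, CPUTLBEntryTree, itree) : NULL;
    }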

Comments

Pierrick Bouvier Nov. 14, 2024, 6:01 p.m. UTC | #1
On 11/14/24 08:00, Richard Henderson wrote:
> Flush a page from the IntervalTree cache.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   accel/tcg/cputlb.c | 16 ++++++++++++----
>   1 file changed, 12 insertions(+), 4 deletions(-)
> 
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index ea6a5177de..d532d69083 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -568,6 +568,7 @@ static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
>       CPUTLBDesc *desc = &cpu->neg.tlb.d[midx];
>       vaddr lp_addr = desc->large_page_addr;
>       vaddr lp_mask = desc->large_page_mask;
> +    CPUTLBEntryTree *node;
>   
>       /* Check if we need to flush due to large pages.  */
>       if ((page & lp_mask) == lp_addr) {
> @@ -575,10 +576,17 @@ static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
>                     VADDR_PRIx "/%016" VADDR_PRIx ")\n",
>                     midx, lp_addr, lp_mask);
>           tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
> -    } else {
> -        tlbfast_flush_range_locked(desc, &cpu->neg.tlb.f[midx],
> -                                   page, TARGET_PAGE_SIZE, -1);
> -        tlb_flush_vtlb_page_locked(cpu, midx, page);
> +        return;
> +    }
> +
> +    tlbfast_flush_range_locked(desc, &cpu->neg.tlb.f[midx],
> +                               page, TARGET_PAGE_SIZE, -1);
> +    tlb_flush_vtlb_page_locked(cpu, midx, page);
> +
> +    node = tlbtree_lookup_addr(desc, page);
> +    if (node) {
> +        interval_tree_remove(&node->itree, &desc->iroot);
> +        g_free(node);
>       }
>   }
>   

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>

Patch

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index ea6a5177de..d532d69083 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -568,6 +568,7 @@ static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
     CPUTLBDesc *desc = &cpu->neg.tlb.d[midx];
     vaddr lp_addr = desc->large_page_addr;
     vaddr lp_mask = desc->large_page_mask;
+    CPUTLBEntryTree *node;
 
     /* Check if we need to flush due to large pages.  */
     if ((page & lp_mask) == lp_addr) {
@@ -575,10 +576,17 @@ static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
                   VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                   midx, lp_addr, lp_mask);
         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
-    } else {
-        tlbfast_flush_range_locked(desc, &cpu->neg.tlb.f[midx],
-                                   page, TARGET_PAGE_SIZE, -1);
-        tlb_flush_vtlb_page_locked(cpu, midx, page);
+        return;
+    }
+
+    tlbfast_flush_range_locked(desc, &cpu->neg.tlb.f[midx],
+                               page, TARGET_PAGE_SIZE, -1);
+    tlb_flush_vtlb_page_locked(cpu, midx, page);
+
+    node = tlbtree_lookup_addr(desc, page);
+    if (node) {
+        interval_tree_remove(&node->itree, &desc->iroot);
+        g_free(node);
     }
 }
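
Note that interval_tree_remove() only unlinks the node from the tree; the
memory still belongs to the caller, which is why the hunk above pairs it with
g_free(). A self-contained illustration of that pattern, using QEMU's
include/qemu/interval-tree.h (the DemoRange type and the addresses below are
illustrative, not taken from the patch):

    #include "qemu/osdep.h"
    #include "qemu/interval-tree.h"

    typedef struct DemoRange {
        IntervalTreeNode itree;   /* embedded node; [start, last] is inclusive */
        int payload;
    } DemoRange;

    static void demo(void)
    {
        IntervalTreeRoot root = { };
        DemoRange *r = g_new0(DemoRange, 1);
        IntervalTreeNode *hit;

        r->itree.start = 0x1000;
        r->itree.last = 0x1fff;              /* one 4k page, inclusive bound */
        interval_tree_insert(&r->itree, &root);

        /* Point lookup: find any node whose range contains the address. */
        hit = interval_tree_iter_first(&root, 0x1234, 0x1234);
        if (hit) {
            interval_tree_remove(hit, &root);             /* unlink only */
            g_free(container_of(hit, DemoRange, itree));  /* caller frees */
        }
    }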