
[v2,2/9] x86/HVM: drop stdvga's "stdvga" struct member

Message ID 08544fc6-bca0-4c69-8f88-3745ed351d53@suse.com (mailing list archive)
State New
Series x86/HVM: drop stdvga caching mode

Commit Message

Jan Beulich Sept. 11, 2024, 12:28 p.m. UTC
Two of its consumers are dead (in compile-time constant conditionals)
and the only remaining ones merely control debug logging. Hence the
field is now pointless to set, which in particular allows getting rid
of the questionable conditional from which the field's value was
established (afaict 551ceee97513 ["x86, hvm: stdvga cache always on"]
had dropped too much of the earlier extra check that was there, and
quite likely further checks were missing).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Reduce a (largely pre-existing) comment. Re-base over new earlier
    patch.
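
For context, the two dead consumers mentioned above are the compile-time-constant
conditionals visible in the patch below. A minimal paraphrase (not the literal
Xen code) of the one in stdvga_mem_write():

    /* The "true ||" short-circuits, so the s->stdvga read to its right is
     * dead code and the function always bails out to the forwarding path. */
    if ( true || !s->stdvga )
        goto done;

The second sits in stdvga_mem_accept(), where an equivalent "true || !s->stdvga"
term makes the rejection of reads unconditional; the only live reads of the
field are the gdprintk() calls in stdvga_outb() reporting entry to / exit from
stdvga mode.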

Comments

Andrew Cooper Sept. 11, 2024, 1:47 p.m. UTC | #1
On 11/09/2024 1:28 pm, Jan Beulich wrote:
> Two of its consumers are dead (in compile-time constant conditionals)
> and the only remaining ones merely control debug logging. Hence the
> field is now pointless to set, which in particular allows getting rid
> of the questionable conditional from which the field's value was
> established (afaict 551ceee97513 ["x86, hvm: stdvga cache always on"]
> had dropped too much of the earlier extra check that was there, and
> quite likely further checks were missing).
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Patch

--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -103,7 +103,7 @@  static void vram_put(struct hvm_hw_stdvg
 static int stdvga_outb(uint64_t addr, uint8_t val)
 {
     struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
-    int rc = 1, prev_stdvga = s->stdvga;
+    int rc = 1;
 
     switch ( addr )
     {
@@ -132,19 +132,6 @@  static int stdvga_outb(uint64_t addr, ui
         break;
     }
 
-    /* When in standard vga mode, emulate here all writes to the vram buffer
-     * so we can immediately satisfy reads without waiting for qemu. */
-    s->stdvga = (s->sr[7] == 0x00);
-
-    if ( !prev_stdvga && s->stdvga )
-    {
-        gdprintk(XENLOG_INFO, "entering stdvga mode\n");
-    }
-    else if ( prev_stdvga && !s->stdvga )
-    {
-        gdprintk(XENLOG_INFO, "leaving stdvga mode\n");
-    }
-
     return rc;
 }
 
@@ -425,7 +412,6 @@  static int cf_check stdvga_mem_write(
     const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
     uint64_t data)
 {
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
     ioreq_t p = {
         .type = IOREQ_TYPE_COPY,
         .addr = addr,
@@ -436,8 +422,7 @@  static int cf_check stdvga_mem_write(
     };
     struct ioreq_server *srv;
 
-    if ( true || !s->stdvga )
-        goto done;
+    goto done;
 
     /* Intercept mmio write */
     switch ( size )
@@ -498,19 +483,14 @@  static bool cf_check stdvga_mem_accept(
 
     spin_lock(&s->lock);
 
-    if ( p->dir == IOREQ_WRITE && (p->data_is_ptr || p->count != 1) )
+    if ( p->dir != IOREQ_WRITE || p->data_is_ptr || p->count != 1 )
     {
         /*
-         * We cannot return X86EMUL_UNHANDLEABLE on anything other then the
-         * first cycle of an I/O. So, since we cannot guarantee to always be
-         * able to send buffered writes, we have to reject any multi-cycle
-         * or "indirect" I/O.
+         * Only accept single direct writes, as that's the only thing we can
+         * accelerate using buffered ioreq handling.
          */
         goto reject;
     }
-    else if ( p->dir == IOREQ_READ &&
-              (true || !s->stdvga) )
-        goto reject;
 
     /* s->lock intentionally held */
     return 1;
--- a/xen/arch/x86/include/asm/hvm/io.h
+++ b/xen/arch/x86/include/asm/hvm/io.h
@@ -115,7 +115,6 @@  struct hvm_hw_stdvga {
     uint8_t sr[8];
     uint8_t gr_index;
     uint8_t gr[9];
-    bool stdvga;
     uint32_t latch;
     struct page_info *vram_page[64];  /* shadow of 0xa0000-0xaffff */
     spinlock_t lock;
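
On the rewritten comment in stdvga_mem_accept() above: after this change the
accept decision reduces to a single predicate. A minimal sketch of that
predicate as a hypothetical helper (the name below is illustrative only; the
patch itself keeps the check inline):

    /* Accept only single, direct (non-data_is_ptr) writes; anything else is
     * rejected here and handled by qemu instead, as only such writes can be
     * forwarded via the buffered ioreq path. */
    static bool stdvga_single_direct_write(const ioreq_t *p)
    {
        return p->dir == IOREQ_WRITE && !p->data_is_ptr && p->count == 1;
    }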