[2/7] x86/HVM: drop stdvga's "stdvga" struct member

Message ID 836afb28-581c-4ab8-a0a9-badf29a51b5e@suse.com (mailing list archive)
State Superseded
Series x86/HVM: drop stdvga caching mode

Commit Message

Jan Beulich Sept. 10, 2024, 2:40 p.m. UTC
Two of its consumers are dead (in compile-time constant conditionals)
and the only remaining ones are merely controlling (debugging) log
messages. Hence the field is now pointless to set, which in particular
allows us to get rid of the questionable conditional from which the field's
value was established (afaict 551ceee97513 ["x86, hvm: stdvga cache
always on"] had dropped too much of the earlier extra check that was
there, and quite likely further checks were missing).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

Comments

Andrew Cooper Sept. 10, 2024, 5:47 p.m. UTC | #1
On 10/09/2024 3:40 pm, Jan Beulich wrote:
> Two of its consumers are dead (in compile-time constant conditionals)
> and the only remaining ones are merely controlling (debugging) log
> messages.

Minor, but I'd phrase this as "merely controlling debug logging."

>  Hence the field is now pointless to set, which in particular
> allows us to get rid of the questionable conditional from which the field's
> value was established (afaict 551ceee97513 ["x86, hvm: stdvga cache
> always on"] had dropped too much of the earlier extra check that was
> there, and quite likely further checks were missing).
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/hvm/stdvga.c
> +++ b/xen/arch/x86/hvm/stdvga.c
> @@ -103,7 +103,7 @@ static void vram_put(struct hvm_hw_stdvg
>  static int stdvga_outb(uint64_t addr, uint8_t val)
>  {
>      struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
> -    int rc = 1, prev_stdvga = s->stdvga;
> +    int rc = 1;

This looks to also drop a MISRA essential-type complaint.
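
Presumably that is MISRA C:2012 Rule 10.3 (no assignment across essential
type categories): s->stdvga is a bool, so initialising an int from it
converts an essentially Boolean value to an essentially signed one. A
minimal standalone sketch of the flagged pattern:

    #include <stdbool.h>

    int main(void)
    {
        bool flag = true;
        /* An essentially Boolean value initialising an essentially
         * signed object is what the essential type model objects to. */
        int prev = flag;

        return prev;
    }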

>  
>      switch ( addr )
>      {
> @@ -132,19 +132,6 @@ static int stdvga_outb(uint64_t addr, ui
>          break;
>      }
>  
> -    /* When in standard vga mode, emulate here all writes to the vram buffer
> -     * so we can immediately satisfy reads without waiting for qemu. */
> -    s->stdvga = (s->sr[7] == 0x00);
> -
> -    if ( !prev_stdvga && s->stdvga )
> -    {
> -        gdprintk(XENLOG_INFO, "entering stdvga mode\n");
> -    }
> -    else if ( prev_stdvga && !s->stdvga )
> -    {
> -        gdprintk(XENLOG_INFO, "leaving stdvga mode\n");
> -    }
> -
>      return rc;
>  }
>  
> @@ -425,7 +412,6 @@ static int cf_check stdvga_mem_write(
>      const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
>      uint64_t data)
>  {
> -    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
>      ioreq_t p = {
>          .type = IOREQ_TYPE_COPY,
>          .addr = addr,
> @@ -436,8 +422,7 @@ static int cf_check stdvga_mem_write(
>      };
>      struct ioreq_server *srv;
>  
> -    if ( true || !s->stdvga )
> -        goto done;
> +    goto done;
>  
>      /* Intercept mmio write */
>      switch ( size )
> @@ -498,19 +483,17 @@ static bool cf_check stdvga_mem_accept(
>  
>      spin_lock(&s->lock);
>  
> -    if ( p->dir == IOREQ_WRITE && p->count > 1 )
> +    if ( p->dir != IOREQ_WRITE || p->count > 1 )
>      {
>          /*
>           * We cannot return X86EMUL_UNHANDLEABLE on anything other then the
>           * first cycle of an I/O. So, since we cannot guarantee to always be
>           * able to send buffered writes, we have to reject any multi-cycle
> -         * I/O.
> +         * I/O. And of course we have to reject all reads, for not being
> +         * able to service them.
>           */
>          goto reject;
>      }
> -    else if ( p->dir == IOREQ_READ &&
> -              (true || !s->stdvga) )
> -        goto reject;

Before, we rejected on (WRITE && count > 1) or READ, and I think we still do
afterwards given the boolean-ness of read/write.  However, the comment
is distinctly less relevant.

I think we now just end up with /* Only accept single writes. */ which
matches with shuffling these into the bufioreq ring.
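
For completeness, the equivalence can be checked exhaustively; a small
standalone model (assuming Xen's usual IOREQ_WRITE == 0 / IOREQ_READ == 1
encoding):

    #include <assert.h>

    #define IOREQ_WRITE 0
    #define IOREQ_READ  1

    int main(void)
    {
        /* For a two-valued direction, "(WRITE && count > 1) || READ"
         * rejects exactly the same requests as "!WRITE || count > 1". */
        for ( int dir = 0; dir <= 1; dir++ )
            for ( unsigned int count = 1; count <= 4; count++ )
            {
                int old = (dir == IOREQ_WRITE && count > 1) ||
                          (dir == IOREQ_READ);
                int new = (dir != IOREQ_WRITE || count > 1);

                assert(old == new);
            }

        return 0;
    }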

~Andrew
Jan Beulich Sept. 11, 2024, 9:05 a.m. UTC | #2
On 10.09.2024 19:47, Andrew Cooper wrote:
> On 10/09/2024 3:40 pm, Jan Beulich wrote:
>> @@ -498,19 +483,17 @@ static bool cf_check stdvga_mem_accept(
>>  
>>      spin_lock(&s->lock);
>>  
>> -    if ( p->dir == IOREQ_WRITE && p->count > 1 )
>> +    if ( p->dir != IOREQ_WRITE || p->count > 1 )
>>      {
>>          /*
>>           * We cannot return X86EMUL_UNHANDLEABLE on anything other then the
>>           * first cycle of an I/O. So, since we cannot guarantee to always be
>>           * able to send buffered writes, we have to reject any multi-cycle
>> -         * I/O.
>> +         * I/O. And of course we have to reject all reads, for not being
>> +         * able to service them.
>>           */
>>          goto reject;
>>      }
>> -    else if ( p->dir == IOREQ_READ &&
>> -              (true || !s->stdvga) )
>> -        goto reject;
> 
> Before, we rejected on (WRITE && count > 1) or READ, and I think we still do
> afterwards given the boolean-ness of read/write.  However, the comment
> is distinctly less relevant.
> 
> I think we now just end up with /* Only accept single writes. */ which
> matches with shuffling these into the bufioreq ring.

Fine with me. As I usually comment rather too little, I was afraid I'd
remove too much if I shrunk the comment like this.

Jan
Jan Beulich Sept. 11, 2024, 9:46 a.m. UTC | #3
On 10.09.2024 19:47, Andrew Cooper wrote:
> On 10/09/2024 3:40 pm, Jan Beulich wrote:
>> @@ -498,19 +483,17 @@ static bool cf_check stdvga_mem_accept(
>>  
>>      spin_lock(&s->lock);
>>  
>> -    if ( p->dir == IOREQ_WRITE && p->count > 1 )
>> +    if ( p->dir != IOREQ_WRITE || p->count > 1 )
>>      {
>>          /*
>>           * We cannot return X86EMUL_UNHANDLEABLE on anything other then the
>>           * first cycle of an I/O. So, since we cannot guarantee to always be
>>           * able to send buffered writes, we have to reject any multi-cycle
>> -         * I/O.
>> +         * I/O. And of course we have to reject all reads, for not being
>> +         * able to service them.
>>           */
>>          goto reject;
>>      }
>> -    else if ( p->dir == IOREQ_READ &&
>> -              (true || !s->stdvga) )
>> -        goto reject;
> 
> Before, we rejected on (WRITE && count > 1) or READ, and I think we still do
> afterwards given the boolean-ness of read/write.  However, the comment
> is distinctly less relevant.
> 
> I think we now just end up with /* Only accept single writes. */ which
> matches with shuffling these into the bufioreq ring.

I'd like to keep a little more, if you don't mind:

        /*
         * Only accept single writes, as that's the only thing we accelerate
         * using buffered ioreq handling.
         */

Not least because, writing this, I noticed another flaw (or even two,
depending on how one looks at it): ioreq_send_buffered() further refuses
data_is_ptr requests. (Checking ->data_is_ptr is actually more important
than ->count, as ->count will be unequal to 1 only if ->data_is_ptr is
set, whereas ->count can also be 1 in that case.) What makes this worse is
that this "refusing" actually returns "success" (0 == X86EMUL_OKAY ==
IOREQ_STATUS_HANDLED) on x86, and IO_ABORT on Arm. I.e. such requests
would simply be lost on x86, and whether IO_ABORT is okay(ish) on Arm I
simply don't know (I'll see if digging through history reveals intentions).

And then: how can a buffered ioreq be a read, when there's nothing being
returned? (I.e. I consider this an omission in what ioreq_send_buffered()
refuses to process.)
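
Put together, the reasoning above points at a check along these lines (a
standalone sketch; the struct merely models the ioreq fields used in the
hunks above):

    #include <stdbool.h>
    #include <stdint.h>

    #define IOREQ_WRITE 0

    /* Cut-down model of the relevant ioreq fields. */
    struct req {
        uint8_t  dir;
        uint8_t  data_is_ptr;
        uint32_t count;
    };

    /* Accept only single, non-data_is_ptr writes: the only requests
     * buffered ioreq handling can actually deliver.  The ->data_is_ptr
     * check is the one that matters, as ->count > 1 implies
     * ->data_is_ptr, while ->count == 1 doesn't rule it out. */
    static bool accept(const struct req *p)
    {
        return p->dir == IOREQ_WRITE && !p->data_is_ptr && p->count == 1;
    }

    int main(void)
    {
        struct req p = { .dir = IOREQ_WRITE, .data_is_ptr = 0, .count = 1 };

        return !accept(&p);
    }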

Jan

Patch

--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -103,7 +103,7 @@  static void vram_put(struct hvm_hw_stdvg
 static int stdvga_outb(uint64_t addr, uint8_t val)
 {
     struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
-    int rc = 1, prev_stdvga = s->stdvga;
+    int rc = 1;
 
     switch ( addr )
     {
@@ -132,19 +132,6 @@  static int stdvga_outb(uint64_t addr, ui
         break;
     }
 
-    /* When in standard vga mode, emulate here all writes to the vram buffer
-     * so we can immediately satisfy reads without waiting for qemu. */
-    s->stdvga = (s->sr[7] == 0x00);
-
-    if ( !prev_stdvga && s->stdvga )
-    {
-        gdprintk(XENLOG_INFO, "entering stdvga mode\n");
-    }
-    else if ( prev_stdvga && !s->stdvga )
-    {
-        gdprintk(XENLOG_INFO, "leaving stdvga mode\n");
-    }
-
     return rc;
 }
 
@@ -425,7 +412,6 @@  static int cf_check stdvga_mem_write(
     const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
     uint64_t data)
 {
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
     ioreq_t p = {
         .type = IOREQ_TYPE_COPY,
         .addr = addr,
@@ -436,8 +422,7 @@  static int cf_check stdvga_mem_write(
     };
     struct ioreq_server *srv;
 
-    if ( true || !s->stdvga )
-        goto done;
+    goto done;
 
     /* Intercept mmio write */
     switch ( size )
@@ -498,19 +483,17 @@  static bool cf_check stdvga_mem_accept(
 
     spin_lock(&s->lock);
 
-    if ( p->dir == IOREQ_WRITE && p->count > 1 )
+    if ( p->dir != IOREQ_WRITE || p->count > 1 )
     {
         /*
          * We cannot return X86EMUL_UNHANDLEABLE on anything other then the
          * first cycle of an I/O. So, since we cannot guarantee to always be
          * able to send buffered writes, we have to reject any multi-cycle
-         * I/O.
+         * I/O. And of course we have to reject all reads, for not being
+         * able to service them.
          */
         goto reject;
     }
-    else if ( p->dir == IOREQ_READ &&
-              (true || !s->stdvga) )
-        goto reject;
 
     /* s->lock intentionally held */
     return 1;
--- a/xen/arch/x86/include/asm/hvm/io.h
+++ b/xen/arch/x86/include/asm/hvm/io.h
@@ -115,7 +115,6 @@  struct hvm_hw_stdvga {
     uint8_t sr[8];
     uint8_t gr_index;
     uint8_t gr[9];
-    bool stdvga;
     uint32_t latch;
     struct page_info *vram_page[64];  /* shadow of 0xa0000-0xaffff */
     spinlock_t lock;