[v4,3/3] hw/vfio/display: add ramfb support

Message ID 20180613084149.14523-4-kraxel@redhat.com (mailing list archive)
State New, archived

Commit Message

Gerd Hoffmann June 13, 2018, 8:41 a.m. UTC
So we have a boot display when using a vgpu as primary display.

Use vfio-pci-ramfb instead of vfio-pci to enable it.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 include/hw/vfio/vfio-common.h |  2 ++
 hw/vfio/display.c             | 10 ++++++++++
 hw/vfio/pci.c                 | 15 +++++++++++++++
 3 files changed, 27 insertions(+)

Comments

Alex Williamson June 13, 2018, 7:50 p.m. UTC | #1
On Wed, 13 Jun 2018 10:41:49 +0200
Gerd Hoffmann <kraxel@redhat.com> wrote:

> So we have a boot display when using a vgpu as primary display.
> 
> Use vfio-pci-ramfb instead of vfio-pci to enable it.

Using a different device here seems like it almost guarantees a very
complicated path to support under libvirt.  What necessitates this
versus a simple ramfb=on option to vfio-pci?

I'm also not sure I understand the usage model: SeaBIOS and OVMF know
how to write to this display, but it seems that the guest does not.  I
suppose in the UEFI case runtime services can be used to continue
writing this display, but BIOS doesn't have such an option, unless
we're somehow emulating VGA here.  So for UEFI, I can imagine this
covers us from power on through firmware boot and up to guest drivers
initializing the GPU (assuming the vGPU supports a kernel mode driver,
does NVIDIA?), but for BIOS it seems we likely still have a break from
the bootloader to GPU driver initialization.  For instance, what driver
is used to draw the boot animation (or blue screen) on a SeaBIOS Windows
VM?  I'm assuming that this display and the vGPU display are one and the
same, so there's some cut from one to the other.  Thanks,

Alex

> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
> ---
>  include/hw/vfio/vfio-common.h |  2 ++
>  hw/vfio/display.c             | 10 ++++++++++
>  hw/vfio/pci.c                 | 15 +++++++++++++++
>  3 files changed, 27 insertions(+)
> 
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index a9036929b2..a58d7e7e77 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -26,6 +26,7 @@
>  #include "qemu/queue.h"
>  #include "qemu/notify.h"
>  #include "ui/console.h"
> +#include "hw/display/ramfb.h"
>  #ifdef CONFIG_LINUX
>  #include <linux/vfio.h>
>  #endif
> @@ -143,6 +144,7 @@ typedef struct VFIODMABuf {
>  
>  typedef struct VFIODisplay {
>      QemuConsole *con;
> +    RAMFBState *ramfb;
>      struct {
>          VFIORegion buffer;
>          DisplaySurface *surface;
> diff --git a/hw/vfio/display.c b/hw/vfio/display.c
> index 59c0e5d1d7..409d5a2e3a 100644
> --- a/hw/vfio/display.c
> +++ b/hw/vfio/display.c
> @@ -124,6 +124,9 @@ static void vfio_display_dmabuf_update(void *opaque)
>  
>      primary = vfio_display_get_dmabuf(vdev, DRM_PLANE_TYPE_PRIMARY);
>      if (primary == NULL) {
> +        if (dpy->ramfb) {
> +            ramfb_display_update(dpy->con, dpy->ramfb);
> +        }
>          return;
>      }
>  
> @@ -181,6 +184,8 @@ static int vfio_display_dmabuf_init(VFIOPCIDevice *vdev, Error **errp)
>      vdev->dpy->con = graphic_console_init(DEVICE(vdev), 0,
>                                            &vfio_display_dmabuf_ops,
>                                            vdev);
> +    if (strcmp(object_get_typename(OBJECT(vdev)), "vfio-pci-ramfb") == 0)
> +        vdev->dpy->ramfb = ramfb_setup(errp);
>      return 0;
>  }
>  
> @@ -228,6 +233,9 @@ static void vfio_display_region_update(void *opaque)
>          return;
>      }
>      if (!plane.drm_format || !plane.size) {
> +        if (dpy->ramfb) {
> +            ramfb_display_update(dpy->con, dpy->ramfb);
> +        }
>          return;
>      }
>      format = qemu_drm_format_to_pixman(plane.drm_format);
> @@ -300,6 +308,8 @@ static int vfio_display_region_init(VFIOPCIDevice *vdev, Error **errp)
>      vdev->dpy->con = graphic_console_init(DEVICE(vdev), 0,
>                                            &vfio_display_region_ops,
>                                            vdev);
> +    if (strcmp(object_get_typename(OBJECT(vdev)), "vfio-pci-ramfb") == 0)
> +        vdev->dpy->ramfb = ramfb_setup(errp);
>      return 0;
>  }
>  
> diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
> index 18c493b49e..6a2b42a595 100644
> --- a/hw/vfio/pci.c
> +++ b/hw/vfio/pci.c
> @@ -3234,9 +3234,24 @@ static const TypeInfo vfio_pci_dev_info = {
>      },
>  };
>  
> +static void vfio_pci_ramfb_dev_class_init(ObjectClass *klass, void *data)
> +{
> +    DeviceClass *dc = DEVICE_CLASS(klass);
> +
> +    dc->hotpluggable = false;
> +}
> +
> +static const TypeInfo vfio_pci_ramfb_dev_info = {
> +    .name = "vfio-pci-ramfb",
> +    .parent = "vfio-pci",
> +    .instance_size = sizeof(VFIOPCIDevice),
> +    .class_init = vfio_pci_ramfb_dev_class_init,
> +};
> +
>  static void register_vfio_pci_dev_type(void)
>  {
>      type_register_static(&vfio_pci_dev_info);
> +    type_register_static(&vfio_pci_ramfb_dev_info);
>  }
>  
>  type_init(register_vfio_pci_dev_type)
Gerd Hoffmann June 13, 2018, 10:36 p.m. UTC | #2
On Wed, Jun 13, 2018 at 01:50:47PM -0600, Alex Williamson wrote:
> On Wed, 13 Jun 2018 10:41:49 +0200
> Gerd Hoffmann <kraxel@redhat.com> wrote:
> 
> > So we have a boot display when using a vgpu as primary display.
> > 
> > Use vfio-pci-ramfb instead of vfio-pci to enable it.
> 
> Using a different device here seems like it almost guarantees a very
> complicated path to support under libvirt.  What necessitates this
> versus a simple ramfb=on option to vfio-pci?

Well, it's similar to qxl vs. qxl-vga.  It's not qxl,vga={on,off}, and
libvirt has no problem dealing with that ...

Another more technical reason is (again) hotplug.  ramfb needs an fw_cfg
entry for configuration, and fw_cfg entries can't be hotplugged.  So
hotplugging vfio-pci with ramfb=on isn't going to fly.  So we need a
separate device with hotplug turned off.
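
For context, the coupling looks roughly like this: ramfb exposes a small
configuration blob as an fw_cfg file, and the firmware fills it in when it
programs a mode.  A sketch modeled on the ramfb patch earlier in this
series (assuming fw_cfg write-callback support; names and details may
differ):

  /* Guest firmware writes this struct into the "etc/ramfb" fw_cfg
   * file to set a mode (sketch; see hw/display/ramfb.c). */
  struct QEMU_PACKED RAMFBCfg {
      uint64_t addr;       /* guest-physical framebuffer address */
      uint32_t fourcc;     /* DRM fourcc pixel format */
      uint32_t flags;
      uint32_t width;
      uint32_t height;
      uint32_t stride;
  };

  struct RAMFBState {
      struct RAMFBCfg cfg; /* backing storage for the fw_cfg file */
      /* display surface bookkeeping elided */
  };

  static void ramfb_fw_cfg_write(void *opaque, off_t offset, size_t len)
  {
      /* validate cfg and create/replace the DisplaySurface (elided) */
  }

  RAMFBState *ramfb_setup(Error **errp)
  {
      FWCfgState *fw_cfg = fw_cfg_find();
      RAMFBState *s;

      if (!fw_cfg || !fw_cfg->dma_enabled) {
          error_setg(errp, "ramfb requires fw_cfg with DMA");
          return NULL;
      }
      s = g_new0(RAMFBState, 1);
      /* fw_cfg files are registered once, at machine creation time;
       * this is the step that can't happen during hotplug. */
      fw_cfg_add_file_callback(fw_cfg, "etc/ramfb",
                               NULL, ramfb_fw_cfg_write, s,
                               &s->cfg, sizeof(s->cfg), false);
      return s;
  }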

> I'm also not sure I understand the usage model: SeaBIOS and OVMF know
> how to write to this display, but it seems that the guest does not.

Yes.

> I suppose in the UEFI case runtime services can be used to continue
> writing this display,

Yes.

> but BIOS doesn't have such an option, unless we're somehow emulating
> VGA here.

vgabios support is in the pipeline, including text mode emulation (at
vgabios level, direct access to vga window @ 0xa0000 doesn't work).

> So for UEFI, I can imagine this
> covers us from power on through firmware boot and up to guest drivers
> initializing the GPU (assuming the vGPU supports a kernel mode driver,
> does NVIDIA?),

Yes.  Shouldn't matter whether the driver is kernel or userspace.

> but for BIOS it seems we likely still have a break from
> the bootloader to GPU driver initialization.

Depends.  vgacon (text mode console) doesn't work.  fbcon @ vesafb works.

> For instance, what driver
> is used to draw the boot animation (or blue screen) on a SeaBIOS Windows
> VM?

Windows depends on vgabios for that and it works fine.

> I'm assuming that this display and the vGPU display are one and the
> same, so there's some cut from one to the other.

Yes.  If the vfio query plane ioctl reports a valid guest video mode
configuration, the vgpu display will be used, ramfb otherwise.
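
In sketch form, that decision amounts to probing the primary plane via the
VFIO_DEVICE_QUERY_GFX_PLANE uapi (kernel 4.16+ headers; this is an
illustration of the check, not the actual hw/vfio/display.c code):

  #include <linux/vfio.h>
  #include <sys/ioctl.h>
  #include <stdbool.h>
  #include <string.h>

  /* From the DRM plane-type enum, spelled out so the sketch is
   * self-contained. */
  #define DRM_PLANE_TYPE_PRIMARY 1

  /* Returns true when the guest driver has programmed a valid mode;
   * in that case the vgpu plane is shown, otherwise ramfb stays up. */
  static bool vgpu_mode_valid(int device_fd)
  {
      struct vfio_device_gfx_plane_info plane;

      memset(&plane, 0, sizeof(plane));
      plane.argsz = sizeof(plane);
      plane.flags = VFIO_GFX_PLANE_TYPE_DMABUF;   /* or _TYPE_REGION */
      plane.drm_plane_type = DRM_PLANE_TYPE_PRIMARY;

      if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane) < 0) {
          return false;
      }
      return plane.drm_format != 0 && plane.size != 0;
  }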

cheers,
  Gerd
Laszlo Ersek June 14, 2018, 8:32 a.m. UTC | #3
On 06/14/18 00:36, Gerd Hoffmann wrote:
> On Wed, Jun 13, 2018 at 01:50:47PM -0600, Alex Williamson wrote:

>> I suppose in the UEFI case runtime services can be used to continue
>> writing this display,
> 
> Yes.

Small clarification for the wording -- "UEFI runtime services" do not
include anything display- or graphics-related. Instead, the OS kernel
may inherit the framebuffer properties (base address, size, pixel
format), and continue accessing the framebuffer directly.
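
To make that concrete, a minimal EDK2-style sketch of capturing those
properties while boot services are still available (standard GOP API per
the UEFI spec; the helper below is illustrative, not OVMF code):

  #include <Uefi.h>
  #include <Library/UefiBootServicesTableLib.h>
  #include <Protocol/GraphicsOutput.h>

  /* After ExitBootServices() the OS simply keeps writing to
   * FrameBufferBase directly -- no runtime service is involved. */
  EFI_STATUS
  CaptureFramebuffer (
    OUT EFI_PHYSICAL_ADDRESS  *Base,
    OUT UINTN                 *Size,
    OUT UINT32                *Width,
    OUT UINT32                *Height
    )
  {
    EFI_GRAPHICS_OUTPUT_PROTOCOL  *Gop;
    EFI_STATUS                    Status;

    Status = gBS->LocateProtocol (&gEfiGraphicsOutputProtocolGuid,
                                  NULL, (VOID **)&Gop);
    if (EFI_ERROR (Status)) {
      return Status;
    }
    *Base   = Gop->Mode->FrameBufferBase;
    *Size   = Gop->Mode->FrameBufferSize;
    *Width  = Gop->Mode->Info->HorizontalResolution;
    *Height = Gop->Mode->Info->VerticalResolution;
    return EFI_SUCCESS;
  }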

Thanks
Laszlo
Erik Skultety June 14, 2018, 9:48 a.m. UTC | #4
On Thu, Jun 14, 2018 at 12:36:25AM +0200, Gerd Hoffmann wrote:
> On Wed, Jun 13, 2018 at 01:50:47PM -0600, Alex Williamson wrote:
> > On Wed, 13 Jun 2018 10:41:49 +0200
> > Gerd Hoffmann <kraxel@redhat.com> wrote:
> >
> > > So we have a boot display when using a vgpu as primary display.
> > >
> > > Use vfio-pci-ramfb instead of vfio-pci to enable it.
> >
> > Using a different device here seems like it almost guarantees a very
> > complicated path to support under libvirt.  What necessitates this
> > versus a simple ramfb=on option to vfio-pci?
>
> Well, it's similar to qxl vs. qxl-vga.  It's not qxl,vga={on,off}, and
> libvirt has no problem dealing with that ...
>
> Another more technical reason is (again) hotplug.  ramfb needs an fw_cfg
> entry for configuration, and fw_cfg entries can't be hotplugged.  So
> hotplugging vfio-pci with ramfb=on isn't going to fly.  So we need a
> separate device with hotplug turned off.

Well, if that's never supposed to work, libvirt's hotplug code could format
the following FWIW:
"-device vfio-pci [opts],ramfb=off"

As such, the new device wouldn't be much of an issue for libvirt if vfio-pci
and vfio-pci-ramfb are fully compatible with all the device options that are
available for vfio-pci (I mean in terms of using an mdev). In that case,
libvirt could look at whether we're supposed to turn on the display; if so,
we'd need support for this in the capabilities to query, and then we could
prefer this new device over the "legacy" vfio-pci one. However, if we
expect a case where QEMU would start successfully with an mdev mapped to this
new ramfb device but not with vfio-pci, then that's an issue. Otherwise I
don't necessarily see a problem: if QEMU supports this new device and we need
a display, let's use that; otherwise let's use the old vfio-pci device. But
I'm still curious about the ramfb=off possibility I asked above for hotplug
nonetheless.
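
Concretely, the two paths would look something like this (hypothetical
invocations; the sysfsdev path and id are illustrative):

  # cold plug at domain startup -- boot display available
  -device vfio-pci-ramfb,sysfsdev=/sys/bus/mdev/devices/UUID,display=on

  # hotplug later via the monitor -- plain vfio-pci, no ramfb
  device_add vfio-pci,sysfsdev=/sys/bus/mdev/devices/UUID,display=on,id=hostdev0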

Thanks,
Erik

>
> > I'm also not sure I understand the usage model: SeaBIOS and OVMF know
> > how to write to this display, but it seems that the guest does not.
>
> Yes.
>
> > I suppose in the UEFI case runtime services can be used to continue
> > writing this display,
>
> Yes.
>
> > but BIOS doesn't have such an option, unless we're somehow emulating
> > VGA here.
>
> vgabios support is in the pipeline, including text mode emulation (at
> vgabios level, direct access to vga window @ 0xa0000 doesn't work).
>
> > So for UEFI, I can imagine this
> > covers us from power on through firmware boot and up to guest drivers
> > initializing the GPU (assuming the vGPU supports a kernel mode driver,
> > does NVIDIA?),
>
> Yes.  Shouldn't matter whether the driver is kernel or userspace.
>
> > but for BIOS it seems we likely still have a break from
> > the bootloader to GPU driver initialization.
>
> Depends.  vgacon (text mode console) doesn't work.  fbcon @ vesafb works.
>
> > For instance, what driver
> > is used to draw the boot animation (or blue screen) on a SeaBIOS Windows
> > VM?
>
> Windows depends on vgabios for that and it works fine.
>
> > I'm assuming that this display and the vGPU display are one and the
> > same, so there's some cut from one to the other.
>
> Yes.  If the vfio query plane ioctl reports a valid guest video mode
> configuration, the vgpu display will be used, ramfb otherwise.
>
> cheers,
>   Gerd
>
Gerd Hoffmann June 15, 2018, 7:53 a.m. UTC | #5
> > Well, it's similar to qxl vs. qxl-vga.  It's not qxl,vga={on,off}, and
> > libvirt has no problem dealing with that ...
> >
> > Another more technical reason is (again) hotplug.  ramfb needs an fw_cfg
> > entry for configuration, and fw_cfg entries can't be hotplugged.  So
> > hotplugging vfio-pci with ramfb=on isn't going to fly.  So we need a
> > separate device with hotplug turned off.
> 
> Well, if that's never supposed to work, libvirt's hotplug code could format
> the following FWIW:
> "-device vfio-pci [opts],ramfb=off"
> 
> As such, the new device wouldn't be much of an issue for libvirt if vfio-pci
> and vfio-pci-ramfb are fully compatible with all the device options that are
> available for vfio-pci (I mean in terms of using an mdev). In that case,
> libvirt could look at whether we're supposed to turn on the display; if so,
> we'd need support for this in the capabilities to query, and then we could
> prefer this new device over the "legacy" vfio-pci one. However, if we
> expect a case where QEMU would start successfully with an mdev mapped to this
> new ramfb device but not with vfio-pci, then that's an issue.

vfio-pci and vfio-pci-ramfb are identical, except for the latter having a
boot display (with display=on) and not being hot-pluggable.  So, yes, all
pci/mdev devices should work with both vfio-pci variants.

> But I'm still curious about the ramfb=off possibility I asked above
> for hotplug nonetheless.

Well, the problem is introspection.  qemu can report via qmp whether a
specific device supports hotplug.  A device which can be either
hot-pluggable or not hot-pluggable, depending on some condition, is a
problem here ...
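
For reference, qdev derives hotpluggability statically from the class flag
(and the bus); a simplified sketch along the lines of hw/core/qdev.c (not
verbatim):

  /* The class flag set in class_init is consulted, so hotpluggability
   * must be a per-type decision, not something that flips on a
   * per-instance option like ramfb=on. */
  static bool device_get_hotpluggable(Object *obj, Error **errp)
  {
      DeviceClass *dc = DEVICE_GET_CLASS(obj);
      DeviceState *dev = DEVICE(obj);

      return dc->hotpluggable &&
             (dev->parent_bus == NULL ||
              qbus_is_hotpluggable(dev->parent_bus));
  }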

cheers,
  Gerd

Patch

diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index a9036929b2..a58d7e7e77 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -26,6 +26,7 @@ 
 #include "qemu/queue.h"
 #include "qemu/notify.h"
 #include "ui/console.h"
+#include "hw/display/ramfb.h"
 #ifdef CONFIG_LINUX
 #include <linux/vfio.h>
 #endif
@@ -143,6 +144,7 @@  typedef struct VFIODMABuf {
 
 typedef struct VFIODisplay {
     QemuConsole *con;
+    RAMFBState *ramfb;
     struct {
         VFIORegion buffer;
         DisplaySurface *surface;
diff --git a/hw/vfio/display.c b/hw/vfio/display.c
index 59c0e5d1d7..409d5a2e3a 100644
--- a/hw/vfio/display.c
+++ b/hw/vfio/display.c
@@ -124,6 +124,9 @@  static void vfio_display_dmabuf_update(void *opaque)
 
     primary = vfio_display_get_dmabuf(vdev, DRM_PLANE_TYPE_PRIMARY);
     if (primary == NULL) {
+        if (dpy->ramfb) {
+            ramfb_display_update(dpy->con, dpy->ramfb);
+        }
         return;
     }
 
@@ -181,6 +184,8 @@  static int vfio_display_dmabuf_init(VFIOPCIDevice *vdev, Error **errp)
     vdev->dpy->con = graphic_console_init(DEVICE(vdev), 0,
                                           &vfio_display_dmabuf_ops,
                                           vdev);
+    if (strcmp(object_get_typename(OBJECT(vdev)), "vfio-pci-ramfb") == 0)
+        vdev->dpy->ramfb = ramfb_setup(errp);
     return 0;
 }
 
@@ -228,6 +233,9 @@  static void vfio_display_region_update(void *opaque)
         return;
     }
     if (!plane.drm_format || !plane.size) {
+        if (dpy->ramfb) {
+            ramfb_display_update(dpy->con, dpy->ramfb);
+        }
         return;
     }
     format = qemu_drm_format_to_pixman(plane.drm_format);
@@ -300,6 +308,8 @@  static int vfio_display_region_init(VFIOPCIDevice *vdev, Error **errp)
     vdev->dpy->con = graphic_console_init(DEVICE(vdev), 0,
                                           &vfio_display_region_ops,
                                           vdev);
+    if (strcmp(object_get_typename(OBJECT(vdev)), "vfio-pci-ramfb") == 0)
+        vdev->dpy->ramfb = ramfb_setup(errp);
     return 0;
 }
 
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 18c493b49e..6a2b42a595 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -3234,9 +3234,24 @@  static const TypeInfo vfio_pci_dev_info = {
     },
 };
 
+static void vfio_pci_ramfb_dev_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->hotpluggable = false;
+}
+
+static const TypeInfo vfio_pci_ramfb_dev_info = {
+    .name = "vfio-pci-ramfb",
+    .parent = "vfio-pci",
+    .instance_size = sizeof(VFIOPCIDevice),
+    .class_init = vfio_pci_ramfb_dev_class_init,
+};
+
 static void register_vfio_pci_dev_type(void)
 {
     type_register_static(&vfio_pci_dev_info);
+    type_register_static(&vfio_pci_ramfb_dev_info);
 }
 
 type_init(register_vfio_pci_dev_type)