diff mbox series

[v10,3/6] tpm: allocate/map buffer for TPM Physical Presence interface

Message ID 20180831172424.12029-4-marcandre.lureau@redhat.com (mailing list archive)
State New, archived
Headers show
Series Add support for TPM Physical Presence interface | expand

Commit Message

Marc-André Lureau Aug. 31, 2018, 5:24 p.m. UTC
From: Stefan Berger <stefanb@linux.vnet.ibm.com>

Implement a virtual memory device for the TPM Physical Presence interface.
The memory is located at 0xFED45000 and used by ACPI to send messages to the
firmware (BIOS) and by the firmware to provide parameters for each one of
the supported codes.

This interface should be used by all TPM devices on x86 and can be
added by calling tpm_ppi_init().

Note: bios_linker cannot be used to allocate the PPI memory region,
since the reserved memory should stay stable across reboots, and might
be needed before the ACPI tables are installed.

Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
---
 hw/tpm/tpm_ppi.h      | 26 ++++++++++++++++++++++++++
 include/hw/acpi/tpm.h |  6 ++++++
 hw/tpm/tpm_crb.c      |  8 ++++++++
 hw/tpm/tpm_ppi.c      | 31 +++++++++++++++++++++++++++++++
 hw/tpm/tpm_tis.c      |  8 ++++++++
 hw/tpm/Makefile.objs  |  1 +
 6 files changed, 80 insertions(+)
 create mode 100644 hw/tpm/tpm_ppi.h
 create mode 100644 hw/tpm/tpm_ppi.c

Comments

Marc-André Lureau Aug. 31, 2018, 11:28 p.m. UTC | #1
Hi

On Fri, Aug 31, 2018 at 7:32 PM Marc-André Lureau
<marcandre.lureau@redhat.com> wrote:
>
> From: Stefan Berger <stefanb@linux.vnet.ibm.com>
>
> Implement a virtual memory device for the TPM Physical Presence interface.
> The memory is located at 0xFED45000 and used by ACPI to send messages to the
> firmware (BIOS) and by the firmware to provide parameters for each one of
> the supported codes.
>
> This interface should be used by all TPM devices on x86 and can be
> added by calling tpm_ppi_init_io().
>
> Note: bios_linker cannot be used to allocate the PPI memory region,
> since the reserved memory should stay stable across reboots, and might
> be needed before the ACPI tables are installed.
>
> Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
> Reviewed-by: Igor Mammedov <imammedo@redhat.com>
> ---
>  hw/tpm/tpm_ppi.h      | 26 ++++++++++++++++++++++++++
>  include/hw/acpi/tpm.h |  6 ++++++
>  hw/tpm/tpm_crb.c      |  8 ++++++++
>  hw/tpm/tpm_ppi.c      | 31 +++++++++++++++++++++++++++++++
>  hw/tpm/tpm_tis.c      |  8 ++++++++
>  hw/tpm/Makefile.objs  |  1 +
>  6 files changed, 80 insertions(+)
>  create mode 100644 hw/tpm/tpm_ppi.h
>  create mode 100644 hw/tpm/tpm_ppi.c
>
> diff --git a/hw/tpm/tpm_ppi.h b/hw/tpm/tpm_ppi.h
> new file mode 100644
> index 0000000000..f6458bf87e
> --- /dev/null
> +++ b/hw/tpm/tpm_ppi.h
> @@ -0,0 +1,26 @@
> +/*
> + * TPM Physical Presence Interface
> + *
> + * Copyright (C) 2018 IBM Corporation
> + *
> + * Authors:
> + *  Stefan Berger    <stefanb@us.ibm.com>
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2 or later.
> + * See the COPYING file in the top-level directory.
> + */
> +#ifndef TPM_TPM_PPI_H
> +#define TPM_TPM_PPI_H
> +
> +#include "hw/acpi/tpm.h"
> +#include "exec/address-spaces.h"
> +
> +typedef struct TPMPPI {
> +    MemoryRegion ram;
> +    uint8_t buf[TPM_PPI_ADDR_SIZE];
> +} TPMPPI;
> +
> +bool tpm_ppi_init(TPMPPI *tpmppi, struct MemoryRegion *m,
> +                  hwaddr addr, Object *obj, Error **errp);
> +
> +#endif /* TPM_TPM_PPI_H */
> diff --git a/include/hw/acpi/tpm.h b/include/hw/acpi/tpm.h
> index 3580ffd50c..b8796df916 100644
> --- a/include/hw/acpi/tpm.h
> +++ b/include/hw/acpi/tpm.h
> @@ -188,4 +188,10 @@ REG32(CRB_DATA_BUFFER, 0x80)
>  #define TPM2_START_METHOD_MMIO      6
>  #define TPM2_START_METHOD_CRB       7
>
> +/*
> + * Physical Presence Interface
> + */
> +#define TPM_PPI_ADDR_SIZE           0x400
> +#define TPM_PPI_ADDR_BASE           0xFED45000
> +
>  #endif /* HW_ACPI_TPM_H */
> diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c
> index d5b0ac5920..b243222fd6 100644
> --- a/hw/tpm/tpm_crb.c
> +++ b/hw/tpm/tpm_crb.c
> @@ -29,6 +29,7 @@
>  #include "sysemu/reset.h"
>  #include "tpm_int.h"
>  #include "tpm_util.h"
> +#include "tpm_ppi.h"
>  #include "trace.h"
>
>  typedef struct CRBState {
> @@ -43,6 +44,7 @@ typedef struct CRBState {
>      size_t be_buffer_size;
>
>      bool ppi_enabled;
> +    TPMPPI ppi;
>  } CRBState;
>
>  #define CRB(obj) OBJECT_CHECK(CRBState, (obj), TYPE_TPM_CRB)
> @@ -294,6 +296,12 @@ static void tpm_crb_realize(DeviceState *dev, Error **errp)
>      memory_region_add_subregion(get_system_memory(),
>          TPM_CRB_ADDR_BASE + sizeof(s->regs), &s->cmdmem);
>
> +    if (s->ppi_enabled &&
> +        !tpm_ppi_init(&s->ppi, get_system_memory(),
> +                      TPM_PPI_ADDR_BASE, OBJECT(s), errp)) {
> +        return;
> +    }
> +
>      qemu_register_reset(tpm_crb_reset, dev);
>  }
>
> diff --git a/hw/tpm/tpm_ppi.c b/hw/tpm/tpm_ppi.c
> new file mode 100644
> index 0000000000..8b46b9dd4b
> --- /dev/null
> +++ b/hw/tpm/tpm_ppi.c
> @@ -0,0 +1,31 @@
> +/*
> + * tpm_ppi.c - TPM Physical Presence Interface
> + *
> + * Copyright (C) 2018 IBM Corporation
> + *
> + * Authors:
> + *  Stefan Berger <stefanb@us.ibm.com>
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2 or later.
> + * See the COPYING file in the top-level directory.
> + *
> + */
> +
> +#include "qemu/osdep.h"
> +
> +#include "qapi/error.h"
> +#include "cpu.h"
> +#include "sysemu/memory_mapping.h"
> +#include "migration/vmstate.h"
> +#include "tpm_ppi.h"
> +
> +bool tpm_ppi_init(TPMPPI *tpmppi, struct MemoryRegion *m,
> +                  hwaddr addr, Object *obj, Error **errp)
> +{
> +    memory_region_init_ram_device_ptr(&tpmppi->ram, obj, "tpm-ppi",
> +                                      TPM_PPI_ADDR_SIZE, tpmppi->buf);

There is a (new) issue with the PPI ram region:

READ of size 32 at 0x61d000090480 thread T6
    #0 0x5622bd8de0f4 in buffer_zero_avx2
/home/elmarco/src/qq/util/bufferiszero.c:169
    #1 0x5622bd8de899 in select_accel_fn
/home/elmarco/src/qq/util/bufferiszero.c:282
    #2 0x5622bd8de8f1 in buffer_is_zero
/home/elmarco/src/qq/util/bufferiszero.c:309
    #3 0x5622bc209f94 in is_zero_range /home/elmarco/src/qq/migration/ram.c:82
    #4 0x5622bc21938d in save_zero_page_to_file
/home/elmarco/src/qq/migration/ram.c:1694
    #5 0x5622bc219452 in save_zero_page
/home/elmarco/src/qq/migration/ram.c:1713
    #6 0x5622bc21db67 in ram_save_target_page
/home/elmarco/src/qq/migration/ram.c:2289
    #7 0x5622bc21e13e in ram_save_host_page
/home/elmarco/src/qq/migration/ram.c:2351
    #8 0x5622bc21ea3a in ram_find_and_save_block
/home/elmarco/src/qq/migration/ram.c:2413
    #9 0x5622bc223b5d in ram_save_iterate
/home/elmarco/src/qq/migration/ram.c:3193
    #10 0x5622bd16f544 in qemu_savevm_state_iterate
/home/elmarco/src/qq/migration/savevm.c:1103
    #11 0x5622bd157e75 in migration_iteration_run
/home/elmarco/src/qq/migration/migration.c:2897
    #12 0x5622bd15892e in migration_thread
/home/elmarco/src/qq/migration/migration.c:3018
    #13 0x5622bd902f31 in qemu_thread_start
/home/elmarco/src/qq/util/qemu-thread-posix.c:504
    #14 0x7f42f0ef4593 in start_thread (/lib64/libpthread.so.0+0x7593)
    #15 0x7f42f0c280de in clone (/lib64/libc.so.6+0xfa0de)
0x61d000090490 is located 0 bytes to the right of 2064-byte region
[0x61d00008fc80,0x61d000090490)

migration code is assuming RAM is multiple of TARGET_PAGE_SIZE.

Should the migration code be fixed, or should the TPM code allocate
ram differently?

In any case, I think the migration code should either be fixed or have
an assert.


> +    vmstate_register_ram(&tpmppi->ram, DEVICE(obj));
> +
> +    memory_region_add_subregion(m, addr, &tpmppi->ram);
> +    return true;
> +}
> diff --git a/hw/tpm/tpm_tis.c b/hw/tpm/tpm_tis.c
> index d9ddf9b723..70432ffe8b 100644
> --- a/hw/tpm/tpm_tis.c
> +++ b/hw/tpm/tpm_tis.c
> @@ -31,6 +31,7 @@
>  #include "sysemu/tpm_backend.h"
>  #include "tpm_int.h"
>  #include "tpm_util.h"
> +#include "tpm_ppi.h"
>  #include "trace.h"
>
>  #define TPM_TIS_NUM_LOCALITIES      5     /* per spec */
> @@ -83,6 +84,7 @@ typedef struct TPMState {
>      size_t be_buffer_size;
>
>      bool ppi_enabled;
> +    TPMPPI ppi;
>  } TPMState;
>
>  #define TPM(obj) OBJECT_CHECK(TPMState, (obj), TYPE_TPM_TIS)
> @@ -979,6 +981,12 @@ static void tpm_tis_realizefn(DeviceState *dev, Error **errp)
>
>      memory_region_add_subregion(isa_address_space(ISA_DEVICE(dev)),
>                                  TPM_TIS_ADDR_BASE, &s->mmio);
> +
> +    if (s->ppi_enabled &&
> +        !tpm_ppi_init(&s->ppi, isa_address_space(ISA_DEVICE(dev)),
> +                      TPM_PPI_ADDR_BASE, OBJECT(s), errp)) {
> +        return;
> +    }
>  }
>
>  static void tpm_tis_initfn(Object *obj)
> diff --git a/hw/tpm/Makefile.objs b/hw/tpm/Makefile.objs
> index 1dc9f8bf2c..700c878622 100644
> --- a/hw/tpm/Makefile.objs
> +++ b/hw/tpm/Makefile.objs
> @@ -1,4 +1,5 @@
>  common-obj-y += tpm_util.o
> +obj-y += tpm_ppi.o
>  common-obj-$(CONFIG_TPM_TIS) += tpm_tis.o
>  common-obj-$(CONFIG_TPM_CRB) += tpm_crb.o
>  common-obj-$(CONFIG_TPM_PASSTHROUGH) += tpm_passthrough.o
> --
> 2.19.0.rc1
>
>
Juan Quintela Sept. 3, 2018, 9:48 p.m. UTC | #2
Marc-André Lureau <marcandre.lureau@gmail.com> wrote:
> Hi
>
> On Fri, Aug 31, 2018 at 7:32 PM Marc-André Lureau
> <marcandre.lureau@redhat.com> wrote:
>>
>> From: Stefan Berger <stefanb@linux.vnet.ibm.com>
>>
>> Implement a virtual memory device for the TPM Physical Presence interface.
>> The memory is located at 0xFED45000 and used by ACPI to send messages to the
>> firmware (BIOS) and by the firmware to provide parameters for each one of
>> the supported codes.
>>
>> This interface should be used by all TPM devices on x86 and can be
>> added by calling tpm_ppi_init_io().
>>
>> Note: bios_linker cannot be used to allocate the PPI memory region,
>> since the reserved memory should stay stable across reboots, and might
>> be needed before the ACPI tables are installed.
>>
>> Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
>> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
>> Reviewed-by: Igor Mammedov <imammedo@redhat.com>


....

>> + */
>> +#define TPM_PPI_ADDR_SIZE           0x400
>> +#define TPM_PPI_ADDR_BASE           0xFED45000

> There is a (new) issue with the PPI ram region:
>
> READ of size 32 at 0x61d000090480 thread T6
>     #0 0x5622bd8de0f4 in buffer_zero_avx2
> /home/elmarco/src/qq/util/bufferiszero.c:169
>     #1 0x5622bd8de899 in select_accel_fn
> /home/elmarco/src/qq/util/bufferiszero.c:282
>     #2 0x5622bd8de8f1 in buffer_is_zero
> /home/elmarco/src/qq/util/bufferiszero.c:309
>     #3 0x5622bc209f94 in is_zero_range /home/elmarco/src/qq/migration/ram.c:82
>     #4 0x5622bc21938d in save_zero_page_to_file
> /home/elmarco/src/qq/migration/ram.c:1694
>     #5 0x5622bc219452 in save_zero_page
> /home/elmarco/src/qq/migration/ram.c:1713
>     #6 0x5622bc21db67 in ram_save_target_page
> /home/elmarco/src/qq/migration/ram.c:2289
>     #7 0x5622bc21e13e in ram_save_host_page
> /home/elmarco/src/qq/migration/ram.c:2351
>     #8 0x5622bc21ea3a in ram_find_and_save_block
> /home/elmarco/src/qq/migration/ram.c:2413
>     #9 0x5622bc223b5d in ram_save_iterate
> /home/elmarco/src/qq/migration/ram.c:3193
>     #10 0x5622bd16f544 in qemu_savevm_state_iterate
> /home/elmarco/src/qq/migration/savevm.c:1103
>     #11 0x5622bd157e75 in migration_iteration_run
> /home/elmarco/src/qq/migration/migration.c:2897
>     #12 0x5622bd15892e in migration_thread
> /home/elmarco/src/qq/migration/migration.c:3018
>     #13 0x5622bd902f31 in qemu_thread_start
> /home/elmarco/src/qq/util/qemu-thread-posix.c:504
>     #14 0x7f42f0ef4593 in start_thread (/lib64/libpthread.so.0+0x7593)
>     #15 0x7f42f0c280de in clone (/lib64/libc.so.6+0xfa0de)
> 0x61d000090490 is located 0 bytes to the right of 2064-byte region
> [0x61d00008fc80,0x61d000090490)
>
> migration code is assuming RAM is multiple of TARGET_PAGE_SIZE.

Physical RAM is multiple of TARGET_PAGE_SIZE O:-)
That assumption is held in too many places, if you can change the size to
be a multiple of page size, that would be great.

> Should the migration code be fixed, or should the TPM code allocate
> ram differently?

Migration people (i.e. me) would prefer that you fix the TPM
allocation.  Or you can decide that this is *not* RAM.  The unit of
transfer for migrate ram is ram pages, a.k.a. TARGET_PAGE_SIZE.

> In all case, I think the migration code should either be fixed or have
> an assert.

An assert for sure.

Fixed .... Do we have real devices that put ram regions that are smaller
than page size?

Later, Juan.
Igor Mammedov Sept. 4, 2018, 6:51 a.m. UTC | #3
On Mon, 03 Sep 2018 23:48:15 +0200
Juan Quintela <quintela@redhat.com> wrote:

> Marc-André Lureau <marcandre.lureau@gmail.com> wrote:
> > Hi
> >
> > On Fri, Aug 31, 2018 at 7:32 PM Marc-André Lureau
> > <marcandre.lureau@redhat.com> wrote:  
> >>
> >> From: Stefan Berger <stefanb@linux.vnet.ibm.com>
> >>
> >> Implement a virtual memory device for the TPM Physical Presence interface.
> >> The memory is located at 0xFED45000 and used by ACPI to send messages to the
> >> firmware (BIOS) and by the firmware to provide parameters for each one of
> >> the supported codes.
> >>
> >> This interface should be used by all TPM devices on x86 and can be
> >> added by calling tpm_ppi_init_io().
> >>
> >> Note: bios_linker cannot be used to allocate the PPI memory region,
> >> since the reserved memory should stay stable across reboots, and might
> >> be needed before the ACPI tables are installed.
> >>
> >> Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
> >> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
> >> Reviewed-by: Igor Mammedov <imammedo@redhat.com>  
> 
> 
> ....
> 
> >> + */
> >> +#define TPM_PPI_ADDR_SIZE           0x400
> >> +#define TPM_PPI_ADDR_BASE           0xFED45000  
> 
> > There is a (new) issue with the PPI ram region:
> >
> > READ of size 32 at 0x61d000090480 thread T6
> >     #0 0x5622bd8de0f4 in buffer_zero_avx2
> > /home/elmarco/src/qq/util/bufferiszero.c:169
> >     #1 0x5622bd8de899 in select_accel_fn
> > /home/elmarco/src/qq/util/bufferiszero.c:282
> >     #2 0x5622bd8de8f1 in buffer_is_zero
> > /home/elmarco/src/qq/util/bufferiszero.c:309
> >     #3 0x5622bc209f94 in is_zero_range /home/elmarco/src/qq/migration/ram.c:82
> >     #4 0x5622bc21938d in save_zero_page_to_file
> > /home/elmarco/src/qq/migration/ram.c:1694
> >     #5 0x5622bc219452 in save_zero_page
> > /home/elmarco/src/qq/migration/ram.c:1713
> >     #6 0x5622bc21db67 in ram_save_target_page
> > /home/elmarco/src/qq/migration/ram.c:2289
> >     #7 0x5622bc21e13e in ram_save_host_page
> > /home/elmarco/src/qq/migration/ram.c:2351
> >     #8 0x5622bc21ea3a in ram_find_and_save_block
> > /home/elmarco/src/qq/migration/ram.c:2413
> >     #9 0x5622bc223b5d in ram_save_iterate
> > /home/elmarco/src/qq/migration/ram.c:3193
> >     #10 0x5622bd16f544 in qemu_savevm_state_iterate
> > /home/elmarco/src/qq/migration/savevm.c:1103
> >     #11 0x5622bd157e75 in migration_iteration_run
> > /home/elmarco/src/qq/migration/migration.c:2897
> >     #12 0x5622bd15892e in migration_thread
> > /home/elmarco/src/qq/migration/migration.c:3018
> >     #13 0x5622bd902f31 in qemu_thread_start
> > /home/elmarco/src/qq/util/qemu-thread-posix.c:504
> >     #14 0x7f42f0ef4593 in start_thread (/lib64/libpthread.so.0+0x7593)
> >     #15 0x7f42f0c280de in clone (/lib64/libc.so.6+0xfa0de)
> > 0x61d000090490 is located 0 bytes to the right of 2064-byte region
> > [0x61d00008fc80,0x61d000090490)
> >
> > migration code is assuming RAM is multiple of TARGET_PAGE_SIZE.  
> 
> Physical RAM is multiple of TARGET_PAGE_SIZE O:-)
> That assumtion is held in too many places, if you can change the size to
> be multiple of page size, that would be greate.
> 
> > Should the migration code be fixed, or should the TPM code allocate
> > ram differently?  
> 
> Migration people (i.e. me) would preffer that you fix the TPM
> allocation.  Or you can decide that this is *not* RAM.  The unit of
> transfer for migrate ram is ram pages, a.k.a. TARGET_PAGE_SIZE.
I'd be loath to waste a whole page in an already cramped area.
Can we implement it as an mmio region? (it will add some boilerplate code for read/write
handlers/migration and cause vmexits on every read/write, but it's not a hot path so we might not care)
 
> > In all case, I think the migration code should either be fixed or have
> > an assert.  
> 
> An assert for sure.
> 
> Fixed .... Do we have real devices that put ram regions that are smaller
> than page size?
> 
> Later, Juan.
Marc-André Lureau Sept. 5, 2018, 8:21 a.m. UTC | #4
Hi

On Tue, Sep 4, 2018 at 10:51 AM Igor Mammedov <imammedo@redhat.com> wrote:
>
> On Mon, 03 Sep 2018 23:48:15 +0200
> Juan Quintela <quintela@redhat.com> wrote:
>
> > Marc-André Lureau <marcandre.lureau@gmail.com> wrote:
> > > Hi
> > >
> > > On Fri, Aug 31, 2018 at 7:32 PM Marc-André Lureau
> > > <marcandre.lureau@redhat.com> wrote:
> > >>
> > >> From: Stefan Berger <stefanb@linux.vnet.ibm.com>
> > >>
> > >> Implement a virtual memory device for the TPM Physical Presence interface.
> > >> The memory is located at 0xFED45000 and used by ACPI to send messages to the
> > >> firmware (BIOS) and by the firmware to provide parameters for each one of
> > >> the supported codes.
> > >>
> > >> This interface should be used by all TPM devices on x86 and can be
> > >> added by calling tpm_ppi_init_io().
> > >>
> > >> Note: bios_linker cannot be used to allocate the PPI memory region,
> > >> since the reserved memory should stay stable across reboots, and might
> > >> be needed before the ACPI tables are installed.
> > >>
> > >> Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
> > >> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
> > >> Reviewed-by: Igor Mammedov <imammedo@redhat.com>
> >
> >
> > ....
> >
> > >> + */
> > >> +#define TPM_PPI_ADDR_SIZE           0x400
> > >> +#define TPM_PPI_ADDR_BASE           0xFED45000
> >
> > > There is a (new) issue with the PPI ram region:
> > >
> > > READ of size 32 at 0x61d000090480 thread T6
> > >     #0 0x5622bd8de0f4 in buffer_zero_avx2
> > > /home/elmarco/src/qq/util/bufferiszero.c:169
> > >     #1 0x5622bd8de899 in select_accel_fn
> > > /home/elmarco/src/qq/util/bufferiszero.c:282
> > >     #2 0x5622bd8de8f1 in buffer_is_zero
> > > /home/elmarco/src/qq/util/bufferiszero.c:309
> > >     #3 0x5622bc209f94 in is_zero_range /home/elmarco/src/qq/migration/ram.c:82
> > >     #4 0x5622bc21938d in save_zero_page_to_file
> > > /home/elmarco/src/qq/migration/ram.c:1694
> > >     #5 0x5622bc219452 in save_zero_page
> > > /home/elmarco/src/qq/migration/ram.c:1713
> > >     #6 0x5622bc21db67 in ram_save_target_page
> > > /home/elmarco/src/qq/migration/ram.c:2289
> > >     #7 0x5622bc21e13e in ram_save_host_page
> > > /home/elmarco/src/qq/migration/ram.c:2351
> > >     #8 0x5622bc21ea3a in ram_find_and_save_block
> > > /home/elmarco/src/qq/migration/ram.c:2413
> > >     #9 0x5622bc223b5d in ram_save_iterate
> > > /home/elmarco/src/qq/migration/ram.c:3193
> > >     #10 0x5622bd16f544 in qemu_savevm_state_iterate
> > > /home/elmarco/src/qq/migration/savevm.c:1103
> > >     #11 0x5622bd157e75 in migration_iteration_run
> > > /home/elmarco/src/qq/migration/migration.c:2897
> > >     #12 0x5622bd15892e in migration_thread
> > > /home/elmarco/src/qq/migration/migration.c:3018
> > >     #13 0x5622bd902f31 in qemu_thread_start
> > > /home/elmarco/src/qq/util/qemu-thread-posix.c:504
> > >     #14 0x7f42f0ef4593 in start_thread (/lib64/libpthread.so.0+0x7593)
> > >     #15 0x7f42f0c280de in clone (/lib64/libc.so.6+0xfa0de)
> > > 0x61d000090490 is located 0 bytes to the right of 2064-byte region
> > > [0x61d00008fc80,0x61d000090490)
> > >
> > > migration code is assuming RAM is multiple of TARGET_PAGE_SIZE.
> >
> > Physical RAM is multiple of TARGET_PAGE_SIZE O:-)
> > That assumtion is held in too many places, if you can change the size to
> > be multiple of page size, that would be greate.

It needs the RAMBlock to be a multiple of TARGET_PAGE_SIZE, but the
memory region (in guest and qemu MemoryRegion) can be less. This is
already the case with, for example, "/rom@etc/acpi/rsdp", which is 36 bytes.

I suppose 2 different MemoryRegions (backed by different RAMBlock)
that would be placed on the same page (for ex, at offset 0x0, size
0x10 and offset 0x10, size 0x100) would work fine in general and with
migration?

> >
> > > Should the migration code be fixed, or should the TPM code allocate
> > > ram differently?
> >
> > Migration people (i.e. me) would preffer that you fix the TPM
> > allocation.  Or you can decide that this is *not* RAM.  The unit of
> > transfer for migrate ram is ram pages, a.k.a. TARGET_PAGE_SIZE.
> I'd loath to waste whole page in already cramped area.
> Can we implement it as mmio region? (it will add some bolerplate code for read/write
> handlers/migration and cause vmexits on every read/write but it's not a hot path so we might not care)
>


I have done some small adjustments to allow
memory_region_init_ram_device_ptr() to work with a MemoryRegions size
!= backed RAMBlock, and it seems to work fine (and doesn't need to
allocate more space of guest memory range, or fall back to mmio)

thanks

> > > In all case, I think the migration code should either be fixed or have
> > > an assert.
> >
> > An assert for sure.
> >
> > Fixed .... Do we have real devices that put ram regions that are smaller
> > than page size?
> >
> > Later, Juan.
>
Juan Quintela Sept. 5, 2018, 8:36 a.m. UTC | #5
Marc-André Lureau <marcandre.lureau@gmail.com> wrote:
Hi

>> > Physical RAM is multiple of TARGET_PAGE_SIZE O:-)
>> > That assumtion is held in too many places, if you can change the size to
>> > be multiple of page size, that would be greate.
>
> It needs the RAMBlock to be a multiple of TARGET_PAGE_SIZE, but the
> memory region (in guest and qemu MemoryRegion) can be less. This is
> already the case with for ex, "/rom@etc/acpi/rsdp", which is 36 bytes.
>
> I suppose 2 different MemoryRegions (backed by different RAMBlock)
> that would be placed on the same page (for ex, at offset 0x0, size
> 0x10 and offset 0x10, size 0x100) would work fine in general and with
> migration?

Honestly I don't know.  But if the ramblock is correct, I think that we
have no problem at all.  Migration code just does:

for each ramblock
  for each page in ramblock

(Yes, I know,  I have oversimplified)

>> > > Should the migration code be fixed, or should the TPM code allocate
>> > > ram differently?
>> >
>> > Migration people (i.e. me) would preffer that you fix the TPM
>> > allocation.  Or you can decide that this is *not* RAM.  The unit of
>> > transfer for migrate ram is ram pages, a.k.a. TARGET_PAGE_SIZE.
>> I'd loath to waste whole page in already cramped area.
>> Can we implement it as mmio region? (it will add some bolerplate
>> code for read/write
>> handlers/migration and cause vmexits on every read/write but it's
>> not a hot path so we might not care)
>>
>
>
> I have done some small adjustments to allow
> memory_region_init_ram_device_ptr() to work with a MemoryRegions size
> != backed RAMBlock, and it seems to work fine (and doesn't need to
> allocate more space of guest memory range, or fall back to mmio)

If the ramblock are full pages, everything should be ok.

Later, Juan.
Igor Mammedov Sept. 5, 2018, 9:17 a.m. UTC | #6
On Wed, 5 Sep 2018 12:21:39 +0400
Marc-André Lureau <marcandre.lureau@gmail.com> wrote:

> Hi
> 
> On Tue, Sep 4, 2018 at 10:51 AM Igor Mammedov <imammedo@redhat.com> wrote:
> >
> > On Mon, 03 Sep 2018 23:48:15 +0200
> > Juan Quintela <quintela@redhat.com> wrote:
> >
> > > Marc-André Lureau <marcandre.lureau@gmail.com> wrote:
> > > > Hi
> > > >
> > > > On Fri, Aug 31, 2018 at 7:32 PM Marc-André Lureau
> > > > <marcandre.lureau@redhat.com> wrote:
> > > >>
> > > >> From: Stefan Berger <stefanb@linux.vnet.ibm.com>
> > > >>
> > > >> Implement a virtual memory device for the TPM Physical Presence interface.
> > > >> The memory is located at 0xFED45000 and used by ACPI to send messages to the
> > > >> firmware (BIOS) and by the firmware to provide parameters for each one of
> > > >> the supported codes.
> > > >>
> > > >> This interface should be used by all TPM devices on x86 and can be
> > > >> added by calling tpm_ppi_init_io().
> > > >>
> > > >> Note: bios_linker cannot be used to allocate the PPI memory region,
> > > >> since the reserved memory should stay stable across reboots, and might
> > > >> be needed before the ACPI tables are installed.
> > > >>
> > > >> Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
> > > >> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
> > > >> Reviewed-by: Igor Mammedov <imammedo@redhat.com>
> > >
> > >
> > > ....
> > >
> > > >> + */
> > > >> +#define TPM_PPI_ADDR_SIZE           0x400
> > > >> +#define TPM_PPI_ADDR_BASE           0xFED45000
> > >
> > > > There is a (new) issue with the PPI ram region:
> > > >
> > > > READ of size 32 at 0x61d000090480 thread T6
> > > >     #0 0x5622bd8de0f4 in buffer_zero_avx2
> > > > /home/elmarco/src/qq/util/bufferiszero.c:169
> > > >     #1 0x5622bd8de899 in select_accel_fn
> > > > /home/elmarco/src/qq/util/bufferiszero.c:282
> > > >     #2 0x5622bd8de8f1 in buffer_is_zero
> > > > /home/elmarco/src/qq/util/bufferiszero.c:309
> > > >     #3 0x5622bc209f94 in is_zero_range /home/elmarco/src/qq/migration/ram.c:82
> > > >     #4 0x5622bc21938d in save_zero_page_to_file
> > > > /home/elmarco/src/qq/migration/ram.c:1694
> > > >     #5 0x5622bc219452 in save_zero_page
> > > > /home/elmarco/src/qq/migration/ram.c:1713
> > > >     #6 0x5622bc21db67 in ram_save_target_page
> > > > /home/elmarco/src/qq/migration/ram.c:2289
> > > >     #7 0x5622bc21e13e in ram_save_host_page
> > > > /home/elmarco/src/qq/migration/ram.c:2351
> > > >     #8 0x5622bc21ea3a in ram_find_and_save_block
> > > > /home/elmarco/src/qq/migration/ram.c:2413
> > > >     #9 0x5622bc223b5d in ram_save_iterate
> > > > /home/elmarco/src/qq/migration/ram.c:3193
> > > >     #10 0x5622bd16f544 in qemu_savevm_state_iterate
> > > > /home/elmarco/src/qq/migration/savevm.c:1103
> > > >     #11 0x5622bd157e75 in migration_iteration_run
> > > > /home/elmarco/src/qq/migration/migration.c:2897
> > > >     #12 0x5622bd15892e in migration_thread
> > > > /home/elmarco/src/qq/migration/migration.c:3018
> > > >     #13 0x5622bd902f31 in qemu_thread_start
> > > > /home/elmarco/src/qq/util/qemu-thread-posix.c:504
> > > >     #14 0x7f42f0ef4593 in start_thread (/lib64/libpthread.so.0+0x7593)
> > > >     #15 0x7f42f0c280de in clone (/lib64/libc.so.6+0xfa0de)
> > > > 0x61d000090490 is located 0 bytes to the right of 2064-byte region
> > > > [0x61d00008fc80,0x61d000090490)
> > > >
> > > > migration code is assuming RAM is multiple of TARGET_PAGE_SIZE.
> > >
> > > Physical RAM is multiple of TARGET_PAGE_SIZE O:-)
> > > That assumtion is held in too many places, if you can change the size to
> > > be multiple of page size, that would be greate.
> 
> It needs the RAMBlock to be a multiple of TARGET_PAGE_SIZE, but the
> memory region (in guest and qemu MemoryRegion) can be less. This is
> already the case with for ex, "/rom@etc/acpi/rsdp", which is 36 bytes.
RSDP is different in sense that it's not mapped to GPA


> I suppose 2 different MemoryRegions (backed by different RAMBlock)
> that would be placed on the same page (for ex, at offset 0x0, size
> 0x10 and offset 0x10, size 0x100) would work fine in general and with
> migration?
memory_region_add_subregion() maps whole region only,
theoretically you can make an alias region and map that but it would
be rather ugly.

> 
> > >
> > > > Should the migration code be fixed, or should the TPM code allocate
> > > > ram differently?
> > >
> > > Migration people (i.e. me) would preffer that you fix the TPM
> > > allocation.  Or you can decide that this is *not* RAM.  The unit of
> > > transfer for migrate ram is ram pages, a.k.a. TARGET_PAGE_SIZE.
> > I'd loath to waste whole page in already cramped area.
> > Can we implement it as mmio region? (it will add some bolerplate code for read/write
> > handlers/migration and cause vmexits on every read/write but it's not a hot path so we might not care)
> >
> 
> 
> I have done some small adjustments to allow
> memory_region_init_ram_device_ptr() to work with a MemoryRegions size
> != backed RAMBlock, and it seems to work fine (and doesn't need to
> allocate more space of guest memory range, or fall back to mmio)
> 
> thanks
> 
> > > > In all case, I think the migration code should either be fixed or have
> > > > an assert.
> > >
> > > An assert for sure.
> > >
> > > Fixed .... Do we have real devices that put ram regions that are smaller
> > > than page size?
> > >
> > > Later, Juan.
> >
> 
>
diff mbox series

Patch

diff --git a/hw/tpm/tpm_ppi.h b/hw/tpm/tpm_ppi.h
new file mode 100644
index 0000000000..f6458bf87e
--- /dev/null
+++ b/hw/tpm/tpm_ppi.h
@@ -0,0 +1,26 @@ 
+/*
+ * TPM Physical Presence Interface
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * Authors:
+ *  Stefan Berger    <stefanb@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef TPM_TPM_PPI_H
+#define TPM_TPM_PPI_H
+
+#include "hw/acpi/tpm.h"
+#include "exec/address-spaces.h"
+
+typedef struct TPMPPI {
+    MemoryRegion ram;
+    uint8_t buf[TPM_PPI_ADDR_SIZE];
+} TPMPPI;
+
+bool tpm_ppi_init(TPMPPI *tpmppi, struct MemoryRegion *m,
+                  hwaddr addr, Object *obj, Error **errp);
+
+#endif /* TPM_TPM_PPI_H */
diff --git a/include/hw/acpi/tpm.h b/include/hw/acpi/tpm.h
index 3580ffd50c..b8796df916 100644
--- a/include/hw/acpi/tpm.h
+++ b/include/hw/acpi/tpm.h
@@ -188,4 +188,10 @@  REG32(CRB_DATA_BUFFER, 0x80)
 #define TPM2_START_METHOD_MMIO      6
 #define TPM2_START_METHOD_CRB       7
 
+/*
+ * Physical Presence Interface
+ */
+#define TPM_PPI_ADDR_SIZE           0x400
+#define TPM_PPI_ADDR_BASE           0xFED45000
+
 #endif /* HW_ACPI_TPM_H */
diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c
index d5b0ac5920..b243222fd6 100644
--- a/hw/tpm/tpm_crb.c
+++ b/hw/tpm/tpm_crb.c
@@ -29,6 +29,7 @@ 
 #include "sysemu/reset.h"
 #include "tpm_int.h"
 #include "tpm_util.h"
+#include "tpm_ppi.h"
 #include "trace.h"
 
 typedef struct CRBState {
@@ -43,6 +44,7 @@  typedef struct CRBState {
     size_t be_buffer_size;
 
     bool ppi_enabled;
+    TPMPPI ppi;
 } CRBState;
 
 #define CRB(obj) OBJECT_CHECK(CRBState, (obj), TYPE_TPM_CRB)
@@ -294,6 +296,12 @@  static void tpm_crb_realize(DeviceState *dev, Error **errp)
     memory_region_add_subregion(get_system_memory(),
         TPM_CRB_ADDR_BASE + sizeof(s->regs), &s->cmdmem);
 
+    if (s->ppi_enabled &&
+        !tpm_ppi_init(&s->ppi, get_system_memory(),
+                      TPM_PPI_ADDR_BASE, OBJECT(s), errp)) {
+        return;
+    }
+
     qemu_register_reset(tpm_crb_reset, dev);
 }
 
diff --git a/hw/tpm/tpm_ppi.c b/hw/tpm/tpm_ppi.c
new file mode 100644
index 0000000000..8b46b9dd4b
--- /dev/null
+++ b/hw/tpm/tpm_ppi.c
@@ -0,0 +1,31 @@ 
+/*
+ * tpm_ppi.c - TPM Physical Presence Interface
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * Authors:
+ *  Stefan Berger <stefanb@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+
+#include "qapi/error.h"
+#include "cpu.h"
+#include "sysemu/memory_mapping.h"
+#include "migration/vmstate.h"
+#include "tpm_ppi.h"
+
+bool tpm_ppi_init(TPMPPI *tpmppi, struct MemoryRegion *m,
+                  hwaddr addr, Object *obj, Error **errp)
+{
+    memory_region_init_ram_device_ptr(&tpmppi->ram, obj, "tpm-ppi",
+                                      TPM_PPI_ADDR_SIZE, tpmppi->buf);
+    vmstate_register_ram(&tpmppi->ram, DEVICE(obj));
+
+    memory_region_add_subregion(m, addr, &tpmppi->ram);
+    return true;
+}
diff --git a/hw/tpm/tpm_tis.c b/hw/tpm/tpm_tis.c
index d9ddf9b723..70432ffe8b 100644
--- a/hw/tpm/tpm_tis.c
+++ b/hw/tpm/tpm_tis.c
@@ -31,6 +31,7 @@ 
 #include "sysemu/tpm_backend.h"
 #include "tpm_int.h"
 #include "tpm_util.h"
+#include "tpm_ppi.h"
 #include "trace.h"
 
 #define TPM_TIS_NUM_LOCALITIES      5     /* per spec */
@@ -83,6 +84,7 @@  typedef struct TPMState {
     size_t be_buffer_size;
 
     bool ppi_enabled;
+    TPMPPI ppi;
 } TPMState;
 
 #define TPM(obj) OBJECT_CHECK(TPMState, (obj), TYPE_TPM_TIS)
@@ -979,6 +981,12 @@  static void tpm_tis_realizefn(DeviceState *dev, Error **errp)
 
     memory_region_add_subregion(isa_address_space(ISA_DEVICE(dev)),
                                 TPM_TIS_ADDR_BASE, &s->mmio);
+
+    if (s->ppi_enabled &&
+        !tpm_ppi_init(&s->ppi, isa_address_space(ISA_DEVICE(dev)),
+                      TPM_PPI_ADDR_BASE, OBJECT(s), errp)) {
+        return;
+    }
 }
 
 static void tpm_tis_initfn(Object *obj)
diff --git a/hw/tpm/Makefile.objs b/hw/tpm/Makefile.objs
index 1dc9f8bf2c..700c878622 100644
--- a/hw/tpm/Makefile.objs
+++ b/hw/tpm/Makefile.objs
@@ -1,4 +1,5 @@ 
 common-obj-y += tpm_util.o
+obj-y += tpm_ppi.o
 common-obj-$(CONFIG_TPM_TIS) += tpm_tis.o
 common-obj-$(CONFIG_TPM_CRB) += tpm_crb.o
 common-obj-$(CONFIG_TPM_PASSTHROUGH) += tpm_passthrough.o