
[V14,2/7] Add TPM (frontend) hardware interface (TPM TIS) to Qemu

Message ID 1323870202-25742-3-git-send-email-stefanb@linux.vnet.ibm.com
State New

Commit Message

Stefan Berger Dec. 14, 2011, 1:43 p.m. UTC
This patch adds the main code of the TPM frontend driver, the TPM TIS
interface, to Qemu. The code is largely based on the previous implementation
for Xen but has been significantly extended to meet the standard's
requirements, such as the support for changing of localities and all the
functionality of the available flags.

Communication with the backend (i.e., for Xen or the libtpms-based one)
is cleanly separated through an interface which the backend driver needs
to implement.

The TPM TIS driver's backend was previously chosen in the code added
to arch_init. The frontend holds a pointer to the chosen backend (interface).

Communication with the backend is largely based on signals and conditions.
Whenever the frontend has collected a complete packet, it will signal
the backend, which then starts processing the command. Once the result
has been returned, the backend invokes a callback function
(tpm_tis_receive_cb()).

The one tricky part is support for VM suspend while the TPM is processing
a command. In this case the frontend driver is waiting for the backend
to return the result of the last command before shutting down. It waits
on a condition for a signal from the backend, which is delivered in
tpm_tis_receive_cb().
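
Roughly, the handshake looks like this (the backend side is only a rough
sketch here; its real code lives in the backend patches of this series):

    /* frontend, with s->state_lock held -- see tpm_tis_tpm_send() */
    s->to_tpm_execute = true;
    qemu_cond_signal(&s->to_tpm_cond);

    /* backend worker thread (sketch) */
    qemu_mutex_lock(&s->state_lock);
    while (!s->to_tpm_execute) {
        qemu_cond_wait(&s->to_tpm_cond, &s->state_lock);
    }
    s->to_tpm_execute = false;
    qemu_mutex_unlock(&s->state_lock);
    /* ... process the command, then call tpm_tis_receive_cb(), which
       updates the locality's state and signals s->from_tpm_cond; the
       suspend path waits on that condition while a locality is still
       in TPM_TIS_STATUS_EXECUTION ... */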

Testing the proper functioning of the different flags and localities
cannot be done from user space when running in Linux for example, since
access to the address space of the TPM TIS interface is not possible. Also
the Linux driver itself does not exercise all functionality. So, for
testing there is a fairly extensive test suite as part of the SeaBIOS patches
since from within the BIOS one can have full access to all the TPM's registers.


Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
---
 hw/tpm_tis.c |  807 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 807 insertions(+), 0 deletions(-)
 create mode 100644 hw/tpm_tis.c

Comments

Michael S. Tsirkin Feb. 20, 2012, 8:51 a.m. UTC | #1
On Wed, Dec 14, 2011 at 08:43:17AM -0500, Stefan Berger wrote:
> This patch adds the main code of the TPM frontend driver, the TPM TIS
> interface, to Qemu. The code is largely based on the previous implementation
> for Xen but has been significantly extended to meet the standard's
> requirements, such as the support for changing of localities and all the
> functionality of the available flags.
> 
> Communication with the backend (i.e., for Xen or the libtpms-based one)
> is cleanly separated through an interface which the backend driver needs
> to implement.
> 
> The TPM TIS driver's backend was previously chosen in the code added
> to arch_init. The frontend holds a pointer to the chosen backend (interface).
> 
> Communication with the backend is largely based on signals and conditions.
> Whenever the frontend has collected a complete packet, it will signal
> the backend, which then starts processing the command. Once the result
> has been returned, the backend invokes a callback function
> (tpm_tis_receive_cb()).
> 
> The one tricky part is support for VM suspend while the TPM is processing
> a command. In this case the frontend driver is waiting for the backend
> to return the result of the last command before shutting down. It waits
> on a condition for a signal from the backend, which is delivered in
> tpm_tis_receive_cb().
> 
> Testing the proper functioning of the different flags and localities
> cannot be done from user space when running in Linux for example, since
> access to the address space of the TPM TIS interface is not possible. Also
> the Linux driver itself does not exercise all functionality. So, for
> testing there is a fairly extensive test suite as part of the SeaBIOS patches
> since from within the BIOS one can have full access to all the TPM's registers.
> 
> 
> Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>


looks good overall
some questions on error handling and behaviour with
a malicious guest, below.

> ---
>  hw/tpm_tis.c |  807 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 files changed, 807 insertions(+), 0 deletions(-)
>  create mode 100644 hw/tpm_tis.c
> 
> diff --git a/hw/tpm_tis.c b/hw/tpm_tis.c
> new file mode 100644
> index 0000000..bcbc987
> --- /dev/null
> +++ b/hw/tpm_tis.c
> @@ -0,0 +1,807 @@
> +/*
> + * tpm_tis.c - QEMU's TPM TIS interface emulator
> + *
> + * Copyright (C) 2006,2010,2011 IBM Corporation
> + *
> + * Authors:
> + *  Stefan Berger <stefanb@us.ibm.com>
> + *  David Safford <safford@us.ibm.com>
> + *
> + * Xen 4 support: Andreas Niederl <andreas.niederl@iaik.tugraz.at>
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU General Public License as
> + * published by the Free Software Foundation, version 2 of the
> + * License.
> + *
> + *
> + * Implementation of the TIS interface according to specs found at
> + * http://www.trustedcomputinggroup.org.
> + * In the developers menu choose the PC Client section then find the TIS
> + * specification.
> + */
> +
> +#include "tpm.h"
> +#include "block.h"
> +#include "exec-memory.h"
> +#include "hw/hw.h"
> +#include "hw/pc.h"
> +#include "hw/tpm_tis.h"
> +#include "qemu-error.h"
> +#include "qemu-common.h"
> +
> +/*#define DEBUG_TIS */
> +
> +#ifdef DEBUG_TIS
> +#define dprintf(fmt, ...) \
> +    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
> +#else
> +#define dprintf(fmt, ...) \
> +    do { } while (0)
> +#endif
> +
> +/* whether the STS interrupt is supported */
> +/*#define RAISE_STS_IRQ */
> +
> +/* tis registers */
> +#define TPM_TIS_REG_ACCESS                0x00
> +#define TPM_TIS_REG_INT_ENABLE            0x08
> +#define TPM_TIS_REG_INT_VECTOR            0x0c
> +#define TPM_TIS_REG_INT_STATUS            0x10
> +#define TPM_TIS_REG_INTF_CAPABILITY       0x14
> +#define TPM_TIS_REG_STS                   0x18
> +#define TPM_TIS_REG_DATA_FIFO             0x24
> +#define TPM_TIS_REG_DID_VID               0xf00
> +#define TPM_TIS_REG_RID                   0xf04
> +
> +#define TPM_TIS_STS_VALID                 (1 << 7)
> +#define TPM_TIS_STS_COMMAND_READY         (1 << 6)
> +#define TPM_TIS_STS_TPM_GO                (1 << 5)
> +#define TPM_TIS_STS_DATA_AVAILABLE        (1 << 4)
> +#define TPM_TIS_STS_EXPECT                (1 << 3)
> +#define TPM_TIS_STS_RESPONSE_RETRY        (1 << 1)
> +
> +#define TPM_TIS_ACCESS_TPM_REG_VALID_STS  (1 << 7)
> +#define TPM_TIS_ACCESS_ACTIVE_LOCALITY    (1 << 5)
> +#define TPM_TIS_ACCESS_BEEN_SEIZED        (1 << 4)
> +#define TPM_TIS_ACCESS_SEIZE              (1 << 3)
> +#define TPM_TIS_ACCESS_PENDING_REQUEST    (1 << 2)
> +#define TPM_TIS_ACCESS_REQUEST_USE        (1 << 1)
> +#define TPM_TIS_ACCESS_TPM_ESTABLISHMENT  (1 << 0)
> +
> +#define TPM_TIS_INT_ENABLED               (1 << 31)
> +#define TPM_TIS_INT_DATA_AVAILABLE        (1 << 0)
> +#define TPM_TIS_INT_STS_VALID             (1 << 1)
> +#define TPM_TIS_INT_LOCALITY_CHANGED      (1 << 2)
> +#define TPM_TIS_INT_COMMAND_READY         (1 << 7)
> +
> +#ifndef RAISE_STS_IRQ
> +
> +# define TPM_TIS_INTERRUPTS_SUPPORTED (TPM_TIS_INT_LOCALITY_CHANGED | \
> +                                       TPM_TIS_INT_DATA_AVAILABLE   | \
> +                                       TPM_TIS_INT_COMMAND_READY)
> +
> +#else
> +
> +# define TPM_TIS_INTERRUPTS_SUPPORTED (TPM_TIS_INT_LOCALITY_CHANGED | \
> +                                       TPM_TIS_INT_DATA_AVAILABLE   | \
> +                                       TPM_TIS_INT_STS_VALID | \
> +                                       TPM_TIS_INT_COMMAND_READY)
> +

let's keep these as #define

> +#endif
> +
> +#define TPM_TIS_CAPABILITIES_SUPPORTED   ((1 << 4) | \
> +                                          TPM_TIS_INTERRUPTS_SUPPORTED)
> +
> +#define TPM_TIS_TPM_DID       0x0001
> +#define TPM_TIS_TPM_VID       0x0001
> +#define TPM_TIS_TPM_RID       0x0001
> +
> +#define TPM_TIS_NO_DATA_BYTE  0xff
> +
> +/* utility functions */
> +
> +static uint8_t tpm_tis_locality_from_addr(target_phys_addr_t addr)
> +{
> +    return (uint8_t)((addr >> 12) & 0x7);
> +}
> +

note this can give a number 0..6

> +static uint32_t tpm_tis_get_size_from_buffer(const TPMSizedBuffer *sb)
> +{
> +    return be32_to_cpu(*(uint32_t *)&sb->buffer[2]);
> +}
> +
> +static void tpm_tis_show_buffer(const TPMSizedBuffer *sb, const char *string)
> +{
> +#ifdef DEBUG_TIS
> +    uint32_t len, i;
> +
> +    len = tpm_tis_get_size_from_buffer(sb);
> +    dprintf("tpm_tis: %s length = %d\n", string, len);
> +    for (i = 0; i < len; i++) {
> +        if (i && !(i & 16)) {
> +            dprintf("\n");
> +        }
> +        dprintf("%.2X ", sb->buffer[i]);
> +    }
> +    dprintf("\n");
> +#endif
> +}
> +
> +
> +/*
> + * Send a TPM request.
> + * Call this with the state_lock held so we can sync with the receive
> + * callback.
> + */
> +static void tpm_tis_tpm_send(TPMState *s, uint8_t locty)
> +{
> +    TPMTISState *tis = &s->s.tis;
> +
> +    tpm_tis_show_buffer(&tis->loc[locty].w_buffer, "tpm_tis: To TPM");
> +
> +    s->command_locty = locty;
> +    s->cmd_locty     = &tis->loc[locty];
> +
> +    /* w_offset serves as length indicator for length of data;
> +       it's reset when the response comes back */
> +    tis->loc[locty].status = TPM_TIS_STATUS_EXECUTION;
> +    tis->loc[locty].sts &= ~TPM_TIS_STS_EXPECT;
> +
> +    s->to_tpm_execute = true;
> +    qemu_cond_signal(&s->to_tpm_cond);
> +}
> +
> +/* raise an interrupt if allowed */
> +static void tpm_tis_raise_irq(TPMState *s, uint8_t locty, uint32_t irqmask)
> +{
> +    TPMTISState *tis = &s->s.tis;
> +
> +    if (!TPM_TIS_IS_VALID_LOCTY(locty)) {
> +        return;
> +    }
> +
> +    if ((tis->loc[locty].inte & TPM_TIS_INT_ENABLED) &&
> +        (tis->loc[locty].inte & irqmask)) {
> +        dprintf("tpm_tis: Raising IRQ for flag %08x\n", irqmask);
> +        qemu_irq_raise(s->s.tis.irq);
> +        tis->loc[locty].ints |= irqmask;
> +    }
> +}
> +
> +static uint32_t tpm_tis_check_request_use_except(TPMState *s, uint8_t locty)
> +{
> +    uint8_t l;
> +
> +    for (l = 0; l < TPM_TIS_NUM_LOCALITIES; l++) {
> +        if (l == locty) {
> +            continue;
> +        }
> +        if ((s->s.tis.loc[l].access & TPM_TIS_ACCESS_REQUEST_USE)) {
> +            return 1;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +static void tpm_tis_new_active_locality(TPMState *s, uint8_t new_active_locty)
> +{
> +    TPMTISState *tis = &s->s.tis;
> +    int change = (s->s.tis.active_locty != new_active_locty);
> +
> +    if (change && TPM_TIS_IS_VALID_LOCTY(s->s.tis.active_locty)) {
> +        /* reset flags on the old active locality */
> +        tis->loc[s->s.tis.active_locty].access &=
> +                 ~(TPM_TIS_ACCESS_ACTIVE_LOCALITY|TPM_TIS_ACCESS_REQUEST_USE);
> +        if (TPM_TIS_IS_VALID_LOCTY(new_active_locty) &&
> +            tis->loc[new_active_locty].access & TPM_TIS_ACCESS_SEIZE) {
> +            tis->loc[tis->active_locty].access |= TPM_TIS_ACCESS_BEEN_SEIZED;
> +        }
> +    }
> +
> +    tis->active_locty = new_active_locty;

should this happen with invalid locality?

> +
> +    dprintf("tpm_tis: Active locality is now %d\n", s->s.tis.active_locty);
> +
> +    if (TPM_TIS_IS_VALID_LOCTY(new_active_locty)) {
> +        /* set flags on the new active locality */
> +        tis->loc[new_active_locty].access |= TPM_TIS_ACCESS_ACTIVE_LOCALITY;
> +        tis->loc[new_active_locty].access &= ~(TPM_TIS_ACCESS_REQUEST_USE |
> +                                               TPM_TIS_ACCESS_SEIZE);
> +    }
> +
> +    if (change) {
> +        tpm_tis_raise_irq(s, tis->active_locty, TPM_TIS_INT_LOCALITY_CHANGED);
> +    }
> +}
> +
> +/* abort -- this function switches the locality */
> +static void tpm_tis_abort(TPMState *s, uint8_t locty)
> +{
> +    TPMTISState *tis = &s->s.tis;
> +
> +    tis->loc[locty].r_offset = 0;
> +    tis->loc[locty].w_offset = 0;
> +
> +    dprintf("tpm_tis: tis_abort: new active locality is %d\n", tis->next_locty);
> +
> +    /*
> +     * Need to react differently depending on who's aborting now and
> +     * which locality will become active afterwards.
> +     */
> +    if (tis->aborting_locty == tis->next_locty) {
> +        tis->loc[tis->aborting_locty].status = TPM_TIS_STATUS_READY;
> +        tis->loc[tis->aborting_locty].sts = TPM_TIS_STS_COMMAND_READY;
> +        tpm_tis_raise_irq(s, tis->aborting_locty, TPM_TIS_INT_COMMAND_READY);
> +    }
> +
> +    /* locality after abort is another one than the current one */
> +    tpm_tis_new_active_locality(s, tis->next_locty);
> +
> +    tis->next_locty = TPM_TIS_NO_LOCALITY;
> +    /* nobody's aborting a command anymore */
> +    tis->aborting_locty = TPM_TIS_NO_LOCALITY;
> +}
> +
> +/* prepare aborting current command */
> +static void tpm_tis_prep_abort(TPMState *s, uint8_t locty, uint8_t newlocty)
> +{
> +    TPMTISState *tis = &s->s.tis;
> +    uint8_t busy_locty;
> +
> +    tis->aborting_locty = locty;
> +    tis->next_locty = newlocty;  /* locality after successful abort */
> +
> +    /*
> +     * only abort a command using an interrupt if currently executing
> +     * a command AND if there's a valid connection to the vTPM.
> +     */
> +    for (busy_locty = 0; busy_locty < TPM_TIS_NUM_LOCALITIES; busy_locty++) {
> +        if (tis->loc[busy_locty].status == TPM_TIS_STATUS_EXECUTION) {
> +            /* there is currently no way to interrupt the TPM's operations
> +               while it's executing a command; once the TPM is done and
> +               returns the buffer, it will switch to the next_locty; */
> +            dprintf("tpm_tis: Locality %d is busy - deferring abort\n",
> +                    busy_locty);
> +            return;
> +        }
> +    }
> +
> +    tpm_tis_abort(s, locty);
> +}
> +
> +/*
> + * Callback from the TPM to indicate that the response was received.
> + */
> +static void tpm_tis_receive_cb(TPMState *s, uint8_t locty)
> +{
> +    TPMTISState *tis = &s->s.tis;
> +
> +    qemu_mutex_lock(&s->state_lock);
> +
> +    tis->loc[locty].sts = TPM_TIS_STS_VALID | TPM_TIS_STS_DATA_AVAILABLE;
> +    tis->loc[locty].status = TPM_TIS_STATUS_COMPLETION;
> +    tis->loc[locty].r_offset = 0;
> +    tis->loc[locty].w_offset = 0;
> +
> +    if (TPM_TIS_IS_VALID_LOCTY(tis->next_locty)) {
> +        tpm_tis_abort(s, locty);
> +    }
> +
> +    qemu_cond_signal(&s->from_tpm_cond);
> +
> +    qemu_mutex_unlock(&s->state_lock);
> +
> +#ifndef RAISE_STS_IRQ
> +    tpm_tis_raise_irq(s, locty, TPM_TIS_INT_DATA_AVAILABLE);
> +#else
> +    tpm_tis_raise_irq(s, locty,
> +                      TPM_TIS_INT_DATA_AVAILABLE | TPM_TIS_INT_STS_VALID);
> +#endif
> +}
> +
> +/*
> + * read a byte of response data
> + * call this with s->state_lock held
> + */
> +static uint32_t tpm_tis_data_read(TPMState *s, uint8_t locty)
> +{
> +    TPMTISState *tis = &s->s.tis;
> +    uint32_t ret = TPM_TIS_NO_DATA_BYTE;
> +    uint16_t len;
> +
> +    if ((tis->loc[locty].sts & TPM_TIS_STS_DATA_AVAILABLE)) {
> +        len = tpm_tis_get_size_from_buffer(&tis->loc[locty].r_buffer);
> +
> +        ret = tis->loc[locty].r_buffer.buffer[tis->loc[locty].r_offset++];

what prevents this from being called when r_offset == len,
with a malicious guest?

> +        if (tis->loc[locty].r_offset >= len) {
> +            /* got last byte */
> +            tis->loc[locty].sts = TPM_TIS_STS_VALID;
> +#ifdef RAISE_STS_IRQ
> +            tpm_tis_raise_irq(s, locty, TPM_TIS_INT_STS_VALID);
> +#endif
> +        }
> +        dprintf("tpm_tis: tpm_tis_data_read byte 0x%02x   [%d]\n",
> +                ret, tis->loc[locty].r_offset-1);
> +    }
> +
> +    return ret;
> +}
> +
> +/*
> + * Read a register of the TIS interface
> + * See specs pages 33-63 for description of the registers
> + */
> +static uint64_t tpm_tis_mmio_read(void *opaque, target_phys_addr_t addr,
> +                                  unsigned size)
> +{
> +    TPMState *s = opaque;
> +    TPMTISState *tis = &s->s.tis;
> +    uint16_t offset = addr & 0xffc;
> +    uint8_t shift = (addr & 0x3) * 8;
> +    uint32_t val = 0xffffffff;
> +    uint8_t locty = tpm_tis_locality_from_addr(addr);

Apparently locty can be 0..6 but loc array only has 5 entries.
What if a guest accesses an invalid one?

> +
> +    qemu_mutex_lock(&s->state_lock);
> +
> +    if (s->be_driver->ops->had_startup_error(s->be_driver)) {
> +        qemu_mutex_unlock(&s->state_lock);
> +        return val;
> +    }
> +
> +    switch (offset) {
> +    case TPM_TIS_REG_ACCESS:
> +        /* never show the SEIZE flag even though we use it internally */
> +        val = tis->loc[locty].access & ~TPM_TIS_ACCESS_SEIZE;
> +        /* the pending flag is always calculated */
> +        if (tpm_tis_check_request_use_except(s, locty)) {
> +            val |= TPM_TIS_ACCESS_PENDING_REQUEST;
> +        }
> +        val |= !s->be_driver->ops->get_tpm_established_flag(s->be_driver);
> +        break;
> +    case TPM_TIS_REG_INT_ENABLE:
> +        val = tis->loc[locty].inte;
> +        break;
> +    case TPM_TIS_REG_INT_VECTOR:
> +        val = tis->irq_num;
> +        break;
> +    case TPM_TIS_REG_INT_STATUS:
> +        val = tis->loc[locty].ints;
> +        break;
> +    case TPM_TIS_REG_INTF_CAPABILITY:
> +        val = TPM_TIS_CAPABILITIES_SUPPORTED;
> +        break;
> +    case TPM_TIS_REG_STS:
> +        if (tis->active_locty == locty) {
> +            if ((tis->loc[locty].sts & TPM_TIS_STS_DATA_AVAILABLE)) {
> +                val =  (tpm_tis_get_size_from_buffer(&tis->loc[locty].r_buffer)
> +                        - tis->loc[locty].r_offset) << 8 | tis->loc[locty].sts;
> +            } else {
> +                val = (tis->loc[locty].w_buffer.size -
> +                       tis->loc[locty].w_offset) << 8 | tis->loc[locty].sts;
> +            }
> +        }
> +        break;
> +    case TPM_TIS_REG_DATA_FIFO:
> +        if (tis->active_locty == locty) {
> +            switch (tis->loc[locty].status) {
> +            case TPM_TIS_STATUS_COMPLETION:
> +                val = tpm_tis_data_read(s, locty);
> +                break;
> +            default:
> +                val = TPM_TIS_NO_DATA_BYTE;
> +                break;
> +            }
> +        }
> +        break;
> +    case TPM_TIS_REG_DID_VID:
> +        val = (TPM_TIS_TPM_DID << 16) | TPM_TIS_TPM_VID;
> +        break;
> +    case TPM_TIS_REG_RID:
> +        val = TPM_TIS_TPM_RID;
> +        break;
> +    }
> +
> +    qemu_mutex_unlock(&s->state_lock);
> +
> +    if (shift) {
> +        val >>= shift;
> +    }
> +
> +    dprintf("tpm_tis:  read.%u(%08x) = %08x\n", size, (int)addr, (uint32_t)val);
> +
> +    return val;
> +}
> +
> +/*
> + * Write a value to a register of the TIS interface
> + * See specs pages 33-63 for description of the registers
> + */
> +static void tpm_tis_mmio_write_intern(void *opaque, target_phys_addr_t addr,
> +                                      uint64_t val, unsigned size,
> +                                      bool hw_access)
> +{
> +    TPMState *s = opaque;
> +    TPMTISState *tis = &s->s.tis;
> +    uint16_t off = addr & 0xfff;
> +    uint8_t locty = tpm_tis_locality_from_addr(addr);
> +    uint8_t active_locty, l;
> +    int c, set_new_locty = 1;
> +    uint16_t len;
> +
> +    dprintf("tpm_tis: write.%u(%08x) = %08x\n", size, (int)addr, (uint32_t)val);
> +
> +    qemu_mutex_lock(&s->state_lock);
> +
> +    if (s->be_driver->ops->had_startup_error(s->be_driver)) {
> +        qemu_mutex_unlock(&s->state_lock);
> +        return;
> +    }
> +
> +    switch (off) {
> +    case TPM_TIS_REG_ACCESS:
> +
> +        if ((val & TPM_TIS_ACCESS_SEIZE)) {
> +            val &= ~(TPM_TIS_ACCESS_REQUEST_USE |
> +                     TPM_TIS_ACCESS_ACTIVE_LOCALITY);
> +        }
> +
> +        active_locty = tis->active_locty;
> +
> +        if ((val & TPM_TIS_ACCESS_ACTIVE_LOCALITY)) {
> +            /* give up locality if currently owned */
> +            if (tis->active_locty == locty) {
> +                dprintf("tpm_tis: Releasing locality %d\n", locty);
> +
> +                uint8_t newlocty = TPM_TIS_NO_LOCALITY;
> +                /* anybody wants the locality ? */
> +                for (c = TPM_TIS_NUM_LOCALITIES - 1; c >= 0; c--) {
> +                    if ((tis->loc[c].access & TPM_TIS_ACCESS_REQUEST_USE)) {
> +                        dprintf("tpm_tis: Locality %d requests use.\n", c);
> +                        newlocty = c;
> +                        break;
> +                    }
> +                }
> +                dprintf("tpm_tis: TPM_TIS_ACCESS_ACTIVE_LOCALITY: "
> +                        "Next active locality: %d\n",
> +                        newlocty);
> +
> +                if (TPM_TIS_IS_VALID_LOCTY(newlocty)) {
> +                    set_new_locty = 0;
> +                    tpm_tis_prep_abort(s, locty, newlocty);
> +                } else {
> +                    active_locty = TPM_TIS_NO_LOCALITY;
> +                }
> +            } else {
> +                /* not currently the owner; clear a pending request */
> +                tis->loc[locty].access &= ~TPM_TIS_ACCESS_REQUEST_USE;
> +            }
> +        }
> +
> +        if ((val & TPM_TIS_ACCESS_BEEN_SEIZED)) {
> +            tis->loc[locty].access &= ~TPM_TIS_ACCESS_BEEN_SEIZED;
> +        }
> +
> +        if ((val & TPM_TIS_ACCESS_SEIZE)) {
> +            /* allow seize if a locality is active and the requesting
> +               locality is higher than the one that's active
> +               OR
> +               allow seize for requesting locality if no locality is
> +               active */

/*
 * multiline comments should look like this
 */
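
i.e. for the comment above, something like:

    /*
     * Allow seize if a locality is active and the requesting locality
     * is higher than the one that's active, OR allow seize for the
     * requesting locality if no locality is active.
     */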

> +            while ((TPM_TIS_IS_VALID_LOCTY(tis->active_locty) &&
> +                    locty > tis->active_locty) ||
> +                   (!TPM_TIS_IS_VALID_LOCTY(tis->active_locty))) {

Clearer as:
(locty > tis->active_locty ||
!TPM_TIS_IS_VALID_LOCTY(tis->active_locty))

> +
> +                /* already a pending SEIZE ? */
> +                if ((tis->loc[locty].access & TPM_TIS_ACCESS_SEIZE)) {
> +                    break;
> +                }
> +
> +                /* check for ongoing seize by a higher locality */
> +                for (l = locty + 1; l < TPM_TIS_NUM_LOCALITIES; l++) {
> +                    if ((tis->loc[l].access & TPM_TIS_ACCESS_SEIZE)) {
> +                        break;
> +                    }
> +                }
> +
> +                /* cancel any seize by a lower locality */
> +                for (l = 0; l < locty - 1; l++) {
> +                    tis->loc[l].access &= ~TPM_TIS_ACCESS_SEIZE;
> +                }
> +
> +                tis->loc[locty].access |= TPM_TIS_ACCESS_SEIZE;
> +                dprintf("tpm_tis: TPM_TIS_ACCESS_SEIZE: "
> +                        "Locality %d seized from locality %d\n",
> +                        locty, tis->active_locty);
> +                dprintf("tpm_tis: TPM_TIS_ACCESS_SEIZE: Initiating abort.\n");
> +                set_new_locty = 0;
> +                tpm_tis_prep_abort(s, tis->active_locty, locty);
> +                break;
> +            }
> +        }
> +
> +        if ((val & TPM_TIS_ACCESS_REQUEST_USE)) {
> +            if (tis->active_locty != locty) {
> +                if (TPM_TIS_IS_VALID_LOCTY(tis->active_locty)) {
> +                    tis->loc[locty].access |= TPM_TIS_ACCESS_REQUEST_USE;
> +                } else {
> +                    /* no locality active -> make this one active now */
> +                    active_locty = locty;
> +                }
> +            }
> +        }
> +
> +        if (set_new_locty) {
> +            tpm_tis_new_active_locality(s, active_locty);
> +        }
> +
> +        break;
> +    case TPM_TIS_REG_INT_ENABLE:
> +        if (tis->active_locty != locty) {
> +            break;
> +        }
> +
> +        tis->loc[locty].inte = (val & (TPM_TIS_INT_ENABLED | (0x3 << 3) |
> +                                     TPM_TIS_INTERRUPTS_SUPPORTED));
> +        break;
> +    case TPM_TIS_REG_INT_VECTOR:
> +        /* hard wired -- ignore */
> +        break;
> +    case TPM_TIS_REG_INT_STATUS:
> +        if (tis->active_locty != locty) {
> +            break;
> +        }
> +
> +        /* clearing of interrupt flags */
> +        if (((val & TPM_TIS_INTERRUPTS_SUPPORTED)) &&
> +            (tis->loc[locty].ints & TPM_TIS_INTERRUPTS_SUPPORTED)) {
> +            tis->loc[locty].ints &= ~val;
> +            if (tis->loc[locty].ints == 0) {
> +                qemu_irq_lower(tis->irq);
> +                dprintf("tpm_tis: Lowering IRQ\n");
> +            }
> +        }
> +        tis->loc[locty].ints &= ~(val & TPM_TIS_INTERRUPTS_SUPPORTED);
> +        break;
> +    case TPM_TIS_REG_STS:
> +        if (tis->active_locty != locty) {
> +            break;
> +        }
> +
> +        val &= (TPM_TIS_STS_COMMAND_READY | TPM_TIS_STS_TPM_GO |
> +                TPM_TIS_STS_RESPONSE_RETRY);
> +
> +        if (val == TPM_TIS_STS_COMMAND_READY) {
> +            switch (tis->loc[locty].status) {
> +
> +            case TPM_TIS_STATUS_READY:
> +                tis->loc[locty].w_offset = 0;
> +                tis->loc[locty].r_offset = 0;
> +            break;
> +
> +            case TPM_TIS_STATUS_IDLE:
> +                tis->loc[locty].sts = TPM_TIS_STS_COMMAND_READY;
> +                tis->loc[locty].status = TPM_TIS_STATUS_READY;
> +                tpm_tis_raise_irq(s, locty, TPM_TIS_INT_COMMAND_READY);
> +            break;
> +
> +            case TPM_TIS_STATUS_EXECUTION:
> +            case TPM_TIS_STATUS_RECEPTION:
> +                /* abort currently running command */
> +                dprintf("tpm_tis: %s: Initiating abort.\n",
> +                        __func__);
> +                tpm_tis_prep_abort(s, locty, locty);
> +            break;
> +
> +            case TPM_TIS_STATUS_COMPLETION:
> +                tis->loc[locty].w_offset = 0;
> +                tis->loc[locty].r_offset = 0;
> +                /* shortcut to ready state with C/R set */
> +                tis->loc[locty].status = TPM_TIS_STATUS_READY;
> +                if (!(tis->loc[locty].sts & TPM_TIS_STS_COMMAND_READY)) {
> +                    tis->loc[locty].sts   = TPM_TIS_STS_COMMAND_READY;
> +                    tpm_tis_raise_irq(s, locty, TPM_TIS_INT_COMMAND_READY);
> +                }
> +            break;
> +
> +            }
> +        } else if (val == TPM_TIS_STS_TPM_GO) {
> +            switch (tis->loc[locty].status) {
> +            case TPM_TIS_STATUS_RECEPTION:
> +                tpm_tis_tpm_send(s, locty);
> +                break;
> +            default:
> +                /* ignore */
> +                break;
> +            }
> +        } else if (val == TPM_TIS_STS_RESPONSE_RETRY) {
> +            switch (tis->loc[locty].status) {
> +            case TPM_TIS_STATUS_COMPLETION:
> +                tis->loc[locty].r_offset = 0;
> +                tis->loc[locty].sts = TPM_TIS_STS_VALID |
> +                                      TPM_TIS_STS_DATA_AVAILABLE;
> +                break;
> +            default:
> +                /* ignore */
> +                break;
> +            }
> +        }
> +        break;
> +    case TPM_TIS_REG_DATA_FIFO:
> +        /* data fifo */
> +        if (tis->active_locty != locty) {
> +            break;
> +        }
> +
> +        if (tis->loc[locty].status == TPM_TIS_STATUS_IDLE ||
> +            tis->loc[locty].status == TPM_TIS_STATUS_EXECUTION ||
> +            tis->loc[locty].status == TPM_TIS_STATUS_COMPLETION) {
> +            /* drop the byte */
> +        } else {
> +            dprintf("tpm_tis: Byte to send to TPM: %02x\n", (uint8_t)val);
> +            if (tis->loc[locty].status == TPM_TIS_STATUS_READY) {
> +                tis->loc[locty].status = TPM_TIS_STATUS_RECEPTION;
> +                tis->loc[locty].sts = TPM_TIS_STS_EXPECT | TPM_TIS_STS_VALID;
> +            }
> +
> +            if ((tis->loc[locty].sts & TPM_TIS_STS_EXPECT)) {
> +                if (tis->loc[locty].w_offset < tis->loc[locty].w_buffer.size) {
> +                    tis->loc[locty].w_buffer.
> +                        buffer[tis->loc[locty].w_offset++] = (uint8_t)val;
> +                } else {
> +                    tis->loc[locty].sts = TPM_TIS_STS_VALID;
> +                }
> +            }
> +
> +            /* check for complete packet */
> +            if (tis->loc[locty].w_offset > 5 &&
> +                (tis->loc[locty].sts & TPM_TIS_STS_EXPECT)) {
> +                /* we have a packet length - see if we have all of it */
> +#ifdef RAISE_STS_IRQ
> +                bool needIrq = !(tis->loc[locty].sts & TPM_TIS_STS_VALID);
> +#endif
> +                len = tpm_tis_get_size_from_buffer(&tis->loc[locty].w_buffer);
> +                if (len > tis->loc[locty].w_offset) {
> +                    tis->loc[locty].sts = TPM_TIS_STS_EXPECT |
> +                                          TPM_TIS_STS_VALID;
> +                } else {
> +                    /* packet complete */
> +                    tis->loc[locty].sts = TPM_TIS_STS_VALID;
> +                }
> +#ifdef RAISE_STS_IRQ
> +                if (needIrq) {
> +                    tpm_tis_raise_irq(s, locty, TPM_TIS_INT_STS_VALID);
> +                }
> +#endif
> +            }
> +        }
> +        break;
> +    }
> +
> +    qemu_mutex_unlock(&s->state_lock);
> +}
> +
> +static void tpm_tis_mmio_write(void *opaque, target_phys_addr_t addr,
> +                               uint64_t val, unsigned size)
> +{
> +    return tpm_tis_mmio_write_intern(opaque, addr, val, size, false);
> +}
> +
> +static const MemoryRegionOps tpm_tis_memory_ops = {
> +    .read = tpm_tis_mmio_read,
> +    .write = tpm_tis_mmio_write,
> +    .endianness = DEVICE_NATIVE_ENDIAN,

Are you sure? Most devices are BIG or LITTLE.

> +    .valid = {
> +        .min_access_size = 1,
> +        .max_access_size = 4,
> +    },
> +};
> +
> +static int tpm_tis_do_startup_tpm(TPMState *s)
> +{
> +    return s->be_driver->ops->startup_tpm(s->be_driver);
> +}
> +
> +/*
> + * This function is called when the machine starts, resets or due to
> + * S3 resume.
> + */
> +static void tpm_tis_reset(DeviceState *d)
> +{
> +    TPMState *s = container_of(d, TPMState, busdev.qdev);
> +    TPMTISState *tis = &s->s.tis;
> +    int c;
> +
> +    s->tpm_initialized = false;
> +
> +    s->be_driver->ops->reset(s->be_driver);
> +
> +    tis->active_locty = TPM_TIS_NO_LOCALITY;
> +    tis->next_locty = TPM_TIS_NO_LOCALITY;
> +    tis->aborting_locty = TPM_TIS_NO_LOCALITY;
> +
> +    for (c = 0; c < TPM_TIS_NUM_LOCALITIES; c++) {
> +        tis->loc[c].access = TPM_TIS_ACCESS_TPM_REG_VALID_STS;
> +        tis->loc[c].sts = 0;
> +        tis->loc[c].inte = (1 << 3);
> +        tis->loc[c].ints = 0;
> +        tis->loc[c].status = TPM_TIS_STATUS_IDLE;
> +
> +        tis->loc[c].w_offset = 0;
> +        s->be_driver->ops->realloc_buffer(&tis->loc[c].w_buffer);
> +        tis->loc[c].r_offset = 0;
> +        s->be_driver->ops->realloc_buffer(&tis->loc[c].r_buffer);
> +    }
> +
> +    tpm_tis_do_startup_tpm(s);
> +}
> +
> +static int tpm_tis_init(ISADevice *dev)
> +{
> +    TPMState *s = DO_UPCAST(TPMState, busdev, dev);

Let's stay consistent DO_UPCAST versus container_of.

> +    TPMTISState *tis = &s->s.tis;
> +    int rc;
> +
> +    qemu_mutex_init(&s->state_lock);
> +    qemu_cond_init(&s->from_tpm_cond);
> +    qemu_cond_init(&s->to_tpm_cond);
> +
> +    s->be_driver = qemu_find_tpm(s->backend);
> +    if (!s->be_driver) {
> +        error_report("tpm_tis: backend driver with id %s could not be "
> +                     "found.n\n", s->backend);
> +        return -1;

goto forward to have a single exit point?

> +    }
> +
> +    s->be_driver->fe_model = "tpm-tis";
> +
> +    if (s->be_driver->ops->init(s->be_driver, s, tpm_tis_receive_cb)) {
> +        return -1;

goto forward?

> +    }
> +
> +    isa_init_irq(dev, &tis->irq, tis->irq_num);

probably should validate irq_num so an illegal value
gives us an error not an assert.
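
Something along these lines, say (only a sketch; the 0..15 ISA IRQ range
check and the err_exit label from the single-exit suggestion above are my
assumptions):

    if (tis->irq_num > 15) {
        error_report("tpm_tis: IRQ %d is outside the valid range of 0 to 15",
                     tis->irq_num);
        goto err_exit;
    }

    isa_init_irq(dev, &tis->irq, tis->irq_num);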

> +
> +    memory_region_init_io(&s->mmio, &tpm_tis_memory_ops, s, "tpm-tis-mmio",
> +                          0x1000 * TPM_TIS_NUM_LOCALITIES);
> +    memory_region_add_subregion(get_system_memory(), TPM_TIS_ADDR_BASE,
> +                                &s->mmio);
> +
> +    rc = tpm_tis_do_startup_tpm(s);
> +    if (rc != 0) {
> +        goto err_destroy_memory;
> +    }
> +
> +    return 0;
> +
> + err_destroy_memory:

You must delete mmio as subregion before destroy.
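
I.e. roughly:

     err_destroy_memory:
        memory_region_del_subregion(get_system_memory(), &s->mmio);
        memory_region_destroy(&s->mmio);

        return -1;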

> +    memory_region_destroy(&s->mmio);
> +
> +    return -1;
> +}
> +
> +static const VMStateDescription vmstate_tpm_tis = {
> +    .name = "tpm",
> +    .unmigratable = 1,

I think the passthrough backend is what makes
it unmigrateable, no?
If so in the future it would be better to get that info from backend.
For now I guess we are ok as is, but pls add a comment.
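
E.g. something like:

    static const VMStateDescription vmstate_tpm_tis = {
        .name = "tpm",
        /* The TPM state lives in the (passthrough) backend and cannot be
         * saved/restored, so mark the device unmigratable for now. */
        .unmigratable = 1,
    };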

> +};
> +
> +static ISADeviceInfo tpm_tis_device_info = {
> +    .init         = tpm_tis_init,
> +    .qdev.name    = "tpm-tis",
> +    .qdev.size    = sizeof(TPMState),
> +    .qdev.vmsd    = &vmstate_tpm_tis,
> +    .qdev.reset   = tpm_tis_reset,
> +    .qdev.props = (Property[]) {
> +        DEFINE_PROP_UINT32("irq", TPMState,
> +                           s.tis.irq_num, TPM_TIS_IRQ),
> +        DEFINE_PROP_STRING("tpmdev", TPMState, backend),
> +        DEFINE_PROP_END_OF_LIST(),
> +    },
> +};
> +
> +static void tpm_tis_register_device(void)
> +{
> +    isa_qdev_register(&tpm_tis_device_info);

So this needs to be rebased to the QOM.

Also a question: looking at the linux drivers
there's pnp support but we don't have
that yet, right? So you need to force
non-pnp mode in guest?

> +}
> +
> +device_init(tpm_tis_register_device)
> -- 
> 1.7.6.4
>
Stefan Berger Feb. 20, 2012, 3:48 p.m. UTC | #2
On 02/20/2012 03:51 AM, Michael S. Tsirkin wrote:
> On Wed, Dec 14, 2011 at 08:43:17AM -0500, Stefan Berger wrote:
>> This patch adds the main code of the TPM frontend driver, the TPM TIS
>> interface, to Qemu. The code is largely based on the previous implementation
>> for Xen but has been significantly extended to meet the standard's
>> requirements, such as the support for changing of localities and all the
>> functionality of the available flags.
>>
> looks good overall
> some questions on error handling and behaviour with
> a malicious guest, below.
>

[...]
>> +#ifndef RAISE_STS_IRQ
>> +
>> +# define TPM_TIS_INTERRUPTS_SUPPORTED (TPM_TIS_INT_LOCALITY_CHANGED | \
>> +                                       TPM_TIS_INT_DATA_AVAILABLE   | \
>> +                                       TPM_TIS_INT_COMMAND_READY)
>> +
>> +#else
>> +
>> +# define TPM_TIS_INTERRUPTS_SUPPORTED (TPM_TIS_INT_LOCALITY_CHANGED | \
>> +                                       TPM_TIS_INT_DATA_AVAILABLE   | \
>> +                                       TPM_TIS_INT_STS_VALID | \
>> +                                       TPM_TIS_INT_COMMAND_READY)
>> +
> let's keep these as #define
>

Will fix it.
>> +/* utility functions */
>> +
>> +static uint8_t tpm_tis_locality_from_addr(target_phys_addr_t addr)
>> +{
>> +    return (uint8_t)((addr >> 12) & 0x7);
>> +}
>> +
> note this can give a number 0..6

Yes, but that's ok. We are only registering the MMIO region [fed4 0000, 
fed4 5000[, so anything larger than 4 cannot actually come from this 
function.


>> +static void tpm_tis_new_active_locality(TPMState *s, uint8_t new_active_locty)
>> +{
>> +    TPMTISState *tis = &s->s.tis;
>> +    int change = (s->s.tis.active_locty != new_active_locty);
>> +
>> +    if (change && TPM_TIS_IS_VALID_LOCTY(s->s.tis.active_locty)) {
>> +        /* reset flags on the old active locality */
>> +        tis->loc[s->s.tis.active_locty].access &=
>> +                 ~(TPM_TIS_ACCESS_ACTIVE_LOCALITY|TPM_TIS_ACCESS_REQUEST_USE);
>> +        if (TPM_TIS_IS_VALID_LOCTY(new_active_locty) &&
>> +            tis->loc[new_active_locty].access & TPM_TIS_ACCESS_SEIZE) {
>> +            tis->loc[tis->active_locty].access |= TPM_TIS_ACCESS_BEEN_SEIZED;
>> +        }
>> +    }
>> +
>> +    tis->active_locty = new_active_locty;
> should this happen with invalid locality?
>



>> + * read a byte of response data
>> + * call this with s->state_lock held
>> + */
>> +static uint32_t tpm_tis_data_read(TPMState *s, uint8_t locty)
>> +{
>> +    TPMTISState *tis = &s->s.tis;
>> +    uint32_t ret = TPM_TIS_NO_DATA_BYTE;
>> +    uint16_t len;
>> +
>> +    if ((tis->loc[locty].sts & TPM_TIS_STS_DATA_AVAILABLE)) {
>> +        len = tpm_tis_get_size_from_buffer(&tis->loc[locty].r_buffer);
>> +
>> +        ret = tis->loc[locty].r_buffer.buffer[tis->loc[locty].r_offset++];
> what prevents this from being called when r_offset == len,
> with a malicious guest?
>

The device user has no control over the index of the byte stream he 
wants to read. Once he starts reading the bytes from the device starting 
at index 0, the index automatically increases, as seen above. The flag 
TPM_TIS_STS_DATA_AVAILABLE is set for as long as there are bytes 
available. The user can see this flag in the status register. Then once...

>> +        if (tis->loc[locty].r_offset >= len) {
>> +            /* got last byte */
>> +            tis->loc[locty].sts = TPM_TIS_STS_VALID;

... all bytes have been read the status flag changes and no more bytes 
can be read. The tis code here also goes by these flags as you can see.


>> +/*
>> + * Read a register of the TIS interface
>> + * See specs pages 33-63 for description of the registers
>> + */
>> +static uint64_t tpm_tis_mmio_read(void *opaque, target_phys_addr_t addr,
>> +                                  unsigned size)
>> +{
>> +    TPMState *s = opaque;
>> +    TPMTISState *tis = &s->s.tis;
>> +    uint16_t offset = addr & 0xffc;
>> +    uint8_t shift = (addr & 0x3) * 8;
>> +    uint32_t val = 0xffffffff;
>> +    uint8_t locty = tpm_tis_locality_from_addr(addr);
> Apparently locty can be 0..6 but loc array only has 5 entries.
> What if a guest accesses an invalid one?

See comment at the beginning. Any locality beyond value '4' cannot be 
reached since it's not within the MMIO region of the device.

>> +            /* allow seize if a locality is active and the requesting
>> +               locality is higher than the one that's active
>> +               OR
>> +               allow seize for requesting locality if no locality is
>> +               active */
> /*
>   * multiline comments should look like this
>   */
>

  I will fix 'them'.


>> +            while ((TPM_TIS_IS_VALID_LOCTY(tis->active_locty) &&
>> +                    locty > tis->active_locty) ||
>> +                   (!TPM_TIS_IS_VALID_LOCTY(tis->active_locty))) {
> Clearer as:
> (locty > tis->active_locty ||
> !TPM_TIS_IS_VALID_LOCTY(tis->active_locty))
>

Ok.

>> +static const MemoryRegionOps tpm_tis_memory_ops = {
>> +    .read = tpm_tis_mmio_read,
>> +    .write = tpm_tis_mmio_write,
>> +    .endianness = DEVICE_NATIVE_ENDIAN,
> Are you sure? Most devices are BIG or LITTLE.
>

Right, it should be BIG endian. I'll fix it. Luckily there are only a 
few 32-bit accesses (and no 16-bit accesses), also in SeaBIOS, and those 
that did access it via 32 bits didn't fail due to the mix-up (excluding 
the test suite).

>> +    .valid = {
>> +        .min_access_size = 1,
>> +        .max_access_size = 4,
>> +    },
>> +};
>> +
>> +static int tpm_tis_do_startup_tpm(TPMState *s)
>> +{
>> +    return s->be_driver->ops->startup_tpm(s->be_driver);
>> +}
>> +
>> +/*
>> + * This function is called when the machine starts, resets or due to
>> + * S3 resume.
>> + */
>> +static void tpm_tis_reset(DeviceState *d)
>> +{
>> +    TPMState *s = container_of(d, TPMState, busdev.qdev);
>> +    TPMTISState *tis =&s->s.tis;
>> +    int c;
>> +
>> +    s->tpm_initialized = false;
>> +
>> +    s->be_driver->ops->reset(s->be_driver);
>> +
>> +    tis->active_locty = TPM_TIS_NO_LOCALITY;
>> +    tis->next_locty = TPM_TIS_NO_LOCALITY;
>> +    tis->aborting_locty = TPM_TIS_NO_LOCALITY;
>> +
>> +    for (c = 0; c<  TPM_TIS_NUM_LOCALITIES; c++) {
>> +        tis->loc[c].access = TPM_TIS_ACCESS_TPM_REG_VALID_STS;
>> +        tis->loc[c].sts = 0;
>> +        tis->loc[c].inte = (1<<  3);
>> +        tis->loc[c].ints = 0;
>> +        tis->loc[c].status = TPM_TIS_STATUS_IDLE;
>> +
>> +        tis->loc[c].w_offset = 0;
>> +        s->be_driver->ops->realloc_buffer(&tis->loc[c].w_buffer);
>> +        tis->loc[c].r_offset = 0;
>> +        s->be_driver->ops->realloc_buffer(&tis->loc[c].r_buffer);
>> +    }
>> +
>> +    tpm_tis_do_startup_tpm(s);
>> +}
>> +
>> +static int tpm_tis_init(ISADevice *dev)
>> +{
>> +    TPMState *s = DO_UPCAST(TPMState, busdev, dev);
> Let's stay consistent DO_UPCAST versus container_of.

Ok.

>> +    TPMTISState *tis =&s->s.tis;
>> +    int rc;
>> +
>> +    qemu_mutex_init(&s->state_lock);
>> +    qemu_cond_init(&s->from_tpm_cond);
>> +    qemu_cond_init(&s->to_tpm_cond);
>> +
>> +    s->be_driver = qemu_find_tpm(s->backend);
>> +    if (!s->be_driver) {
>> +        error_report("tpm_tis: backend driver with id %s could not be "
>> +                     "found.n\n", s->backend);
>> +        return -1;
> goto forward to have a single exit point?

Will fix it.

>> +    }
>> +
>> +    s->be_driver->fe_model = "tpm-tis";
>> +
>> +    if (s->be_driver->ops->init(s->be_driver, s, tpm_tis_receive_cb)) {
>> +        return -1;
> goto forward?
>
>> +    }
>> +
>> +    isa_init_irq(dev, &tis->irq, tis->irq_num);
> probably should validate irq_num so an illegal value
> gives us an error not an assert.
>

Ok.

>> +
>> +    memory_region_init_io(&s->mmio, &tpm_tis_memory_ops, s, "tpm-tis-mmio",
>> +                          0x1000 * TPM_TIS_NUM_LOCALITIES);
>> +    memory_region_add_subregion(get_system_memory(), TPM_TIS_ADDR_BASE,
>> +                                &s->mmio);
>> +
>> +    rc = tpm_tis_do_startup_tpm(s);
>> +    if (rc != 0) {
>> +        goto err_destroy_memory;
>> +    }
>> +
>> +    return 0;
>> +
>> + err_destroy_memory:
> You must delete mmio as subregion before destroy.
>

Will fix it.

>> +    memory_region_destroy(&s->mmio);
>> +
>> +    return -1;
>> +}
>> +
>> +static const VMStateDescription vmstate_tpm_tis = {
>> +    .name = "tpm",
>> +    .unmigratable = 1,
> I think the passthrough backend is what makes
> it unmigrateable, no?
> If so in the future it would be better to get that info from backend.
> For now I guess we are ok as is, but pls add a comment.
>

In the patches following this passthrough driver, I am pushing this into 
the backend, where I then register a migration blocker. While the 
passthrough driver is the only backend here, this should be fine.

>> +};
>> +
>> +static ISADeviceInfo tpm_tis_device_info = {
>> +    .init         = tpm_tis_init,
>> +    .qdev.name    = "tpm-tis",
>> +    .qdev.size    = sizeof(TPMState),
>> +    .qdev.vmsd    =&vmstate_tpm_tis,
>> +    .qdev.reset   = tpm_tis_reset,
>> +    .qdev.props = (Property[]) {
>> +        DEFINE_PROP_UINT32("irq", TPMState,
>> +                           s.tis.irq_num, TPM_TIS_IRQ),
>> +        DEFINE_PROP_STRING("tpmdev", TPMState, backend),
>> +        DEFINE_PROP_END_OF_LIST(),
>> +    },
>> +};
>> +
>> +static void tpm_tis_register_device(void)
>> +{
>> +    isa_qdev_register(&tpm_tis_device_info);
> So this needs to be rebased to the QOM.
>

I followed the tree.

> Also a question: looking at the linux drivers
> there's pnp support but we don't have
> that yet, right? So you need to force
> non-pnp mode in guest?
>

You only need to force non-pnp mode in the guest if you don't have 
SeaBIOS with the TPM support. There I create the SSDT for the TPM device 
that then triggers the pnp driver mode inside Linux.


Thanks for the review.

    Stefan
Michael S. Tsirkin Feb. 20, 2012, 7:37 p.m. UTC | #3
On Mon, Feb 20, 2012 at 10:48:09AM -0500, Stefan Berger wrote:
> On 02/20/2012 03:51 AM, Michael S. Tsirkin wrote:
> >On Wed, Dec 14, 2011 at 08:43:17AM -0500, Stefan Berger wrote:
> >>This patch adds the main code of the TPM frontend driver, the TPM TIS
> >>interface, to Qemu. The code is largely based on the previous implementation
> >>for Xen but has been significantly extended to meet the standard's
> >>requirements, such as the support for changing of localities and all the
> >>functionality of the available flags.
> >>
> >looks good overall
> >some questions on error handling and behaviour with
> >a malicious guest, below.
> >
> 
> [...]
> >>+#ifndef RAISE_STS_IRQ
> >>+
> >>+# define TPM_TIS_INTERRUPTS_SUPPORTED (TPM_TIS_INT_LOCALITY_CHANGED | \
> >>+                                       TPM_TIS_INT_DATA_AVAILABLE   | \
> >>+                                       TPM_TIS_INT_COMMAND_READY)
> >>+
> >>+#else
> >>+
> >>+# define TPM_TIS_INTERRUPTS_SUPPORTED (TPM_TIS_INT_LOCALITY_CHANGED | \
> >>+                                       TPM_TIS_INT_DATA_AVAILABLE   | \
> >>+                                       TPM_TIS_INT_STS_VALID | \
> >>+                                       TPM_TIS_INT_COMMAND_READY)
> >>+
> >let's keep these as #define
> >
> 
> Will fix it.
> >>+/* utility functions */
> >>+
> >>+static uint8_t tpm_tis_locality_from_addr(target_phys_addr_t addr)
> >>+{
> >>+    return (uint8_t)((addr >> 12) & 0x7);
> >>+}
> >>+
> >note this can give a number 0..6
> 
> Yes, but that's ok. We are only registering the MMIO region [fed4
> 0000, fed4 5000[, so anything larger than 4 cannot actually come
> from this function.

I see the shift by 12 matches the 0x1000 where the region is created.
I think there should be a macro like TPM_TIS_LOCALITY_SHIFT,
and then you can register a region of size
 NUM_LOCALITIES << TPM_TIS_LOCALITY_SHIFT.
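
Roughly (a sketch, reusing the names already in the patch):

    #define TPM_TIS_LOCALITY_SHIFT    12

    static uint8_t tpm_tis_locality_from_addr(target_phys_addr_t addr)
    {
        return (uint8_t)((addr >> TPM_TIS_LOCALITY_SHIFT) & 0x7);
    }

    ...

    memory_region_init_io(&s->mmio, &tpm_tis_memory_ops, s, "tpm-tis-mmio",
                          TPM_TIS_NUM_LOCALITIES << TPM_TIS_LOCALITY_SHIFT);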
Stefan Berger Feb. 20, 2012, 7:58 p.m. UTC | #4
On 02/20/2012 02:37 PM, Michael S. Tsirkin wrote:
> On Mon, Feb 20, 2012 at 10:48:09AM -0500, Stefan Berger wrote:
>> On 02/20/2012 03:51 AM, Michael S. Tsirkin wrote:
>>> On Wed, Dec 14, 2011 at 08:43:17AM -0500, Stefan Berger wrote:
>>>
>> Yes, but that's ok. We are only registering the MMIO region [fed4
>> 0000, fed4 5000[, so anything larger than 4 cannot actually come
>> from this function.
> I see the shift by 12 matches the 0x1000 where the region is created.
> I think there should be a macro like TPM_TIS_LOCALITY_SHIFT,
> and then you can register a region of size
>   NUM_LOCALITIES << TPM_TIS_LOCALITY_SHIFT.
>
>

Done.

    Stefan
Michael S. Tsirkin Feb. 20, 2012, 10:02 p.m. UTC | #5
On Wed, Dec 14, 2011 at 08:43:17AM -0500, Stefan Berger wrote:
> +/*
> + * Send a TPM request.
> + * Call this with the state_lock held so we can sync with the receive
> + * callback.
> + */
> +static void tpm_tis_tpm_send(TPMState *s, uint8_t locty)
> +{
> +    TPMTISState *tis = &s->s.tis;
> +
> +    tpm_tis_show_buffer(&tis->loc[locty].w_buffer, "tpm_tis: To TPM");
> +
> +    s->command_locty = locty;
> +    s->cmd_locty     = &tis->loc[locty];
> +
> +    /* w_offset serves as length indicator for length of data;
> +       it's reset when the response comes back */
> +    tis->loc[locty].status = TPM_TIS_STATUS_EXECUTION;
> +    tis->loc[locty].sts &= ~TPM_TIS_STS_EXPECT;
> +
> +    s->to_tpm_execute = true;
> +    qemu_cond_signal(&s->to_tpm_cond);
> +}

What happens IIUC is that frondend sets to_tpm_execute
and signals a condition, and backend clears it
and waits on a condition.

So how about moving all the signalling
and locking out to backend, and have frontend
invoke a callback to signal it?

The whole threading thing then becomes a work-around
for a backend that does not support select,
instead of spilling out into frontend?
Stefan Berger Feb. 21, 2012, 12:43 a.m. UTC | #6
On 02/20/2012 05:02 PM, Michael S. Tsirkin wrote:
> On Wed, Dec 14, 2011 at 08:43:17AM -0500, Stefan Berger wrote:
>> +/*
>> + * Send a TPM request.
>> + * Call this with the state_lock held so we can sync with the receive
>> + * callback.
>> + */
>> +static void tpm_tis_tpm_send(TPMState *s, uint8_t locty)
>> +{
>> +    TPMTISState *tis =&s->s.tis;
>> +
>> +    tpm_tis_show_buffer(&tis->loc[locty].w_buffer, "tpm_tis: To TPM");
>> +
>> +    s->command_locty = locty;
>> +    s->cmd_locty     =&tis->loc[locty];
>> +
>> +    /* w_offset serves as length indicator for length of data;
>> +       it's reset when the response comes back */
>> +    tis->loc[locty].status = TPM_TIS_STATUS_EXECUTION;
>> +    tis->loc[locty].sts&= ~TPM_TIS_STS_EXPECT;
>> +
>> +    s->to_tpm_execute = true;
>> +    qemu_cond_signal(&s->to_tpm_cond);
>> +}
> What happens IIUC is that frondend sets to_tpm_execute
> and signals a condition, and backend clears it
> and waits on a condition.
>
> So how about moving all the signalling
> and locking out to backend, and have frontend
> invoke a callback to signal it?
>
> The whole threading thing then becomes a work-around
> for a backend that does not support select,
> instead of spilling out into frontend?
>

How do I get the lock calls (qemu_mutex_lock(&s->state_lock)) out of the 
frontend? Do you want me to add callbacks to the backend interface for 
locking (s->be_driver->ops->state_lock(s)) and one for unlocking 
(s->be_driver->ops->state_unlock(tpm_be)) of the state that really 
belongs to the front-end (state is 's') and invoke it as shown in 
parenthesis and still keep s->state_lock around? Ideally the locks would 
end up being 'nop's' if select() was available, but in the end all 
backend will need to support that lock.

[The lock protects the common structure so that the thread in the 
backend can deliver the response to a request while the OS for example 
polls the hardware interface for its current state.]


    Stefan
Michael S. Tsirkin Feb. 21, 2012, 3:18 a.m. UTC | #7
On Mon, Feb 20, 2012 at 07:43:05PM -0500, Stefan Berger wrote:
> On 02/20/2012 05:02 PM, Michael S. Tsirkin wrote:
> >On Wed, Dec 14, 2011 at 08:43:17AM -0500, Stefan Berger wrote:
> >>+/*
> >>+ * Send a TPM request.
> >>+ * Call this with the state_lock held so we can sync with the receive
> >>+ * callback.
> >>+ */
> >>+static void tpm_tis_tpm_send(TPMState *s, uint8_t locty)
> >>+{
> >>+    TPMTISState *tis =&s->s.tis;
> >>+
> >>+    tpm_tis_show_buffer(&tis->loc[locty].w_buffer, "tpm_tis: To TPM");
> >>+
> >>+    s->command_locty = locty;
> >>+    s->cmd_locty     =&tis->loc[locty];
> >>+
> >>+    /* w_offset serves as length indicator for length of data;
> >>+       it's reset when the response comes back */
> >>+    tis->loc[locty].status = TPM_TIS_STATUS_EXECUTION;
> >>+    tis->loc[locty].sts&= ~TPM_TIS_STS_EXPECT;
> >>+
> >>+    s->to_tpm_execute = true;
> >>+    qemu_cond_signal(&s->to_tpm_cond);
> >>+}
> >What happens IIUC is that frondend sets to_tpm_execute
> >and signals a condition, and backend clears it
> >and waits on a condition.
> >
> >So how about moving all the signalling
> >and locking out to backend, and have frontend
> >invoke a callback to signal it?
> >
> >The whole threading thing then becomes a work-around
> >for a backend that does not support select,
> >instead of spilling out into frontend?
> >
> 
> How do I get the lock calls (qemu_mutex_lock(&s->state_lock)) out of
> the frontend? Do you want me to add callbacks to the backend
> interface for locking (s->be_driver->ops->state_lock(s)) and one for
> unlocking (s->be_driver->ops->state_unlock(tpm_be)) of the state
> that really belongs to the front-end (state is 's') and invoke it as
> shown in parenthesis and still keep s->state_lock around? Ideally
> the locks would end up being 'nop's' if select() was available, but
> in the end all backend will need to support that lock.
> 
> [The lock protects the common structure so that the thread in the
> backend can deliver the response to a request while the OS for
> example polls the hardware interface for its current state.]
> 
> 
>    Stefan


Well, this is just an idea, please do not take this as
a request or anything like that. Maybe it is a dumb one.

Maybe something like what you describe.

Alternatively, I imagined that you can pass a copy
or pointer of the necessary state to the backend,
which queues the command and wakes the worker.
In the reverse direction, backend queues a response
and when OS polls you dequeue it and update state.
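
Very roughly, and with all of the names made up, I mean an interface
shaped like:

    typedef struct TPMRequest {
        uint8_t locty;
        uint8_t *buf;       /* command on submit, response on completion */
        uint32_t len;
        QSIMPLEQ_ENTRY(TPMRequest) entry;
    } TPMRequest;

    /* frontend -> backend: queue the command and wake the worker */
    void tpm_backend_submit(TPMBackend *be, TPMRequest *req);

    /* backend -> frontend: the response sits in a queue; the frontend
       dequeues it when the guest polls, or is notified by the backend
       so it can raise the interrupt */
    TPMRequest *tpm_backend_fetch_response(TPMBackend *be, uint8_t locty);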

Can this work?
Stefan Berger Feb. 21, 2012, 11:19 a.m. UTC | #8
On 02/20/2012 10:18 PM, Michael S. Tsirkin wrote:
> On Mon, Feb 20, 2012 at 07:43:05PM -0500, Stefan Berger wrote:
>> On 02/20/2012 05:02 PM, Michael S. Tsirkin wrote:
>>> On Wed, Dec 14, 2011 at 08:43:17AM -0500, Stefan Berger wrote:
>>>> +/*
>>>> + * Send a TPM request.
>>>> + * Call this with the state_lock held so we can sync with the receive
>>>> + * callback.
>>>> + */
>>>> +static void tpm_tis_tpm_send(TPMState *s, uint8_t locty)
>>>> +{
>>>> +    TPMTISState *tis =&s->s.tis;
>>>> +
>>>> +    tpm_tis_show_buffer(&tis->loc[locty].w_buffer, "tpm_tis: To TPM");
>>>> +
>>>> +    s->command_locty = locty;
>>>> +    s->cmd_locty     =&tis->loc[locty];
>>>> +
>>>> +    /* w_offset serves as length indicator for length of data;
>>>> +       it's reset when the response comes back */
>>>> +    tis->loc[locty].status = TPM_TIS_STATUS_EXECUTION;
>>>> +    tis->loc[locty].sts&= ~TPM_TIS_STS_EXPECT;
>>>> +
>>>> +    s->to_tpm_execute = true;
>>>> +    qemu_cond_signal(&s->to_tpm_cond);
>>>> +}
>>> What happens IIUC is that frondend sets to_tpm_execute
>>> and signals a condition, and backend clears it
>>> and waits on a condition.
>>>
>>> So how about moving all the signalling
>>> and locking out to backend, and have frontend
>>> invoke a callback to signal it?
>>>
>>> The whole threading thing then becomes a work-around
>>> for a backend that does not support select,
>>> instead of spilling out into frontend?
>>>
>> How do I get the lock calls (qemu_mutex_lock(&s->state_lock)) out of
>> the frontend? Do you want me to add callbacks to the backend
>> interface for locking (s->be_driver->ops->state_lock(s)) and one for
>> unlocking (s->be_driver->ops->state_unlock(tpm_be)) of the state
>> that really belongs to the front-end (state is 's') and invoke it as
>> shown in parenthesis and still keep s->state_lock around? Ideally
>> the locks would end up being 'nop's' if select() was available, but
>> in the end all backend will need to support that lock.
>>
>> [The lock protects the common structure so that the thread in the
>> backend can deliver the response to a request while the OS for
>> example polls the hardware interface for its current state.]
>>
>>
>>     Stefan
>
> Well, this is just an idea, please do not take this as
> a request or anything like that. Maybe it is a dumb one.
>
> Maybe something like what you describe.

I am starting to wonder what we're trying to achieve? We have a 
producer-consumer problem here with different threads. Both threads need 
to have some locking constructs along with the signalling (condition). 
The backend needs to be written in a certain way to work with the 
frontend, locking and signalling is a part of this. So I don't see it 
makes much sense to move all that code around, especially since there is 
only one backend right now. Maybe something really great can be done 
once there is a 2nd backend.

> Alternatively, I imagined that you can pass a copy
> or pointer of the necessary state to the backend,
> which queues the command and wakes the worker.
> In the reverse direction, backend queues a response
> and when OS polls you dequeue it and update state.
>

The OS doesn't necessarily need to poll. It is just one mode of 
operation of the OS, the other being interrupt-driven where the backend 
raises the interrupt once it has delivered the response to the frontend.


    Stefan

> Can this work?
Michael S. Tsirkin Feb. 21, 2012, 12:18 p.m. UTC | #9
On Tue, Feb 21, 2012 at 06:19:26AM -0500, Stefan Berger wrote:
> On 02/20/2012 10:18 PM, Michael S. Tsirkin wrote:
> >On Mon, Feb 20, 2012 at 07:43:05PM -0500, Stefan Berger wrote:
> >>On 02/20/2012 05:02 PM, Michael S. Tsirkin wrote:
> >>>On Wed, Dec 14, 2011 at 08:43:17AM -0500, Stefan Berger wrote:
> >>>>+/*
> >>>>+ * Send a TPM request.
> >>>>+ * Call this with the state_lock held so we can sync with the receive
> >>>>+ * callback.
> >>>>+ */
> >>>>+static void tpm_tis_tpm_send(TPMState *s, uint8_t locty)
> >>>>+{
> >>>>+    TPMTISState *tis =&s->s.tis;
> >>>>+
> >>>>+    tpm_tis_show_buffer(&tis->loc[locty].w_buffer, "tpm_tis: To TPM");
> >>>>+
> >>>>+    s->command_locty = locty;
> >>>>+    s->cmd_locty     =&tis->loc[locty];
> >>>>+
> >>>>+    /* w_offset serves as length indicator for length of data;
> >>>>+       it's reset when the response comes back */
> >>>>+    tis->loc[locty].status = TPM_TIS_STATUS_EXECUTION;
> >>>>+    tis->loc[locty].sts&= ~TPM_TIS_STS_EXPECT;
> >>>>+
> >>>>+    s->to_tpm_execute = true;
> >>>>+    qemu_cond_signal(&s->to_tpm_cond);
> >>>>+}
> >>>What happens IIUC is that frondend sets to_tpm_execute
> >>>and signals a condition, and backend clears it
> >>>and waits on a condition.
> >>>
> >>>So how about moving all the signalling
> >>>and locking out to backend, and have frontend
> >>>invoke a callback to signal it?
> >>>
> >>>The whole threading thing then becomes a work-around
> >>>for a backend that does not support select,
> >>>instead of spilling out into frontend?
> >>>
> >>How do I get the lock calls (qemu_mutex_lock(&s->state_lock)) out of
> >>the frontend? Do you want me to add callbacks to the backend
> >>interface for locking (s->be_driver->ops->state_lock(s)) and one for
> >>unlocking (s->be_driver->ops->state_unlock(tpm_be)) of the state
> >>that really belongs to the front-end (state is 's') and invoke it as
> >>shown in parenthesis and still keep s->state_lock around? Ideally
> >>the locks would end up being 'nop's' if select() was available, but
> >>in the end all backend will need to support that lock.
> >>
> >>[The lock protects the common structure so that the thread in the
> >>backend can deliver the response to a request while the OS for
> >>example polls the hardware interface for its current state.]
> >>
> >>
> >>    Stefan
> >
> >Well, this is just an idea, please do not take this as
> >a request or anything like that. Maybe it is a dumb one.
> >
> >Maybe something like what you describe.
> 
> I am starting to wonder what we're trying to achieve? We have a
> producer-consumer problem here with different threads. Both threads
> need to have some locking constructs along with the signalling
> (condition). The backend needs to be written in a certain way to
> work with the frontend, locking and signalling is a part of this. So
> I don't see it makes much sense to move all that code around,
> especially since there is only one backend right now. Maybe
> something really great can be done once there is a 2nd backend.

There are three areas where I think the code
could be improved:

1. Your backend does not expose a reentrant asynchronous API,
   but another backend might.
   So it might be a better idea to hide this detail, and build a
   reentrant asynchronous API on top of what the OS supplies.
2. Your backend looks into the frontend data structures.
   This will make it impossible to implement another frontend.
3. I personally find it very hard to follow inter-thread
   communication based on shared memory and conditions
   if it is spread around between 2 different patches
   and different files. This can alternatively be addressed
   by documenting the synchronization/locking strategy.


> >Alternatively, I imagined that you can pass a copy
> >or pointer of the necessary state to the backend,
> >which queues the command and wakes the worker.
> >In the reverse direction, backend queues a response
> >and when OS polls you dequeue it and update state.
> >
> 
> The OS doesn't necessarily need to poll. It is just one mode of
> operation of the OS, the other being interrupt-driven where the
> backend raises the interrupt once it has delivered the response to
> the frontend.
> 
> 
>    Stefan

So you will also need to signal the frontend when it
must interrupt the guest. This is not a problem;
for example, you can use a qemu_eventfd object for this.

> 
> >Can this work?
Stefan Berger Feb. 21, 2012, 3:05 p.m. UTC | #10
On 02/21/2012 07:18 AM, Michael S. Tsirkin wrote:
> On Tue, Feb 21, 2012 at 06:19:26AM -0500, Stefan Berger wrote:
>> On 02/20/2012 10:18 PM, Michael S. Tsirkin wrote:
>>> On Mon, Feb 20, 2012 at 07:43:05PM -0500, Stefan Berger wrote:
>>>> On 02/20/2012 05:02 PM, Michael S. Tsirkin wrote:
>>>>> On Wed, Dec 14, 2011 at 08:43:17AM -0500, Stefan Berger wrote:
>>> Alternatively, I imagined that you can pass a copy
>>> or pointer of the necessary state to the backend,
>>> which queues the command and wakes the worker.
>>> In the reverse direction, backend queues a response
>>> and when OS polls you dequeue it and update state.
>>>
>> The OS doesn't necessarily need to poll. It is just one mode of
>> operation of the OS, the other being interrupt-driven where the
>> backend raises the interrupt once it has delivered the response to
>> the frontend.
>>
>>
>>     Stefan
> So you will also need to signal the frontend when it
> must interrupt the guest. This is not a problem,
> for example you can use a qemu_eventfd object for this.
>

When the backend delivers the response it checks whether the interface 
is used in interrupt mode and raises the interrupt. The backend enters 
the frontend code with a callback. In this function a signal is also 
sent that may wake up the main thread which, upon suspend, may be waiting 
for the last command to be processed while sleeping on a condition 
variable.

I have now added a function to the backend interface that is invoked by 
the frontend to notify the backend of a TPM request. The backend code can 
then either notify a thread (passthrough and libtpms driver) or create a 
response right away and invoke that callback to the front-end to deliver 
the response (null driver). How frontend and backend handle 
notifications is isolated to the frontend and backend, with some backends 
(libtpms, passthrough) sharing the code for how the notification is done.

Stefan
Michael S. Tsirkin Feb. 21, 2012, 7:58 p.m. UTC | #11
On Tue, Feb 21, 2012 at 10:05:26AM -0500, Stefan Berger wrote:
> On 02/21/2012 07:18 AM, Michael S. Tsirkin wrote:
> >On Tue, Feb 21, 2012 at 06:19:26AM -0500, Stefan Berger wrote:
> >>On 02/20/2012 10:18 PM, Michael S. Tsirkin wrote:
> >>>On Mon, Feb 20, 2012 at 07:43:05PM -0500, Stefan Berger wrote:
> >>>>On 02/20/2012 05:02 PM, Michael S. Tsirkin wrote:
> >>>>>On Wed, Dec 14, 2011 at 08:43:17AM -0500, Stefan Berger wrote:
> >>>Alternatively, I imagined that you can pass a copy
> >>>or pointer of the necessary state to the backend,
> >>>which queues the command and wakes the worker.
> >>>In the reverse direction, backend queues a response
> >>>and when OS polls you dequeue it and update state.
> >>>
> >>The OS doesn't necessarily need to poll. It is just one mode of
> >>operation of the OS, the other being interrupt-driven where the
> >>backend raises the interrupt once it has delivered the response to
> >>the frontend.
> >>
> >>
> >>    Stefan
> >So you will also need to signal the frontend when it
> >must interrupt the guest. This is not a problem,
> >for example you can use a qemu_eventfd object for this.
> >
> 
> When the backend delivers the response it checks whether the
> interface is used in interrupt mode and raises the interrupt.

IMO it's the frontend that should send interrupts.
Yes, it kind of works for ISA anyway, but e.g. PCI
needs to update configuration etc.

> The
> backend enters the frontend code with a callback. In this function
> also a signal is sent that may wake up the main thread that, upon
> suspend, may be waiting for the last command to be processed and be
> sleeping on a condition variable.
> 
> I now added a function to the backend interface that is invoked by
> the frontend to notify the backend of a TPM request. The backend
> code can then either notify a thread (passthrough and libtpms
> driver) or create a response right away and invoke that callback to
> the front-end to deliver the response (null driver). How frontend
> and backend handle notifications is isolated to the frontend and
> backend with some backends (libtpms, passthough) sharing the code
> for how the notification is done.
> 
> Stefan

Right. And all the locking/threading can then be internal to the backend.
Stefan Berger Feb. 21, 2012, 10:30 p.m. UTC | #12
On 02/21/2012 02:58 PM, Michael S. Tsirkin wrote:
> On Tue, Feb 21, 2012 at 10:05:26AM -0500, Stefan Berger wrote:
>> On 02/21/2012 07:18 AM, Michael S. Tsirkin wrote:
>>>
>> When the backend delivers the response it checks whether the
>> interface is used in interrupt mode and raises the interrupt.
> IMO it's the frontend that should send interrupts.
> Yes it kind of works for isa anyway, but e.g. pci
> needs to update configuration etc.
>

The code that causes the interrupt to be raised is in the frontend. The 
function doing that is invoked via callback from the backend. This 
should be ok?
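
To illustrate that split (all names here are invented; this is not the patch's
code): the backend only invokes a callback supplied by the frontend, and the
frontend, which owns the guest-programmed interrupt-enable state, decides
whether the IRQ is actually raised:

#include <stdint.h>
#include <stdio.h>

#define INT_DATA_AVAILABLE (1u << 0)

typedef struct Frontend {
    uint32_t inte;                      /* guest-programmed interrupt enables */
} Frontend;

static void raise_irq(void)
{
    printf("frontend: raising IRQ\n");  /* stand-in for qemu_irq_raise() */
}

/* callback the backend invokes once a response is ready */
static void frontend_receive_cb(Frontend *fe)
{
    if (fe->inte & INT_DATA_AVAILABLE) {
        raise_irq();
    }
    /* otherwise the guest will find the data when it polls the status reg */
}

int main(void)
{
    Frontend fe = { .inte = INT_DATA_AVAILABLE };

    /* the backend would call this from its worker thread */
    frontend_receive_cb(&fe);
    return 0;
}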

>> The
>> backend enters the frontend code with a callback. In this function
>> also a signal is sent that may wake up the main thread that, upon
>> suspend, may be waiting for the last command to be processed and be
>> sleeping on a condition variable.
>>
>> I now added a function to the backend interface that is invoked by
>> the frontend to notify the backend of a TPM request. The backend
>> code can then either notify a thread (passthrough and libtpms
>> driver) or create a response right away and invoke that callback to
>> the front-end to deliver the response (null driver). How frontend
>> and backend handle notifications is isolated to the frontend and
>> backend with some backends (libtpms, passthough) sharing the code
>> for how the notification is done.
>>
>> Stefan
> Right. And all the locking/threading can then be internal to the backend.
>

Do you really want to replace code like this in the frontend:

qemu_mutex_lock(&s->state_lock)
[...]
qemu_mutex_unlock(&s->state_lock)


with


s->be_driver->ops->state_lock(s->be_driver)
[...]
s->be_driver->ops->state_unlock(s->be_driver))


where the backend starts protecting frontend data structures?

At the moment there are two backends that need threading: the libtpms 
and passthrough backends. Both will require locking of data structures 
that belong to the frontend. Only the null driver doesn't need a thread, 
and the main thread can call into the backend, create the response and 
call via callback into the frontend to deliver the response. If 
structures are protected via mutexes then only the NULL driver (which we 
don't want anyway) may end up grabbing mutexes that really aren't 
necessary, while the two other backends need them. I don't see the 
mutexes as problematic. The frontend at least protects its data 
structures for the callbacks and other API calls it offers, and they 
simply are thread-safe.

     Stefan
Michael S. Tsirkin Feb. 21, 2012, 11:08 p.m. UTC | #13
On Tue, Feb 21, 2012 at 05:30:32PM -0500, Stefan Berger wrote:
> On 02/21/2012 02:58 PM, Michael S. Tsirkin wrote:
> >On Tue, Feb 21, 2012 at 10:05:26AM -0500, Stefan Berger wrote:
> >>On 02/21/2012 07:18 AM, Michael S. Tsirkin wrote:
> >>>
> >>When the backend delivers the response it checks whether the
> >>interface is used in interrupt mode and raises the interrupt.
> >IMO it's the frontend that should send interrupts.
> >Yes it kind of works for isa anyway, but e.g. pci
> >needs to update configuration etc.
> >
> 
> The code that causes the interrupt to be raised is in the frontend.
> The function doing that is invoked via callback from the backend.
> This should be ok?

Absolutely.

> >>The
> >>backend enters the frontend code with a callback. In this function
> >>also a signal is sent that may wake up the main thread that, upon
> >>suspend, may be waiting for the last command to be processed and be
> >>sleeping on a condition variable.
> >>
> >>I now added a function to the backend interface that is invoked by
> >>the frontend to notify the backend of a TPM request. The backend
> >>code can then either notify a thread (passthrough and libtpms
> >>driver) or create a response right away and invoke that callback to
> >>the front-end to deliver the response (null driver). How frontend
> >>and backend handle notifications is isolated to the frontend and
> >>backend with some backends (libtpms, passthough) sharing the code
> >>for how the notification is done.
> >>
> >>Stefan
> >Right. And all the locking/threading can then be internal to the backend.
> >
> 
> Do you really want to replace code like this in the frontend:
> 
> qemu_mutex_lock(&s->state_lock)
> [...]
> qemu_mutex_unlock(&s->state_lock)
> 
> 
> with
> 
> 
> s->be_driver->ops->state_lock(s->be_driver)
> [...]
> s->be_driver->ops->state_unlock(s->be_driver))
> 
> 
> where the backend starts protecting frontend data structures ?

It's ugly. I hope you can do something saner:
ops->send_command(....)
with the command encapsulating the relevant info.
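
For what it's worth, one possible shape of that idea, as a self-contained
sketch (the types and names are invented here, not taken from the patch): the
frontend hands the backend a request object together with a completion
callback, and whatever locking or threading the backend needs stays hidden
behind send_command():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct TPMRequest TPMRequest;

struct TPMRequest {
    uint8_t locality;                    /* locality the command came in on */
    const uint8_t *buf;                  /* marshalled TPM command */
    size_t len;
    void (*done)(TPMRequest *req, const uint8_t *resp, size_t resp_len);
};

typedef struct TPMBackendOps {
    /* queue one request; the backend decides if a worker thread is involved */
    void (*send_command)(TPMRequest *req);
} TPMBackendOps;

/* trivial "null" backend: answers synchronously from the caller's context */
static void null_send_command(TPMRequest *req)
{
    static const uint8_t empty_resp[10];

    req->done(req, empty_resp, sizeof(empty_resp));
}

static const TPMBackendOps null_backend_ops = {
    .send_command = null_send_command,
};

/* frontend completion callback: would update registers and raise the IRQ */
static void frontend_done(TPMRequest *req, const uint8_t *resp, size_t resp_len)
{
    (void)resp;
    printf("frontend: %zu response bytes for locality %u\n",
           resp_len, (unsigned)req->locality);
}

int main(void)
{
    uint8_t cmd[10] = { 0 };
    TPMRequest req = {
        .locality = 0, .buf = cmd, .len = sizeof(cmd), .done = frontend_done,
    };

    null_backend_ops.send_command(&req);
    return 0;
}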

> At the moment there are two backends that need threading: the
> libtpms and passthrough backends. Both will require locking of
> datastructures that belong to the frontend. Only the null driver
> doesn't need a thread and the main thread can call into the backend,
> create the response and call via callback into the frontend to
> deliver the repsonse. If structures are protected via mutxes then
> only the NULL driver (which we don't want anyway) may end up
> grabbing mutexes that really aren't necessary while the two other
> backends need them. I don't see the mitextes as problematic. The
> frontend at least protects its data structures for the callbacks and
> other API calls it offers and they simply are thread-safe.
> 
>     Stefan

Worst case, you can take a qemu mutex. Is the TPM so
performance-sensitive as to make contention on that
lock a problem?
Stefan Berger Feb. 22, 2012, 12:21 a.m. UTC | #14
On 02/21/2012 06:08 PM, Michael S. Tsirkin wrote:
> On Tue, Feb 21, 2012 at 05:30:32PM -0500, Stefan Berger wrote:
>
>
>> At the moment there are two backends that need threading: the
>> libtpms and passthrough backends. Both will require locking of
>> datastructures that belong to the frontend. Only the null driver
>> doesn't need a thread and the main thread can call into the backend,
>> create the response and call via callback into the frontend to
>> deliver the repsonse. If structures are protected via mutxes then
>> only the NULL driver (which we don't want anyway) may end up
>> grabbing mutexes that really aren't necessary while the two other
>> backends need them. I don't see the mitextes as problematic. The
>> frontend at least protects its data structures for the callbacks and
>> other API calls it offers and they simply are thread-safe.
>>
>>      Stefan
> Worst case, you can take a qemu mutex. Is tpm very
> performance-sensitive to make contention on that
> lock a problem?
>
We have to lock a common data structure 'somehow'. I don't see a way 
around it. The locking times are short since no major computations are 
done while the lock is held.
Considering that the TPM TIS interface is a non-DMA, byte-by-byte 
send/receive interface, the performance problems, if they are a problem 
at all, are to be found somewhere else: VM exits, for example, or, if the 
interface is used in polling mode, the interval between polls.

    Stefan
Michael S. Tsirkin Feb. 22, 2012, 4:34 a.m. UTC | #15
On Tue, Feb 21, 2012 at 07:21:28PM -0500, Stefan Berger wrote:
> On 02/21/2012 06:08 PM, Michael S. Tsirkin wrote:
> >On Tue, Feb 21, 2012 at 05:30:32PM -0500, Stefan Berger wrote:
> >
> >
> >>At the moment there are two backends that need threading: the
> >>libtpms and passthrough backends. Both will require locking of
> >>datastructures that belong to the frontend. Only the null driver
> >>doesn't need a thread and the main thread can call into the backend,
> >>create the response and call via callback into the frontend to
> >>deliver the repsonse. If structures are protected via mutxes then
> >>only the NULL driver (which we don't want anyway) may end up
> >>grabbing mutexes that really aren't necessary while the two other
> >>backends need them. I don't see the mitextes as problematic. The
> >>frontend at least protects its data structures for the callbacks and
> >>other API calls it offers and they simply are thread-safe.
> >>
> >>     Stefan
> >Worst case, you can take a qemu mutex. Is tpm very
> >performance-sensitive to make contention on that
> >lock a problem?
> >
> We have to lock a common data structure 'somehow'. I don't see a way
> around it.

Naturally. But it could be an implementation detail of
the backend.

> The locking times are short since no major computations
> are done while the lock is held.
> Considering that the TPM TIS interface is a non-DMA, byte-by-byte
> send/receive interface, the performance problems, if at all a
> problem, are to be found somewhere else : VMExits for example; if
> interface is used in polling mode, then the interval between polls.
> 
>    Stefan

In that case, you can take the qemu lock in your backend
and avoid locking in the frontend completely.
Stefan Berger Feb. 22, 2012, 3:03 p.m. UTC | #16
On 02/21/2012 11:34 PM, Michael S. Tsirkin wrote:
> On Tue, Feb 21, 2012 at 07:21:28PM -0500, Stefan Berger wrote:
>> On 02/21/2012 06:08 PM, Michael S. Tsirkin wrote:
>>
>
>> The locking times are short since no major computations
>> are done while the lock is held.
>> Considering that the TPM TIS interface is a non-DMA, byte-by-byte
>> send/receive interface, the performance problems, if at all a
>> problem, are to be found somewhere else : VMExits for example; if
>> interface is used in polling mode, then the interval between polls.
>>
>>     Stefan
> In that case, you can take the qemu lock in your backend
> and avoid locking in the frontend completely.
>
Maybe it's just me, but I prefer to have locks around the code that has 
the critical section, rather than entering a function with locks held, 
especially since the lock would then be grabbed in a different file (the 
backend).
Also I do need a lock in the frontend for the condition that is used there.

     Stefan
Stefan Berger Feb. 22, 2012, 5:55 p.m. UTC | #17
On 02/21/2012 06:08 PM, Michael S. Tsirkin wrote:
> On Tue, Feb 21, 2012 at 05:30:32PM -0500, Stefan Berger wrote:
>> On 02/21/2012 02:58 PM, Michael S. Tsirkin wrote:
>
> Worst case, you can take a qemu mutex. Is tpm very
> performance-sensitive to make contention on that
> lock a problem?
>

FYI: Some performance measurements with the libtpms backend and Linux 
3.3.0-rc3 with IMA enabled. IMA takes ~1324 measurements while booting 
and sends TPM_Extend()s. I implemented a 'fast path' for TPM ordinals 
that are considered 'short' in terms of duration of execution, and this 
TPM_Extend operation is one of them. In this case the Qemu main thread 
enters the TPM code directly rather than using the backend thread. Here 
are the measurements, with the 1st number taken when dmesg states 
'mtrr: no MTRR for fc000..' and the 2nd one 'eth0: no IPv6 routers 
present'. Neither may be a great way to measure, but they at least show 
some 'trends':
                                                                       
                         run 1            run 2            run 3            average          percentage

no IMA:                  14.17 / 21.93    14.28 / 21.24    12.83 / 20.18    13.76 / 21.12    100 / 100
w/ fast path + IRQ:      15.50 / 22.72    14.58 / 21.41    15.30 / 22.37    15.12 / 22.17    110 / 105
w/ fast path + poll:     16.17 / 22.96    14.67 / 21.66    14.43 / 22.17    15.09 / 22.26    110 / 105

no fast path + IRQ:      16.93 / 23.51    13.94 / 20.72    16.39 / 23.48    15.75 / 22.57    114 / 107
no fast path + poll:     17.03 / 23.95    18.11 / 25.38    16.67 / 24.08    17.27 / 24.47    126 / 119


      Stefan
Stefan Berger Feb. 23, 2012, 8:47 p.m. UTC | #18
On 02/20/2012 10:48 AM, Stefan Berger wrote:
> On 02/20/2012 03:51 AM, Michael S. Tsirkin wrote:
>
>
>>> +static const MemoryRegionOps tpm_tis_memory_ops = {
>>> +    .read = tpm_tis_mmio_read,
>>> +    .write = tpm_tis_mmio_write,
>>> +    .endianness = DEVICE_NATIVE_ENDIAN,
>> Are you sure? Most devices are BIG or LITTLE.
>>
>
> Right, it should be BIG endian. I'll fix it. Luckily there are only a 
> few 32bit accesses (and no 16bit access) also in 

Replace that with 'LITTLE endian'.

    Stefan
Stefan Berger March 2, 2012, 12:02 p.m. UTC | #19
On 02/21/2012 06:08 PM, Michael S. Tsirkin wrote:
> On Tue, Feb 21, 2012 at 05:30:32PM -0500, Stefan Berger wrote:
>> On 02/21/2012 02:58 PM, Michael S. Tsirkin wrote:
>>> On Tue, Feb 21, 2012 at 10:05:26AM -0500, Stefan Berger wrote:
>>>> On 02/21/2012 07:18 AM, Michael S. Tsirkin wrote:
>>>> When the backend delivers the response it checks whether the
>>>> interface is used in interrupt mode and raises the interrupt.
>>> IMO it's the frontend that should send interrupts.
>>> Yes it kind of works for isa anyway, but e.g. pci
>>> needs to update configuration etc.
>>>
>> The code that causes the interrupt to be raised is in the frontend.
>> The function doing that is invoked via callback from the backend.
>> This should be ok?
> Absolutely.
>
>>>> The
>>>> backend enters the frontend code with a callback. In this function
>>>> also a signal is sent that may wake up the main thread that, upon
>>>> suspend, may be waiting for the last command to be processed and be
>>>> sleeping on a condition variable.
>>>>
>>>> I now added a function to the backend interface that is invoked by
>>>> the frontend to notify the backend of a TPM request. The backend
>>>> code can then either notify a thread (passthrough and libtpms
>>>> driver) or create a response right away and invoke that callback to
>>>> the front-end to deliver the response (null driver). How frontend
>>>> and backend handle notifications is isolated to the frontend and
>>>> backend with some backends (libtpms, passthough) sharing the code
>>>> for how the notification is done.
>>>>
>>>> Stefan
>>> Right. And all the locking/threading can then be internal to the backend.
>>>
>> Do you really want to replace code like this in the frontend:
>>
>> qemu_mutex_lock(&s->state_lock)
>> [...]
>> qemu_mutex_unlock(&s->state_lock)
>>
>>
>> with
>>
>>
>> s->be_driver->ops->state_lock(s->be_driver)
>> [...]
>> s->be_driver->ops->state_unlock(s->be_driver))
>>
>>
>> where the backend starts protecting frontend data structures ?
> It's ugly I hope you can do something saner:
> ops->send_command(....)
> with command encapsulating the relevant info.
>
>> At the moment there are two backends that need threading: the
>> libtpms and passthrough backends. Both will require locking of
>> datastructures that belong to the frontend. Only the null driver
>> doesn't need a thread and the main thread can call into the backend,
>> create the response and call via callback into the frontend to
>> deliver the repsonse. If structures are protected via mutxes then
>> only the NULL driver (which we don't want anyway) may end up
>> grabbing mutexes that really aren't necessary while the two other
>> backends need them. I don't see the mitextes as problematic. The
>> frontend at least protects its data structures for the callbacks and
>> other API calls it offers and they simply are thread-safe.
>>
>>      Stefan
> Worst case, you can take a qemu mutex. Is tpm very
> performance-sensitive to make contention on that
> lock a problem?
>

Having instrumented the code with qemu_mutex_trylock() and a counter for 
failed attempts (followed by a regular qemu_mutex_lock()), I see 
practically no lock contention at all for either polling or IRQ mode 
being used by the Linux driver.

IRQ mode: 4 failed lock attempts out of 1726208 locks -> 0.00%
polling mode: 2 failed lock attempts out of 1545216 locks -> 0.00%

I used the libtpms-based backend and ran IMA and a test suite in 
the VM.
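
For reference, the instrumentation amounts to something like the following
(shown with plain pthreads and made-up counter names; the measurement above
used qemu_mutex_trylock()/qemu_mutex_lock()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long lock_attempts;   /* counters are approximate; good  */
static unsigned long lock_contended;  /* enough for a rough measurement  */

static void instrumented_lock(void)
{
    lock_attempts++;
    if (pthread_mutex_trylock(&state_lock) != 0) {
        /* someone else holds the lock right now -> count the contention */
        lock_contended++;
        pthread_mutex_lock(&state_lock);
    }
}

static void instrumented_unlock(void)
{
    pthread_mutex_unlock(&state_lock);
}

int main(void)
{
    instrumented_lock();
    /* ... critical section ... */
    instrumented_unlock();

    printf("%lu failed lock attempts out of %lu locks\n",
           lock_contended, lock_attempts);
    return 0;
}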


     Stefan
Michael S. Tsirkin March 4, 2012, 10:59 p.m. UTC | #20
On Fri, Mar 02, 2012 at 07:02:21AM -0500, Stefan Berger wrote:
> On 02/21/2012 06:08 PM, Michael S. Tsirkin wrote:
> >On Tue, Feb 21, 2012 at 05:30:32PM -0500, Stefan Berger wrote:
> >>On 02/21/2012 02:58 PM, Michael S. Tsirkin wrote:
> >>>On Tue, Feb 21, 2012 at 10:05:26AM -0500, Stefan Berger wrote:
> >>>>On 02/21/2012 07:18 AM, Michael S. Tsirkin wrote:
> >>>>When the backend delivers the response it checks whether the
> >>>>interface is used in interrupt mode and raises the interrupt.
> >>>IMO it's the frontend that should send interrupts.
> >>>Yes it kind of works for isa anyway, but e.g. pci
> >>>needs to update configuration etc.
> >>>
> >>The code that causes the interrupt to be raised is in the frontend.
> >>The function doing that is invoked via callback from the backend.
> >>This should be ok?
> >Absolutely.
> >
> >>>>The
> >>>>backend enters the frontend code with a callback. In this function
> >>>>also a signal is sent that may wake up the main thread that, upon
> >>>>suspend, may be waiting for the last command to be processed and be
> >>>>sleeping on a condition variable.
> >>>>
> >>>>I now added a function to the backend interface that is invoked by
> >>>>the frontend to notify the backend of a TPM request. The backend
> >>>>code can then either notify a thread (passthrough and libtpms
> >>>>driver) or create a response right away and invoke that callback to
> >>>>the front-end to deliver the response (null driver). How frontend
> >>>>and backend handle notifications is isolated to the frontend and
> >>>>backend with some backends (libtpms, passthough) sharing the code
> >>>>for how the notification is done.
> >>>>
> >>>>Stefan
> >>>Right. And all the locking/threading can then be internal to the backend.
> >>>
> >>Do you really want to replace code like this in the frontend:
> >>
> >>qemu_mutex_lock(&s->state_lock)
> >>[...]
> >>qemu_mutex_unlock(&s->state_lock)
> >>
> >>
> >>with
> >>
> >>
> >>s->be_driver->ops->state_lock(s->be_driver)
> >>[...]
> >>s->be_driver->ops->state_unlock(s->be_driver))
> >>
> >>
> >>where the backend starts protecting frontend data structures ?
> >It's ugly I hope you can do something saner:
> >ops->send_command(....)
> >with command encapsulating the relevant info.
> >
> >>At the moment there are two backends that need threading: the
> >>libtpms and passthrough backends. Both will require locking of
> >>datastructures that belong to the frontend. Only the null driver
> >>doesn't need a thread and the main thread can call into the backend,
> >>create the response and call via callback into the frontend to
> >>deliver the repsonse. If structures are protected via mutxes then
> >>only the NULL driver (which we don't want anyway) may end up
> >>grabbing mutexes that really aren't necessary while the two other
> >>backends need them. I don't see the mitextes as problematic. The
> >>frontend at least protects its data structures for the callbacks and
> >>other API calls it offers and they simply are thread-safe.
> >>
> >>     Stefan
> >Worst case, you can take a qemu mutex. Is tpm very
> >performance-sensitive to make contention on that
> >lock a problem?
> >
> 
> Having instrumented the code with qemu_mutex_trylock() and a counter
> for failed attempts with subsequent qemu_mutex_lock() practically
> shows no lock contention at all for either polling or IRQ mode being
> used by the Linux driver.
> 
> IRQ mode: 4 failed lock attempts out of 1726208 locks -> 0.00%
> polling mode: 2 failed lock attempts out of 1545216 locks -> 0.00%
> 
> I used the libtpms based backend with and ran IMA and a test suite
> in the VM.
> 
> 
>     Stefan

So maybe you can just do qemu_mutex_lock_iothread, similar
to what kvm does.
Stefan Berger March 5, 2012, 3:44 p.m. UTC | #21
On 03/04/2012 05:59 PM, Michael S. Tsirkin wrote:
> On Fri, Mar 02, 2012 at 07:02:21AM -0500, Stefan Berger wrote:
>> Having instrumented the code with qemu_mutex_trylock() and a counter
>> for failed attempts with subsequent qemu_mutex_lock() practically
>> shows no lock contention at all for either polling or IRQ mode being
>> used by the Linux driver.
>>
>> IRQ mode: 4 failed lock attempts out of 1726208 locks ->  0.00%
>> polling mode: 2 failed lock attempts out of 1545216 locks ->  0.00%
>>
>> I used the libtpms based backend with and ran IMA and a test suite
>> in the VM.
>>
>>
>>      Stefan
> so maybe you can just do qemu_mutex_lock_iothread similar
> to what kvm does.
>

I have tried that now. I ended up having to remove the locking from the 
TIS except for one place where the result is delivered from the backend 
to the frontend via the thread. The reason I had to remove the lock 
is that the pthread library detects a deadlock, likely due to attempted 
double-locking of the global lock, which is not allowed to be locked 
more than once. That by itself is not the scary part, but here's the 
stacktrace while I still had the lock in there:

#0  0x00007ffff37e2215 in __GI_raise (sig=6)
     at ../nptl/sysdeps/unix/sysv/linux/raise.c:64
#1  0x00007ffff37e3b2b in __GI_abort () at abort.c:93
#2  0x00005555555c1069 in error_exit (err=<optimized out>, msg=
     0x55555583f350 "qemu_mutex_lock") at qemu-thread-posix.c:25
#3  0x00005555556daf10 in qemu_mutex_lock (mutex=<optimized out>)
     at qemu-thread-posix.c:56
#4  0x00005555557a6d43 in tpm_tis_mmio_read (opaque=0x555556e06a40, addr=3840,
     size=<optimized out>) at /home/stefanb/qemu/qemu-git.pt/hw/tpm_tis.c:448
#5  0x000055555575e157 in memory_region_read_accessor (opaque=<optimized out>,
     addr=<optimized out>, value=0x7fffef596c60, size=<optimized out>, shift=0,
     mask=4294967295) at /home/stefanb/qemu/qemu-git.pt/memory.c:259
#6  0x000055555575e270 in access_with_adjusted_size (addr=3840, value=
     0x7fffef596c60, size=4, access_size_min=<optimized out>,
     access_size_max=<optimized out>, access=
     0x55555575e120 <memory_region_read_accessor>, opaque=0x555556e06af0)
     at /home/stefanb/qemu/qemu-git.pt/memory.c:304
#7  0x0000555555762b80 in memory_region_dispatch_read1 (size=4, addr=3840, mr=
     0x555556e06af0) at /home/stefanb/qemu/qemu-git.pt/memory.c:928
#8  memory_region_dispatch_read (size=4, addr=3840, mr=0x555556e06af0)
     at /home/stefanb/qemu/qemu-git.pt/memory.c:960
#9  io_mem_read (io_index=<optimized out>, addr=3840, size=4)
     at /home/stefanb/qemu/qemu-git.pt/memory.c:1558
#10 0x000055555573a876 in cpu_physical_memory_rw (addr=4275310336, buf=
     0x7ffff7ff4028 "", len=4, is_write=0)
     at /home/stefanb/qemu/qemu-git.pt/exec.c:3620
#11 0x0000555555755225 in kvm_cpu_exec (env=0x555556561100)
     at /home/stefanb/qemu/qemu-git.pt/kvm-all.c:1173
#12 0x0000555555731201 in qemu_kvm_cpu_thread_fn (arg=0x555556561100)
     at /home/stefanb/qemu/qemu-git.pt/cpus.c:732
#13 0x00007ffff64a0b41 in start_thread (arg=0x7fffef597700)
     at pthread_create.c:305
#14 0x00007ffff388c49d in clone ()
     at ../sysdeps/unix/sysv/linux/x86_64/clone.S:115

It may say qemu_mutex_lock but it is really calling 
qemu_mutex_lock_iothread.
The initial lock happens in kvm_cpu_exec (#11). The subsequent lock 
then occurs in tpm_tis_mmio_read (#4), far away from the first lock. 
If something in the code there changes, we may end up having a race 
in the TPM TIS driver if we factor the lock out. So personally 
I would prefer local locking, where we can withstand changes in other 
parts of the code, unless one can be sure that that global lock is never 
going to go away and that all code paths reaching the TIS driver 
functions that need the locking have that global lock held.
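
To spell out the constraint (plain pthreads, invented names, not QEMU code):
the global lock is not recursive, so an MMIO handler that runs with the caller
already holding it must either rely on exactly that fact or take its own
device-local lock, which is the local locking preferred here:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER; /* "iothread" lock */
static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER; /* TIS-local lock  */

static unsigned int mmio_read(void)
{
    unsigned int val;

    /* taking global_lock here would deadlock: the caller already holds it */
    pthread_mutex_lock(&device_lock);
    val = 0x42;                         /* stand-in for reading device state */
    pthread_mutex_unlock(&device_lock);

    return val;
}

int main(void)
{
    pthread_mutex_lock(&global_lock);   /* roughly what kvm_cpu_exec() does */
    printf("read: 0x%x\n", mmio_read());
    pthread_mutex_unlock(&global_lock);
    return 0;
}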

     Stefan
Stefan Berger March 5, 2012, 3:46 p.m. UTC | #22
Resending with people properly cc'ed.

On 03/04/2012 05:59 PM, Michael S. Tsirkin wrote:
> On Fri, Mar 02, 2012 at 07:02:21AM -0500, Stefan Berger wrote:
>> Having instrumented the code with qemu_mutex_trylock() and a counter
>> for failed attempts with subsequent qemu_mutex_lock() practically
>> shows no lock contention at all for either polling or IRQ mode being
>> used by the Linux driver.
>>
>> IRQ mode: 4 failed lock attempts out of 1726208 locks ->  0.00%
>> polling mode: 2 failed lock attempts out of 1545216 locks ->  0.00%
>>
>> I used the libtpms based backend with and ran IMA and a test suite
>> in the VM.
>>
>>
>>      Stefan
> so maybe you can just do qemu_mutex_lock_iothread similar
> to what kvm does.
>

I have tried that now. I ended up having to remove the locking from the 
TIS except for one place where the result is delivered from the backend 
to the frontend via the thread. The reason I had to remove the lock 
is that the pthread library detects a deadlock, likely due to attempted 
double-locking of the global lock, which is not allowed to be locked 
more than once. That by itself is not the scary part, but here's the 
stacktrace while I still had the lock in there:

#0  0x00007ffff37e2215 in __GI_raise (sig=6)
     at ../nptl/sysdeps/unix/sysv/linux/raise.c:64
#1  0x00007ffff37e3b2b in __GI_abort () at abort.c:93
#2  0x00005555555c1069 in error_exit (err=<optimized out>, msg=
     0x55555583f350 "qemu_mutex_lock") at qemu-thread-posix.c:25
#3  0x00005555556daf10 in qemu_mutex_lock (mutex=<optimized out>)
     at qemu-thread-posix.c:56
#4  0x00005555557a6d43 in tpm_tis_mmio_read (opaque=0x555556e06a40, addr=3840,
     size=<optimized out>) at /home/stefanb/qemu/qemu-git.pt/hw/tpm_tis.c:448
#5  0x000055555575e157 in memory_region_read_accessor (opaque=<optimized out>,
     addr=<optimized out>, value=0x7fffef596c60, size=<optimized out>, shift=0,
     mask=4294967295) at /home/stefanb/qemu/qemu-git.pt/memory.c:259
#6  0x000055555575e270 in access_with_adjusted_size (addr=3840, value=
     0x7fffef596c60, size=4, access_size_min=<optimized out>,
     access_size_max=<optimized out>, access=
     0x55555575e120 <memory_region_read_accessor>, opaque=0x555556e06af0)
     at /home/stefanb/qemu/qemu-git.pt/memory.c:304
#7  0x0000555555762b80 in memory_region_dispatch_read1 (size=4, addr=3840, mr=
     0x555556e06af0) at /home/stefanb/qemu/qemu-git.pt/memory.c:928
#8  memory_region_dispatch_read (size=4, addr=3840, mr=0x555556e06af0)
     at /home/stefanb/qemu/qemu-git.pt/memory.c:960
#9  io_mem_read (io_index=<optimized out>, addr=3840, size=4)
     at /home/stefanb/qemu/qemu-git.pt/memory.c:1558
#10 0x000055555573a876 in cpu_physical_memory_rw (addr=4275310336, buf=
     0x7ffff7ff4028 "", len=4, is_write=0)
     at /home/stefanb/qemu/qemu-git.pt/exec.c:3620
#11 0x0000555555755225 in kvm_cpu_exec (env=0x555556561100)
     at /home/stefanb/qemu/qemu-git.pt/kvm-all.c:1173
#12 0x0000555555731201 in qemu_kvm_cpu_thread_fn (arg=0x555556561100)
     at /home/stefanb/qemu/qemu-git.pt/cpus.c:732
#13 0x00007ffff64a0b41 in start_thread (arg=0x7fffef597700)
     at pthread_create.c:305
#14 0x00007ffff388c49d in clone ()
     at ../sysdeps/unix/sysv/linux/x86_64/clone.S:115

It may say qemu_mutex_lock but it is really calling 
qemu_mutex_lock_iothread.
The initial lock happens in kvm_cpu_exec (#11). The subsequent lock 
then occurs in tpm_tis_mmio_read (#4), far away from the first lock. 
If something in the code there changes, we may end up having a race 
in the TPM TIS driver if we factor the lock out. So personally 
I would prefer local locking, where we can withstand changes in other 
parts of the code, unless one can be sure that that global lock is never 
going to go away and that all code paths reaching the TIS driver 
functions that need the locking have that global lock held.

     Stefan

Patch

diff --git a/hw/tpm_tis.c b/hw/tpm_tis.c
new file mode 100644
index 0000000..bcbc987
--- /dev/null
+++ b/hw/tpm_tis.c
@@ -0,0 +1,807 @@ 
+/*
+ * tpm_tis.c - QEMU's TPM TIS interface emulator
+ *
+ * Copyright (C) 2006,2010,2011 IBM Corporation
+ *
+ * Authors:
+ *  Stefan Berger <stefanb@us.ibm.com>
+ *  David Safford <safford@us.ibm.com>
+ *
+ * Xen 4 support: Andreas Niederl <andreas.niederl@iaik.tugraz.at>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ *
+ * Implementation of the TIS interface according to specs found at
+ * http://www.trustedcomputinggroup.org.
+ * In the developers menu choose the PC Client section then find the TIS
+ * specification.
+ */
+
+#include "tpm.h"
+#include "block.h"
+#include "exec-memory.h"
+#include "hw/hw.h"
+#include "hw/pc.h"
+#include "hw/tpm_tis.h"
+#include "qemu-error.h"
+#include "qemu-common.h"
+
+/*#define DEBUG_TIS */
+
+#ifdef DEBUG_TIS
+#define dprintf(fmt, ...) \
+    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define dprintf(fmt, ...) \
+    do { } while (0)
+#endif
+
+/* whether the STS interrupt is supported */
+/*#define RAISE_STS_IRQ */
+
+/* tis registers */
+#define TPM_TIS_REG_ACCESS                0x00
+#define TPM_TIS_REG_INT_ENABLE            0x08
+#define TPM_TIS_REG_INT_VECTOR            0x0c
+#define TPM_TIS_REG_INT_STATUS            0x10
+#define TPM_TIS_REG_INTF_CAPABILITY       0x14
+#define TPM_TIS_REG_STS                   0x18
+#define TPM_TIS_REG_DATA_FIFO             0x24
+#define TPM_TIS_REG_DID_VID               0xf00
+#define TPM_TIS_REG_RID                   0xf04
+
+#define TPM_TIS_STS_VALID                 (1 << 7)
+#define TPM_TIS_STS_COMMAND_READY         (1 << 6)
+#define TPM_TIS_STS_TPM_GO                (1 << 5)
+#define TPM_TIS_STS_DATA_AVAILABLE        (1 << 4)
+#define TPM_TIS_STS_EXPECT                (1 << 3)
+#define TPM_TIS_STS_RESPONSE_RETRY        (1 << 1)
+
+#define TPM_TIS_ACCESS_TPM_REG_VALID_STS  (1 << 7)
+#define TPM_TIS_ACCESS_ACTIVE_LOCALITY    (1 << 5)
+#define TPM_TIS_ACCESS_BEEN_SEIZED        (1 << 4)
+#define TPM_TIS_ACCESS_SEIZE              (1 << 3)
+#define TPM_TIS_ACCESS_PENDING_REQUEST    (1 << 2)
+#define TPM_TIS_ACCESS_REQUEST_USE        (1 << 1)
+#define TPM_TIS_ACCESS_TPM_ESTABLISHMENT  (1 << 0)
+
+#define TPM_TIS_INT_ENABLED               (1 << 31)
+#define TPM_TIS_INT_DATA_AVAILABLE        (1 << 0)
+#define TPM_TIS_INT_STS_VALID             (1 << 1)
+#define TPM_TIS_INT_LOCALITY_CHANGED      (1 << 2)
+#define TPM_TIS_INT_COMMAND_READY         (1 << 7)
+
+#ifndef RAISE_STS_IRQ
+
+# define TPM_TIS_INTERRUPTS_SUPPORTED (TPM_TIS_INT_LOCALITY_CHANGED | \
+                                       TPM_TIS_INT_DATA_AVAILABLE   | \
+                                       TPM_TIS_INT_COMMAND_READY)
+
+#else
+
+# define TPM_TIS_INTERRUPTS_SUPPORTED (TPM_TIS_INT_LOCALITY_CHANGED | \
+                                       TPM_TIS_INT_DATA_AVAILABLE   | \
+                                       TPM_TIS_INT_STS_VALID | \
+                                       TPM_TIS_INT_COMMAND_READY)
+
+#endif
+
+#define TPM_TIS_CAPABILITIES_SUPPORTED   ((1 << 4) | \
+                                          TPM_TIS_INTERRUPTS_SUPPORTED)
+
+#define TPM_TIS_TPM_DID       0x0001
+#define TPM_TIS_TPM_VID       0x0001
+#define TPM_TIS_TPM_RID       0x0001
+
+#define TPM_TIS_NO_DATA_BYTE  0xff
+
+/* utility functions */
+
+static uint8_t tpm_tis_locality_from_addr(target_phys_addr_t addr)
+{
+    return (uint8_t)((addr >> 12) & 0x7);
+}
+
+static uint32_t tpm_tis_get_size_from_buffer(const TPMSizedBuffer *sb)
+{
+    return be32_to_cpu(*(uint32_t *)&sb->buffer[2]);
+}
+
+static void tpm_tis_show_buffer(const TPMSizedBuffer *sb, const char *string)
+{
+#ifdef DEBUG_TIS
+    uint32_t len, i;
+
+    len = tpm_tis_get_size_from_buffer(sb);
+    dprintf("tpm_tis: %s length = %d\n", string, len);
+    for (i = 0; i < len; i++) {
+        if (i && !(i % 16)) {
+            dprintf("\n");
+        }
+        dprintf("%.2X ", sb->buffer[i]);
+    }
+    dprintf("\n");
+#endif
+}
+
+
+/*
+ * Send a TPM request.
+ * Call this with the state_lock held so we can sync with the receive
+ * callback.
+ */
+static void tpm_tis_tpm_send(TPMState *s, uint8_t locty)
+{
+    TPMTISState *tis = &s->s.tis;
+
+    tpm_tis_show_buffer(&tis->loc[locty].w_buffer, "tpm_tis: To TPM");
+
+    s->command_locty = locty;
+    s->cmd_locty     = &tis->loc[locty];
+
+    /* w_offset serves as length indicator for length of data;
+       it's reset when the response comes back */
+    tis->loc[locty].status = TPM_TIS_STATUS_EXECUTION;
+    tis->loc[locty].sts &= ~TPM_TIS_STS_EXPECT;
+
+    s->to_tpm_execute = true;
+    qemu_cond_signal(&s->to_tpm_cond);
+}
+
+/* raise an interrupt if allowed */
+static void tpm_tis_raise_irq(TPMState *s, uint8_t locty, uint32_t irqmask)
+{
+    TPMTISState *tis = &s->s.tis;
+
+    if (!TPM_TIS_IS_VALID_LOCTY(locty)) {
+        return;
+    }
+
+    if ((tis->loc[locty].inte & TPM_TIS_INT_ENABLED) &&
+        (tis->loc[locty].inte & irqmask)) {
+        dprintf("tpm_tis: Raising IRQ for flag %08x\n", irqmask);
+        qemu_irq_raise(s->s.tis.irq);
+        tis->loc[locty].ints |= irqmask;
+    }
+}
+
+static uint32_t tpm_tis_check_request_use_except(TPMState *s, uint8_t locty)
+{
+    uint8_t l;
+
+    for (l = 0; l < TPM_TIS_NUM_LOCALITIES; l++) {
+        if (l == locty) {
+            continue;
+        }
+        if ((s->s.tis.loc[l].access & TPM_TIS_ACCESS_REQUEST_USE)) {
+            return 1;
+        }
+    }
+
+    return 0;
+}
+
+static void tpm_tis_new_active_locality(TPMState *s, uint8_t new_active_locty)
+{
+    TPMTISState *tis = &s->s.tis;
+    int change = (s->s.tis.active_locty != new_active_locty);
+
+    if (change && TPM_TIS_IS_VALID_LOCTY(s->s.tis.active_locty)) {
+        /* reset flags on the old active locality */
+        tis->loc[s->s.tis.active_locty].access &=
+                 ~(TPM_TIS_ACCESS_ACTIVE_LOCALITY|TPM_TIS_ACCESS_REQUEST_USE);
+        if (TPM_TIS_IS_VALID_LOCTY(new_active_locty) &&
+            tis->loc[new_active_locty].access & TPM_TIS_ACCESS_SEIZE) {
+            tis->loc[tis->active_locty].access |= TPM_TIS_ACCESS_BEEN_SEIZED;
+        }
+    }
+
+    tis->active_locty = new_active_locty;
+
+    dprintf("tpm_tis: Active locality is now %d\n", s->s.tis.active_locty);
+
+    if (TPM_TIS_IS_VALID_LOCTY(new_active_locty)) {
+        /* set flags on the new active locality */
+        tis->loc[new_active_locty].access |= TPM_TIS_ACCESS_ACTIVE_LOCALITY;
+        tis->loc[new_active_locty].access &= ~(TPM_TIS_ACCESS_REQUEST_USE |
+                                               TPM_TIS_ACCESS_SEIZE);
+    }
+
+    if (change) {
+        tpm_tis_raise_irq(s, tis->active_locty, TPM_TIS_INT_LOCALITY_CHANGED);
+    }
+}
+
+/* abort -- this function switches the locality */
+static void tpm_tis_abort(TPMState *s, uint8_t locty)
+{
+    TPMTISState *tis = &s->s.tis;
+
+    tis->loc[locty].r_offset = 0;
+    tis->loc[locty].w_offset = 0;
+
+    dprintf("tpm_tis: tis_abort: new active locality is %d\n", tis->next_locty);
+
+    /*
+     * Need to react differently depending on who's aborting now and
+     * which locality will become active afterwards.
+     */
+    if (tis->aborting_locty == tis->next_locty) {
+        tis->loc[tis->aborting_locty].status = TPM_TIS_STATUS_READY;
+        tis->loc[tis->aborting_locty].sts = TPM_TIS_STS_COMMAND_READY;
+        tpm_tis_raise_irq(s, tis->aborting_locty, TPM_TIS_INT_COMMAND_READY);
+    }
+
+    /* locality after abort is another one than the current one */
+    tpm_tis_new_active_locality(s, tis->next_locty);
+
+    tis->next_locty = TPM_TIS_NO_LOCALITY;
+    /* nobody's aborting a command anymore */
+    tis->aborting_locty = TPM_TIS_NO_LOCALITY;
+}
+
+/* prepare aborting current command */
+static void tpm_tis_prep_abort(TPMState *s, uint8_t locty, uint8_t newlocty)
+{
+    TPMTISState *tis = &s->s.tis;
+    uint8_t busy_locty;
+
+    tis->aborting_locty = locty;
+    tis->next_locty = newlocty;  /* locality after successful abort */
+
+    /*
+     * only abort a command using an interrupt if currently executing
+     * a command AND if there's a valid connection to the vTPM.
+     */
+    for (busy_locty = 0; busy_locty < TPM_TIS_NUM_LOCALITIES; busy_locty++) {
+        if (tis->loc[busy_locty].status == TPM_TIS_STATUS_EXECUTION) {
+            /* there is currently no way to interrupt the TPM's operations
+               while it's executing a command; once the TPM is done and
+               returns the buffer, it will switch to the next_locty; */
+            dprintf("tpm_tis: Locality %d is busy - deferring abort\n",
+                    busy_locty);
+            return;
+        }
+    }
+
+    tpm_tis_abort(s, locty);
+}
+
+/*
+ * Callback from the TPM to indicate that the response was received.
+ */
+static void tpm_tis_receive_cb(TPMState *s, uint8_t locty)
+{
+    TPMTISState *tis = &s->s.tis;
+
+    qemu_mutex_lock(&s->state_lock);
+
+    tis->loc[locty].sts = TPM_TIS_STS_VALID | TPM_TIS_STS_DATA_AVAILABLE;
+    tis->loc[locty].status = TPM_TIS_STATUS_COMPLETION;
+    tis->loc[locty].r_offset = 0;
+    tis->loc[locty].w_offset = 0;
+
+    if (TPM_TIS_IS_VALID_LOCTY(tis->next_locty)) {
+        tpm_tis_abort(s, locty);
+    }
+
+    qemu_cond_signal(&s->from_tpm_cond);
+
+    qemu_mutex_unlock(&s->state_lock);
+
+#ifndef RAISE_STS_IRQ
+    tpm_tis_raise_irq(s, locty, TPM_TIS_INT_DATA_AVAILABLE);
+#else
+    tpm_tis_raise_irq(s, locty,
+                      TPM_TIS_INT_DATA_AVAILABLE | TPM_TIS_INT_STS_VALID);
+#endif
+}
+
+/*
+ * read a byte of response data
+ * call this with s->state_lock held
+ */
+static uint32_t tpm_tis_data_read(TPMState *s, uint8_t locty)
+{
+    TPMTISState *tis = &s->s.tis;
+    uint32_t ret = TPM_TIS_NO_DATA_BYTE;
+    uint16_t len;
+
+    if ((tis->loc[locty].sts & TPM_TIS_STS_DATA_AVAILABLE)) {
+        len = tpm_tis_get_size_from_buffer(&tis->loc[locty].r_buffer);
+
+        ret = tis->loc[locty].r_buffer.buffer[tis->loc[locty].r_offset++];
+        if (tis->loc[locty].r_offset >= len) {
+            /* got last byte */
+            tis->loc[locty].sts = TPM_TIS_STS_VALID;
+#ifdef RAISE_STS_IRQ
+            tpm_tis_raise_irq(s, locty, TPM_TIS_INT_STS_VALID);
+#endif
+        }
+        dprintf("tpm_tis: tpm_tis_data_read byte 0x%02x   [%d]\n",
+                ret, tis->loc[locty].r_offset-1);
+    }
+
+    return ret;
+}
+
+/*
+ * Read a register of the TIS interface
+ * See specs pages 33-63 for description of the registers
+ */
+static uint64_t tpm_tis_mmio_read(void *opaque, target_phys_addr_t addr,
+                                  unsigned size)
+{
+    TPMState *s = opaque;
+    TPMTISState *tis = &s->s.tis;
+    uint16_t offset = addr & 0xffc;
+    uint8_t shift = (addr & 0x3) * 8;
+    uint32_t val = 0xffffffff;
+    uint8_t locty = tpm_tis_locality_from_addr(addr);
+
+    qemu_mutex_lock(&s->state_lock);
+
+    if (s->be_driver->ops->had_startup_error(s->be_driver)) {
+        qemu_mutex_unlock(&s->state_lock);
+        return val;
+    }
+
+    switch (offset) {
+    case TPM_TIS_REG_ACCESS:
+        /* never show the SEIZE flag even though we use it internally */
+        val = tis->loc[locty].access & ~TPM_TIS_ACCESS_SEIZE;
+        /* the pending flag is always calculated */
+        if (tpm_tis_check_request_use_except(s, locty)) {
+            val |= TPM_TIS_ACCESS_PENDING_REQUEST;
+        }
+        val |= !s->be_driver->ops->get_tpm_established_flag(s->be_driver);
+        break;
+    case TPM_TIS_REG_INT_ENABLE:
+        val = tis->loc[locty].inte;
+        break;
+    case TPM_TIS_REG_INT_VECTOR:
+        val = tis->irq_num;
+        break;
+    case TPM_TIS_REG_INT_STATUS:
+        val = tis->loc[locty].ints;
+        break;
+    case TPM_TIS_REG_INTF_CAPABILITY:
+        val = TPM_TIS_CAPABILITIES_SUPPORTED;
+        break;
+    case TPM_TIS_REG_STS:
+        if (tis->active_locty == locty) {
+            if ((tis->loc[locty].sts & TPM_TIS_STS_DATA_AVAILABLE)) {
+                val =  (tpm_tis_get_size_from_buffer(&tis->loc[locty].r_buffer)
+                        - tis->loc[locty].r_offset) << 8 | tis->loc[locty].sts;
+            } else {
+                val = (tis->loc[locty].w_buffer.size -
+                       tis->loc[locty].w_offset) << 8 | tis->loc[locty].sts;
+            }
+        }
+        break;
+    case TPM_TIS_REG_DATA_FIFO:
+        if (tis->active_locty == locty) {
+            switch (tis->loc[locty].status) {
+            case TPM_TIS_STATUS_COMPLETION:
+                val = tpm_tis_data_read(s, locty);
+                break;
+            default:
+                val = TPM_TIS_NO_DATA_BYTE;
+                break;
+            }
+        }
+        break;
+    case TPM_TIS_REG_DID_VID:
+        val = (TPM_TIS_TPM_DID << 16) | TPM_TIS_TPM_VID;
+        break;
+    case TPM_TIS_REG_RID:
+        val = TPM_TIS_TPM_RID;
+        break;
+    }
+
+    qemu_mutex_unlock(&s->state_lock);
+
+    if (shift) {
+        val >>= shift;
+    }
+
+    dprintf("tpm_tis:  read.%u(%08x) = %08x\n", size, (int)addr, (uint32_t)val);
+
+    return val;
+}
+
+/*
+ * Write a value to a register of the TIS interface
+ * See specs pages 33-63 for description of the registers
+ */
+static void tpm_tis_mmio_write_intern(void *opaque, target_phys_addr_t addr,
+                                      uint64_t val, unsigned size,
+                                      bool hw_access)
+{
+    TPMState *s = opaque;
+    TPMTISState *tis = &s->s.tis;
+    uint16_t off = addr & 0xfff;
+    uint8_t locty = tpm_tis_locality_from_addr(addr);
+    uint8_t active_locty, l;
+    int c, set_new_locty = 1;
+    uint16_t len;
+
+    dprintf("tpm_tis: write.%u(%08x) = %08x\n", size, (int)addr, (uint32_t)val);
+
+    qemu_mutex_lock(&s->state_lock);
+
+    if (s->be_driver->ops->had_startup_error(s->be_driver)) {
+        qemu_mutex_unlock(&s->state_lock);
+        return;
+    }
+
+    switch (off) {
+    case TPM_TIS_REG_ACCESS:
+
+        if ((val & TPM_TIS_ACCESS_SEIZE)) {
+            val &= ~(TPM_TIS_ACCESS_REQUEST_USE |
+                     TPM_TIS_ACCESS_ACTIVE_LOCALITY);
+        }
+
+        active_locty = tis->active_locty;
+
+        if ((val & TPM_TIS_ACCESS_ACTIVE_LOCALITY)) {
+            /* give up locality if currently owned */
+            if (tis->active_locty == locty) {
+                dprintf("tpm_tis: Releasing locality %d\n", locty);
+
+                uint8_t newlocty = TPM_TIS_NO_LOCALITY;
+                /* anybody wants the locality ? */
+                for (c = TPM_TIS_NUM_LOCALITIES - 1; c >= 0; c--) {
+                    if ((tis->loc[c].access & TPM_TIS_ACCESS_REQUEST_USE)) {
+                        dprintf("tpm_tis: Locality %d requests use.\n", c);
+                        newlocty = c;
+                        break;
+                    }
+                }
+                dprintf("tpm_tis: TPM_TIS_ACCESS_ACTIVE_LOCALITY: "
+                        "Next active locality: %d\n",
+                        newlocty);
+
+                if (TPM_TIS_IS_VALID_LOCTY(newlocty)) {
+                    set_new_locty = 0;
+                    tpm_tis_prep_abort(s, locty, newlocty);
+                } else {
+                    active_locty = TPM_TIS_NO_LOCALITY;
+                }
+            } else {
+                /* not currently the owner; clear a pending request */
+                tis->loc[locty].access &= ~TPM_TIS_ACCESS_REQUEST_USE;
+            }
+        }
+
+        if ((val & TPM_TIS_ACCESS_BEEN_SEIZED)) {
+            tis->loc[locty].access &= ~TPM_TIS_ACCESS_BEEN_SEIZED;
+        }
+
+        if ((val & TPM_TIS_ACCESS_SEIZE)) {
+            /* allow seize if a locality is active and the requesting
+               locality is higher than the one that's active
+               OR
+               allow seize for requesting locality if no locality is
+               active */
+            while ((TPM_TIS_IS_VALID_LOCTY(tis->active_locty) &&
+                    locty > tis->active_locty) ||
+                   (!TPM_TIS_IS_VALID_LOCTY(tis->active_locty))) {
+
+                /* already a pending SEIZE ? */
+                if ((tis->loc[locty].access & TPM_TIS_ACCESS_SEIZE)) {
+                    break;
+                }
+
+                /* check for ongoing seize by a higher locality */
+                for (l = locty + 1; l < TPM_TIS_NUM_LOCALITIES; l++) {
+                    if ((tis->loc[l].access & TPM_TIS_ACCESS_SEIZE)) {
+                        break;
+                    }
+                }
+
+                /* cancel any seize by a lower locality */
+                for (l = 0; l < locty; l++) {
+                    tis->loc[l].access &= ~TPM_TIS_ACCESS_SEIZE;
+                }
+
+                tis->loc[locty].access |= TPM_TIS_ACCESS_SEIZE;
+                dprintf("tpm_tis: TPM_TIS_ACCESS_SEIZE: "
+                        "Locality %d seized from locality %d\n",
+                        locty, tis->active_locty);
+                dprintf("tpm_tis: TPM_TIS_ACCESS_SEIZE: Initiating abort.\n");
+                set_new_locty = 0;
+                tpm_tis_prep_abort(s, tis->active_locty, locty);
+                break;
+            }
+        }
+
+        if ((val & TPM_TIS_ACCESS_REQUEST_USE)) {
+            if (tis->active_locty != locty) {
+                if (TPM_TIS_IS_VALID_LOCTY(tis->active_locty)) {
+                    tis->loc[locty].access |= TPM_TIS_ACCESS_REQUEST_USE;
+                } else {
+                    /* no locality active -> make this one active now */
+                    active_locty = locty;
+                }
+            }
+        }
+
+        if (set_new_locty) {
+            tpm_tis_new_active_locality(s, active_locty);
+        }
+
+        break;
+    case TPM_TIS_REG_INT_ENABLE:
+        if (tis->active_locty != locty) {
+            break;
+        }
+
+        tis->loc[locty].inte = (val & (TPM_TIS_INT_ENABLED | (0x3 << 3) |
+                                     TPM_TIS_INTERRUPTS_SUPPORTED));
+        break;
+    case TPM_TIS_REG_INT_VECTOR:
+        /* hard wired -- ignore */
+        break;
+    case TPM_TIS_REG_INT_STATUS:
+        if (tis->active_locty != locty) {
+            break;
+        }
+
+        /* clearing of interrupt flags */
+        if (((val & TPM_TIS_INTERRUPTS_SUPPORTED)) &&
+            (tis->loc[locty].ints & TPM_TIS_INTERRUPTS_SUPPORTED)) {
+            tis->loc[locty].ints &= ~val;
+            if (tis->loc[locty].ints == 0) {
+                qemu_irq_lower(tis->irq);
+                dprintf("tpm_tis: Lowering IRQ\n");
+            }
+        }
+        tis->loc[locty].ints &= ~(val & TPM_TIS_INTERRUPTS_SUPPORTED);
+        break;
+    case TPM_TIS_REG_STS:
+        if (tis->active_locty != locty) {
+            break;
+        }
+
+        val &= (TPM_TIS_STS_COMMAND_READY | TPM_TIS_STS_TPM_GO |
+                TPM_TIS_STS_RESPONSE_RETRY);
+
+        if (val == TPM_TIS_STS_COMMAND_READY) {
+            switch (tis->loc[locty].status) {
+
+            case TPM_TIS_STATUS_READY:
+                tis->loc[locty].w_offset = 0;
+                tis->loc[locty].r_offset = 0;
+            break;
+
+            case TPM_TIS_STATUS_IDLE:
+                tis->loc[locty].sts = TPM_TIS_STS_COMMAND_READY;
+                tis->loc[locty].status = TPM_TIS_STATUS_READY;
+                tpm_tis_raise_irq(s, locty, TPM_TIS_INT_COMMAND_READY);
+            break;
+
+            case TPM_TIS_STATUS_EXECUTION:
+            case TPM_TIS_STATUS_RECEPTION:
+                /* abort currently running command */
+                dprintf("tpm_tis: %s: Initiating abort.\n",
+                        __func__);
+                tpm_tis_prep_abort(s, locty, locty);
+            break;
+
+            case TPM_TIS_STATUS_COMPLETION:
+                tis->loc[locty].w_offset = 0;
+                tis->loc[locty].r_offset = 0;
+                /* shortcut to ready state with C/R set */
+                tis->loc[locty].status = TPM_TIS_STATUS_READY;
+                if (!(tis->loc[locty].sts & TPM_TIS_STS_COMMAND_READY)) {
+                    tis->loc[locty].sts   = TPM_TIS_STS_COMMAND_READY;
+                    tpm_tis_raise_irq(s, locty, TPM_TIS_INT_COMMAND_READY);
+                }
+            break;
+
+            }
+        } else if (val == TPM_TIS_STS_TPM_GO) {
+            switch (tis->loc[locty].status) {
+            case TPM_TIS_STATUS_RECEPTION:
+                tpm_tis_tpm_send(s, locty);
+                break;
+            default:
+                /* ignore */
+                break;
+            }
+        } else if (val == TPM_TIS_STS_RESPONSE_RETRY) {
+            switch (tis->loc[locty].status) {
+            case TPM_TIS_STATUS_COMPLETION:
+                tis->loc[locty].r_offset = 0;
+                tis->loc[locty].sts = TPM_TIS_STS_VALID |
+                                      TPM_TIS_STS_DATA_AVAILABLE;
+                break;
+            default:
+                /* ignore */
+                break;
+            }
+        }
+        break;
+    case TPM_TIS_REG_DATA_FIFO:
+        /* data fifo */
+        if (tis->active_locty != locty) {
+            break;
+        }
+
+        if (tis->loc[locty].status == TPM_TIS_STATUS_IDLE ||
+            tis->loc[locty].status == TPM_TIS_STATUS_EXECUTION ||
+            tis->loc[locty].status == TPM_TIS_STATUS_COMPLETION) {
+            /* drop the byte */
+        } else {
+            dprintf("tpm_tis: Byte to send to TPM: %02x\n", (uint8_t)val);
+            if (tis->loc[locty].status == TPM_TIS_STATUS_READY) {
+                tis->loc[locty].status = TPM_TIS_STATUS_RECEPTION;
+                tis->loc[locty].sts = TPM_TIS_STS_EXPECT | TPM_TIS_STS_VALID;
+            }
+
+            if ((tis->loc[locty].sts & TPM_TIS_STS_EXPECT)) {
+                if (tis->loc[locty].w_offset < tis->loc[locty].w_buffer.size) {
+                    tis->loc[locty].w_buffer.
+                        buffer[tis->loc[locty].w_offset++] = (uint8_t)val;
+                } else {
+                    tis->loc[locty].sts = TPM_TIS_STS_VALID;
+                }
+            }
+
+            /* check for complete packet */
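+            /* a TPM command header is 6 bytes: 2-byte tag + 4-byte length */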
+            if (tis->loc[locty].w_offset > 5 &&
+                (tis->loc[locty].sts & TPM_TIS_STS_EXPECT)) {
+                /* we have a packet length - see if we have all of it */
+#ifdef RAISE_STS_IRQ
+                bool needIrq = !(tis->loc[locty].sts & TPM_TIS_STS_VALID);
+#endif
+                len = tpm_tis_get_size_from_buffer(&tis->loc[locty].w_buffer);
+                if (len > tis->loc[locty].w_offset) {
+                    tis->loc[locty].sts = TPM_TIS_STS_EXPECT |
+                                          TPM_TIS_STS_VALID;
+                } else {
+                    /* packet complete */
+                    tis->loc[locty].sts = TPM_TIS_STS_VALID;
+                }
+#ifdef RAISE_STS_IRQ
+                if (needIrq) {
+                    tpm_tis_raise_irq(s, locty, TPM_TIS_INT_STS_VALID);
+                }
+#endif
+            }
+        }
+        break;
+    }
+
+    qemu_mutex_unlock(&s->state_lock);
+}
+
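+/* MemoryRegionOps write callback for the TIS MMIO region */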
+static void tpm_tis_mmio_write(void *opaque, target_phys_addr_t addr,
+                               uint64_t val, unsigned size)
+{
+    tpm_tis_mmio_write_intern(opaque, addr, val, size, false);
+}
+
+static const MemoryRegionOps tpm_tis_memory_ops = {
+    .read = tpm_tis_mmio_read,
+    .write = tpm_tis_mmio_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 4,
+    },
+};
+
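+/* have the backend start up the TPM it provides */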
+static int tpm_tis_do_startup_tpm(TPMState *s)
+{
+    return s->be_driver->ops->startup_tpm(s->be_driver);
+}
+
+/*
+ * This function is called when the machine starts, on reset, or on
+ * S3 resume.
+ */
+static void tpm_tis_reset(DeviceState *d)
+{
+    TPMState *s = container_of(d, TPMState, busdev.qdev);
+    TPMTISState *tis = &s->s.tis;
+    int c;
+
+    s->tpm_initialized = false;
+
+    s->be_driver->ops->reset(s->be_driver);
+
+    tis->active_locty = TPM_TIS_NO_LOCALITY;
+    tis->next_locty = TPM_TIS_NO_LOCALITY;
+    tis->aborting_locty = TPM_TIS_NO_LOCALITY;
+
+    for (c = 0; c < TPM_TIS_NUM_LOCALITIES; c++) {
+        tis->loc[c].access = TPM_TIS_ACCESS_TPM_REG_VALID_STS;
+        tis->loc[c].sts = 0;
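+        /* default interrupt polarity: low level (typePolarity = 01b) */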
+        tis->loc[c].inte = (1 << 3);
+        tis->loc[c].ints = 0;
+        tis->loc[c].status = TPM_TIS_STATUS_IDLE;
+
+        tis->loc[c].w_offset = 0;
+        s->be_driver->ops->realloc_buffer(&tis->loc[c].w_buffer);
+        tis->loc[c].r_offset = 0;
+        s->be_driver->ops->realloc_buffer(&tis->loc[c].r_buffer);
+    }
+
+    tpm_tis_do_startup_tpm(s);
+}
+
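+/* qdev init: bind to the configured backend and map the TIS MMIO window */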
+static int tpm_tis_init(ISADevice *dev)
+{
+    TPMState *s = DO_UPCAST(TPMState, busdev, dev);
+    TPMTISState *tis = &s->s.tis;
+    int rc;
+
+    qemu_mutex_init(&s->state_lock);
+    qemu_cond_init(&s->from_tpm_cond);
+    qemu_cond_init(&s->to_tpm_cond);
+
+    s->be_driver = qemu_find_tpm(s->backend);
+    if (!s->be_driver) {
+        error_report("tpm_tis: backend driver with id %s could not be "
+                     "found.n\n", s->backend);
+        return -1;
+    }
+
+    s->be_driver->fe_model = "tpm-tis";
+
+    if (s->be_driver->ops->init(s->be_driver, s, tpm_tis_receive_cb)) {
+        return -1;
+    }
+
+    isa_init_irq(dev, &tis->irq, tis->irq_num);
+
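+    /* one 0x1000-byte register page per locality at TPM_TIS_ADDR_BASE */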
+    memory_region_init_io(&s->mmio, &tpm_tis_memory_ops, s, "tpm-tis-mmio",
+                          0x1000 * TPM_TIS_NUM_LOCALITIES);
+    memory_region_add_subregion(get_system_memory(), TPM_TIS_ADDR_BASE,
+                                &s->mmio);
+
+    rc = tpm_tis_do_startup_tpm(s);
+    if (rc != 0) {
+        goto err_destroy_memory;
+    }
+
+    return 0;
+
+ err_destroy_memory:
+    memory_region_destroy(&s->mmio);
+
+    return -1;
+}
+
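+/* no migration support yet; mark the device state as unmigratable */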
+static const VMStateDescription vmstate_tpm_tis = {
+    .name = "tpm",
+    .unmigratable = 1,
+};
+
+static ISADeviceInfo tpm_tis_device_info = {
+    .init         = tpm_tis_init,
+    .qdev.name    = "tpm-tis",
+    .qdev.size    = sizeof(TPMState),
+    .qdev.vmsd    = &vmstate_tpm_tis,
+    .qdev.reset   = tpm_tis_reset,
+    .qdev.props = (Property[]) {
+        DEFINE_PROP_UINT32("irq", TPMState,
+                           s.tis.irq_num, TPM_TIS_IRQ),
+        DEFINE_PROP_STRING("tpmdev", TPMState, backend),
+        DEFINE_PROP_END_OF_LIST(),
+    },
+};
+
+static void tpm_tis_register_device(void)
+{
+    isa_qdev_register(&tpm_tis_device_info);
+}
+
+device_init(tpm_tis_register_device)