diff mbox series

[5/6] x86: acpi: trigger SMI before scanning for hotplugged CPUs

Message ID 20200720141610.574308-6-imammedo@redhat.com
State New
Headers show
Series x86: fix cpu hotplug with secure boot | expand

Commit Message

Igor Mammedov July 20, 2020, 2:16 p.m. UTC
In case firmware has negotiated CPU hotplug SMI feature, generate
AML to describe SMI IO port region and send SMI to firmware
on each CPU hotplug SCI in case new CPUs were hotplugged.

Since new CPUs can be hotplugged while CPU_SCAN_METHOD is running
we can't send SMI before new CPUs are fetched from QEMU as it
could cause sending Notify to a CPU that firmware hasn't seen yet.
So fetch new CPUs into local cache first, then send SMI, and
after that send Notify events to cached CPUs. This should ensure
that Notify is sent only to CPUs which were processed by firmware
first.
Any CPUs that were hotplugged after caching will be processed
by the next CPU_SCAN_METHOD, when pending SCI is handled.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
---
v1:
 - make sure that Notify is sent only to CPUs seen by FW

 - (Laszlo Ersek <lersek@redhat.com>)
    - use macro instead of x-smi-negotiated-features
    - style fixes
    - reserve whole SMI block (0xB2, 0xB3)
v0:
 - s/aml_string/aml_eisaid/
   /fixes Windows BSOD, on nonsense value/ (Laszlo Ersek <lersek@redhat.com>)
 - put SMI device under PCI0 like the rest of hotplug IO port
 - do not generate SMI AML if CPU hotplug SMI feature hasn't been negotiated

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
---
 include/hw/acpi/cpu.h  |  1 +
 include/hw/i386/ich9.h |  2 ++
 hw/acpi/cpu.c          | 50 ++++++++++++++++++++++++++++++++++++++++--
 hw/i386/acpi-build.c   | 35 ++++++++++++++++++++++++++++-
 hw/isa/lpc_ich9.c      |  3 +++
 5 files changed, 88 insertions(+), 3 deletions(-)

Comments

Laszlo Ersek July 22, 2020, 3:30 p.m. UTC | #1
On 07/20/20 16:16, Igor Mammedov wrote:
> In case firmware has negotiated CPU hotplug SMI feature, generate
> AML to describe SMI IO port region and send SMI to firmware
> on each CPU hotplug SCI in case new CPUs were htplugged.

(1) s/htplugged/hotplugged/

>
> Since new CPUs can be hotplugged while CPU_SCAN_METHOD is running
> we can't send SMI before new CPUs are fetched from QEMU as it
> could cause sending Notify to a CPU that firmware hasn't seen yet.
> So fetch new CPUs into local cache first and then send SMI and
> after that sends Notify events to cached CPUs. This should ensure
> that Notify is sent only to CPUs which were processed by firmware
> first.
> Any CPUs that were hotplugged after caching will be process

(2) s/will be process/will be processed/

> by the next CPU_SCAN_METHOD, when pending SCI is handled.

(3) I think that the subject ("trigger SMI before scanning") may no
longer apply, because we do scan before triggering the SMI.

>
> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> ---
> v1:
>  - make sure that Notify is sent only to CPUs seen by FW
>
>  - (Laszlo Ersek <lersek@redhat.com>)
>     - use macro instead of x-smi-negotiated-features
>     - style fixes
>     - reserve whole SMI block (0xB2, 0xB3)
> v0:
>  - s/aml_string/aml_eisaid/
>    /fixes Windows BSOD, on nonsense value/ (Laszlo Ersek <lersek@redhat.com>)
>  - put SMI device under PCI0 like the rest of hotplug IO port
>  - do not generate SMI AML if CPU hotplug SMI feature hasn't been negotiated
>
> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> ---
>  include/hw/acpi/cpu.h  |  1 +
>  include/hw/i386/ich9.h |  2 ++
>  hw/acpi/cpu.c          | 50 ++++++++++++++++++++++++++++++++++++++++--
>  hw/i386/acpi-build.c   | 35 ++++++++++++++++++++++++++++-
>  hw/isa/lpc_ich9.c      |  3 +++
>  5 files changed, 88 insertions(+), 3 deletions(-)
>
> diff --git a/include/hw/acpi/cpu.h b/include/hw/acpi/cpu.h
> index 62f0278ba2..0eeedaa491 100644
> --- a/include/hw/acpi/cpu.h
> +++ b/include/hw/acpi/cpu.h
> @@ -50,6 +50,7 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
>  typedef struct CPUHotplugFeatures {
>      bool acpi_1_compatible;
>      bool has_legacy_cphp;
> +    const char *smi_path;
>  } CPUHotplugFeatures;
>
>  void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
> diff --git a/include/hw/i386/ich9.h b/include/hw/i386/ich9.h
> index d1bb3f7bf0..0f43ef2481 100644
> --- a/include/hw/i386/ich9.h
> +++ b/include/hw/i386/ich9.h
> @@ -245,6 +245,8 @@ typedef struct ICH9LPCState {
>  #define ICH9_SMB_HST_D1                         0x06
>  #define ICH9_SMB_HOST_BLOCK_DB                  0x07
>
> +#define ICH9_LPC_SMI_NEGOTIATED_FEAT_PROP "x-smi-negotiated-features"
> +
>  /* bit positions used in fw_cfg SMI feature negotiation */
>  #define ICH9_LPC_SMI_F_BROADCAST_BIT            0
>  #define ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT          1
> diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
> index 3d6a500fb7..a6dd6e252a 100644
> --- a/hw/acpi/cpu.c
> +++ b/hw/acpi/cpu.c
> @@ -14,6 +14,8 @@
>  #define ACPI_CPU_CMD_DATA_OFFSET_RW 8
>  #define ACPI_CPU_CMD_DATA2_OFFSET_R 0
>
> +#define OVMF_CPUHP_SMI_CMD 4
> +
>  enum {
>      CPHP_GET_NEXT_CPU_WITH_EVENT_CMD = 0,
>      CPHP_OST_EVENT_CMD = 1,
> @@ -321,6 +323,7 @@ const VMStateDescription vmstate_cpu_hotplug = {
>  #define CPU_NOTIFY_METHOD "CTFY"
>  #define CPU_EJECT_METHOD  "CEJ0"
>  #define CPU_OST_METHOD    "COST"
> +#define CPU_ADDED_LIST    "CNEW"
>
>  #define CPU_ENABLED       "CPEN"
>  #define CPU_SELECTOR      "CSEL"
> @@ -471,8 +474,27 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
>              Aml *dev_chk = aml_int(1);
>              Aml *eject_req = aml_int(3);
>              Aml *next_cpu_cmd = aml_int(CPHP_GET_NEXT_CPU_WITH_EVENT_CMD);
> +            Aml *num_added_cpus = aml_local(1);
> +            Aml *cpu_idx = aml_local(2);
> +            Aml *new_cpus = aml_name(CPU_ADDED_LIST);
>
>              aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
> +
> +            /* use named package as old Windows don't support it in local var */
> +            if (arch_ids->len <= 256) {
> +                /* For compatibility with Windows Server 2008 (max 256 cpus),
> +                 * use ACPI1.0 PackageOp which can cache max 255 elements.
> +                 * Which is fine for 256 CPUs VM. Max 255 can be hotplugged,
> +                 * since at least one CPU must be already present.
> +                */
> +                aml_append(method, aml_name_decl(CPU_ADDED_LIST,
> +                    aml_package(arch_ids->len)));
> +            } else {
> +                aml_append(method, aml_name_decl(CPU_ADDED_LIST,
> +                    aml_varpackage(arch_ids->len)));
> +            }
> +
> +            aml_append(method, aml_store(zero, num_added_cpus));
>              aml_append(method, aml_store(one, has_event));
>              while_ctx = aml_while(aml_equal(has_event, one));
>              {
> @@ -483,8 +505,10 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
>                   aml_append(while_ctx, aml_store(next_cpu_cmd, cpu_cmd));
>                   ifctx = aml_if(aml_equal(ins_evt, one));
>                   {
> -                     aml_append(ifctx,
> -                         aml_call2(CPU_NOTIFY_METHOD, cpu_data, dev_chk));
> +                     /* cache added CPUs to Notify/Wakeup later */
> +                     aml_append(ifctx, aml_store(cpu_data,
> +                         aml_index(new_cpus, num_added_cpus)));
> +                     aml_append(ifctx, aml_increment(num_added_cpus));
>                       aml_append(ifctx, aml_store(one, ins_evt));
>                       aml_append(ifctx, aml_store(one, has_event));
>                   }
> @@ -501,6 +525,28 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
>                   aml_append(while_ctx, else_ctx);
>              }
>              aml_append(method, while_ctx);
> +
> +            /* in case FW negotiated ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT,
> +             * make upcall to FW, so it can pull in new CPUs before
> +             * OS is notified and wakes them up */
> +            if (opts.smi_path) {
> +                ifctx = aml_if(aml_lnot(aml_equal(num_added_cpus, zero)));
> +                {
> +                    aml_append(ifctx, aml_store(aml_int(OVMF_CPUHP_SMI_CMD),
> +                        aml_name("%s", opts.smi_path)));
> +                }
> +                aml_append(method, ifctx);
> +            }
> +
> +            aml_append(method, aml_store(zero, cpu_idx));
> +            while_ctx = aml_while(aml_lless(cpu_idx, num_added_cpus));
> +            {
> +                aml_append(while_ctx, aml_call2(CPU_NOTIFY_METHOD,
> +                    aml_derefof(aml_index(new_cpus, cpu_idx)), dev_chk));
> +                aml_append(while_ctx, aml_increment(cpu_idx));
> +            }
> +            aml_append(method, while_ctx);
> +
>              aml_append(method, aml_release(ctrl_lock));
>          }
>          aml_append(cpus_dev, method);

(4) This version addresses all my requests from the previous version,
but the above method changes unfortunately break the hot-plugging of a
single CPU even.

Here's the decompiled method (using a topology with 4 possible CPUs):

   1  Method (CSCN, 0, Serialized)
   2  {
   3      Acquire (\_SB.PCI0.PRES.CPLK, 0xFFFF)
   4      Name (CNEW, Package (0x04){})
   5      Local1 = Zero
   6      Local0 = One
   7      While ((Local0 == One))
   8      {
   9          Local0 = Zero
  10          \_SB.PCI0.PRES.CCMD = Zero
  11          If ((\_SB.PCI0.PRES.CINS == One))
  12          {
  13              CNEW [Local1] = \_SB.PCI0.PRES.CDAT
  14              Local1++
  15              \_SB.PCI0.PRES.CINS = One
  16              Local0 = One
  17          }
  18          ElseIf ((\_SB.PCI0.PRES.CRMV == One))
  19          {
  20              CTFY (\_SB.PCI0.PRES.CDAT, 0x03)
  21              \_SB.PCI0.PRES.CRMV = One
  22              Local0 = One
  23          }
  24      }
  25
  26      If ((Local1 != Zero))
  27      {
  28          \_SB.PCI0.SMI0.SMIC = 0x04
  29      }
  30
  31      Local2 = Zero
  32      While ((Local2 < Local1))
  33      {
  34          CTFY (DerefOf (CNEW [Local2]), One)
  35          Local2++
  36      }
  37
  38      Release (\_SB.PCI0.PRES.CPLK)
  39  }

The problem is on line 15. When you write 1 to \_SB.PCI0.PRES.CINS, the
following happens (quoting "docs/specs/acpi_cpu_hotplug.txt"):

> write access:
>     offset:
> [...]
>     [0x4] CPU device control fields: (1 byte access)
>         bits:
>             0: [...]
>             1: if set to 1 clears device insert event, set by OSPM
>                after it has emitted device check event for the
>                selected CPU device

Because of this, by the time the SMI is raised on line 28, the firmware
will see no pending events.

The scanning (= collection into the package) has to be implemented such
that the pending events never change.

This is possible to do; the firmware already does it. The idea is to
advance the "get next CPU with event" command by manually incrementing
the CPU selector past the CPU that has a pending event, rather than by
clearing the events for the currently selected CPU. Here's the
pseudo-code:

    bool scan;
    uint32_t current_selector;
    uint32_t pending_selector;
    uint32_t event_count;
    uint32_t plug_count;
    uint32_t event;

    scan = true;
    current_selector = 0;
    event_count = 0;
    plug_count = 0;

    while (scan) {
        scan = false;

        /* the "get next CPU with pending event" command starts scanning
         * at the currently selected CPU, *inclusive*
         */
        CSEL = current_selector;
        CCMD = 0;
        pending_selector = CDAT;

        /* the "get next CPU with pending event" command may cause the
         * current selector to wrap around; in which case we're done
         */
        if (pending_selector >= current_selector) {
            current_selector = pending_selector;

            /* if there's no pending event for the currently selected
             * CPU, we're done
             */
            if (CINS) {
                /* record INSERT event for currently selected CPU, and
                 * continue scanning
                 */
                CNEW[event_count] = current_selector;
                CEVT[event_count] = 1;
                event_count++;
                plug_count++;
                scan = true;
            } else if (CRMV) {
                /* record REMOVE event for currently selected CPU, and
                 * continue scanning
                 */
                CNEW[event_count] = current_selector;
                CEVT[event_count] = 3;
                event_count++;
                scan = true;
            }

            if (scan) {
                scan = false;
                /* advance past this CPU manually (as we must not clear
                 * events pending for this CPU)
                 */
                current_selector++;
                if (current_selector < possible_cpu_count) {
                    scan = true;
                }
            }
        }
    }

    /* raise "hotplug SMI" now if at least one INSERT event has been
     * collected
     */
    if (plug_count > 0) {
        SMIC = 0x04;
    }

    /* notify the OS about all events, and clear pending events (same
     * order as before)
     */
    event = 0;
    while (event < event_count) {
        CTFY(CNEW[event], CEVT[event]);

        CSEL = CNEW[event];
        if (CEVT[event] == 1) {
            CINS = 1;
        } else if (CEVT[event] == 3) {
            CRMV = 1;
        }

        event++;
    }


Thanks
Laszlo
diff mbox series

Patch

diff --git a/include/hw/acpi/cpu.h b/include/hw/acpi/cpu.h
index 62f0278ba2..0eeedaa491 100644
--- a/include/hw/acpi/cpu.h
+++ b/include/hw/acpi/cpu.h
@@ -50,6 +50,7 @@  void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
 typedef struct CPUHotplugFeatures {
     bool acpi_1_compatible;
     bool has_legacy_cphp;
+    const char *smi_path;
 } CPUHotplugFeatures;
 
 void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
diff --git a/include/hw/i386/ich9.h b/include/hw/i386/ich9.h
index d1bb3f7bf0..0f43ef2481 100644
--- a/include/hw/i386/ich9.h
+++ b/include/hw/i386/ich9.h
@@ -245,6 +245,8 @@  typedef struct ICH9LPCState {
 #define ICH9_SMB_HST_D1                         0x06
 #define ICH9_SMB_HOST_BLOCK_DB                  0x07
 
+#define ICH9_LPC_SMI_NEGOTIATED_FEAT_PROP "x-smi-negotiated-features"
+
 /* bit positions used in fw_cfg SMI feature negotiation */
 #define ICH9_LPC_SMI_F_BROADCAST_BIT            0
 #define ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT          1
diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
index 3d6a500fb7..a6dd6e252a 100644
--- a/hw/acpi/cpu.c
+++ b/hw/acpi/cpu.c
@@ -14,6 +14,8 @@ 
 #define ACPI_CPU_CMD_DATA_OFFSET_RW 8
 #define ACPI_CPU_CMD_DATA2_OFFSET_R 0
 
+#define OVMF_CPUHP_SMI_CMD 4
+
 enum {
     CPHP_GET_NEXT_CPU_WITH_EVENT_CMD = 0,
     CPHP_OST_EVENT_CMD = 1,
@@ -321,6 +323,7 @@  const VMStateDescription vmstate_cpu_hotplug = {
 #define CPU_NOTIFY_METHOD "CTFY"
 #define CPU_EJECT_METHOD  "CEJ0"
 #define CPU_OST_METHOD    "COST"
+#define CPU_ADDED_LIST    "CNEW"
 
 #define CPU_ENABLED       "CPEN"
 #define CPU_SELECTOR      "CSEL"
@@ -471,8 +474,27 @@  void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
             Aml *dev_chk = aml_int(1);
             Aml *eject_req = aml_int(3);
             Aml *next_cpu_cmd = aml_int(CPHP_GET_NEXT_CPU_WITH_EVENT_CMD);
+            Aml *num_added_cpus = aml_local(1);
+            Aml *cpu_idx = aml_local(2);
+            Aml *new_cpus = aml_name(CPU_ADDED_LIST);
 
             aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+
+            /* use named package as old Windows don't support it in local var */
+            if (arch_ids->len <= 256) {
+                /* For compatibility with Windows Server 2008 (max 256 cpus),
+                 * use ACPI1.0 PackageOp which can cache max 255 elements.
+                 * Which is fine for 256 CPUs VM. Max 255 can be hotplugged,
+                 * since at least one CPU must be already present.
+                */
+                aml_append(method, aml_name_decl(CPU_ADDED_LIST,
+                    aml_package(arch_ids->len)));
+            } else {
+                aml_append(method, aml_name_decl(CPU_ADDED_LIST,
+                    aml_varpackage(arch_ids->len)));
+            }
+
+            aml_append(method, aml_store(zero, num_added_cpus));
             aml_append(method, aml_store(one, has_event));
             while_ctx = aml_while(aml_equal(has_event, one));
             {
@@ -483,8 +505,10 @@  void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
                  aml_append(while_ctx, aml_store(next_cpu_cmd, cpu_cmd));
                  ifctx = aml_if(aml_equal(ins_evt, one));
                  {
-                     aml_append(ifctx,
-                         aml_call2(CPU_NOTIFY_METHOD, cpu_data, dev_chk));
+                     /* cache added CPUs to Notify/Wakeup later */
+                     aml_append(ifctx, aml_store(cpu_data,
+                         aml_index(new_cpus, num_added_cpus)));
+                     aml_append(ifctx, aml_increment(num_added_cpus));
                      aml_append(ifctx, aml_store(one, ins_evt));
                      aml_append(ifctx, aml_store(one, has_event));
                  }
@@ -501,6 +525,28 @@  void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
                  aml_append(while_ctx, else_ctx);
             }
             aml_append(method, while_ctx);
+
+            /* in case FW negotiated ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT,
+             * make upcall to FW, so it can pull in new CPUs before
+             * OS is notified and wakes them up */
+            if (opts.smi_path) {
+                ifctx = aml_if(aml_lnot(aml_equal(num_added_cpus, zero)));
+                {
+                    aml_append(ifctx, aml_store(aml_int(OVMF_CPUHP_SMI_CMD),
+                        aml_name("%s", opts.smi_path)));
+                }
+                aml_append(method, ifctx);
+            }
+
+            aml_append(method, aml_store(zero, cpu_idx));
+            while_ctx = aml_while(aml_lless(cpu_idx, num_added_cpus));
+            {
+                aml_append(while_ctx, aml_call2(CPU_NOTIFY_METHOD,
+                    aml_derefof(aml_index(new_cpus, cpu_idx)), dev_chk));
+                aml_append(while_ctx, aml_increment(cpu_idx));
+            }
+            aml_append(method, while_ctx);
+
             aml_append(method, aml_release(ctrl_lock));
         }
         aml_append(cpus_dev, method);
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index b7bcbbbb2a..2291c050ba 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -95,6 +95,7 @@  typedef struct AcpiPmInfo {
     bool s3_disabled;
     bool s4_disabled;
     bool pcihp_bridge_en;
+    bool smi_on_cpuhp;
     uint8_t s4_val;
     AcpiFadtData fadt;
     uint16_t cpu_hp_io_base;
@@ -194,6 +195,7 @@  static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm)
     pm->cpu_hp_io_base = 0;
     pm->pcihp_io_base = 0;
     pm->pcihp_io_len = 0;
+    pm->smi_on_cpuhp = false;
 
     assert(obj);
     init_common_fadt_data(machine, obj, &pm->fadt);
@@ -207,12 +209,16 @@  static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm)
             object_property_get_uint(obj, ACPI_PCIHP_IO_LEN_PROP, NULL);
     }
     if (lpc) {
+        uint64_t smi_features = object_property_get_uint(lpc,
+            ICH9_LPC_SMI_NEGOTIATED_FEAT_PROP, NULL);
         struct AcpiGenericAddress r = { .space_id = AML_AS_SYSTEM_IO,
             .bit_width = 8, .address = ICH9_RST_CNT_IOPORT };
         pm->fadt.reset_reg = r;
         pm->fadt.reset_val = 0xf;
         pm->fadt.flags |= 1 << ACPI_FADT_F_RESET_REG_SUP;
         pm->cpu_hp_io_base = ICH9_CPU_HOTPLUG_IO_BASE;
+        pm->smi_on_cpuhp =
+            !!(smi_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT));
     }
 
     /* The above need not be conditional on machine type because the reset port
@@ -1515,6 +1521,32 @@  build_dsdt(GArray *table_data, BIOSLinker *linker,
         aml_append(dev, aml_name_decl("_UID", aml_int(1)));
         aml_append(dev, build_q35_osc_method());
         aml_append(sb_scope, dev);
+
+        if (pm->smi_on_cpuhp) {
+            /* reserve SMI block resources, IO ports 0xB2, 0xB3 */
+            dev = aml_device("PCI0.SMI0");
+            aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A06")));
+            aml_append(dev, aml_name_decl("_UID", aml_string("SMI resources")));
+            crs = aml_resource_template();
+            aml_append(crs,
+                aml_io(
+                       AML_DECODE16,
+                       ACPI_PORT_SMI_CMD,
+                       ACPI_PORT_SMI_CMD,
+                       1,
+                       2)
+            );
+            aml_append(dev, aml_name_decl("_CRS", crs));
+            aml_append(dev, aml_operation_region("SMIR", AML_SYSTEM_IO,
+                aml_int(ACPI_PORT_SMI_CMD), 2));
+            field = aml_field("SMIR", AML_BYTE_ACC, AML_NOLOCK,
+                              AML_WRITE_AS_ZEROS);
+            aml_append(field, aml_named_field("SMIC", 8));
+            aml_append(field, aml_reserved_field(8));
+            aml_append(dev, field);
+            aml_append(sb_scope, dev);
+        }
+
         aml_append(dsdt, sb_scope);
 
         build_hpet_aml(dsdt);
@@ -1530,7 +1562,8 @@  build_dsdt(GArray *table_data, BIOSLinker *linker,
         build_legacy_cpu_hotplug_aml(dsdt, machine, pm->cpu_hp_io_base);
     } else {
         CPUHotplugFeatures opts = {
-            .acpi_1_compatible = true, .has_legacy_cphp = true
+            .acpi_1_compatible = true, .has_legacy_cphp = true,
+            .smi_path = pm->smi_on_cpuhp ? "\\_SB.PCI0.SMI0.SMIC" : NULL,
         };
         build_cpus_aml(dsdt, machine, opts, pm->cpu_hp_io_base,
                        "\\_SB.PCI0", "\\_GPE._E02");
diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c
index c9305080b5..70afc2c5f2 100644
--- a/hw/isa/lpc_ich9.c
+++ b/hw/isa/lpc_ich9.c
@@ -646,6 +646,9 @@  static void ich9_lpc_initfn(Object *obj)
                                   &acpi_enable_cmd, OBJ_PROP_FLAG_READ);
     object_property_add_uint8_ptr(OBJECT(lpc), ACPI_PM_PROP_ACPI_DISABLE_CMD,
                                   &acpi_disable_cmd, OBJ_PROP_FLAG_READ);
+    object_property_add_uint64_ptr(obj, ICH9_LPC_SMI_NEGOTIATED_FEAT_PROP,
+                                   &lpc->smi_negotiated_features,
+                                   OBJ_PROP_FLAG_READ);
 
     ich9_pm_add_properties(obj, &lpc->pm);
 }