diff mbox series

[v4,07/11] hmat acpi: Build Memory Side Cache Information Structure(s) in ACPI HMAT

Message ID 20190508061726.27631-8-tao3.xu@intel.com
State New
Headers show
Series Build ACPI Heterogeneous Memory Attribute Table (HMAT) | expand

Commit Message

Tao Xu May 8, 2019, 6:17 a.m. UTC
From: Liu Jingqi <jingqi.liu@intel.com>

This structure describes memory side cache information for memory
proximity domains if the memory side cache is present and the
physical device(SMBIOS handle) forms the memory side cache.
The software could use this information to effectively place
the data in memory to maximize the performance of the system
memory that uses the memory side cache.

Signed-off-by: Liu Jingqi <jingqi.liu@intel.com>
Signed-off-by: Tao Xu <tao3.xu@intel.com>
---

Changes in v3 -> v4:
    - use build_append_int_noprefix() to build Memory Side Cache
    Information Structure(s) tables (Igor)
    - move globals (hmat_cache_info) into MachineState (Igor)
    - move hmat_build_cache() inside of hmat_build_hma() (Igor)
---
 hw/acpi/hmat.c          | 50 ++++++++++++++++++++++++++++++++++++++++-
 hw/acpi/hmat.h          | 25 +++++++++++++++++++++
 include/hw/boards.h     |  3 +++
 include/qemu/typedefs.h |  1 +
 include/sysemu/sysemu.h |  8 +++++++
 5 files changed, 86 insertions(+), 1 deletion(-)

Comments

Igor Mammedov June 4, 2019, 3:04 p.m. UTC | #1
On Wed,  8 May 2019 14:17:22 +0800
Tao Xu <tao3.xu@intel.com> wrote:

> From: Liu Jingqi <jingqi.liu@intel.com>
> 
> This structure describes memory side cache information for memory
> proximity domains if the memory side cache is present and the
> physical device(SMBIOS handle) forms the memory side cache.
> The software could use this information to effectively place
> the data in memory to maximize the performance of the system
> memory that use the memory side cache.
> 
> Signed-off-by: Liu Jingqi <jingqi.liu@intel.com>
> Signed-off-by: Tao Xu <tao3.xu@intel.com>
> ---
> 
> Changes in v4 -> v3:
>     - use build_append_int_noprefix() to build Memory Side Cache
>     Information Structure(s) tables (Igor)
>     - move globals (hmat_cache_info) into MachineState (Igor)
>     - move hmat_build_cache() inside of hmat_build_hma() (Igor)
> ---
>  hw/acpi/hmat.c          | 50 ++++++++++++++++++++++++++++++++++++++++-
>  hw/acpi/hmat.h          | 25 +++++++++++++++++++++
>  include/hw/boards.h     |  3 +++
>  include/qemu/typedefs.h |  1 +
>  include/sysemu/sysemu.h |  8 +++++++
>  5 files changed, 86 insertions(+), 1 deletion(-)
> 
> diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c
> index 54aabf77eb..3a8c41162d 100644
> --- a/hw/acpi/hmat.c
> +++ b/hw/acpi/hmat.c
> @@ -102,10 +102,11 @@ static void hmat_build_hma(GArray *table_data, MachineState *ms)
>  {
>      GSList *device_list = NULL;
>      uint64_t mem_base, mem_len;
> -    int i, j, hrchy, type;
> +    int i, j, hrchy, type, level;
>      uint32_t mem_ranges_num = ms->numa_state->mem_ranges_num;
>      NumaMemRange *mem_ranges = ms->numa_state->mem_ranges;
>      HMAT_LB_Info *numa_hmat_lb;
> +    HMAT_Cache_Info *numa_hmat_cache = NULL;
>  
>      PCMachineState *pcms = PC_MACHINE(ms);
>      AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(pcms->acpi_dev);
> @@ -212,6 +213,53 @@ static void hmat_build_hma(GArray *table_data, MachineState *ms)
>              }
>          }
>      }
> +
> +    /* Build HMAT Memory Side Cache Information. */
> +    for (i = 0; i < ms->numa_state->num_nodes; i++) {
> +        for (level = 0; level <= MAX_HMAT_CACHE_LEVEL; level++) {
> +            numa_hmat_cache = ms->numa_state->hmat_cache[i][level];
> +            if (numa_hmat_cache) {
> +                uint16_t n = numa_hmat_cache->num_smbios_handles;


> +                uint32_t cache_attr = HMAT_CACHE_TOTAL_LEVEL(
> +                                      numa_hmat_cache->total_levels);
> +                cache_attr |= HMAT_CACHE_CURRENT_LEVEL(
> +                              numa_hmat_cache->level);
> +                cache_attr |= HMAT_CACHE_ASSOC(
> +                                          numa_hmat_cache->associativity);
> +                cache_attr |= HMAT_CACHE_WRITE_POLICY(
> +                                          numa_hmat_cache->write_policy);
> +                cache_attr |= HMAT_CACHE_LINE_SIZE(
> +                                          numa_hmat_cache->line_size);
I don't see a merit of hiding bitfield manipulation behind macro
I'd suggest to drop macros here and mask+shift data here.

> +                cache_attr = cpu_to_le32(cache_attr);
> +
> +                /* Memory Side Cache Information Structure */
> +                /* Type */
> +                build_append_int_noprefix(table_data, 2, 2);
> +                /* Reserved */
> +                build_append_int_noprefix(table_data, 0, 2);
> +                /* Length */
> +                build_append_int_noprefix(table_data, 32 + 2 * n, 4);
> +                /* Proximity Domain for the Memory */
> +                build_append_int_noprefix(table_data,
> +                                          numa_hmat_cache->mem_proximity, 4);
> +                /* Reserved */
> +                build_append_int_noprefix(table_data, 0, 4);
> +                /* Memory Side Cache Size */
> +                build_append_int_noprefix(table_data,
> +                                          numa_hmat_cache->size, 8);
> +                /* Cache Attributes */
> +                build_append_int_noprefix(table_data, cache_attr, 4);
> +                /* Reserved */
> +                build_append_int_noprefix(table_data, 0, 2);
> +                /* Number of SMBIOS handles (n) */
> +                build_append_int_noprefix(table_data, n, 2);
> +
> +                /* SMBIOS Handles */
> +                /* TBD: set smbios handles */
> +                build_append_int_noprefix(table_data, 0, 2 * n);
Is memory side cache structure useful at all without pointing to SMBIOS entries?

> +            }
> +        }
> +    }
>  }
>  
>  void hmat_build_acpi(GArray *table_data, BIOSLinker *linker, MachineState *ms)
> diff --git a/hw/acpi/hmat.h b/hw/acpi/hmat.h
> index f37e30e533..8f563f19dd 100644
> --- a/hw/acpi/hmat.h
> +++ b/hw/acpi/hmat.h
> @@ -77,6 +77,31 @@ struct HMAT_LB_Info {
>      uint16_t    bandwidth[MAX_NODES][MAX_NODES];
>  };
>  
> +struct HMAT_Cache_Info {
> +    /* The memory proximity domain to which the memory belongs. */
> +    uint32_t    mem_proximity;
> +    /* Size of memory side cache in bytes. */
> +    uint64_t    size;
> +    /*
> +     * Total cache levels for this memory
> +     * proximity domain.
> +     */
> +    uint8_t     total_levels;
> +    /* Cache level described in this structure. */
> +    uint8_t     level;
> +    /* Cache Associativity: None/Direct Mapped/Complex Cache Indexing */
> +    uint8_t     associativity;
> +    /* Write Policy: None/Write Back(WB)/Write Through(WT) */
> +    uint8_t     write_policy;
> +    /* Cache Line size in bytes. */
> +    uint16_t    line_size;
> +    /*
> +     * Number of SMBIOS handles that contributes to
> +     * the memory side cache physical devices.
> +     */
> +    uint16_t    num_smbios_handles;
> +};
> +
>  void hmat_build_acpi(GArray *table_data, BIOSLinker *linker, MachineState *ms);
>  
>  #endif
> diff --git a/include/hw/boards.h b/include/hw/boards.h
> index e0169b0a64..8609f923d9 100644
> --- a/include/hw/boards.h
> +++ b/include/hw/boards.h
> @@ -266,6 +266,9 @@ typedef struct NumaState {
>  
>      /* NUMA modes HMAT Locality Latency and Bandwidth Information */
>      HMAT_LB_Info *hmat_lb[HMAT_LB_LEVELS][HMAT_LB_TYPES];
> +
> +    /* Memory Side Cache Information Structure */
> +    HMAT_Cache_Info *hmat_cache[MAX_NODES][MAX_HMAT_CACHE_LEVEL + 1];
>  } NumaState;
>  
>  /**
> diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
> index c0257e936b..d971f5109e 100644
> --- a/include/qemu/typedefs.h
> +++ b/include/qemu/typedefs.h
> @@ -33,6 +33,7 @@ typedef struct FWCfgEntry FWCfgEntry;
>  typedef struct FWCfgIoState FWCfgIoState;
>  typedef struct FWCfgMemState FWCfgMemState;
>  typedef struct FWCfgState FWCfgState;
> +typedef struct HMAT_Cache_Info HMAT_Cache_Info;
>  typedef struct HMAT_LB_Info HMAT_LB_Info;
>  typedef struct HVFX86EmulatorState HVFX86EmulatorState;
>  typedef struct I2CBus I2CBus;
> diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
> index da51a9bc26..0cfb387887 100644
> --- a/include/sysemu/sysemu.h
> +++ b/include/sysemu/sysemu.h
> @@ -143,9 +143,17 @@ enum {
>      HMAT_LB_DATA_WRITE_BANDWIDTH  = 5,
>  };
>  
> +#define MAX_HMAT_CACHE_LEVEL        3
> +
>  #define HMAT_LB_LEVELS    (HMAT_LB_MEM_CACHE_3RD_LEVEL + 1)
>  #define HMAT_LB_TYPES     (HMAT_LB_DATA_WRITE_BANDWIDTH + 1)
>  
> +#define HMAT_CACHE_TOTAL_LEVEL(level)      (level & 0xF)
> +#define HMAT_CACHE_CURRENT_LEVEL(level)    ((level & 0xF) << 4)
> +#define HMAT_CACHE_ASSOC(assoc)            ((assoc & 0xF) << 8)
> +#define HMAT_CACHE_WRITE_POLICY(policy)    ((policy & 0xF) << 12)
> +#define HMAT_CACHE_LINE_SIZE(size)         ((size & 0xFFFF) << 16)
> +
>  #define MAX_OPTION_ROMS 16
>  typedef struct QEMUOptionRom {
>      const char *name;
Tao Xu June 5, 2019, 6:04 a.m. UTC | #2
On 6/4/2019 11:04 PM, Igor Mammedov wrote:
> On Wed,  8 May 2019 14:17:22 +0800
> Tao Xu <tao3.xu@intel.com> wrote:
> 
>> From: Liu Jingqi <jingqi.liu@intel.com>
>>
>> This structure describes memory side cache information for memory
>> proximity domains if the memory side cache is present and the
>> physical device(SMBIOS handle) forms the memory side cache.
>> The software could use this information to effectively place
>> the data in memory to maximize the performance of the system
>> memory that use the memory side cache.
>>
>> Signed-off-by: Liu Jingqi <jingqi.liu@intel.com>
>> Signed-off-by: Tao Xu <tao3.xu@intel.com>
>> ---
>>
...
>> +
>> +                /* SMBIOS Handles */
>> +                /* TBD: set smbios handles */
>> +                build_append_int_noprefix(table_data, 0, 2 * n);
> Is memory side cache structure useful at all without pointing to SMBIOS entries?
> 
They are not useful yet, and the kernel 5.1 HMAT sysfs doesn't show 
SMBIOS entries. We can update it if it is useful in the future.
Igor Mammedov June 5, 2019, 12:12 p.m. UTC | #3
On Wed, 5 Jun 2019 14:04:10 +0800
Tao Xu <tao3.xu@intel.com> wrote:

> On 6/4/2019 11:04 PM, Igor Mammedov wrote:
> > On Wed,  8 May 2019 14:17:22 +0800
> > Tao Xu <tao3.xu@intel.com> wrote:
> >   
> >> From: Liu Jingqi <jingqi.liu@intel.com>
> >>
> >> This structure describes memory side cache information for memory
> >> proximity domains if the memory side cache is present and the
> >> physical device(SMBIOS handle) forms the memory side cache.
> >> The software could use this information to effectively place
> >> the data in memory to maximize the performance of the system
> >> memory that use the memory side cache.
> >>
> >> Signed-off-by: Liu Jingqi <jingqi.liu@intel.com>
> >> Signed-off-by: Tao Xu <tao3.xu@intel.com>
> >> ---
> >>  
> ...
> >> +
> >> +                /* SMBIOS Handles */
> >> +                /* TBD: set smbios handles */
> >> +                build_append_int_noprefix(table_data, 0, 2 * n);  
> > Is memory side cache structure useful at all without pointing to SMBIOS entries?
> >   
> They are not useful yet, and the kernel 5.1 HMAT sysfs doesn't show 
> SMBIOS entries. We can update it if it useful in the future.

In that case I'd suggest to drop it for now until this table is properly
populated and ready for consumption. (i.e. drop this patch and corresponding
CLI 9/11 patch).
Tao Xu June 6, 2019, 3 a.m. UTC | #4
On 6/5/2019 8:12 PM, Igor Mammedov wrote:
> On Wed, 5 Jun 2019 14:04:10 +0800
> Tao Xu <tao3.xu@intel.com> wrote:
> 
>> On 6/4/2019 11:04 PM, Igor Mammedov wrote:
>>> On Wed,  8 May 2019 14:17:22 +0800
>>> Tao Xu <tao3.xu@intel.com> wrote:
>>>    
...
>>>> +
>>>> +                /* SMBIOS Handles */
>>>> +                /* TBD: set smbios handles */
>>>> +                build_append_int_noprefix(table_data, 0, 2 * n);
>>> Is memory side cache structure useful at all without pointing to SMBIOS entries?
>>>    
>> They are not useful yet, and the kernel 5.1 HMAT sysfs doesn't show
>> SMBIOS entries. We can update it if it useful in the future.
> 
> In that case I'd suggest to drop it for now until this table is properly
> populated and ready for consumption. (i.e. drop this patch and corresponding
> CLI 9/11 patch).
> 

But the kernel HMAT can read the other Memory Side Cache Information 
except the SMBIOS entries, and the host HMAT tables also don't have 
SMBIOS Handles; they likewise show the Number of SMBIOS handles (n) as 0. 
So I am wondering if it is better to set "SMBIOS handles (n)" to 0, 
remove the TODO, and comment the reason why it is set to 0?
Igor Mammedov June 6, 2019, 4:45 p.m. UTC | #5
On Thu, 6 Jun 2019 11:00:33 +0800
Tao Xu <tao3.xu@intel.com> wrote:

> On 6/5/2019 8:12 PM, Igor Mammedov wrote:
> > On Wed, 5 Jun 2019 14:04:10 +0800
> > Tao Xu <tao3.xu@intel.com> wrote:
> >   
> >> On 6/4/2019 11:04 PM, Igor Mammedov wrote:  
> >>> On Wed,  8 May 2019 14:17:22 +0800
> >>> Tao Xu <tao3.xu@intel.com> wrote:
> >>>      
> ...
> >>>> +
> >>>> +                /* SMBIOS Handles */
> >>>> +                /* TBD: set smbios handles */
> >>>> +                build_append_int_noprefix(table_data, 0, 2 * n);  
> >>> Is memory side cache structure useful at all without pointing to SMBIOS entries?
> >>>      
> >> They are not useful yet, and the kernel 5.1 HMAT sysfs doesn't show
> >> SMBIOS entries. We can update it if it useful in the future.  
> > 
> > In that case I'd suggest to drop it for now until this table is properly
> > populated and ready for consumption. (i.e. drop this patch and corresponding
> > CLI 9/11 patch).
> >   
> 
> But the kernel HMAT can read othe Memory Side Cache Information except 
> SMBIOS entries and the host HMAT tables also haven’t SMBIOS Handles it 
> also shows Number of SMBIOS handles (n) as 0. So I am wondering if it is 
> better to setting "SMBIOS handles (n)" as 0, remove TODO and comment the 
> reason why set it 0?

My understanding is that SMBIOS handles are used to associate side cache
descriptions with RAM pointed by SMBIOS handles, so that OS would be
able to figure out what RAM modules are cached by what cache.
Hence I suspect that side cache table is useless in the best case without
valid references to SMBIOS handles.
(I might be totally mistaken but the matter requires clarification before
we commit to it)
Tao Xu June 10, 2019, 1:39 p.m. UTC | #6
On 6/7/2019 12:45 AM, Igor Mammedov wrote:
> On Thu, 6 Jun 2019 11:00:33 +0800
> Tao Xu <tao3.xu@intel.com> wrote:
> 
...
>>
>> But the kernel HMAT can read othe Memory Side Cache Information except
>> SMBIOS entries and the host HMAT tables also haven’t SMBIOS Handles it
>> also shows Number of SMBIOS handles (n) as 0. So I am wondering if it is
>> better to setting "SMBIOS handles (n)" as 0, remove TODO and comment the
>> reason why set it 0?
> 
> My understanding is that SMBIOS handles are used to associate side cache
> descriptions with RAM pointed by SMBIOS handles, so that OS would be
> able to figure out what RAM modules are cached by what cache.
> Hence I suspect that side cache table is useless in the best case without
> valid references to SMBIOS handles.
> (I might be totally mistaken but the matter requires clarification before
> we commit to it)
> 

I am sorry for not providing a detailed description of the Memory Side 
Cache use case. I will add a more detailed description in the next 
version of the patch.

As the commit message and /Documentation/admin-guide/mm/numaperf.rst of 
Kernel HMAT(listed blow), Memory Side Cache Structure is used to provide 
the cache information about System memory for the software to use. Then 
the software can maximize the performance because it can choose the best 
node to use.

Memory Side Cache Information Structure and System Locality Latency and 
Bandwidth Information Structure can both provide more information than 
numa distance for software to see. So back to the SMBIOS: in the spec, 
SMBIOS handles point to the memory side cache physical devices, but they 
are also informational and do not contribute to the performance of the 
described memory. The field "Proximity Domain for the Memory" can show 
the described memory.

I am wondering if this explanation is clear? Thank you.

"System memory may be constructed in a hierarchy of elements with 
various performance characteristics in order to provide large address 
space of slower performing memory cached by a smaller higher performing 
memory."

"An application does not need to know about caching attributes in order
to use the system. Software may optionally query the memory cache
attributes in order to maximize the performance out of such a setup.
If the system provides a way for the kernel to discover this 
information, for example with ACPI HMAT (Heterogeneous Memory Attribute 
Table), the kernel will append these attributes to the NUMA node memory 
target."

"Each cache level's directory provides its attributes. For example, the
following shows a single cache level and the attributes available for
software to query::

	# tree sys/devices/system/node/node0/memory_side_cache/
	/sys/devices/system/node/node0/memory_side_cache/
	|-- index1
	|   |-- indexing
	|   |-- line_size
	|   |-- size
	|   `-- write_policy
"
Igor Mammedov June 16, 2019, 7:41 p.m. UTC | #7
On Mon, 10 Jun 2019 21:39:12 +0800
Tao Xu <tao3.xu@intel.com> wrote:

> On 6/7/2019 12:45 AM, Igor Mammedov wrote:
> > On Thu, 6 Jun 2019 11:00:33 +0800
> > Tao Xu <tao3.xu@intel.com> wrote:
> >   
> ...
> >>
> >> But the kernel HMAT can read othe Memory Side Cache Information except
> >> SMBIOS entries and the host HMAT tables also haven’t SMBIOS Handles it
> >> also shows Number of SMBIOS handles (n) as 0. So I am wondering if it is
> >> better to setting "SMBIOS handles (n)" as 0, remove TODO and comment the
> >> reason why set it 0?  
> > 
> > My understanding is that SMBIOS handles are used to associate side cache
> > descriptions with RAM pointed by SMBIOS handles, so that OS would be
> > able to figure out what RAM modules are cached by what cache.
> > Hence I suspect that side cache table is useless in the best case without
> > valid references to SMBIOS handles.
> > (I might be totally mistaken but the matter requires clarification before
> > we commit to it)
> >   
> 
> I am sorry for not providing a detailed description for Memory Side 
> Cache use case. I will add more detailed description in next version of 
> patch.
> 
> As the commit message and /Documentation/admin-guide/mm/numaperf.rst of 
> Kernel HMAT(listed blow), Memory Side Cache Structure is used to provide 
> the cache information about System memory for the software to use. Then 
> the software can maximize the performance because it can choose the best 
> node to use.
> 
> Memory Side Cache Information Structure and System Locality Latency and 
> Bandwidth Information Structure can both provide more information than 
> numa distance for software to see. So back to the SMBIOS, in spec, 
> SMBIOS handles point to the memory side cache physical devices, but they 
> are also information and not contribute to the performance of the 
> described memory. The field "Proximity Domain for the Memory" can show 
> the described memory.
> 
> I am wondering if this explanation is clear? Thank you.

I didn't manage to find a definite answer in the spec as to what the SMBIOS
entry should describe. Another use of 'Physical Memory Component' is in the
PMTT table, and it looks to me that type 17 should refer to a DIMM device.

But well, considering the spec isn't clear about the subject and that the
Linux kernel doesn't seem to use these entries, let's use it without SMBIOS
entries for now. Like you suggested, let's set the number of SMBIOS handles
to 0 and drop num_smbios_handles so that the user won't be able to provide any.


> "System memory may be constructed in a hierarchy of elements with 
> various performance characteristics in order to provide large address 
> space of slower performing memory cached by a smaller higher performing 
> memory."
> 
> "An application does not need to know about caching attributes in order
> to use the system. Software may optionally query the memory cache
> attributes in order to maximize the performance out of such a setup.
> If the system provides a way for the kernel to discover this 
> information, for example with ACPI HMAT (Heterogeneous Memory Attribute 
> Table), the kernel will append these attributes to the NUMA node memory 
> target."
> 
> "Each cache level's directory provides its attributes. For example, the
> following shows a single cache level and the attributes available for
> software to query::
> 
> 	# tree sys/devices/system/node/node0/memory_side_cache/
> 	/sys/devices/system/node/node0/memory_side_cache/
> 	|-- index1
> 	|   |-- indexing
> 	|   |-- line_size
> 	|   |-- size
> 	|   `-- write_policy
> "
>
diff mbox series

Patch

diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c
index 54aabf77eb..3a8c41162d 100644
--- a/hw/acpi/hmat.c
+++ b/hw/acpi/hmat.c
@@ -102,10 +102,11 @@  static void hmat_build_hma(GArray *table_data, MachineState *ms)
 {
     GSList *device_list = NULL;
     uint64_t mem_base, mem_len;
-    int i, j, hrchy, type;
+    int i, j, hrchy, type, level;
     uint32_t mem_ranges_num = ms->numa_state->mem_ranges_num;
     NumaMemRange *mem_ranges = ms->numa_state->mem_ranges;
     HMAT_LB_Info *numa_hmat_lb;
+    HMAT_Cache_Info *numa_hmat_cache = NULL;
 
     PCMachineState *pcms = PC_MACHINE(ms);
     AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(pcms->acpi_dev);
@@ -212,6 +213,53 @@  static void hmat_build_hma(GArray *table_data, MachineState *ms)
             }
         }
     }
+
+    /* Build HMAT Memory Side Cache Information. */
+    for (i = 0; i < ms->numa_state->num_nodes; i++) {
+        for (level = 0; level <= MAX_HMAT_CACHE_LEVEL; level++) {
+            numa_hmat_cache = ms->numa_state->hmat_cache[i][level];
+            if (numa_hmat_cache) {
+                uint16_t n = numa_hmat_cache->num_smbios_handles;
+                uint32_t cache_attr = HMAT_CACHE_TOTAL_LEVEL(
+                                      numa_hmat_cache->total_levels);
+                cache_attr |= HMAT_CACHE_CURRENT_LEVEL(
+                              numa_hmat_cache->level);
+                cache_attr |= HMAT_CACHE_ASSOC(
+                                          numa_hmat_cache->associativity);
+                cache_attr |= HMAT_CACHE_WRITE_POLICY(
+                                          numa_hmat_cache->write_policy);
+                cache_attr |= HMAT_CACHE_LINE_SIZE(
+                                          numa_hmat_cache->line_size);
+                cache_attr = cpu_to_le32(cache_attr);
+
+                /* Memory Side Cache Information Structure */
+                /* Type */
+                build_append_int_noprefix(table_data, 2, 2);
+                /* Reserved */
+                build_append_int_noprefix(table_data, 0, 2);
+                /* Length */
+                build_append_int_noprefix(table_data, 32 + 2 * n, 4);
+                /* Proximity Domain for the Memory */
+                build_append_int_noprefix(table_data,
+                                          numa_hmat_cache->mem_proximity, 4);
+                /* Reserved */
+                build_append_int_noprefix(table_data, 0, 4);
+                /* Memory Side Cache Size */
+                build_append_int_noprefix(table_data,
+                                          numa_hmat_cache->size, 8);
+                /* Cache Attributes */
+                build_append_int_noprefix(table_data, cache_attr, 4);
+                /* Reserved */
+                build_append_int_noprefix(table_data, 0, 2);
+                /* Number of SMBIOS handles (n) */
+                build_append_int_noprefix(table_data, n, 2);
+
+                /* SMBIOS Handles */
+                /* TBD: set smbios handles */
+                build_append_int_noprefix(table_data, 0, 2 * n);
+            }
+        }
+    }
 }
 
 void hmat_build_acpi(GArray *table_data, BIOSLinker *linker, MachineState *ms)
diff --git a/hw/acpi/hmat.h b/hw/acpi/hmat.h
index f37e30e533..8f563f19dd 100644
--- a/hw/acpi/hmat.h
+++ b/hw/acpi/hmat.h
@@ -77,6 +77,31 @@  struct HMAT_LB_Info {
     uint16_t    bandwidth[MAX_NODES][MAX_NODES];
 };
 
+struct HMAT_Cache_Info {
+    /* The memory proximity domain to which the memory belongs. */
+    uint32_t    mem_proximity;
+    /* Size of memory side cache in bytes. */
+    uint64_t    size;
+    /*
+     * Total cache levels for this memory
+     * proximity domain.
+     */
+    uint8_t     total_levels;
+    /* Cache level described in this structure. */
+    uint8_t     level;
+    /* Cache Associativity: None/Direct Mapped/Complex Cache Indexing */
+    uint8_t     associativity;
+    /* Write Policy: None/Write Back(WB)/Write Through(WT) */
+    uint8_t     write_policy;
+    /* Cache Line size in bytes. */
+    uint16_t    line_size;
+    /*
+     * Number of SMBIOS handles that contributes to
+     * the memory side cache physical devices.
+     */
+    uint16_t    num_smbios_handles;
+};
+
 void hmat_build_acpi(GArray *table_data, BIOSLinker *linker, MachineState *ms);
 
 #endif
diff --git a/include/hw/boards.h b/include/hw/boards.h
index e0169b0a64..8609f923d9 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -266,6 +266,9 @@  typedef struct NumaState {
 
     /* NUMA modes HMAT Locality Latency and Bandwidth Information */
     HMAT_LB_Info *hmat_lb[HMAT_LB_LEVELS][HMAT_LB_TYPES];
+
+    /* Memory Side Cache Information Structure */
+    HMAT_Cache_Info *hmat_cache[MAX_NODES][MAX_HMAT_CACHE_LEVEL + 1];
 } NumaState;
 
 /**
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index c0257e936b..d971f5109e 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -33,6 +33,7 @@  typedef struct FWCfgEntry FWCfgEntry;
 typedef struct FWCfgIoState FWCfgIoState;
 typedef struct FWCfgMemState FWCfgMemState;
 typedef struct FWCfgState FWCfgState;
+typedef struct HMAT_Cache_Info HMAT_Cache_Info;
 typedef struct HMAT_LB_Info HMAT_LB_Info;
 typedef struct HVFX86EmulatorState HVFX86EmulatorState;
 typedef struct I2CBus I2CBus;
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index da51a9bc26..0cfb387887 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -143,9 +143,17 @@  enum {
     HMAT_LB_DATA_WRITE_BANDWIDTH  = 5,
 };
 
+#define MAX_HMAT_CACHE_LEVEL        3
+
 #define HMAT_LB_LEVELS    (HMAT_LB_MEM_CACHE_3RD_LEVEL + 1)
 #define HMAT_LB_TYPES     (HMAT_LB_DATA_WRITE_BANDWIDTH + 1)
 
+#define HMAT_CACHE_TOTAL_LEVEL(level)      (level & 0xF)
+#define HMAT_CACHE_CURRENT_LEVEL(level)    ((level & 0xF) << 4)
+#define HMAT_CACHE_ASSOC(assoc)            ((assoc & 0xF) << 8)
+#define HMAT_CACHE_WRITE_POLICY(policy)    ((policy & 0xF) << 12)
+#define HMAT_CACHE_LINE_SIZE(size)         ((size & 0xFFFF) << 16)
+
 #define MAX_OPTION_ROMS 16
 typedef struct QEMUOptionRom {
     const char *name;