
[v3,3/5] hw/nvme: add basic endurance group support

Message ID 20230220115926.54623-4-jwd@defmacro.it
State New
Series: Support Flexible Data Placement (FDP)

Commit Message

Jesper Wendel Devantier Feb. 20, 2023, 11:59 a.m. UTC
From: Klaus Jensen <k.jensen@samsung.com>

Add the mandatory Endurance Group identify data structures and log
pages.

For now, all namespaces in a subsystem belong to a single Endurance
Group.

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
 hw/nvme/ctrl.c       | 48 +++++++++++++++++++++++++++++++++++++++++++-
 hw/nvme/ns.c         |  3 +++
 hw/nvme/nvme.h       |  4 ++++
 include/block/nvme.h | 42 +++++++++++++++++++++++++++++++-------
 4 files changed, 89 insertions(+), 8 deletions(-)
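
As a quick way to exercise this on top of the patch (a sketch only, not part
of the series; it assumes the existing nvme-subsys/nvme/nvme-ns device options
and an nvme-cli build that provides the endurance-log subcommand, whose exact
flag spelling may differ):

    # QEMU: one subsystem, one controller, one namespace (endurance group 1)
    -device nvme-subsys,id=nvme-subsys-0,nqn=subsys0 \
    -device nvme,serial=deadbeef,subsys=nvme-subsys-0 \
    -drive id=nvm-1,file=nvm-1.img,format=raw,if=none \
    -device nvme-ns,drive=nvm-1,nsid=1

    # In the guest: fetch the Endurance Group Information log (LID 0x09)
    # for endurance group 1.
    nvme endurance-log /dev/nvme0 --group-id=1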

Comments

Joel Granados Feb. 24, 2023, 8:51 a.m. UTC | #1
On Mon, Feb 20, 2023 at 12:59:24PM +0100, Jesper Devantier wrote:
> From: Klaus Jensen <k.jensen@samsung.com>
> 
> Add the mandatory Endurance Group identify data structures and log
> pages.
> 
> For now, all namespaces in a subsystem belong to a single Endurance
> Group.
> 
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> ---
>  hw/nvme/ctrl.c       | 48 +++++++++++++++++++++++++++++++++++++++++++-
>  hw/nvme/ns.c         |  3 +++
>  hw/nvme/nvme.h       |  4 ++++
>  include/block/nvme.h | 42 +++++++++++++++++++++++++++++++-------
>  4 files changed, 89 insertions(+), 8 deletions(-)
> 
> diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
> index 99b92ff20b..729ed9adc5 100644
> --- a/hw/nvme/ctrl.c
> +++ b/hw/nvme/ctrl.c
> @@ -4454,6 +4454,44 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
>      return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req);
>  }
>  
> +static uint16_t nvme_endgrp_info(NvmeCtrl *n,  uint8_t rae, uint32_t buf_len,
> +                                 uint64_t off, NvmeRequest *req)
> +{
> +    uint32_t dw11 = le32_to_cpu(req->cmd.cdw11);
> +    uint16_t endgrpid = (dw11 >> 16) & 0xffff;
> +    struct nvme_stats stats = {};
> +    NvmeEndGrpLog info = {};
> +    int i;
> +
> +    if (!n->subsys || endgrpid != 0x1) {
> +        return NVME_INVALID_FIELD | NVME_DNR;
> +    }
> +
> +    if (off >= sizeof(info)) {
> +        return NVME_INVALID_FIELD | NVME_DNR;
> +    }
> +
> +    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
> +        NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i);
> +        if (!ns) {
> +            continue;
> +        }
> +
> +        nvme_set_blk_stats(ns, &stats);
> +    }
> +
> +    info.data_units_read[0] = cpu_to_le64(stats.units_read / 1000000000);
> +    info.data_units_written[0] = cpu_to_le64(stats.units_written / 1000000000);
> +    info.media_units_written[0] = cpu_to_le64(stats.units_written / 1000000000);

This division seems a bit strange to me. Maybe I'm missing something:

NVMe spec states this about Data Units {Written,Read}: "... Contains the
number of 512 byte data units the host has .... This value is reported
in thousands (i.e., a value of 1 corresponds to 1,000 units of 512 bytes
read) and is rounded up (e.g., one indicates that the number of 512 byte
data units {read,written} is from 1 to 1,000..."

1. The way I understand this text from the spec is that if 512 bytes were
   written, then data_units_written should contain 1; but as the code stands,
   it will contain 0. Am I missing something?

2. And where are the 512-byte units represented? I was expecting a bit
   shift by 9.

3. And where is it rounding up to thousands?

Shouldn't it be like this:

 +    info.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read >> BDRV_SECTOR_BITS, 1000));
 +    info.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written >> BDRV_SECTOR_BITS, 1000));
 +    info.media_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written >> BDRV_SECTOR_BITS, 1000));
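
(A worked check of the reading above, assuming stats.units_written counts
bytes: writing 512 bytes gives 512 / 1000000000 == 0 with the code as posted,
whereas the SMART-style interpretation quoted from the spec would give
DIV_ROUND_UP(512 >> 9, 1000) == 1.)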


> +    info.host_read_commands[0] = cpu_to_le64(stats.read_commands);
> +    info.host_write_commands[0] = cpu_to_le64(stats.write_commands);
> +
> +    buf_len = MIN(sizeof(info) - off, buf_len);
> +
> +    return nvme_c2h(n, (uint8_t *)&info + off, buf_len, req);
> +}
> +
> +
>  static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
>                                   NvmeRequest *req)
>  {
> @@ -4626,6 +4664,8 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
>          return nvme_changed_nslist(n, rae, len, off, req);
>      case NVME_LOG_CMD_EFFECTS:
>          return nvme_cmd_effects(n, csi, len, off, req);
> +    case NVME_LOG_ENDGRP:
> +        return nvme_endgrp_info(n, rae, len, off, req);
>      default:
>          trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
>          return NVME_INVALID_FIELD | NVME_DNR;
> @@ -7382,6 +7422,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
>      uint8_t *pci_conf = pci_dev->config;
>      uint64_t cap = ldq_le_p(&n->bar.cap);
>      NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
> +    uint32_t ctratt;
>  
>      id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
>      id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
> @@ -7392,7 +7433,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
>      id->cntlid = cpu_to_le16(n->cntlid);
>  
>      id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR);
> -    id->ctratt |= cpu_to_le32(NVME_CTRATT_ELBAS);
> +    ctratt = NVME_CTRATT_ELBAS;
>  
>      id->rab = 6;
>  
> @@ -7459,8 +7500,13 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
>  
>      if (n->subsys) {
>          id->cmic |= NVME_CMIC_MULTI_CTRL;
> +        ctratt |= NVME_CTRATT_ENDGRPS;
> +
> +        id->endgidmax = cpu_to_le16(0x1);
>      }
>  
> +    id->ctratt = cpu_to_le32(ctratt);
> +
>      NVME_CAP_SET_MQES(cap, 0x7ff);
>      NVME_CAP_SET_CQR(cap, 1);
>      NVME_CAP_SET_TO(cap, 0xf);
> diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c
> index 8ebab4fbce..23872a174c 100644
> --- a/hw/nvme/ns.c
> +++ b/hw/nvme/ns.c
> @@ -592,6 +592,8 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
>      if (subsys) {
>          subsys->namespaces[nsid] = ns;
>  
> +        ns->id_ns.endgid = cpu_to_le16(0x1);
> +
>          if (ns->params.detached) {
>              return;
>          }
> @@ -607,6 +609,7 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
>  
>              return;
>          }
> +
>      }
>  
>      nvme_attach_ns(n, ns);
> diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
> index 5d221073ac..a88e0dea5c 100644
> --- a/hw/nvme/nvme.h
> +++ b/hw/nvme/nvme.h
> @@ -45,6 +45,10 @@ typedef struct NvmeBus {
>      OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
>  #define SUBSYS_SLOT_RSVD (void *)0xFFFF
>  
> +typedef struct NvmeEnduranceGroup {
> +    uint8_t event_conf;
> +} NvmeEnduranceGroup;
> +
>  typedef struct NvmeSubsystem {
>      DeviceState parent_obj;
>      NvmeBus     bus;
> diff --git a/include/block/nvme.h b/include/block/nvme.h
> index 8027b7126b..4abc1bbfa5 100644
> --- a/include/block/nvme.h
> +++ b/include/block/nvme.h
> @@ -58,6 +58,24 @@ enum NvmeBarRegs {
>      NVME_REG_PMRMSCU = offsetof(NvmeBar, pmrmscu),
>  };
>  
> +typedef struct QEMU_PACKED NvmeEndGrpLog {
> +    uint8_t  critical_warning;
> +    uint8_t  rsvd[2];
> +    uint8_t  avail_spare;
> +    uint8_t  avail_spare_thres;
> +    uint8_t  percet_used;
> +    uint8_t  rsvd1[26];
> +    uint64_t end_estimate[2];
> +    uint64_t data_units_read[2];
> +    uint64_t data_units_written[2];
> +    uint64_t media_units_written[2];
> +    uint64_t host_read_commands[2];
> +    uint64_t host_write_commands[2];
> +    uint64_t media_integrity_errors[2];
> +    uint64_t no_err_info_log_entries[2];
> +    uint8_t rsvd2[352];
> +} NvmeEndGrpLog;
> +
>  enum NvmeCapShift {
>      CAP_MQES_SHIFT     = 0,
>      CAP_CQR_SHIFT      = 16,
> @@ -1005,11 +1023,12 @@ enum {
>  };
>  
>  enum NvmeLogIdentifier {
> -    NVME_LOG_ERROR_INFO     = 0x01,
> -    NVME_LOG_SMART_INFO     = 0x02,
> -    NVME_LOG_FW_SLOT_INFO   = 0x03,
> -    NVME_LOG_CHANGED_NSLIST = 0x04,
> -    NVME_LOG_CMD_EFFECTS    = 0x05,
> +    NVME_LOG_ERROR_INFO                 = 0x01,
> +    NVME_LOG_SMART_INFO                 = 0x02,
> +    NVME_LOG_FW_SLOT_INFO               = 0x03,
> +    NVME_LOG_CHANGED_NSLIST             = 0x04,
> +    NVME_LOG_CMD_EFFECTS                = 0x05,
> +    NVME_LOG_ENDGRP                     = 0x09,
>  };
>  
>  typedef struct QEMU_PACKED NvmePSD {
> @@ -1091,7 +1110,10 @@ typedef struct QEMU_PACKED NvmeIdCtrl {
>      uint16_t    mntmt;
>      uint16_t    mxtmt;
>      uint32_t    sanicap;
> -    uint8_t     rsvd332[180];
> +    uint8_t     rsvd332[6];
> +    uint16_t    nsetidmax;
> +    uint16_t    endgidmax;
> +    uint8_t     rsvd342[170];
>      uint8_t     sqes;
>      uint8_t     cqes;
>      uint16_t    maxcmd;
> @@ -1134,6 +1156,7 @@ enum NvmeIdCtrlOaes {
>  };
>  
>  enum NvmeIdCtrlCtratt {
> +    NVME_CTRATT_ENDGRPS = 1 <<  4,
>      NVME_CTRATT_ELBAS   = 1 << 15,
>  };
>  
> @@ -1227,6 +1250,7 @@ enum NvmeNsAttachmentOperation {
>  #define NVME_AEC_SMART(aec)         (aec & 0xff)
>  #define NVME_AEC_NS_ATTR(aec)       ((aec >> 8) & 0x1)
>  #define NVME_AEC_FW_ACTIVATION(aec) ((aec >> 9) & 0x1)
> +#define NVME_AEC_ENDGRP_NOTICE(aec) ((aec >> 14) & 0x1)
>  
>  #define NVME_ERR_REC_TLER(err_rec)  (err_rec & 0xffff)
>  #define NVME_ERR_REC_DULBE(err_rec) (err_rec & 0x10000)
> @@ -1338,7 +1362,10 @@ typedef struct QEMU_PACKED NvmeIdNs {
>      uint16_t    mssrl;
>      uint32_t    mcl;
>      uint8_t     msrc;
> -    uint8_t     rsvd81[23];
> +    uint8_t     rsvd81[18];
> +    uint8_t     nsattr;
> +    uint16_t    nvmsetid;
> +    uint16_t    endgid;
>      uint8_t     nguid[16];
>      uint64_t    eui64;
>      NvmeLBAF    lbaf[NVME_MAX_NLBAF];
> @@ -1655,5 +1682,6 @@ static inline void _nvme_check_size(void)
>      QEMU_BUILD_BUG_ON(sizeof(NvmePriCtrlCap) != 4096);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlEntry) != 32);
>      QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlList) != 4096);
> +    QEMU_BUILD_BUG_ON(sizeof(NvmeEndGrpLog) != 512);
>  }
>  #endif
> -- 
> 2.39.2
> 
>
Klaus Jensen Feb. 24, 2023, 9:32 a.m. UTC | #2
On Feb 24 09:51, Joel Granados wrote:
> On Mon, Feb 20, 2023 at 12:59:24PM +0100, Jesper Devantier wrote:
> > [...]
> >
> > +    info.data_units_read[0] = cpu_to_le64(stats.units_read / 1000000000);
> > +    info.data_units_written[0] = cpu_to_le64(stats.units_written / 1000000000);
> > +    info.media_units_written[0] = cpu_to_le64(stats.units_written / 1000000000);
> 
> This division is a bit strange for me. Maybe I'm missing something:
> 
> NVMe spec states this about Data Units {Written,Read}: "... Contains the
> number of 512 byte data units the host has .... This value is reported
> in thousands (i.e., a value of 1 corresponds to 1,000 units of 512 bytes
> read) and is rounded up (e.g., one indicates that the number of 512 byte
> data units {read,written} is from 1 to 1,000..."
> 
> 1. The way I understand this text from the spec is that if 512 were
>    written, then the data_units_written should contain 1; but as I see
>    it now, it will contain 0. Am I missing something?
> 
> 2. And where is the 512 units represented? I was expecting a bit shift by
>    9.
> 
> 3. And where is it rounding up to thousands?
> 
> Shouldn't it be like this:
> 
>  +    info.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read >> BDRV_SECTOR_BITS, 1000));
>  +    info.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written >> BDRV_SECTOR_BITS, 1000));
>  +    info.media_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written >> BDRV_SECTOR_BITS, 1000));
> 

Yes and no ;)

First, no, because you are quoting the spec text for the SMART / Health
Information log page. This is the Endurance Group Information log page,
where Data Units Read is the total number of bytes read, reported in
billions.

Secondly, Yes, it is missing the DIV_ROUND_UP().
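
Taken together, what the exchange points to is roughly the following (a
sketch only, assuming stats.units_read/units_written hold byte counts as
filled in by nvme_set_blk_stats): total bytes converted to billions and
rounded up, with no 512-byte or thousands scaling:

    info.data_units_read[0] =
        cpu_to_le64(DIV_ROUND_UP(stats.units_read, 1000000000));
    info.data_units_written[0] =
        cpu_to_le64(DIV_ROUND_UP(stats.units_written, 1000000000));
    info.media_units_written[0] =
        cpu_to_le64(DIV_ROUND_UP(stats.units_written, 1000000000));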

Patch

diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 99b92ff20b..729ed9adc5 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -4454,6 +4454,44 @@  static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
     return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req);
 }
 
+static uint16_t nvme_endgrp_info(NvmeCtrl *n,  uint8_t rae, uint32_t buf_len,
+                                 uint64_t off, NvmeRequest *req)
+{
+    uint32_t dw11 = le32_to_cpu(req->cmd.cdw11);
+    uint16_t endgrpid = (dw11 >> 16) & 0xffff;
+    struct nvme_stats stats = {};
+    NvmeEndGrpLog info = {};
+    int i;
+
+    if (!n->subsys || endgrpid != 0x1) {
+        return NVME_INVALID_FIELD | NVME_DNR;
+    }
+
+    if (off >= sizeof(info)) {
+        return NVME_INVALID_FIELD | NVME_DNR;
+    }
+
+    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+        NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i);
+        if (!ns) {
+            continue;
+        }
+
+        nvme_set_blk_stats(ns, &stats);
+    }
+
+    info.data_units_read[0] = cpu_to_le64(stats.units_read / 1000000000);
+    info.data_units_written[0] = cpu_to_le64(stats.units_written / 1000000000);
+    info.media_units_written[0] = cpu_to_le64(stats.units_written / 1000000000);
+    info.host_read_commands[0] = cpu_to_le64(stats.read_commands);
+    info.host_write_commands[0] = cpu_to_le64(stats.write_commands);
+
+    buf_len = MIN(sizeof(info) - off, buf_len);
+
+    return nvme_c2h(n, (uint8_t *)&info + off, buf_len, req);
+}
+
+
 static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
                                  NvmeRequest *req)
 {
@@ -4626,6 +4664,8 @@  static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
         return nvme_changed_nslist(n, rae, len, off, req);
     case NVME_LOG_CMD_EFFECTS:
         return nvme_cmd_effects(n, csi, len, off, req);
+    case NVME_LOG_ENDGRP:
+        return nvme_endgrp_info(n, rae, len, off, req);
     default:
         trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
         return NVME_INVALID_FIELD | NVME_DNR;
@@ -7382,6 +7422,7 @@  static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
     uint8_t *pci_conf = pci_dev->config;
     uint64_t cap = ldq_le_p(&n->bar.cap);
     NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
+    uint32_t ctratt;
 
     id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
     id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
@@ -7392,7 +7433,7 @@  static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
     id->cntlid = cpu_to_le16(n->cntlid);
 
     id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR);
-    id->ctratt |= cpu_to_le32(NVME_CTRATT_ELBAS);
+    ctratt = NVME_CTRATT_ELBAS;
 
     id->rab = 6;
 
@@ -7459,8 +7500,13 @@  static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
 
     if (n->subsys) {
         id->cmic |= NVME_CMIC_MULTI_CTRL;
+        ctratt |= NVME_CTRATT_ENDGRPS;
+
+        id->endgidmax = cpu_to_le16(0x1);
     }
 
+    id->ctratt = cpu_to_le32(ctratt);
+
     NVME_CAP_SET_MQES(cap, 0x7ff);
     NVME_CAP_SET_CQR(cap, 1);
     NVME_CAP_SET_TO(cap, 0xf);
diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c
index 8ebab4fbce..23872a174c 100644
--- a/hw/nvme/ns.c
+++ b/hw/nvme/ns.c
@@ -592,6 +592,8 @@  static void nvme_ns_realize(DeviceState *dev, Error **errp)
     if (subsys) {
         subsys->namespaces[nsid] = ns;
 
+        ns->id_ns.endgid = cpu_to_le16(0x1);
+
         if (ns->params.detached) {
             return;
         }
@@ -607,6 +609,7 @@  static void nvme_ns_realize(DeviceState *dev, Error **errp)
 
             return;
         }
+
     }
 
     nvme_attach_ns(n, ns);
diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
index 5d221073ac..a88e0dea5c 100644
--- a/hw/nvme/nvme.h
+++ b/hw/nvme/nvme.h
@@ -45,6 +45,10 @@  typedef struct NvmeBus {
     OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
 #define SUBSYS_SLOT_RSVD (void *)0xFFFF
 
+typedef struct NvmeEnduranceGroup {
+    uint8_t event_conf;
+} NvmeEnduranceGroup;
+
 typedef struct NvmeSubsystem {
     DeviceState parent_obj;
     NvmeBus     bus;
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 8027b7126b..4abc1bbfa5 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -58,6 +58,24 @@  enum NvmeBarRegs {
     NVME_REG_PMRMSCU = offsetof(NvmeBar, pmrmscu),
 };
 
+typedef struct QEMU_PACKED NvmeEndGrpLog {
+    uint8_t  critical_warning;
+    uint8_t  rsvd[2];
+    uint8_t  avail_spare;
+    uint8_t  avail_spare_thres;
+    uint8_t  percet_used;
+    uint8_t  rsvd1[26];
+    uint64_t end_estimate[2];
+    uint64_t data_units_read[2];
+    uint64_t data_units_written[2];
+    uint64_t media_units_written[2];
+    uint64_t host_read_commands[2];
+    uint64_t host_write_commands[2];
+    uint64_t media_integrity_errors[2];
+    uint64_t no_err_info_log_entries[2];
+    uint8_t rsvd2[352];
+} NvmeEndGrpLog;
+
 enum NvmeCapShift {
     CAP_MQES_SHIFT     = 0,
     CAP_CQR_SHIFT      = 16,
@@ -1005,11 +1023,12 @@  enum {
 };
 
 enum NvmeLogIdentifier {
-    NVME_LOG_ERROR_INFO     = 0x01,
-    NVME_LOG_SMART_INFO     = 0x02,
-    NVME_LOG_FW_SLOT_INFO   = 0x03,
-    NVME_LOG_CHANGED_NSLIST = 0x04,
-    NVME_LOG_CMD_EFFECTS    = 0x05,
+    NVME_LOG_ERROR_INFO                 = 0x01,
+    NVME_LOG_SMART_INFO                 = 0x02,
+    NVME_LOG_FW_SLOT_INFO               = 0x03,
+    NVME_LOG_CHANGED_NSLIST             = 0x04,
+    NVME_LOG_CMD_EFFECTS                = 0x05,
+    NVME_LOG_ENDGRP                     = 0x09,
 };
 
 typedef struct QEMU_PACKED NvmePSD {
@@ -1091,7 +1110,10 @@  typedef struct QEMU_PACKED NvmeIdCtrl {
     uint16_t    mntmt;
     uint16_t    mxtmt;
     uint32_t    sanicap;
-    uint8_t     rsvd332[180];
+    uint8_t     rsvd332[6];
+    uint16_t    nsetidmax;
+    uint16_t    endgidmax;
+    uint8_t     rsvd342[170];
     uint8_t     sqes;
     uint8_t     cqes;
     uint16_t    maxcmd;
@@ -1134,6 +1156,7 @@  enum NvmeIdCtrlOaes {
 };
 
 enum NvmeIdCtrlCtratt {
+    NVME_CTRATT_ENDGRPS = 1 <<  4,
     NVME_CTRATT_ELBAS   = 1 << 15,
 };
 
@@ -1227,6 +1250,7 @@  enum NvmeNsAttachmentOperation {
 #define NVME_AEC_SMART(aec)         (aec & 0xff)
 #define NVME_AEC_NS_ATTR(aec)       ((aec >> 8) & 0x1)
 #define NVME_AEC_FW_ACTIVATION(aec) ((aec >> 9) & 0x1)
+#define NVME_AEC_ENDGRP_NOTICE(aec) ((aec >> 14) & 0x1)
 
 #define NVME_ERR_REC_TLER(err_rec)  (err_rec & 0xffff)
 #define NVME_ERR_REC_DULBE(err_rec) (err_rec & 0x10000)
@@ -1338,7 +1362,10 @@  typedef struct QEMU_PACKED NvmeIdNs {
     uint16_t    mssrl;
     uint32_t    mcl;
     uint8_t     msrc;
-    uint8_t     rsvd81[23];
+    uint8_t     rsvd81[18];
+    uint8_t     nsattr;
+    uint16_t    nvmsetid;
+    uint16_t    endgid;
     uint8_t     nguid[16];
     uint64_t    eui64;
     NvmeLBAF    lbaf[NVME_MAX_NLBAF];
@@ -1655,5 +1682,6 @@  static inline void _nvme_check_size(void)
     QEMU_BUILD_BUG_ON(sizeof(NvmePriCtrlCap) != 4096);
     QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlEntry) != 32);
     QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlList) != 4096);
+    QEMU_BUILD_BUG_ON(sizeof(NvmeEndGrpLog) != 512);
 }
 #endif