Message ID | 20230205094411.793816-14-smostafa@google.com |
---|---|
State | New |
Headers | show |
Series | Add stage-2 translation for SMMUv3 | expand |
Hi Mostafa, On 2/5/23 10:44, Mostafa Saleh wrote: > CMD_TLBI_S2_IPA: As S1+S2 is not enabled, for now this can be the > same as CMD_TLBI_NH_VAA. > > CMD_TLBI_S12_VMALL: Added new function to invalidate TLB by VMID. > > Signed-off-by: Mostafa Saleh <smostafa@google.com> > --- > hw/arm/smmu-common.c | 16 ++++++++++++++++ > hw/arm/smmuv3.c | 25 +++++++++++++++++++++++-- > hw/arm/trace-events | 2 ++ > include/hw/arm/smmu-common.h | 1 + > 4 files changed, 42 insertions(+), 2 deletions(-) > > diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c > index 028a60949a..28089d94a6 100644 > --- a/hw/arm/smmu-common.c > +++ b/hw/arm/smmu-common.c > @@ -133,6 +133,16 @@ static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value, > > return SMMU_IOTLB_ASID(*iotlb_key) == asid; > } > + > +static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value, > + gpointer user_data) > +{ > + uint16_t vmid = *(uint16_t *)user_data; > + SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key; > + > + return SMMU_IOTLB_VMID(*iotlb_key) == vmid; > +} > + > static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value, > gpointer user_data) > { > @@ -185,6 +195,12 @@ void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid) > g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid); > } > > +inline void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid) > +{ > + trace_smmu_iotlb_inv_vmid(vmid); > + g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid); > +} > + > /* VMSAv8-64 Translation */ > > /** > diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c > index 8b070f6bb5..2b563a5b1b 100644 > --- a/hw/arm/smmuv3.c > +++ b/hw/arm/smmuv3.c > @@ -1174,14 +1174,35 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) > case SMMU_CMD_TLBI_NH_VA: > smmuv3_s1_range_inval(bs, &cmd); > break; > + case SMMU_CMD_TLBI_S12_VMALL: > + uint16_t vmid = CMD_VMID(&cmd); > + > + if (!STAGE2_SUPPORTED(s->features)) { if you add such checks for S2, may you should consider 
adding similar ones for existing S1? > + cmd_error = SMMU_CERROR_ILL; > + break; > + } > + > + trace_smmuv3_cmdq_tlbi_s12_vmid(vmid); > + smmu_inv_notifiers_all(&s->smmu_state); > + smmu_iotlb_inv_vmid(bs, vmid); > + break; > + case SMMU_CMD_TLBI_S2_IPA: > + if (!STAGE2_SUPPORTED(s->features)) { > + cmd_error = SMMU_CERROR_ILL; > + break; > + } > + /* > + * As currently only either s1 or s2 are supported > + * we can reuse same function for s2. > + */ > + smmuv3_s1_range_inval(bs, &cmd); Shouldn't we rename the function then? Eric > + break; > case SMMU_CMD_TLBI_EL3_ALL: > case SMMU_CMD_TLBI_EL3_VA: > case SMMU_CMD_TLBI_EL2_ALL: > case SMMU_CMD_TLBI_EL2_ASID: > case SMMU_CMD_TLBI_EL2_VA: > case SMMU_CMD_TLBI_EL2_VAA: > - case SMMU_CMD_TLBI_S12_VMALL: > - case SMMU_CMD_TLBI_S2_IPA: > case SMMU_CMD_ATC_INV: > case SMMU_CMD_PRI_RESP: > case SMMU_CMD_RESUME: > diff --git a/hw/arm/trace-events b/hw/arm/trace-events > index 2dee296c8f..61e2ffade5 100644 > --- a/hw/arm/trace-events > +++ b/hw/arm/trace-events > @@ -12,6 +12,7 @@ smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, ui > smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64 > smmu_iotlb_inv_all(void) "IOTLB invalidate all" > smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d" > +smmu_iotlb_inv_vmid(uint16_t vmid) "IOTLB invalidate vmid=%d" > smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64 > smmu_inv_notifiers_mr(const char *name) "iommu mr=%s" > smmu_iotlb_lookup_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" > @@ -48,6 +49,7 @@ smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t > smmuv3_s1_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d 
addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d" > smmuv3_cmdq_tlbi_nh(void) "" > smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d" > +smmuv3_cmdq_tlbi_s12_vmid(uint16_t vmid) "vmid=%d" > smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x" > smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s" > smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s" > diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h > index 5cca1c17f5..46ba1f6329 100644 > --- a/include/hw/arm/smmu-common.h > +++ b/include/hw/arm/smmu-common.h > @@ -181,6 +181,7 @@ SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova, > uint8_t tg, uint8_t level); > void smmu_iotlb_inv_all(SMMUState *s); > void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid); > +void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid); > void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova, > uint8_t tg, uint64_t num_pages, uint8_t ttl); >
Hi Eric, On Thu, Feb 16, 2023 at 12:56:52PM +0100, Eric Auger wrote: > > @@ -1174,14 +1174,35 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) > > case SMMU_CMD_TLBI_NH_VA: > > smmuv3_s1_range_inval(bs, &cmd); > > break; > > + case SMMU_CMD_TLBI_S12_VMALL: > > + uint16_t vmid = CMD_VMID(&cmd); > > + > > + if (!STAGE2_SUPPORTED(s->features)) { > if you add such checks for S2, may you should consider adding similar > ones for existing S1? Yes, I will go through the other commands and do the same for stage-1 only commands. > > + smmu_inv_notifiers_all(&s->smmu_state); > > + smmu_iotlb_inv_vmid(bs, vmid); > > + break; > > + case SMMU_CMD_TLBI_S2_IPA: > > + if (!STAGE2_SUPPORTED(s->features)) { > > + cmd_error = SMMU_CERROR_ILL; > > + break; > > + } > > + /* > > + * As currently only either s1 or s2 are supported > > + * we can reuse same function for s2. > > + */ > > + smmuv3_s1_range_inval(bs, &cmd); > Shouldn't we rename the function then? I guess we can rename it smmuv3_s1_s2_range_inval, we will have to revisit this when nesting is supported. Thanks, Mostafa
On 2/16/23 14:58, Mostafa Saleh wrote: > Hi Eric, > > On Thu, Feb 16, 2023 at 12:56:52PM +0100, Eric Auger wrote: >>> @@ -1174,14 +1174,35 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) >>> case SMMU_CMD_TLBI_NH_VA: >>> smmuv3_s1_range_inval(bs, &cmd); >>> break; >>> + case SMMU_CMD_TLBI_S12_VMALL: >>> + uint16_t vmid = CMD_VMID(&cmd); >>> + >>> + if (!STAGE2_SUPPORTED(s->features)) { >> if you add such checks for S2, may you should consider adding similar >> ones for existing S1? > Yes, I will go through the other commands and do the same for stage-1 > only commands. > >>> + smmu_inv_notifiers_all(&s->smmu_state); >>> + smmu_iotlb_inv_vmid(bs, vmid); >>> + break; >>> + case SMMU_CMD_TLBI_S2_IPA: >>> + if (!STAGE2_SUPPORTED(s->features)) { >>> + cmd_error = SMMU_CERROR_ILL; >>> + break; >>> + } >>> + /* >>> + * As currently only either s1 or s2 are supported >>> + * we can reuse same function for s2. >>> + */ >>> + smmuv3_s1_range_inval(bs, &cmd); >> Shouldn't we rename the function then? > I guess we can rename it smmuv3_s1_s2_range_inval, we will have to > revisit this when nesting is supported. or simply smmuv3_range_inval, adding a comment specifying its is usable for both stages Eric > > Thanks, > Mostafa >
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index 028a60949a..28089d94a6 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -133,6 +133,16 @@ static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value, return SMMU_IOTLB_ASID(*iotlb_key) == asid; } + +static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value, + gpointer user_data) +{ + uint16_t vmid = *(uint16_t *)user_data; + SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key; + + return SMMU_IOTLB_VMID(*iotlb_key) == vmid; +} + static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value, gpointer user_data) { @@ -185,6 +195,12 @@ void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid) g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid); } +inline void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid) +{ + trace_smmu_iotlb_inv_vmid(vmid); + g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid); +} + /* VMSAv8-64 Translation */ /** diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 8b070f6bb5..2b563a5b1b 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -1174,14 +1174,35 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) case SMMU_CMD_TLBI_NH_VA: smmuv3_s1_range_inval(bs, &cmd); break; + case SMMU_CMD_TLBI_S12_VMALL: + uint16_t vmid = CMD_VMID(&cmd); + + if (!STAGE2_SUPPORTED(s->features)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + + trace_smmuv3_cmdq_tlbi_s12_vmid(vmid); + smmu_inv_notifiers_all(&s->smmu_state); + smmu_iotlb_inv_vmid(bs, vmid); + break; + case SMMU_CMD_TLBI_S2_IPA: + if (!STAGE2_SUPPORTED(s->features)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + /* + * As currently only either s1 or s2 are supported + * we can reuse same function for s2. 
+ */ + smmuv3_s1_range_inval(bs, &cmd); + break; case SMMU_CMD_TLBI_EL3_ALL: case SMMU_CMD_TLBI_EL3_VA: case SMMU_CMD_TLBI_EL2_ALL: case SMMU_CMD_TLBI_EL2_ASID: case SMMU_CMD_TLBI_EL2_VA: case SMMU_CMD_TLBI_EL2_VAA: - case SMMU_CMD_TLBI_S12_VMALL: - case SMMU_CMD_TLBI_S2_IPA: case SMMU_CMD_ATC_INV: case SMMU_CMD_PRI_RESP: case SMMU_CMD_RESUME: diff --git a/hw/arm/trace-events b/hw/arm/trace-events index 2dee296c8f..61e2ffade5 100644 --- a/hw/arm/trace-events +++ b/hw/arm/trace-events @@ -12,6 +12,7 @@ smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, ui smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64 smmu_iotlb_inv_all(void) "IOTLB invalidate all" smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d" +smmu_iotlb_inv_vmid(uint16_t vmid) "IOTLB invalidate vmid=%d" smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64 smmu_inv_notifiers_mr(const char *name) "iommu mr=%s" smmu_iotlb_lookup_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" @@ -48,6 +49,7 @@ smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t smmuv3_s1_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d" smmuv3_cmdq_tlbi_nh(void) "" smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d" +smmuv3_cmdq_tlbi_s12_vmid(uint16_t vmid) "vmid=%d" smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x" smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s" smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s" diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index 5cca1c17f5..46ba1f6329 100644 --- 
a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -181,6 +181,7 @@ SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint8_t level); void smmu_iotlb_inv_all(SMMUState *s); void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid); +void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid); void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova, uint8_t tg, uint64_t num_pages, uint8_t ttl);
CMD_TLBI_S2_IPA: As S1+S2 is not enabled, for now this can be the same as CMD_TLBI_NH_VAA. CMD_TLBI_S12_VMALL: Add a new function to invalidate the TLB by VMID. Signed-off-by: Mostafa Saleh <smostafa@google.com> --- hw/arm/smmu-common.c | 16 ++++++++++++++++ hw/arm/smmuv3.c | 25 +++++++++++++++++++++++-- hw/arm/trace-events | 2 ++ include/hw/arm/smmu-common.h | 1 + 4 files changed, 42 insertions(+), 2 deletions(-)