diff mbox series

[3/5] armv8: fsl-layerscape: create bypass smmu mapping for MC

Message ID 20230906160152.24185-3-laurentiu.tudor@nxp.com
State Superseded
Delegated to: Peng Fan
Headers show
Series [1/5] armv8: fsl-layerscape: make some functions static | expand

Commit Message

Laurentiu Tudor Sept. 6, 2023, 4:01 p.m. UTC
The MC, being a plain DMA master like any other device in the SoC and
being live at OS boot time, will immediately start triggering faults
as soon as the SMMU is probed, because there is no mapping in the SMMU
for the MC. Pre-create such a mapping in the SMMU; it is the OS's
responsibility to preserve it.

Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
---
 arch/arm/cpu/armv8/fsl-layerscape/soc.c       | 26 ++++++++++++++++---
 .../asm/arch-fsl-layerscape/immap_lsch3.h     |  9 +++++++
 2 files changed, 32 insertions(+), 3 deletions(-)

Comments

Robin Murphy Sept. 6, 2023, 5:21 p.m. UTC | #1
On 2023-09-06 17:01, Laurentiu Tudor wrote:
> MC being a plain DMA master as any other device in the SoC and
> being live at OS boot time, as soon as the SMMU is probed it
> will immediately start triggering faults because there is no
> mapping in the SMMU for the MC. Pre-create such a mapping in
> the SMMU, being the OS's responsibility to preserve it.

Does U-Boot enable the SMMU? AFAICS the only thing it knows how to do is 
explicitly turn it *off*, therefore programming other registers appears 
to be a complete waste of time.

All that should matter to the OS, and that it is responsible for 
upholding, is the reserved memory regions from patch #2. For instance, 
if the OS is Linux, literally the first thing arm_smmu_device_reset() 
does is rewrite all the S2CRs and SMRs without so much as looking.

Thanks,
Robin.

> Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
> ---
>   arch/arm/cpu/armv8/fsl-layerscape/soc.c       | 26 ++++++++++++++++---
>   .../asm/arch-fsl-layerscape/immap_lsch3.h     |  9 +++++++
>   2 files changed, 32 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm/cpu/armv8/fsl-layerscape/soc.c b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
> index 3bfdc3f77431..870b99838ab5 100644
> --- a/arch/arm/cpu/armv8/fsl-layerscape/soc.c
> +++ b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
> @@ -376,6 +376,18 @@ void bypass_smmu(void)
>   	val = (in_le32(SMMU_NSCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
>   	out_le32(SMMU_NSCR0, val);
>   }
> +
> +void setup_smmu_mc_bypass(int icid, int mask)
> +{
> +	u32 val;
> +
> +	val = SMMU_SMR_VALID_MASK | (icid << SMMU_SMR_ID_SHIFT) |
> +		(mask << SMMU_SMR_MASK_SHIFT);
> +	out_le32(SMMU_REG_SMR(0), val);
> +	val = SMMU_S2CR_EXIDVALID_VALID_MASK | SMMU_S2CR_TYPE_BYPASS_MASK;
> +	out_le32(SMMU_REG_S2CR(0), val);
> +}
> +
>   void fsl_lsch3_early_init_f(void)
>   {
>   	erratum_rcw_src();
> @@ -402,10 +414,18 @@ void fsl_lsch3_early_init_f(void)
>   		bypass_smmu();
>   #endif
>   
> -#if defined(CONFIG_ARCH_LS1088A) || defined(CONFIG_ARCH_LS1028A) || \
> -	defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LX2160A) || \
> -	defined(CONFIG_ARCH_LX2162A)
> +#ifdef CONFIG_ARCH_LS1028A
> +	set_icids();
> +#endif
> +
> +#if defined(CONFIG_ARCH_LS1088A) || defined(CONFIG_ARCH_LS2080A)
> +	set_icids();
> +	setup_smmu_mc_bypass(0x300, 0);
> +#endif
> +
> +#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
>   	set_icids();
> +	setup_smmu_mc_bypass(0x4000, 0);
>   #endif
>   }
>   
> diff --git a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
> index ca5e33379ba9..bec5355adaed 100644
> --- a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
> +++ b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
> @@ -190,6 +190,15 @@
>   #define SCR0_CLIENTPD_MASK		0x00000001
>   #define SCR0_USFCFG_MASK		0x00000400
>   
> +#define SMMU_REG_SMR(n)			(SMMU_BASE + 0x800 + ((n) << 2))
> +#define SMMU_REG_S2CR(n)		(SMMU_BASE + 0xc00 + ((n) << 2))
> +#define SMMU_SMR_VALID_MASK		0x80000000
> +#define SMMU_SMR_MASK_MASK		0xffff0000
> +#define SMMU_SMR_MASK_SHIFT		16
> +#define SMMU_SMR_ID_MASK		0x0000ffff
> +#define SMMU_SMR_ID_SHIFT		0
> +#define SMMU_S2CR_EXIDVALID_VALID_MASK	0x00000400
> +#define SMMU_S2CR_TYPE_BYPASS_MASK	0x00010000
>   
>   /* PCIe */
>   #define CFG_SYS_PCIE1_ADDR			(CONFIG_SYS_IMMR + 0x2400000)
Laurentiu Tudor Sept. 6, 2023, 6:10 p.m. UTC | #2
On 9/6/2023 8:21 PM, Robin Murphy wrote:
> On 2023-09-06 17:01, Laurentiu Tudor wrote:
>> MC being a plain DMA master as any other device in the SoC and
>> being live at OS boot time, as soon as the SMMU is probed it
>> will immediately start triggering faults because there is no
>> mapping in the SMMU for the MC. Pre-create such a mapping in
>> the SMMU, being the OS's responsibility to preserve it.
> 
> Does U-Boot enable the SMMU? AFAICS the only thing it knows how to do is 
> explicitly turn it *off*, therefore programming other registers appears 
> to be a complete waste of time.

No, it doesn't enable the SMMU, but it does mark an SMR as valid for the
MC FW. And the ARM SMMU driver subtly preserves it, see [1] (it's late
and I might be wrong, but I'll double check tomorrow). :-)

> All that should matter to the OS, and that it is responsible for 
> upholding, is the reserved memory regions from patch #2. For instance, 
> if the OS is Linux, literally the first thing arm_smmu_device_reset() 
> does is rewrite all the S2CRs and SMRs without so much as looking.

[1] 
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/iommu/arm/arm-smmu/arm-smmu.c#n894

---
Best Regards, Laurentiu

> 
>> Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
>> ---
>>   arch/arm/cpu/armv8/fsl-layerscape/soc.c       | 26 ++++++++++++++++---
>>   .../asm/arch-fsl-layerscape/immap_lsch3.h     |  9 +++++++
>>   2 files changed, 32 insertions(+), 3 deletions(-)
>>
>> diff --git a/arch/arm/cpu/armv8/fsl-layerscape/soc.c 
>> b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
>> index 3bfdc3f77431..870b99838ab5 100644
>> --- a/arch/arm/cpu/armv8/fsl-layerscape/soc.c
>> +++ b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
>> @@ -376,6 +376,18 @@ void bypass_smmu(void)
>>       val = (in_le32(SMMU_NSCR0) | SCR0_CLIENTPD_MASK) & 
>> ~(SCR0_USFCFG_MASK);
>>       out_le32(SMMU_NSCR0, val);
>>   }
>> +
>> +void setup_smmu_mc_bypass(int icid, int mask)
>> +{
>> +    u32 val;
>> +
>> +    val = SMMU_SMR_VALID_MASK | (icid << SMMU_SMR_ID_SHIFT) |
>> +        (mask << SMMU_SMR_MASK_SHIFT);
>> +    out_le32(SMMU_REG_SMR(0), val);
>> +    val = SMMU_S2CR_EXIDVALID_VALID_MASK | SMMU_S2CR_TYPE_BYPASS_MASK;
>> +    out_le32(SMMU_REG_S2CR(0), val);
>> +}
>> +
>>   void fsl_lsch3_early_init_f(void)
>>   {
>>       erratum_rcw_src();
>> @@ -402,10 +414,18 @@ void fsl_lsch3_early_init_f(void)
>>           bypass_smmu();
>>   #endif
>> -#if defined(CONFIG_ARCH_LS1088A) || defined(CONFIG_ARCH_LS1028A) || \
>> -    defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LX2160A) || \
>> -    defined(CONFIG_ARCH_LX2162A)
>> +#ifdef CONFIG_ARCH_LS1028A
>> +    set_icids();
>> +#endif
>> +
>> +#if defined(CONFIG_ARCH_LS1088A) || defined(CONFIG_ARCH_LS2080A)
>> +    set_icids();
>> +    setup_smmu_mc_bypass(0x300, 0);
>> +#endif
>> +
>> +#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
>>       set_icids();
>> +    setup_smmu_mc_bypass(0x4000, 0);
>>   #endif
>>   }
>> diff --git a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h 
>> b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
>> index ca5e33379ba9..bec5355adaed 100644
>> --- a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
>> +++ b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
>> @@ -190,6 +190,15 @@
>>   #define SCR0_CLIENTPD_MASK        0x00000001
>>   #define SCR0_USFCFG_MASK        0x00000400
>> +#define SMMU_REG_SMR(n)            (SMMU_BASE + 0x800 + ((n) << 2))
>> +#define SMMU_REG_S2CR(n)        (SMMU_BASE + 0xc00 + ((n) << 2))
>> +#define SMMU_SMR_VALID_MASK        0x80000000
>> +#define SMMU_SMR_MASK_MASK        0xffff0000
>> +#define SMMU_SMR_MASK_SHIFT        16
>> +#define SMMU_SMR_ID_MASK        0x0000ffff
>> +#define SMMU_SMR_ID_SHIFT        0
>> +#define SMMU_S2CR_EXIDVALID_VALID_MASK    0x00000400
>> +#define SMMU_S2CR_TYPE_BYPASS_MASK    0x00010000
>>   /* PCIe */
>>   #define CFG_SYS_PCIE1_ADDR            (CONFIG_SYS_IMMR + 0x2400000)
Robin Murphy Sept. 6, 2023, 8:09 p.m. UTC | #3
On 2023-09-06 19:10, Laurentiu Tudor wrote:
> 
> 
> On 9/6/2023 8:21 PM, Robin Murphy wrote:
>> On 2023-09-06 17:01, Laurentiu Tudor wrote:
>>> MC being a plain DMA master as any other device in the SoC and
>>> being live at OS boot time, as soon as the SMMU is probed it
>>> will immediately start triggering faults because there is no
>>> mapping in the SMMU for the MC. Pre-create such a mapping in
>>> the SMMU, being the OS's responsibility to preserve it.
>>
>> Does U-Boot enable the SMMU? AFAICS the only thing it knows how to do 
>> is explicitly turn it *off*, therefore programming other registers 
>> appears to be a complete waste of time.
> 
> No, it doesn't enable SMMU but it does mark a SMR as valid for MC FW. 
> And the ARM SMMU driver subtly preserves it, see [1] (it's late and I 
> might be wrong, but I'll double check tomorrow). :-)

No, that sets the SMR valid bit *if* the corresponding entry is 
allocated and marked as valid in the software state in smmu->smrs, which 
at probe time it isn't, because that's only just been allocated and is 
still zero-initialised. Unless, that is, 
arm_smmu_rmr_install_bypass_smr() found a reserved region and 
preallocated an entry to honour it. But even those entries are still 
constructed from scratch; we can't do anything with the existing 
SMR/S2CR register contents in general since they may be uninitialised 
random reset values, so we don't even look.

Pay no attention to the qcom_smmu_cfg_probe() hack either - that only 
exists on the promise that the relevant platforms couldn't have their 
firmware updated to use proper RMRs.

You're already doing the right thing in patch #2, so there's no need to 
waste code on doing a pointless wrong thing as well.

Thanks,
Robin.

>> All that should matter to the OS, and that it is responsible for 
>> upholding, is the reserved memory regions from patch #2. For instance, 
>> if the OS is Linux, literally the first thing arm_smmu_device_reset() 
>> does is rewrite all the S2CRs and SMRs without so much as looking.
> 
> [1] 
> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/iommu/arm/arm-smmu/arm-smmu.c#n894
> 
> ---
> Best Regards, Laurentiu
> 
>>
>>> Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
>>> ---
>>>   arch/arm/cpu/armv8/fsl-layerscape/soc.c       | 26 ++++++++++++++++---
>>>   .../asm/arch-fsl-layerscape/immap_lsch3.h     |  9 +++++++
>>>   2 files changed, 32 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/arch/arm/cpu/armv8/fsl-layerscape/soc.c 
>>> b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
>>> index 3bfdc3f77431..870b99838ab5 100644
>>> --- a/arch/arm/cpu/armv8/fsl-layerscape/soc.c
>>> +++ b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
>>> @@ -376,6 +376,18 @@ void bypass_smmu(void)
>>>       val = (in_le32(SMMU_NSCR0) | SCR0_CLIENTPD_MASK) & 
>>> ~(SCR0_USFCFG_MASK);
>>>       out_le32(SMMU_NSCR0, val);
>>>   }
>>> +
>>> +void setup_smmu_mc_bypass(int icid, int mask)
>>> +{
>>> +    u32 val;
>>> +
>>> +    val = SMMU_SMR_VALID_MASK | (icid << SMMU_SMR_ID_SHIFT) |
>>> +        (mask << SMMU_SMR_MASK_SHIFT);
>>> +    out_le32(SMMU_REG_SMR(0), val);
>>> +    val = SMMU_S2CR_EXIDVALID_VALID_MASK | SMMU_S2CR_TYPE_BYPASS_MASK;
>>> +    out_le32(SMMU_REG_S2CR(0), val);
>>> +}
>>> +
>>>   void fsl_lsch3_early_init_f(void)
>>>   {
>>>       erratum_rcw_src();
>>> @@ -402,10 +414,18 @@ void fsl_lsch3_early_init_f(void)
>>>           bypass_smmu();
>>>   #endif
>>> -#if defined(CONFIG_ARCH_LS1088A) || defined(CONFIG_ARCH_LS1028A) || \
>>> -    defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LX2160A) || \
>>> -    defined(CONFIG_ARCH_LX2162A)
>>> +#ifdef CONFIG_ARCH_LS1028A
>>> +    set_icids();
>>> +#endif
>>> +
>>> +#if defined(CONFIG_ARCH_LS1088A) || defined(CONFIG_ARCH_LS2080A)
>>> +    set_icids();
>>> +    setup_smmu_mc_bypass(0x300, 0);
>>> +#endif
>>> +
>>> +#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
>>>       set_icids();
>>> +    setup_smmu_mc_bypass(0x4000, 0);
>>>   #endif
>>>   }
>>> diff --git a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h 
>>> b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
>>> index ca5e33379ba9..bec5355adaed 100644
>>> --- a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
>>> +++ b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
>>> @@ -190,6 +190,15 @@
>>>   #define SCR0_CLIENTPD_MASK        0x00000001
>>>   #define SCR0_USFCFG_MASK        0x00000400
>>> +#define SMMU_REG_SMR(n)            (SMMU_BASE + 0x800 + ((n) << 2))
>>> +#define SMMU_REG_S2CR(n)        (SMMU_BASE + 0xc00 + ((n) << 2))
>>> +#define SMMU_SMR_VALID_MASK        0x80000000
>>> +#define SMMU_SMR_MASK_MASK        0xffff0000
>>> +#define SMMU_SMR_MASK_SHIFT        16
>>> +#define SMMU_SMR_ID_MASK        0x0000ffff
>>> +#define SMMU_SMR_ID_SHIFT        0
>>> +#define SMMU_S2CR_EXIDVALID_VALID_MASK    0x00000400
>>> +#define SMMU_S2CR_TYPE_BYPASS_MASK    0x00010000
>>>   /* PCIe */
>>>   #define CFG_SYS_PCIE1_ADDR            (CONFIG_SYS_IMMR + 0x2400000)
Laurentiu Tudor Sept. 15, 2023, 12:16 p.m. UTC | #4
On 9/6/2023 11:09 PM, Robin Murphy wrote:
> On 2023-09-06 19:10, Laurentiu Tudor wrote:
>>
>>
>> On 9/6/2023 8:21 PM, Robin Murphy wrote:
>>> On 2023-09-06 17:01, Laurentiu Tudor wrote:
>>>> MC being a plain DMA master as any other device in the SoC and
>>>> being live at OS boot time, as soon as the SMMU is probed it
>>>> will immediately start triggering faults because there is no
>>>> mapping in the SMMU for the MC. Pre-create such a mapping in
>>>> the SMMU, being the OS's responsibility to preserve it.
>>>
>>> Does U-Boot enable the SMMU? AFAICS the only thing it knows how to do
>>> is explicitly turn it *off*, therefore programming other registers
>>> appears to be a complete waste of time.
>>
>> No, it doesn't enable SMMU but it does mark a SMR as valid for MC FW.
>> And the ARM SMMU driver subtly preserves it, see [1] (it's late and I
>> might be wrong, but I'll double check tomorrow). :-)
>
> No, that sets the SMR valid bit *if* the corresponding entry is
> allocated and marked as valid in the software state in smmu->smrs, which
> at probe time it isn't, because that's only just been allocated and is
> still zero-initialised. Unless, that is,
> arm_smmu_rmr_install_bypass_smr() found a reserved region and
> preallocated an entry to honour it. But even those entries are still
> constructed from scratch; we can't do anything with the existing
> SMR/S2CR register contents in general since they may be uninitialised
> random reset values, so we don't even look.
>
> Pay no attention to the qcom_smmu_cfg_probe() hack either - that only
> exists on the promise that the relevant platforms couldn't have their
> firmware updated to use proper RMRs.
>
> You're already doing the right thing in patch #2, so there's no need to
> waste code on doing a pointless wrong thing as well.
>

Spent quite a while (sorry!) trying to remember why this was needed but
don't have a clear answer. I suspect that the hack was used with early
IORT RMR implementations.
Anyway, I retested without it, both DT and ACPI, and it looks good. Will
drop the patch and resubmit the series.

---
Thanks & Best Regards, Laurentiu

>
>>> All that should matter to the OS, and that it is responsible for
>>> upholding, is the reserved memory regions from patch #2. For
>>> instance, if the OS is Linux, literally the first thing
>>> arm_smmu_device_reset() does is rewrite all the S2CRs and SMRs
>>> without so much as looking.
>>
>> [1]
>> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/iommu/arm/arm-smmu/arm-smmu.c#n894
>>
>>
>> ---
>> Best Regards, Laurentiu
>>
>>>
>>>> Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
>>>> ---
>>>>   arch/arm/cpu/armv8/fsl-layerscape/soc.c       | 26
>>>> ++++++++++++++++---
>>>>   .../asm/arch-fsl-layerscape/immap_lsch3.h     |  9 +++++++
>>>>   2 files changed, 32 insertions(+), 3 deletions(-)
>>>>
>>>> diff --git a/arch/arm/cpu/armv8/fsl-layerscape/soc.c
>>>> b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
>>>> index 3bfdc3f77431..870b99838ab5 100644
>>>> --- a/arch/arm/cpu/armv8/fsl-layerscape/soc.c
>>>> +++ b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
>>>> @@ -376,6 +376,18 @@ void bypass_smmu(void)
>>>>       val = (in_le32(SMMU_NSCR0) | SCR0_CLIENTPD_MASK) &
>>>> ~(SCR0_USFCFG_MASK);
>>>>       out_le32(SMMU_NSCR0, val);
>>>>   }
>>>> +
>>>> +void setup_smmu_mc_bypass(int icid, int mask)
>>>> +{
>>>> +    u32 val;
>>>> +
>>>> +    val = SMMU_SMR_VALID_MASK | (icid << SMMU_SMR_ID_SHIFT) |
>>>> +        (mask << SMMU_SMR_MASK_SHIFT);
>>>> +    out_le32(SMMU_REG_SMR(0), val);
>>>> +    val = SMMU_S2CR_EXIDVALID_VALID_MASK | SMMU_S2CR_TYPE_BYPASS_MASK;
>>>> +    out_le32(SMMU_REG_S2CR(0), val);
>>>> +}
>>>> +
>>>>   void fsl_lsch3_early_init_f(void)
>>>>   {
>>>>       erratum_rcw_src();
>>>> @@ -402,10 +414,18 @@ void fsl_lsch3_early_init_f(void)
>>>>           bypass_smmu();
>>>>   #endif
>>>> -#if defined(CONFIG_ARCH_LS1088A) || defined(CONFIG_ARCH_LS1028A) || \
>>>> -    defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LX2160A) || \
>>>> -    defined(CONFIG_ARCH_LX2162A)
>>>> +#ifdef CONFIG_ARCH_LS1028A
>>>> +    set_icids();
>>>> +#endif
>>>> +
>>>> +#if defined(CONFIG_ARCH_LS1088A) || defined(CONFIG_ARCH_LS2080A)
>>>> +    set_icids();
>>>> +    setup_smmu_mc_bypass(0x300, 0);
>>>> +#endif
>>>> +
>>>> +#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
>>>>       set_icids();
>>>> +    setup_smmu_mc_bypass(0x4000, 0);
>>>>   #endif
>>>>   }
>>>> diff --git a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
>>>> b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
>>>> index ca5e33379ba9..bec5355adaed 100644
>>>> --- a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
>>>> +++ b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
>>>> @@ -190,6 +190,15 @@
>>>>   #define SCR0_CLIENTPD_MASK        0x00000001
>>>>   #define SCR0_USFCFG_MASK        0x00000400
>>>> +#define SMMU_REG_SMR(n)            (SMMU_BASE + 0x800 + ((n) << 2))
>>>> +#define SMMU_REG_S2CR(n)        (SMMU_BASE + 0xc00 + ((n) << 2))
>>>> +#define SMMU_SMR_VALID_MASK        0x80000000
>>>> +#define SMMU_SMR_MASK_MASK        0xffff0000
>>>> +#define SMMU_SMR_MASK_SHIFT        16
>>>> +#define SMMU_SMR_ID_MASK        0x0000ffff
>>>> +#define SMMU_SMR_ID_SHIFT        0
>>>> +#define SMMU_S2CR_EXIDVALID_VALID_MASK    0x00000400
>>>> +#define SMMU_S2CR_TYPE_BYPASS_MASK    0x00010000
>>>>   /* PCIe */
>>>>   #define CFG_SYS_PCIE1_ADDR            (CONFIG_SYS_IMMR + 0x2400000)
diff mbox series

Patch

diff --git a/arch/arm/cpu/armv8/fsl-layerscape/soc.c b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
index 3bfdc3f77431..870b99838ab5 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/soc.c
+++ b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
@@ -376,6 +376,18 @@  void bypass_smmu(void)
 	val = (in_le32(SMMU_NSCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
 	out_le32(SMMU_NSCR0, val);
 }
+
+void setup_smmu_mc_bypass(int icid, int mask)
+{
+	u32 val;
+
+	val = SMMU_SMR_VALID_MASK | (icid << SMMU_SMR_ID_SHIFT) |
+		(mask << SMMU_SMR_MASK_SHIFT);
+	out_le32(SMMU_REG_SMR(0), val);
+	val = SMMU_S2CR_EXIDVALID_VALID_MASK | SMMU_S2CR_TYPE_BYPASS_MASK;
+	out_le32(SMMU_REG_S2CR(0), val);
+}
+
 void fsl_lsch3_early_init_f(void)
 {
 	erratum_rcw_src();
@@ -402,10 +414,18 @@  void fsl_lsch3_early_init_f(void)
 		bypass_smmu();
 #endif
 
-#if defined(CONFIG_ARCH_LS1088A) || defined(CONFIG_ARCH_LS1028A) || \
-	defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LX2160A) || \
-	defined(CONFIG_ARCH_LX2162A)
+#ifdef CONFIG_ARCH_LS1028A
+	set_icids();
+#endif
+
+#if defined(CONFIG_ARCH_LS1088A) || defined(CONFIG_ARCH_LS2080A)
+	set_icids();
+	setup_smmu_mc_bypass(0x300, 0);
+#endif
+
+#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
 	set_icids();
+	setup_smmu_mc_bypass(0x4000, 0);
 #endif
 }
 
diff --git a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
index ca5e33379ba9..bec5355adaed 100644
--- a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
+++ b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
@@ -190,6 +190,15 @@ 
 #define SCR0_CLIENTPD_MASK		0x00000001
 #define SCR0_USFCFG_MASK		0x00000400
 
+#define SMMU_REG_SMR(n)			(SMMU_BASE + 0x800 + ((n) << 2))
+#define SMMU_REG_S2CR(n)		(SMMU_BASE + 0xc00 + ((n) << 2))
+#define SMMU_SMR_VALID_MASK		0x80000000
+#define SMMU_SMR_MASK_MASK		0xffff0000
+#define SMMU_SMR_MASK_SHIFT		16
+#define SMMU_SMR_ID_MASK		0x0000ffff
+#define SMMU_SMR_ID_SHIFT		0
+#define SMMU_S2CR_EXIDVALID_VALID_MASK	0x00000400
+#define SMMU_S2CR_TYPE_BYPASS_MASK	0x00010000
 
 /* PCIe */
 #define CFG_SYS_PCIE1_ADDR			(CONFIG_SYS_IMMR + 0x2400000)