diff mbox series

[RFC,24/45] KVM: arm64: smmu-v3: Setup stream table

Message ID 20230201125328.2186498-25-jean-philippe@linaro.org (mailing list archive)
State New, archived
Headers show
Series KVM: Arm SMMUv3 driver for pKVM | expand

Commit Message

Jean-Philippe Brucker Feb. 1, 2023, 12:53 p.m. UTC
Map the stream table allocated by the host into the hypervisor address
space. When the host mappings are finalized, the table is unmapped from
the host. Depending on the host configuration, the stream table may have
one or two levels. Populate the level-2 stream table lazily.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 include/kvm/arm_smmu_v3.h                   |   4 +
 arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c | 133 +++++++++++++++++++-
 2 files changed, 136 insertions(+), 1 deletion(-)

Comments

Mostafa Saleh Jan. 16, 2024, 8:59 a.m. UTC | #1
Hi Jean,

On Wed, Feb 1, 2023 at 12:59 PM Jean-Philippe Brucker
<jean-philippe@linaro.org> wrote:
>
> Map the stream table allocated by the host into the hypervisor address
> space. When the host mappings are finalized, the table is unmapped from
> the host. Depending on the host configuration, the stream table may have
> one or two levels. Populate the level-2 stream table lazily.
>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
> ---
>  include/kvm/arm_smmu_v3.h                   |   4 +
>  arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c | 133 +++++++++++++++++++-
>  2 files changed, 136 insertions(+), 1 deletion(-)
>
> diff --git a/include/kvm/arm_smmu_v3.h b/include/kvm/arm_smmu_v3.h
> index da36737bc1e0..fc67a3bf5709 100644
> --- a/include/kvm/arm_smmu_v3.h
> +++ b/include/kvm/arm_smmu_v3.h
> @@ -24,6 +24,10 @@ struct hyp_arm_smmu_v3_device {
>         u32                     cmdq_prod;
>         u64                     *cmdq_base;
>         size_t                  cmdq_log2size;
> +       u64                     *strtab_base;
> +       size_t                  strtab_num_entries;
> +       size_t                  strtab_num_l1_entries;
> +       u8                      strtab_split;
>  };
>
>  extern size_t kvm_nvhe_sym(kvm_hyp_arm_smmu_v3_count);
> diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c b/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
> index 36ee5724f36f..021bebebd40c 100644
> --- a/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
> +++ b/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
> @@ -141,7 +141,6 @@ static int smmu_sync_cmd(struct hyp_arm_smmu_v3_device *smmu)
>         return smmu_wait_event(smmu, smmu_cmdq_empty(smmu));
>  }
>
> -__maybe_unused
>  static int smmu_send_cmd(struct hyp_arm_smmu_v3_device *smmu,
>                          struct arm_smmu_cmdq_ent *cmd)
>  {
> @@ -153,6 +152,82 @@ static int smmu_send_cmd(struct hyp_arm_smmu_v3_device *smmu,
>         return smmu_sync_cmd(smmu);
>  }
>
> +__maybe_unused
> +static int smmu_sync_ste(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
> +{
> +       struct arm_smmu_cmdq_ent cmd = {
> +               .opcode = CMDQ_OP_CFGI_STE,
> +               .cfgi.sid = sid,
> +               .cfgi.leaf = true,
> +       };
> +
> +       return smmu_send_cmd(smmu, &cmd);
> +}
> +
I see the page tables are properly configured for ARM_SMMU_FEAT_COHERENCY but no
handling for the STE or CMDQ, I believe here we should have something as:
if (!(smmu->features & ARM_SMMU_FEAT_COHERENCY))
        kvm_flush_dcache_to_poc(step, STRTAB_STE_DWORDS << 3);

Similarly in "smmu_add_cmd" for the command queue. Or use NC mapping
(which doesn't exist
upstream as far as I can see)

Thanks,
Mostafa
Jean-Philippe Brucker Jan. 23, 2024, 7:45 p.m. UTC | #2
Hi Mostafa,

On Tue, Jan 16, 2024 at 08:59:41AM +0000, Mostafa Saleh wrote:
> > +__maybe_unused
> > +static int smmu_sync_ste(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
> > +{
> > +       struct arm_smmu_cmdq_ent cmd = {
> > +               .opcode = CMDQ_OP_CFGI_STE,
> > +               .cfgi.sid = sid,
> > +               .cfgi.leaf = true,
> > +       };
> > +
> > +       return smmu_send_cmd(smmu, &cmd);
> > +}
> > +
> I see the page tables are properly configured for ARM_SMMU_FEAT_COHERENCY but no
> handling for the STE or CMDQ, I believe here we should have something as:
> if (!(smmu->features & ARM_SMMU_FEAT_COHERENCY))
>         kvm_flush_dcache_to_poc(step, STRTAB_STE_DWORDS << 3);
> 
> Similarly in "smmu_add_cmd" for the command queue. Or use NC mapping
> (which doesn't exist
> upstream as far as I can see)

Right, the host driver seems to do this. If I'm following correctly we end
up with dma_direct_alloc() calling pgprot_dmacoherent() and get
MT_NORMAL_NC, when the SMMU is declared non-coherent in DT/IORT.

So we'd get mismatched attributes if hyp is then mapping these structures
cacheable, but I don't remember how that works exactly. Might be fine
since host donates the pages to hyp and we'd have a cache flush in
between. I'll have to read up on that.

Regardless, mapping NC seems cleaner, more readable. I'll see if I can add
that attribute to kvm_pgtable_hyp_map().

Thanks,
Jean
Mostafa Saleh Feb. 16, 2024, 12:19 p.m. UTC | #3
On Tue, Jan 23, 2024 at 7:45 PM Jean-Philippe Brucker
<jean-philippe@linaro.org> wrote:
>
> Hi Mostafa,
>
> On Tue, Jan 16, 2024 at 08:59:41AM +0000, Mostafa Saleh wrote:
> > > +__maybe_unused
> > > +static int smmu_sync_ste(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
> > > +{
> > > +       struct arm_smmu_cmdq_ent cmd = {
> > > +               .opcode = CMDQ_OP_CFGI_STE,
> > > +               .cfgi.sid = sid,
> > > +               .cfgi.leaf = true,
> > > +       };
> > > +
> > > +       return smmu_send_cmd(smmu, &cmd);
> > > +}
> > > +
> > I see the page tables are properly configured for ARM_SMMU_FEAT_COHERENCY but no
> > handling for the STE or CMDQ, I believe here we should have something as:
> > if (!(smmu->features & ARM_SMMU_FEAT_COHERENCY))
> >         kvm_flush_dcache_to_poc(step, STRTAB_STE_DWORDS << 3);
> >
> > Similarly in "smmu_add_cmd" for the command queue. Or use NC mapping
> > (which doesn't exist
> > upstream as far as I can see)
>
> Right, the host driver seems to do this. If I'm following correctly we end
> up with dma_direct_alloc() calling pgprot_dmacoherent() and get
> MT_NORMAL_NC, when the SMMU is declared non-coherent in DT/IORT.
>
> So we'd get mismatched attributes if hyp is then mapping these structures
> cacheable, but I don't remember how that works exactly. Might be fine
> since host donates the pages to hyp and we'd have a cache flush in
> between. I'll have to read up on that.

I guess that is not enough, as the hypervisor writes the STE/CMDQ at any time.

> Regardless, mapping NC seems cleaner, more readable. I'll see if I can add
> that attribute to kvm_pgtable_hyp_map().

There is a patch for that already in Android
https://android.googlesource.com/kernel/common/+/636c912401dec4d178f6cdf6073f546b15828cf7%5E%21/#F0

But I guess as a beginning CMO is enough; I have this POC for it
https://android-kvm.googlesource.com/linux/+/193b027de376317eb8daa4eb207badaa1d6fda4a%5E%21/#F0

Thanks,
Mostafa
Jean-Philippe Brucker Feb. 26, 2024, 2:13 p.m. UTC | #4
On Fri, Feb 16, 2024 at 12:19:01PM +0000, Mostafa Saleh wrote:
> On Tue, Jan 23, 2024 at 7:45 PM Jean-Philippe Brucker
> <jean-philippe@linaro.org> wrote:
> >
> > Hi Mostafa,
> >
> > On Tue, Jan 16, 2024 at 08:59:41AM +0000, Mostafa Saleh wrote:
> > > > +__maybe_unused
> > > > +static int smmu_sync_ste(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
> > > > +{
> > > > +       struct arm_smmu_cmdq_ent cmd = {
> > > > +               .opcode = CMDQ_OP_CFGI_STE,
> > > > +               .cfgi.sid = sid,
> > > > +               .cfgi.leaf = true,
> > > > +       };
> > > > +
> > > > +       return smmu_send_cmd(smmu, &cmd);
> > > > +}
> > > > +
> > > I see the page tables are properly configured for ARM_SMMU_FEAT_COHERENCY but no
> > > handling for the STE or CMDQ, I believe here we should have something as:
> > > if (!(smmu->features & ARM_SMMU_FEAT_COHERENCY))
> > >         kvm_flush_dcache_to_poc(step, STRTAB_STE_DWORDS << 3);
> > >
> > > Similarly in "smmu_add_cmd" for the command queue. Or use NC mapping
> > > (which doesn't exist
> > > upstream as far as I can see)
> >
> > Right, the host driver seems to do this. If I'm following correctly we end
> > up with dma_direct_alloc() calling pgprot_dmacoherent() and get
> > MT_NORMAL_NC, when the SMMU is declared non-coherent in DT/IORT.
> >
> > So we'd get mismatched attributes if hyp is then mapping these structures
> > cacheable, but I don't remember how that works exactly. Might be fine
> > since host donates the pages to hyp and we'd have a cache flush in
> > between. I'll have to read up on that.
> 
> I guess that is not enough, as the hypervisor writes the STE/CMDQ at any time.
> 
> > Regardless, mapping NC seems cleaner, more readable. I'll see if I can add
> > that attribute to kvm_pgtable_hyp_map().
> 
> There is a patch for that already in Android
> https://android.googlesource.com/kernel/common/+/636c912401dec4d178f6cdf6073f546b15828cf7%5E%21/#F0

Nice, I've added this (rather than CMO, to avoid mismatched attributes)
but don't have the hardware to test it:

diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c b/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
index 4b0b70017f59..e43011b51ef4 100644
--- a/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
+++ b/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
@@ -268,12 +268,17 @@ static int smmu_init_registers(struct hyp_arm_smmu_v3_device *smmu)
 }
 
 /* Transfer ownership of structures from host to hyp */
-static void *smmu_take_pages(u64 base, size_t size)
+static void *smmu_take_pages(struct hyp_arm_smmu_v3_device *smmu, u64 base,
+			     size_t size)
 {
 	void *hyp_ptr;
+	enum kvm_pgtable_prot prot = PAGE_HYP;
+
+	if (!(smmu->features & ARM_SMMU_FEAT_COHERENCY))
+		prot |= KVM_PGTABLE_PROT_NC;
 
 	hyp_ptr = hyp_phys_to_virt(base);
-	if (pkvm_create_mappings(hyp_ptr, hyp_ptr + size, PAGE_HYP))
+	if (pkvm_create_mappings(hyp_ptr, hyp_ptr + size, prot))
 		return NULL;
 
 	return hyp_ptr;
@@ -293,7 +298,7 @@ static int smmu_init_cmdq(struct hyp_arm_smmu_v3_device *smmu)
 	cmdq_size = cmdq_nr_entries * CMDQ_ENT_DWORDS * 8;
 
 	cmdq_base &= Q_BASE_ADDR_MASK;
-	smmu->cmdq_base = smmu_take_pages(cmdq_base, cmdq_size);
+	smmu->cmdq_base = smmu_take_pages(smmu, cmdq_base, cmdq_size);
 	if (!smmu->cmdq_base)
 		return -EINVAL;
 
@@ -350,7 +355,7 @@ static int smmu_init_strtab(struct hyp_arm_smmu_v3_device *smmu)
 	}
 
 	strtab_base &= STRTAB_BASE_ADDR_MASK;
-	smmu->strtab_base = smmu_take_pages(strtab_base, strtab_size);
+	smmu->strtab_base = smmu_take_pages(smmu, strtab_base, strtab_size);
 	if (!smmu->strtab_base)
 		return -EINVAL;
Mostafa Saleh March 6, 2024, 12:51 p.m. UTC | #5
On Mon, Feb 26, 2024 at 02:13:52PM +0000, Jean-Philippe Brucker wrote:
> On Fri, Feb 16, 2024 at 12:19:01PM +0000, Mostafa Saleh wrote:
> > On Tue, Jan 23, 2024 at 7:45 PM Jean-Philippe Brucker
> > <jean-philippe@linaro.org> wrote:
> > >
> > > Hi Mostafa,
> > >
> > > On Tue, Jan 16, 2024 at 08:59:41AM +0000, Mostafa Saleh wrote:
> > > > > +__maybe_unused
> > > > > +static int smmu_sync_ste(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
> > > > > +{
> > > > > +       struct arm_smmu_cmdq_ent cmd = {
> > > > > +               .opcode = CMDQ_OP_CFGI_STE,
> > > > > +               .cfgi.sid = sid,
> > > > > +               .cfgi.leaf = true,
> > > > > +       };
> > > > > +
> > > > > +       return smmu_send_cmd(smmu, &cmd);
> > > > > +}
> > > > > +
> > > > I see the page tables are properly configured for ARM_SMMU_FEAT_COHERENCY but no
> > > > handling for the STE or CMDQ, I believe here we should have something as:
> > > > if (!(smmu->features & ARM_SMMU_FEAT_COHERENCY))
> > > >         kvm_flush_dcache_to_poc(step, STRTAB_STE_DWORDS << 3);
> > > >
> > > > Similarly in "smmu_add_cmd" for the command queue. Or use NC mapping
> > > > (which doesn't exist
> > > > upstream as far as I can see)
> > >
> > > Right, the host driver seems to do this. If I'm following correctly we end
> > > up with dma_direct_alloc() calling pgprot_dmacoherent() and get
> > > MT_NORMAL_NC, when the SMMU is declared non-coherent in DT/IORT.
> > >
> > > So we'd get mismatched attributes if hyp is then mapping these structures
> > > cacheable, but I don't remember how that works exactly. Might be fine
> > > since host donates the pages to hyp and we'd have a cache flush in
> > > between. I'll have to read up on that.
> > 
> > I guess that is not enough, as the hypervisor writes the STE/CMDQ at any time.
> > 
> > > Regardless, mapping NC seems cleaner, more readable. I'll see if I can add
> > > that attribute to kvm_pgtable_hyp_map().
> > 
> > There is a patch for that already in Android
> > https://android.googlesource.com/kernel/common/+/636c912401dec4d178f6cdf6073f546b15828cf7%5E%21/#F0
> 
> Nice, I've added this (rather than CMO, to avoid mismatched attributes)
> but don't have the hardware to test it:
> 
> diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c b/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
> index 4b0b70017f59..e43011b51ef4 100644
> --- a/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
> +++ b/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
> @@ -268,12 +268,17 @@ static int smmu_init_registers(struct hyp_arm_smmu_v3_device *smmu)
>  }
>  
>  /* Transfer ownership of structures from host to hyp */
> -static void *smmu_take_pages(u64 base, size_t size)
> +static void *smmu_take_pages(struct hyp_arm_smmu_v3_device *smmu, u64 base,
> +			     size_t size)
>  {
>  	void *hyp_ptr;
> +	enum kvm_pgtable_prot prot = PAGE_HYP;
> +
> +	if (!(smmu->features & ARM_SMMU_FEAT_COHERENCY))
> +		prot |= KVM_PGTABLE_PROT_NC;
>  
>  	hyp_ptr = hyp_phys_to_virt(base);
> -	if (pkvm_create_mappings(hyp_ptr, hyp_ptr + size, PAGE_HYP))
> +	if (pkvm_create_mappings(hyp_ptr, hyp_ptr + size, prot))
>  		return NULL;
>  
>  	return hyp_ptr;
> @@ -293,7 +298,7 @@ static int smmu_init_cmdq(struct hyp_arm_smmu_v3_device *smmu)
>  	cmdq_size = cmdq_nr_entries * CMDQ_ENT_DWORDS * 8;
>  
>  	cmdq_base &= Q_BASE_ADDR_MASK;
> -	smmu->cmdq_base = smmu_take_pages(cmdq_base, cmdq_size);
> +	smmu->cmdq_base = smmu_take_pages(smmu, cmdq_base, cmdq_size);
>  	if (!smmu->cmdq_base)
>  		return -EINVAL;
>  
> @@ -350,7 +355,7 @@ static int smmu_init_strtab(struct hyp_arm_smmu_v3_device *smmu)
>  	}
>  
>  	strtab_base &= STRTAB_BASE_ADDR_MASK;
> -	smmu->strtab_base = smmu_take_pages(strtab_base, strtab_size);
> +	smmu->strtab_base = smmu_take_pages(smmu, strtab_base, strtab_size);
>  	if (!smmu->strtab_base)
>  		return -EINVAL;

Thanks, that is missing the L2 for the STE, but I guess for that we can
just CMO for now, as the HW doesn't update it, unlike the CMDQ which must
be mapped as NC and CMO won't be enough.

I am investigating to see if we can map the memory donated from the host
on demand with different prot; in that case iommu_donate_pages can return
memory with the different attributes.

Thanks,
Mostafa
diff mbox series

Patch

diff --git a/include/kvm/arm_smmu_v3.h b/include/kvm/arm_smmu_v3.h
index da36737bc1e0..fc67a3bf5709 100644
--- a/include/kvm/arm_smmu_v3.h
+++ b/include/kvm/arm_smmu_v3.h
@@ -24,6 +24,10 @@  struct hyp_arm_smmu_v3_device {
 	u32			cmdq_prod;
 	u64			*cmdq_base;
 	size_t			cmdq_log2size;
+	u64			*strtab_base;
+	size_t			strtab_num_entries;
+	size_t			strtab_num_l1_entries;
+	u8			strtab_split;
 };
 
 extern size_t kvm_nvhe_sym(kvm_hyp_arm_smmu_v3_count);
diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c b/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
index 36ee5724f36f..021bebebd40c 100644
--- a/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
+++ b/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
@@ -141,7 +141,6 @@  static int smmu_sync_cmd(struct hyp_arm_smmu_v3_device *smmu)
 	return smmu_wait_event(smmu, smmu_cmdq_empty(smmu));
 }
 
-__maybe_unused
 static int smmu_send_cmd(struct hyp_arm_smmu_v3_device *smmu,
 			 struct arm_smmu_cmdq_ent *cmd)
 {
@@ -153,6 +152,82 @@  static int smmu_send_cmd(struct hyp_arm_smmu_v3_device *smmu,
 	return smmu_sync_cmd(smmu);
 }
 
+__maybe_unused
+static int smmu_sync_ste(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
+{
+	struct arm_smmu_cmdq_ent cmd = {
+		.opcode = CMDQ_OP_CFGI_STE,
+		.cfgi.sid = sid,
+		.cfgi.leaf = true,
+	};
+
+	return smmu_send_cmd(smmu, &cmd);
+}
+
+static int smmu_alloc_l2_strtab(struct hyp_arm_smmu_v3_device *smmu, u32 idx)
+{
+	void *table;
+	u64 l2ptr, span;
+
+	/* Leaf tables must be page-sized */
+	if (smmu->strtab_split + ilog2(STRTAB_STE_DWORDS) + 3 != PAGE_SHIFT)
+		return -EINVAL;
+
+	span = smmu->strtab_split + 1;
+	if (WARN_ON(span < 1 || span > 11))
+		return -EINVAL;
+
+	table = kvm_iommu_donate_page();
+	if (!table)
+		return -ENOMEM;
+
+	l2ptr = hyp_virt_to_phys(table);
+	if (l2ptr & (~STRTAB_L1_DESC_L2PTR_MASK | ~PAGE_MASK))
+		return -EINVAL;
+
+	/* Ensure the empty stream table is visible before the descriptor write */
+	wmb();
+
+	if ((cmpxchg64_relaxed(&smmu->strtab_base[idx], 0, l2ptr | span) != 0))
+		kvm_iommu_reclaim_page(table);
+
+	return 0;
+}
+
+__maybe_unused
+static u64 *smmu_get_ste_ptr(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
+{
+	u32 idx;
+	int ret;
+	u64 l1std, span, *base;
+
+	if (sid >= smmu->strtab_num_entries)
+		return NULL;
+	sid = array_index_nospec(sid, smmu->strtab_num_entries);
+
+	if (!smmu->strtab_split)
+		return smmu->strtab_base + sid * STRTAB_STE_DWORDS;
+
+	idx = sid >> smmu->strtab_split;
+	l1std = smmu->strtab_base[idx];
+	if (!l1std) {
+		ret = smmu_alloc_l2_strtab(smmu, idx);
+		if (ret)
+			return NULL;
+		l1std = smmu->strtab_base[idx];
+		if (WARN_ON(!l1std))
+			return NULL;
+	}
+
+	span = l1std & STRTAB_L1_DESC_SPAN;
+	idx = sid & ((1 << smmu->strtab_split) - 1);
+	if (!span || idx >= (1 << (span - 1)))
+		return NULL;
+
+	base = hyp_phys_to_virt(l1std & STRTAB_L1_DESC_L2PTR_MASK);
+	return base + idx * STRTAB_STE_DWORDS;
+}
+
 static int smmu_init_registers(struct hyp_arm_smmu_v3_device *smmu)
 {
 	u64 val, old;
@@ -221,6 +296,58 @@  static int smmu_init_cmdq(struct hyp_arm_smmu_v3_device *smmu)
 	return 0;
 }
 
+static int smmu_init_strtab(struct hyp_arm_smmu_v3_device *smmu)
+{
+	u64 strtab_base;
+	size_t strtab_size;
+	u32 strtab_cfg, fmt;
+	int split, log2size;
+
+	strtab_base = readq_relaxed(smmu->base + ARM_SMMU_STRTAB_BASE);
+	if (strtab_base & ~(STRTAB_BASE_ADDR_MASK | STRTAB_BASE_RA))
+		return -EINVAL;
+
+	strtab_cfg = readl_relaxed(smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
+	if (strtab_cfg & ~(STRTAB_BASE_CFG_FMT | STRTAB_BASE_CFG_SPLIT |
+			   STRTAB_BASE_CFG_LOG2SIZE))
+		return -EINVAL;
+
+	fmt = FIELD_GET(STRTAB_BASE_CFG_FMT, strtab_cfg);
+	split = FIELD_GET(STRTAB_BASE_CFG_SPLIT, strtab_cfg);
+	log2size = FIELD_GET(STRTAB_BASE_CFG_LOG2SIZE, strtab_cfg);
+
+	smmu->strtab_split = split;
+	smmu->strtab_num_entries = 1 << log2size;
+
+	switch (fmt) {
+	case STRTAB_BASE_CFG_FMT_LINEAR:
+		if (split)
+			return -EINVAL;
+		smmu->strtab_num_l1_entries = smmu->strtab_num_entries;
+		strtab_size = smmu->strtab_num_l1_entries *
+			      STRTAB_STE_DWORDS * 8;
+		break;
+	case STRTAB_BASE_CFG_FMT_2LVL:
+		if (split != 6 && split != 8 && split != 10)
+			return -EINVAL;
+		smmu->strtab_num_l1_entries = 1 << max(0, log2size - split);
+		strtab_size = smmu->strtab_num_l1_entries *
+			      STRTAB_L1_DESC_DWORDS * 8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	strtab_base &= STRTAB_BASE_ADDR_MASK;
+	smmu->strtab_base = smmu_take_pages(strtab_base, strtab_size);
+	if (!smmu->strtab_base)
+		return -EINVAL;
+
+	/* Disable all STEs */
+	memset(smmu->strtab_base, 0, strtab_size);
+	return 0;
+}
+
 static int smmu_init_device(struct hyp_arm_smmu_v3_device *smmu)
 {
 	int ret;
@@ -241,6 +368,10 @@  static int smmu_init_device(struct hyp_arm_smmu_v3_device *smmu)
 	if (ret)
 		return ret;
 
+	ret = smmu_init_strtab(smmu);
+	if (ret)
+		return ret;
+
 	return 0;
 }