
[v2,02/17] powerpc/cell: Move data segment faulting code out of cell platform

Message ID: 1412073306-13812-3-git-send-email-mikey@neuling.org (mailing list archive)
State: Changes Requested

Commit Message

Michael Neuling Sept. 30, 2014, 10:34 a.m. UTC
From: Ian Munsie <imunsie@au1.ibm.com>

__spu_trap_data_seg() currently contains code to determine the VSID and ESID
required for a particular EA and mm struct.

This code is generically useful for other co-processors.  This moves the code
out of the cell platform so it can be used by other powerpc code.  It also adds 1TB
segment handling which Cell didn't have.

Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
 arch/powerpc/include/asm/mmu-hash64.h  |  7 ++++-
 arch/powerpc/mm/copro_fault.c          | 48 ++++++++++++++++++++++++++++++++++
 arch/powerpc/mm/slb.c                  |  3 ---
 arch/powerpc/platforms/cell/spu_base.c | 41 +++--------------------------
 4 files changed, 58 insertions(+), 41 deletions(-)

Comments

Michael Ellerman Oct. 1, 2014, 6:47 a.m. UTC | #1
On Tue, 2014-09-30 at 10:34:51 UTC, Michael Neuling wrote:
> From: Ian Munsie <imunsie@au1.ibm.com>
> 
> __spu_trap_data_seg() currently contains code to determine the VSID and ESID
> required for a particular EA and mm struct.
> 
> This code is generically useful for other co-processors.  This moves the code
> out of the cell platform so it can be used by other powerpc code.  It also adds 1TB
> segment handling which Cell didn't have.

I'm not loving this.

For starters the name "copro_data_segment()" doesn't contain any verbs, and it
doesn't tell me what it does.

If we give it a name that says what it does, we get copro_get_ea_esid_and_vsid().
Or something equally ugly.

And then in patch 10 you move the bulk of the logic into calculate_vsid().

So instead can we:
 - add a small helper that does the esid calculation, eg. calculate_esid() ?
 - factor out the vsid logic into a helper, calculate_vsid() ?
 - rework the spu code to use those, dropping __spu_trap_data_seg()
 - use the helpers in the cxl code
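
Roughly something like this, as a sketch only (names are illustrative, and
the sllp/1T B-field handling is glossed over):

	static inline u64 calculate_esid(u64 ea)
	{
		return (ea & ESID_MASK) | SLB_ESID_V;
	}

	static u64 calculate_vsid(struct mm_struct *mm, u64 ea)
	{
		int ssize;

		switch (REGION_ID(ea)) {
		case USER_REGION_ID:
			ssize = user_segment_size(ea);
			return (get_vsid(mm->context.id, ea, ssize)
				<< slb_vsid_shift(ssize)) | SLB_VSID_USER;
		case VMALLOC_REGION_ID:
		case KERNEL_REGION_ID:
			ssize = mmu_kernel_ssize;
			return (get_kernel_vsid(ea, ssize)
				<< slb_vsid_shift(ssize)) | SLB_VSID_KERNEL;
		default:
			return 0;	/* unsupported region */
		}
	}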


cheers
Benjamin Herrenschmidt Oct. 1, 2014, 6:51 a.m. UTC | #2
On Wed, 2014-10-01 at 16:47 +1000, Michael Ellerman wrote:
> 
> If we give it a name that says what it does, we get
> copro_get_ea_esid_and_vsid().
> Or something equally ugly.

copro_calc_full_va() ?

Ben.
Aneesh Kumar K.V Oct. 1, 2014, 9:45 a.m. UTC | #3
Michael Neuling <mikey@neuling.org> writes:

> From: Ian Munsie <imunsie@au1.ibm.com>
>
> __spu_trap_data_seg() currently contains code to determine the VSID and ESID
> required for a particular EA and mm struct.
>
> This code is generically useful for other co-processors.  This moves the code
> out of the cell platform so it can be used by other powerpc code.  It also adds 1TB
> segment handling which Cell didn't have.
>
> Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
> Signed-off-by: Michael Neuling <mikey@neuling.org>
> ---
>  arch/powerpc/include/asm/mmu-hash64.h  |  7 ++++-
>  arch/powerpc/mm/copro_fault.c          | 48 ++++++++++++++++++++++++++++++++++
>  arch/powerpc/mm/slb.c                  |  3 ---
>  arch/powerpc/platforms/cell/spu_base.c | 41 +++--------------------------
>  4 files changed, 58 insertions(+), 41 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
> index d765144..6d0b7a2 100644
> --- a/arch/powerpc/include/asm/mmu-hash64.h
> +++ b/arch/powerpc/include/asm/mmu-hash64.h
> @@ -189,7 +189,12 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
>  #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
>  
>  #ifndef __ASSEMBLY__
> -
> +static inline int slb_vsid_shift(int ssize)
> +{
> +	if (ssize == MMU_SEGSIZE_256M)
> +		return SLB_VSID_SHIFT;
> +	return SLB_VSID_SHIFT_1T;
> +}
>  static inline int segment_shift(int ssize)
>  {
>  	if (ssize == MMU_SEGSIZE_256M)
> diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
> index ba7df14..b865697 100644
> --- a/arch/powerpc/mm/copro_fault.c
> +++ b/arch/powerpc/mm/copro_fault.c
> @@ -90,3 +90,51 @@ out_unlock:
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
> +
> +int copro_data_segment(struct mm_struct *mm, u64 ea, u64 *esid, u64 *vsid)
> +{
> +	int psize, ssize;
> +
> +	*esid = (ea & ESID_MASK) | SLB_ESID_V;
> +
> +	switch (REGION_ID(ea)) {
> +	case USER_REGION_ID:
> +		pr_devel("copro_data_segment: 0x%llx -- USER_REGION_ID\n", ea);
> +#ifdef CONFIG_PPC_MM_SLICES
> +		psize = get_slice_psize(mm, ea);
> +#else
> +		psize = mm->context.user_psize;
> +#endif

We don't really need that, as explained in the last review.

-aneesh
Aneesh Kumar K.V Oct. 1, 2014, 9:53 a.m. UTC | #4
Michael Neuling <mikey@neuling.org> writes:

> From: Ian Munsie <imunsie@au1.ibm.com>
>
> __spu_trap_data_seg() currently contains code to determine the VSID and ESID
> required for a particular EA and mm struct.
>
> This code is generically useful for other co-processors.  This moves the code
> out of the cell platform so it can be used by other powerpc code.  It also adds 1TB
> segment handling which Cell didn't have.
>
> Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
> Signed-off-by: Michael Neuling <mikey@neuling.org>
> ---
>  arch/powerpc/include/asm/mmu-hash64.h  |  7 ++++-
>  arch/powerpc/mm/copro_fault.c          | 48 ++++++++++++++++++++++++++++++++++
>  arch/powerpc/mm/slb.c                  |  3 ---
>  arch/powerpc/platforms/cell/spu_base.c | 41 +++--------------------------
>  4 files changed, 58 insertions(+), 41 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
> index d765144..6d0b7a2 100644
> --- a/arch/powerpc/include/asm/mmu-hash64.h
> +++ b/arch/powerpc/include/asm/mmu-hash64.h
> @@ -189,7 +189,12 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
>  #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
>  
>  #ifndef __ASSEMBLY__
> -
> +static inline int slb_vsid_shift(int ssize)
> +{
> +	if (ssize == MMU_SEGSIZE_256M)
> +		return SLB_VSID_SHIFT;
> +	return SLB_VSID_SHIFT_1T;
> +}
>  static inline int segment_shift(int ssize)
>  {
>  	if (ssize == MMU_SEGSIZE_256M)
> diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
> index ba7df14..b865697 100644
> --- a/arch/powerpc/mm/copro_fault.c
> +++ b/arch/powerpc/mm/copro_fault.c
> @@ -90,3 +90,51 @@ out_unlock:
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
> +
> +int copro_data_segment(struct mm_struct *mm, u64 ea, u64 *esid, u64 *vsid)
> +{
> +	int psize, ssize;
> +
> +	*esid = (ea & ESID_MASK) | SLB_ESID_V;
> +
> +	switch (REGION_ID(ea)) {
> +	case USER_REGION_ID:
> +		pr_devel("copro_data_segment: 0x%llx -- USER_REGION_ID\n", ea);
> +#ifdef CONFIG_PPC_MM_SLICES
> +		psize = get_slice_psize(mm, ea);
> +#else
> +		psize = mm->context.user_psize;
> +#endif
> +		ssize = user_segment_size(ea);
> +		*vsid = (get_vsid(mm->context.id, ea, ssize)
> +			 << slb_vsid_shift(ssize)) | SLB_VSID_USER;
> +		break;
> +	case VMALLOC_REGION_ID:
> +		pr_devel("copro_data_segment: 0x%llx -- VMALLOC_REGION_ID\n", ea);
> +		if (ea < VMALLOC_END)
> +			psize = mmu_vmalloc_psize;
> +		else
> +			psize = mmu_io_psize;
> +		ssize = mmu_kernel_ssize;
> +		*vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> +			 << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;

why not
		*vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
			 << slb_vsid_shift(ssize)) | SLB_VSID_KERNEL;

for vmalloc and kernel region ? We could end up using 1T segments for kernel mapping too.
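
i.e. both kernel-side branches could become (sketch only, with the
per-region psize selection staying as it is):

		ssize = mmu_kernel_ssize;
		*vsid = (get_kernel_vsid(ea, ssize)
			 << slb_vsid_shift(ssize)) | SLB_VSID_KERNEL;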

-aneesh
Michael Neuling Oct. 1, 2014, 11:10 a.m. UTC | #5
On Wed, 2014-10-01 at 15:15 +0530, Aneesh Kumar K.V wrote:
> Michael Neuling <mikey@neuling.org> writes:
> 
> > From: Ian Munsie <imunsie@au1.ibm.com>
> >
> > __spu_trap_data_seg() currently contains code to determine the VSID and ESID
> > required for a particular EA and mm struct.
> >
> > This code is generically useful for other co-processors.  This moves the code
> > out of the cell platform so it can be used by other powerpc code.  It also adds 1TB
> > segment handling which Cell didn't have.
> >
> > Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
> > Signed-off-by: Michael Neuling <mikey@neuling.org>
> > ---
> >  arch/powerpc/include/asm/mmu-hash64.h  |  7 ++++-
> >  arch/powerpc/mm/copro_fault.c          | 48 ++++++++++++++++++++++++++++++++++
> >  arch/powerpc/mm/slb.c                  |  3 ---
> >  arch/powerpc/platforms/cell/spu_base.c | 41 +++--------------------------
> >  4 files changed, 58 insertions(+), 41 deletions(-)
> >
> > diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
> > index d765144..6d0b7a2 100644
> > --- a/arch/powerpc/include/asm/mmu-hash64.h
> > +++ b/arch/powerpc/include/asm/mmu-hash64.h
> > @@ -189,7 +189,12 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
> >  #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
> >  
> >  #ifndef __ASSEMBLY__
> > -
> > +static inline int slb_vsid_shift(int ssize)
> > +{
> > +	if (ssize == MMU_SEGSIZE_256M)
> > +		return SLB_VSID_SHIFT;
> > +	return SLB_VSID_SHIFT_1T;
> > +}
> >  static inline int segment_shift(int ssize)
> >  {
> >  	if (ssize == MMU_SEGSIZE_256M)
> > diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
> > index ba7df14..b865697 100644
> > --- a/arch/powerpc/mm/copro_fault.c
> > +++ b/arch/powerpc/mm/copro_fault.c
> > @@ -90,3 +90,51 @@ out_unlock:
> >  	return ret;
> >  }
> >  EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
> > +
> > +int copro_data_segment(struct mm_struct *mm, u64 ea, u64 *esid, u64 *vsid)
> > +{
> > +	int psize, ssize;
> > +
> > +	*esid = (ea & ESID_MASK) | SLB_ESID_V;
> > +
> > +	switch (REGION_ID(ea)) {
> > +	case USER_REGION_ID:
> > +		pr_devel("copro_data_segment: 0x%llx -- USER_REGION_ID\n", ea);
> > +#ifdef CONFIG_PPC_MM_SLICES
> > +		psize = get_slice_psize(mm, ea);
> > +#else
> > +		psize = mm->context.user_psize;
> > +#endif
> 
> We don't really need that, as explained in the last review.

That cleanup is in patch 10. I avoided changing it here so it's clearer
that what is being removed is the same as what is being added.

Mikey
Michael Neuling Oct. 2, 2014, 12:42 a.m. UTC | #6
On Wed, 2014-10-01 at 16:47 +1000, Michael Ellerman wrote:
> On Tue, 2014-09-30 at 10:34:51 UTC, Michael Neuling wrote:
> > From: Ian Munsie <imunsie@au1.ibm.com>
> > 
> > __spu_trap_data_seg() currently contains code to determine the VSID and ESID
> > required for a particular EA and mm struct.
> > 
> > This code is generically useful for other co-processors.  This moves the code
> > out of the cell platform so it can be used by other powerpc code.  It also adds 1TB
> > segment handling which Cell didn't have.
> 
> I'm not loving this.
> 
> For starters the name "copro_data_segment()" doesn't contain any verbs, and it
> doesn't tell me what it does.

Ok.

> If we give it a name that says what it does, we get copro_get_ea_esid_and_vsid().
> Or something equally ugly.

Ok

> And then in patch 10 you move the bulk of the logic into calculate_vsid().

That was intentional on my part.  I want this patch to make it clear
that we're just moving this code out of cell.  Then I wanted the optimisations to
be in a separate patch.  It does mean we touch the code twice in this
series, but I was hoping it would make it easier to review.  Alas. :-)

> So instead can we:
>  - add a small helper that does the esid calculation, eg. calculate_esid() ?
>  - factor out the vsid logic into a helper, calculate_vsid() ?
>  - rework the spu code to use those, dropping __spu_trap_data_seg()
>  - use the helpers in the cxl code

OK, I think I can do that.  I might change the name to something better
in this patch, but I'll leave these cleanups to the later patch 10.

Mikey
Michael Neuling Oct. 2, 2014, 12:58 a.m. UTC | #7
On Wed, 2014-10-01 at 15:23 +0530, Aneesh Kumar K.V wrote:
> Michael Neuling <mikey@neuling.org> writes:
> 
> > From: Ian Munsie <imunsie@au1.ibm.com>
> >
> > __spu_trap_data_seg() currently contains code to determine the VSID and ESID
> > required for a particular EA and mm struct.
> >
> > This code is generically useful for other co-processors.  This moves the code
> > out of the cell platform so it can be used by other powerpc code.  It also adds 1TB
> > segment handling which Cell didn't have.
> >
> > Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
> > Signed-off-by: Michael Neuling <mikey@neuling.org>
> > ---
> >  arch/powerpc/include/asm/mmu-hash64.h  |  7 ++++-
> >  arch/powerpc/mm/copro_fault.c          | 48 ++++++++++++++++++++++++++++++++++
> >  arch/powerpc/mm/slb.c                  |  3 ---
> >  arch/powerpc/platforms/cell/spu_base.c | 41 +++--------------------------
> >  4 files changed, 58 insertions(+), 41 deletions(-)
> >
> > diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
> > index d765144..6d0b7a2 100644
> > --- a/arch/powerpc/include/asm/mmu-hash64.h
> > +++ b/arch/powerpc/include/asm/mmu-hash64.h
> > @@ -189,7 +189,12 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
> >  #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
> >  
> >  #ifndef __ASSEMBLY__
> > -
> > +static inline int slb_vsid_shift(int ssize)
> > +{
> > +	if (ssize == MMU_SEGSIZE_256M)
> > +		return SLB_VSID_SHIFT;
> > +	return SLB_VSID_SHIFT_1T;
> > +}
> >  static inline int segment_shift(int ssize)
> >  {
> >  	if (ssize == MMU_SEGSIZE_256M)
> > diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
> > index ba7df14..b865697 100644
> > --- a/arch/powerpc/mm/copro_fault.c
> > +++ b/arch/powerpc/mm/copro_fault.c
> > @@ -90,3 +90,51 @@ out_unlock:
> >  	return ret;
> >  }
> >  EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
> > +
> > +int copro_data_segment(struct mm_struct *mm, u64 ea, u64 *esid, u64 *vsid)
> > +{
> > +	int psize, ssize;
> > +
> > +	*esid = (ea & ESID_MASK) | SLB_ESID_V;
> > +
> > +	switch (REGION_ID(ea)) {
> > +	case USER_REGION_ID:
> > +		pr_devel("copro_data_segment: 0x%llx -- USER_REGION_ID\n", ea);
> > +#ifdef CONFIG_PPC_MM_SLICES
> > +		psize = get_slice_psize(mm, ea);
> > +#else
> > +		psize = mm->context.user_psize;
> > +#endif
> > +		ssize = user_segment_size(ea);
> > +		*vsid = (get_vsid(mm->context.id, ea, ssize)
> > +			 << slb_vsid_shift(ssize)) | SLB_VSID_USER;
> > +		break;
> > +	case VMALLOC_REGION_ID:
> > +		pr_devel("copro_data_segment: 0x%llx -- VMALLOC_REGION_ID\n", ea);
> > +		if (ea < VMALLOC_END)
> > +			psize = mmu_vmalloc_psize;
> > +		else
> > +			psize = mmu_io_psize;
> > +		ssize = mmu_kernel_ssize;
> > +		*vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> > +			 << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
> 
> why not
> 		*vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> 			 << slb_vsid_shift(ssize)) | SLB_VSID_KERNEL;
> 
> for the vmalloc and kernel regions? We could end up using 1T segments for the kernel mapping too.

Yep, but I'm going to do this in patch 10, along with the other
optimisations.

Mikey

Patch

diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index d765144..6d0b7a2 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -189,7 +189,12 @@  static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
 #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
 
 #ifndef __ASSEMBLY__
-
+static inline int slb_vsid_shift(int ssize)
+{
+	if (ssize == MMU_SEGSIZE_256M)
+		return SLB_VSID_SHIFT;
+	return SLB_VSID_SHIFT_1T;
+}
 static inline int segment_shift(int ssize)
 {
 	if (ssize == MMU_SEGSIZE_256M)
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index ba7df14..b865697 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -90,3 +90,51 @@  out_unlock:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
+
+int copro_data_segment(struct mm_struct *mm, u64 ea, u64 *esid, u64 *vsid)
+{
+	int psize, ssize;
+
+	*esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+	switch (REGION_ID(ea)) {
+	case USER_REGION_ID:
+		pr_devel("copro_data_segment: 0x%llx -- USER_REGION_ID\n", ea);
+#ifdef CONFIG_PPC_MM_SLICES
+		psize = get_slice_psize(mm, ea);
+#else
+		psize = mm->context.user_psize;
+#endif
+		ssize = user_segment_size(ea);
+		*vsid = (get_vsid(mm->context.id, ea, ssize)
+			 << slb_vsid_shift(ssize)) | SLB_VSID_USER;
+		break;
+	case VMALLOC_REGION_ID:
+		pr_devel("copro_data_segment: 0x%llx -- VMALLOC_REGION_ID\n", ea);
+		if (ea < VMALLOC_END)
+			psize = mmu_vmalloc_psize;
+		else
+			psize = mmu_io_psize;
+		ssize = mmu_kernel_ssize;
+		*vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
+			 << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
+		break;
+	case KERNEL_REGION_ID:
+		pr_devel("copro_data_segment: 0x%llx -- KERNEL_REGION_ID\n", ea);
+		psize = mmu_linear_psize;
+		ssize = mmu_kernel_ssize;
+		*vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
+			 << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
+		break;
+	default:
+		/* Future: support kernel segments so that drivers can use the
+		 * CoProcessors */
+		pr_debug("invalid region access at %016llx\n", ea);
+		return 1;
+	}
+	*vsid |= mmu_psize_defs[psize].sllp |
+		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(copro_data_segment);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 0399a67..6e450ca 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -46,9 +46,6 @@  static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
 	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
 }
 
-#define slb_vsid_shift(ssize)	\
-	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
-
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 					 unsigned long flags)
 {
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 2930d1e..fe004b1 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -167,45 +167,12 @@  static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 
 static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 {
-	struct mm_struct *mm = spu->mm;
 	struct spu_slb slb;
-	int psize;
-
-	pr_debug("%s\n", __func__);
-
-	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
+	int ret;
 
-	switch(REGION_ID(ea)) {
-	case USER_REGION_ID:
-#ifdef CONFIG_PPC_MM_SLICES
-		psize = get_slice_psize(mm, ea);
-#else
-		psize = mm->context.user_psize;
-#endif
-		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
-		break;
-	case VMALLOC_REGION_ID:
-		if (ea < VMALLOC_END)
-			psize = mmu_vmalloc_psize;
-		else
-			psize = mmu_io_psize;
-		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
-		break;
-	case KERNEL_REGION_ID:
-		psize = mmu_linear_psize;
-		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
-		break;
-	default:
-		/* Future: support kernel segments so that drivers
-		 * can use SPUs.
-		 */
-		pr_debug("invalid region access at %016lx\n", ea);
-		return 1;
-	}
-	slb.vsid |= mmu_psize_defs[psize].sllp;
+	ret = copro_data_segment(spu->mm, ea, &slb.esid, &slb.vsid);
+	if (ret)
+		return ret;
 
 	spu_load_slb(spu, spu->slb_replace, &slb);
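
For reference, a minimal sketch of how another co-processor driver might
consume the new helper (hypothetical driver code, not part of this series):

	u64 esid, vsid;

	/* Translate the faulting EA into an SLB-style ESID/VSID pair. */
	if (copro_data_segment(mm, ea, &esid, &vsid))
		return -EFAULT;	/* unsupported region */

	/* Program the device's SLB-equivalent registers with esid/vsid
	 * here, as spu_load_slb() does for the SPU. */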