[v2,06/13] powerpc: Support prefixed instructions in alignment handler

Message ID 20200211053355.21574-7-jniethe5@gmail.com (mailing list archive)
State Superseded
Series Initial Prefixed Instruction support

Checks

Context                        Check    Description
snowpatch_ozlabs/apply_patch   success  Successfully applied on branch powerpc/merge (a5bc6e124219546a81ce334dc9b16483d55e9abf)
snowpatch_ozlabs/checkpatch    warning  total: 0 errors, 0 warnings, 7 checks, 122 lines checked
snowpatch_ozlabs/needsstable   success  Patch has no Fixes tags

Commit Message

Jordan Niethe Feb. 11, 2020, 5:33 a.m. UTC
Alignment interrupts can be caused by prefixed instructions accessing
memory. In the alignment handler, the instruction that caused the
exception is loaded and an attempt is made to emulate it. If the
instruction is a prefixed instruction, load the prefix and suffix to
emulate. After emulating, increment the NIP by 8.
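
For context, an ISA 3.1 prefixed instruction is an 8-byte, two-word
form whose first word (the prefix) carries primary opcode 1. A minimal
sketch of the detection, for illustration only (OP_PREFIX and the
helper name here are illustrative, not taken from this series):

  #include <stdbool.h>
  #include <stdint.h>

  #define OP_PREFIX 1	/* primary opcode of an ISA 3.1 prefix word */

  /* A 32-bit word is a prefix when its top 6 bits (the primary
   * opcode) equal OP_PREFIX. */
  static bool is_prefix_word(uint32_t word)
  {
  	return (word >> 26) == OP_PREFIX;
  }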

Prefixed instructions are not permitted to cross 64-byte boundaries. If
they do, the alignment interrupt is invoked with the SRR1 BOUNDARY bit
set. If this occurs, send a SIGBUS to the offending process if in user
mode. If in kernel mode, call bad_page_fault().
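
Since instructions are 4-byte aligned, an 8-byte prefixed instruction
crosses a 64-byte boundary exactly when its prefix occupies the last
4-byte slot of a 64-byte block. A sketch of that check, under the same
illustrative assumptions as above:

  /* True if an 8-byte instruction starting at nip would cross a
   * 64-byte boundary. */
  static bool crosses_64byte_boundary(uint64_t nip)
  {
  	return (nip & 63) == 60;
  }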

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
v2: - Move __get_user_instr() and __get_user_instr_inatomic() to this
      commit (previously in "powerpc sstep: Prepare to support prefixed
      instructions").
    - Rename sufx to suffix
    - Use a macro for calculating instruction length
---
 arch/powerpc/include/asm/uaccess.h | 30 ++++++++++++++++++++++++++++++
 arch/powerpc/kernel/align.c        |  8 +++++---
 arch/powerpc/kernel/traps.c        | 21 ++++++++++++++++++++-
 3 files changed, 55 insertions(+), 4 deletions(-)

Comments

Christophe Leroy Feb. 11, 2020, 6:14 a.m. UTC | #1
On 11/02/2020 at 06:33, Jordan Niethe wrote:
> Alignment interrupts can be caused by prefixed instructions accessing
> memory. In the alignment handler, the instruction that caused the
> exception is loaded and an attempt is made to emulate it. If the
> instruction is a prefixed instruction, load the prefix and suffix to
> emulate. After emulating, increment the NIP by 8.
> 
> Prefixed instructions are not permitted to cross 64-byte boundaries. If
> they do, the alignment interrupt is invoked with the SRR1 BOUNDARY bit
> set. If this occurs, send a SIGBUS to the offending process if in user
> mode. If in kernel mode, call bad_page_fault().
> 
> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> ---
> v2: - Move __get_user_instr() and __get_user_instr_inatomic() to this
> commit (previously in "powerpc sstep: Prepare to support prefixed
> instructions").
>      - Rename sufx to suffix
>      - Use a macro for calculating instruction length
> ---
>   arch/powerpc/include/asm/uaccess.h | 30 ++++++++++++++++++++++++++++++
>   arch/powerpc/kernel/align.c        |  8 +++++---
>   arch/powerpc/kernel/traps.c        | 21 ++++++++++++++++++++-
>   3 files changed, 55 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
> index 2f500debae21..30f63a81c8d8 100644
> --- a/arch/powerpc/include/asm/uaccess.h
> +++ b/arch/powerpc/include/asm/uaccess.h
> @@ -474,4 +474,34 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
>   #define unsafe_copy_to_user(d, s, l, e) \
>   	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
>   

Could it go closer to other __get_user() and friends instead of being at
the end of the file?

> +/*
> + * When reading an instruction, if it is a prefix, the suffix also needs
> + * to be loaded.
> + */
> +#define __get_user_instr(x, y, ptr)			\
> +({							\
> +	long __gui_ret = 0;				\
> +	y = 0;						\
> +	__gui_ret = __get_user(x, ptr);			\
> +	if (!__gui_ret) {				\
> +		if (IS_PREFIX(x))			\

Does this apply to PPC32?
If not, can we make sure IS_PREFIX is constant 0 on PPC32 so that the
second read gets dropped at compile time?

Can we instead do:

	if (!__gui_ret && IS_PREFIX(x))

> +			__gui_ret = __get_user(y, ptr + 1);	\
> +	}						\
> +							\
> +	__gui_ret;					\
> +})
> +
> +#define __get_user_instr_inatomic(x, y, ptr)		\
> +({							\
> +	long __gui_ret = 0;				\
> +	y = 0;						\
> +	__gui_ret = __get_user_inatomic(x, ptr);	\
> +	if (!__gui_ret) {				\
> +		if (IS_PREFIX(x))			\

Same comments as above

> +			__gui_ret = __get_user_inatomic(y, ptr + 1);	\
> +	}						\
> +							\
> +	__gui_ret;					\
> +})
> +
>   #endif	/* _ARCH_POWERPC_UACCESS_H */
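
For illustration, a sketch of __get_user_instr() with both of the above
suggestions applied: the combined condition, and an IS_PREFIX that is
constant 0 when prefixed instructions are not supported. The
CONFIG_PPC64 guard and the OP_PREFIX name are assumptions, not part of
the posted series:

#ifdef CONFIG_PPC64
#define IS_PREFIX(x)	(((x) >> 26) == OP_PREFIX)	/* primary opcode 1 */
#else
#define IS_PREFIX(x)	0	/* constant 0: compiler drops the suffix read */
#endif

#define __get_user_instr(x, y, ptr)			\
({							\
	long __gui_ret;					\
	y = 0;						\
	__gui_ret = __get_user(x, ptr);			\
	if (!__gui_ret && IS_PREFIX(x))			\
		__gui_ret = __get_user(y, ptr + 1);	\
	__gui_ret;					\
})

The __get_user_instr_inatomic() variant would change the same way.
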
> diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
> index ba3bf5c3ab62..e42cfaa616d3 100644
> --- a/arch/powerpc/kernel/align.c
> +++ b/arch/powerpc/kernel/align.c
> @@ -293,7 +293,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
>   
>   int fix_alignment(struct pt_regs *regs)
>   {
> -	unsigned int instr;
> +	unsigned int instr, suffix;
>   	struct instruction_op op;
>   	int r, type;
>   
> @@ -303,13 +303,15 @@ int fix_alignment(struct pt_regs *regs)
>   	 */
>   	CHECK_FULL_REGS(regs);
>   
> -	if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip)))
> +	if (unlikely(__get_user_instr(instr, suffix,
> +				 (unsigned int __user *)regs->nip)))
>   		return -EFAULT;
>   	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
>   		/* We don't handle PPC little-endian any more... */
>   		if (cpu_has_feature(CPU_FTR_PPC_LE))
>   			return -EIO;
>   		instr = swab32(instr);
> +		suffix = swab32(suffix);
>   	}
>   
>   #ifdef CONFIG_SPE
> @@ -334,7 +336,7 @@ int fix_alignment(struct pt_regs *regs)
>   	if ((instr & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
>   		return -EIO;
>   
> -	r = analyse_instr(&op, regs, instr, PPC_NO_SUFFIX);
> +	r = analyse_instr(&op, regs, instr, suffix);
>   	if (r < 0)
>   		return -EINVAL;
>   
> diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
> index 82a3438300fd..d80b82fc1ae3 100644
> --- a/arch/powerpc/kernel/traps.c
> +++ b/arch/powerpc/kernel/traps.c
> @@ -583,6 +583,10 @@ static inline int check_io_access(struct pt_regs *regs)
>   #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
>   #define REASON_PRIVILEGED	ESR_PPR
>   #define REASON_TRAP		ESR_PTR
> +#define REASON_PREFIXED		0
> +#define REASON_BOUNDARY		0
> +
> +#define inst_length(reason)	4
>   
>   /* single-step stuff */
>   #define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
> @@ -597,6 +601,10 @@ static inline int check_io_access(struct pt_regs *regs)
>   #define REASON_ILLEGAL		SRR1_PROGILL
>   #define REASON_PRIVILEGED	SRR1_PROGPRIV
>   #define REASON_TRAP		SRR1_PROGTRAP
> +#define REASON_PREFIXED		SRR1_PREFIXED
> +#define REASON_BOUNDARY		SRR1_BOUNDARY
> +
> +#define inst_length(reason)	(((reason) & REASON_PREFIXED) ? 8 : 4)
>   
>   #define single_stepping(regs)	((regs)->msr & MSR_SE)
>   #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
> @@ -1593,11 +1601,20 @@ void alignment_exception(struct pt_regs *regs)
>   {
>   	enum ctx_state prev_state = exception_enter();
>   	int sig, code, fixed = 0;
> +	unsigned long reason;
>   
>   	/* We restore the interrupt state now */
>   	if (!arch_irq_disabled_regs(regs))
>   		local_irq_enable();
>   
> +	reason = get_reason(regs);
> +
> +	if (reason & REASON_BOUNDARY) {
> +		sig = SIGBUS;
> +		code = BUS_ADRALN;
> +		goto bad;
> +	}
> +
>   	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
>   		goto bail;
>   
> @@ -1606,7 +1623,8 @@ void alignment_exception(struct pt_regs *regs)
>   		fixed = fix_alignment(regs);
>   
>   	if (fixed == 1) {
> -		regs->nip += 4;	/* skip over emulated instruction */
> +		/* skip over emulated instruction */
> +		regs->nip += inst_length(reason);
>   		emulate_single_step(regs);
>   		goto bail;
>   	}
> @@ -1619,6 +1637,7 @@ void alignment_exception(struct pt_regs *regs)
>   		sig = SIGBUS;
>   		code = BUS_ADRALN;
>   	}
> +bad:
>   	if (user_mode(regs))
>   		_exception(sig, regs, code, regs->dar);
>   	else
> 

Christophe
Jordan Niethe Feb. 12, 2020, 2:55 a.m. UTC | #2
On Tue, Feb 11, 2020 at 5:14 PM Christophe Leroy
<christophe.leroy@c-s.fr> wrote:
>
>
>
> On 11/02/2020 at 06:33, Jordan Niethe wrote:
> > Alignment interrupts can be caused by prefixed instructions accessing
> > memory. In the alignment handler, the instruction that caused the
> > exception is loaded and an attempt is made to emulate it. If the
> > instruction is a prefixed instruction, load the prefix and suffix to
> > emulate. After emulating, increment the NIP by 8.
> >
> > Prefixed instructions are not permitted to cross 64-byte boundaries. If
> > they do, the alignment interrupt is invoked with the SRR1 BOUNDARY bit
> > set. If this occurs, send a SIGBUS to the offending process if in user
> > mode. If in kernel mode, call bad_page_fault().
> >
> > Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> > ---
> > v2: - Move __get_user_instr() and __get_user_instr_inatomic() to this
> > commit (previously in "powerpc sstep: Prepare to support prefixed
> > instructions").
> >      - Rename sufx to suffix
> >      - Use a macro for calculating instruction length
> > ---
> >   arch/powerpc/include/asm/uaccess.h | 30 ++++++++++++++++++++++++++++++
> >   arch/powerpc/kernel/align.c        |  8 +++++---
> >   arch/powerpc/kernel/traps.c        | 21 ++++++++++++++++++++-
> >   3 files changed, 55 insertions(+), 4 deletions(-)
> >
> > diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
> > index 2f500debae21..30f63a81c8d8 100644
> > --- a/arch/powerpc/include/asm/uaccess.h
> > +++ b/arch/powerpc/include/asm/uaccess.h
> > @@ -474,4 +474,34 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
> >   #define unsafe_copy_to_user(d, s, l, e) \
> >       unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
> >
>
> Could it go closer to other __get_user() and friends instead of being at
> the end of the file?
Will do.
>
> > +/*
> > + * When reading an instruction, if it is a prefix, the suffix also needs
> > + * to be loaded.
> > + */
> > +#define __get_user_instr(x, y, ptr)                  \
> > +({                                                   \
> > +     long __gui_ret = 0;                             \
> > +     y = 0;                                          \
> > +     __gui_ret = __get_user(x, ptr);                 \
> > +     if (!__gui_ret) {                               \
> > +             if (IS_PREFIX(x))                       \
>
> Does this apply to PPC32?
No, for now (and the foreseeable future) it will just affect 64s.
> If not, can we make sure IS_PREFIX is constant 0 on PPC32 so that the
> second read gets dropped at compile time?
>
> Can we instead do:
>
>         if (!__gui_ret && IS_PREFIX(x))
Will do.
>
> > +                     __gui_ret = __get_user(y, ptr + 1);     \
> > +     }                                               \
> > +                                                     \
> > +     __gui_ret;                                      \
> > +})
> > +
> > +#define __get_user_instr_inatomic(x, y, ptr)         \
> > +({                                                   \
> > +     long __gui_ret = 0;                             \
> > +     y = 0;                                          \
> > +     __gui_ret = __get_user_inatomic(x, ptr);        \
> > +     if (!__gui_ret) {                               \
> > +             if (IS_PREFIX(x))                       \
>
> Same comments as above
>
> > +                     __gui_ret = __get_user_inatomic(y, ptr + 1);    \
> > +     }                                               \
> > +                                                     \
> > +     __gui_ret;                                      \
> > +})
> > +
> >   #endif      /* _ARCH_POWERPC_UACCESS_H */
> > diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
> > index ba3bf5c3ab62..e42cfaa616d3 100644
> > --- a/arch/powerpc/kernel/align.c
> > +++ b/arch/powerpc/kernel/align.c
> > @@ -293,7 +293,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
> >
> >   int fix_alignment(struct pt_regs *regs)
> >   {
> > -     unsigned int instr;
> > +     unsigned int instr, suffix;
> >       struct instruction_op op;
> >       int r, type;
> >
> > @@ -303,13 +303,15 @@ int fix_alignment(struct pt_regs *regs)
> >        */
> >       CHECK_FULL_REGS(regs);
> >
> > -     if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip)))
> > +     if (unlikely(__get_user_instr(instr, suffix,
> > +                              (unsigned int __user *)regs->nip)))
> >               return -EFAULT;
> >       if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
> >               /* We don't handle PPC little-endian any more... */
> >               if (cpu_has_feature(CPU_FTR_PPC_LE))
> >                       return -EIO;
> >               instr = swab32(instr);
> > +             suffix = swab32(suffix);
> >       }
> >
> >   #ifdef CONFIG_SPE
> > @@ -334,7 +336,7 @@ int fix_alignment(struct pt_regs *regs)
> >       if ((instr & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
> >               return -EIO;
> >
> > -     r = analyse_instr(&op, regs, instr, PPC_NO_SUFFIX);
> > +     r = analyse_instr(&op, regs, instr, suffix);
> >       if (r < 0)
> >               return -EINVAL;
> >
> > diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
> > index 82a3438300fd..d80b82fc1ae3 100644
> > --- a/arch/powerpc/kernel/traps.c
> > +++ b/arch/powerpc/kernel/traps.c
> > @@ -583,6 +583,10 @@ static inline int check_io_access(struct pt_regs *regs)
> >   #define REASON_ILLEGAL              (ESR_PIL | ESR_PUO)
> >   #define REASON_PRIVILEGED   ESR_PPR
> >   #define REASON_TRAP         ESR_PTR
> > +#define REASON_PREFIXED              0
> > +#define REASON_BOUNDARY              0
> > +
> > +#define inst_length(reason)  4
> >
> >   /* single-step stuff */
> >   #define single_stepping(regs)       (current->thread.debug.dbcr0 & DBCR0_IC)
> > @@ -597,6 +601,10 @@ static inline int check_io_access(struct pt_regs *regs)
> >   #define REASON_ILLEGAL              SRR1_PROGILL
> >   #define REASON_PRIVILEGED   SRR1_PROGPRIV
> >   #define REASON_TRAP         SRR1_PROGTRAP
> > +#define REASON_PREFIXED              SRR1_PREFIXED
> > +#define REASON_BOUNDARY              SRR1_BOUNDARY
> > +
> > +#define inst_length(reason)  (((reason) & REASON_PREFIXED) ? 8 : 4)
> >
> >   #define single_stepping(regs)       ((regs)->msr & MSR_SE)
> >   #define clear_single_step(regs)     ((regs)->msr &= ~MSR_SE)
> > @@ -1593,11 +1601,20 @@ void alignment_exception(struct pt_regs *regs)
> >   {
> >       enum ctx_state prev_state = exception_enter();
> >       int sig, code, fixed = 0;
> > +     unsigned long reason;
> >
> >       /* We restore the interrupt state now */
> >       if (!arch_irq_disabled_regs(regs))
> >               local_irq_enable();
> >
> > +     reason = get_reason(regs);
> > +
> > +     if (reason & REASON_BOUNDARY) {
> > +             sig = SIGBUS;
> > +             code = BUS_ADRALN;
> > +             goto bad;
> > +     }
> > +
> >       if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
> >               goto bail;
> >
> > @@ -1606,7 +1623,8 @@ void alignment_exception(struct pt_regs *regs)
> >               fixed = fix_alignment(regs);
> >
> >       if (fixed == 1) {
> > -             regs->nip += 4; /* skip over emulated instruction */
> > +             /* skip over emulated instruction */
> > +             regs->nip += inst_length(reason);
> >               emulate_single_step(regs);
> >               goto bail;
> >       }
> > @@ -1619,6 +1637,7 @@ void alignment_exception(struct pt_regs *regs)
> >               sig = SIGBUS;
> >               code = BUS_ADRALN;
> >       }
> > +bad:
> >       if (user_mode(regs))
> >               _exception(sig, regs, code, regs->dar);
> >       else
> >
>
> Christophe

Patch

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 2f500debae21..30f63a81c8d8 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -474,4 +474,34 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
 #define unsafe_copy_to_user(d, s, l, e) \
 	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
 
+/*
+ * When reading an instruction, if it is a prefix, the suffix also needs
+ * to be loaded.
+ */
+#define __get_user_instr(x, y, ptr)			\
+({							\
+	long __gui_ret = 0;				\
+	y = 0;						\
+	__gui_ret = __get_user(x, ptr);			\
+	if (!__gui_ret) {				\
+		if (IS_PREFIX(x))			\
+			__gui_ret = __get_user(y, ptr + 1);	\
+	}						\
+							\
+	__gui_ret;					\
+})
+
+#define __get_user_instr_inatomic(x, y, ptr)		\
+({							\
+	long __gui_ret = 0;				\
+	y = 0;						\
+	__gui_ret = __get_user_inatomic(x, ptr);	\
+	if (!__gui_ret) {				\
+		if (IS_PREFIX(x))			\
+			__gui_ret = __get_user_inatomic(y, ptr + 1);	\
+	}						\
+							\
+	__gui_ret;					\
+})
+
 #endif	/* _ARCH_POWERPC_UACCESS_H */
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ba3bf5c3ab62..e42cfaa616d3 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -293,7 +293,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 
 int fix_alignment(struct pt_regs *regs)
 {
-	unsigned int instr;
+	unsigned int instr, suffix;
 	struct instruction_op op;
 	int r, type;
 
@@ -303,13 +303,15 @@ int fix_alignment(struct pt_regs *regs)
 	 */
 	CHECK_FULL_REGS(regs);
 
-	if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip)))
+	if (unlikely(__get_user_instr(instr, suffix,
+				 (unsigned int __user *)regs->nip)))
 		return -EFAULT;
 	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
 		/* We don't handle PPC little-endian any more... */
 		if (cpu_has_feature(CPU_FTR_PPC_LE))
 			return -EIO;
 		instr = swab32(instr);
+		suffix = swab32(suffix);
 	}
 
 #ifdef CONFIG_SPE
@@ -334,7 +336,7 @@ int fix_alignment(struct pt_regs *regs)
 	if ((instr & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
 		return -EIO;
 
-	r = analyse_instr(&op, regs, instr, PPC_NO_SUFFIX);
+	r = analyse_instr(&op, regs, instr, suffix);
 	if (r < 0)
 		return -EINVAL;
 
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 82a3438300fd..d80b82fc1ae3 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -583,6 +583,10 @@ static inline int check_io_access(struct pt_regs *regs)
 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
 #define REASON_PRIVILEGED	ESR_PPR
 #define REASON_TRAP		ESR_PTR
+#define REASON_PREFIXED		0
+#define REASON_BOUNDARY		0
+
+#define inst_length(reason)	4
 
 /* single-step stuff */
 #define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
@@ -597,6 +601,10 @@ static inline int check_io_access(struct pt_regs *regs)
 #define REASON_ILLEGAL		SRR1_PROGILL
 #define REASON_PRIVILEGED	SRR1_PROGPRIV
 #define REASON_TRAP		SRR1_PROGTRAP
+#define REASON_PREFIXED		SRR1_PREFIXED
+#define REASON_BOUNDARY		SRR1_BOUNDARY
+
+#define inst_length(reason)	(((reason) & REASON_PREFIXED) ? 8 : 4)
 
 #define single_stepping(regs)	((regs)->msr & MSR_SE)
 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
@@ -1593,11 +1601,20 @@ void alignment_exception(struct pt_regs *regs)
 {
 	enum ctx_state prev_state = exception_enter();
 	int sig, code, fixed = 0;
+	unsigned long reason;
 
 	/* We restore the interrupt state now */
 	if (!arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
+	reason = get_reason(regs);
+
+	if (reason & REASON_BOUNDARY) {
+		sig = SIGBUS;
+		code = BUS_ADRALN;
+		goto bad;
+	}
+
 	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
 		goto bail;
 
@@ -1606,7 +1623,8 @@ void alignment_exception(struct pt_regs *regs)
 		fixed = fix_alignment(regs);
 
 	if (fixed == 1) {
-		regs->nip += 4;	/* skip over emulated instruction */
+		/* skip over emulated instruction */
+		regs->nip += inst_length(reason);
 		emulate_single_step(regs);
 		goto bail;
 	}
@@ -1619,6 +1637,7 @@ void alignment_exception(struct pt_regs *regs)
 		sig = SIGBUS;
 		code = BUS_ADRALN;
 	}
+bad:
 	if (user_mode(regs))
 		_exception(sig, regs, code, regs->dar);
 	else