Message ID | a687ada5f0134140f3fff27e8ff34b590cf558e6.1561408899.git.acsawdey@linux.ibm.com |
---|---|
State | New |
Headers | show |
Series | Rename movmem pattern to cpymem | expand |
On 25/06/2019 21:22, acsawdey@linux.ibm.com wrote: > From: Aaron Sawdey <acsawdey@linux.ibm.com> > > * config/aarch64/aarch64-protos.h: Change movmem to cpymem. > * config/aarch64/aarch64.c (aarch64_expand_movmem): Change movmem > to cpymem. > * config/aarch64/aarch64.h: Change movmem to cpymem. > * config/aarch64/aarch64.md (movmemdi): Change name to cpymemdi. OK. R. > --- > gcc/config/aarch64/aarch64-protos.h | 4 ++-- > gcc/config/aarch64/aarch64.c | 4 ++-- > gcc/config/aarch64/aarch64.h | 2 +- > gcc/config/aarch64/aarch64.md | 6 +++--- > 4 files changed, 8 insertions(+), 8 deletions(-) > > diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h > index 4b20796..e2f4cc1 100644 > --- a/gcc/config/aarch64/aarch64-protos.h > +++ b/gcc/config/aarch64/aarch64-protos.h > @@ -424,12 +424,12 @@ bool aarch64_constant_address_p (rtx); > bool aarch64_emit_approx_div (rtx, rtx, rtx); > bool aarch64_emit_approx_sqrt (rtx, rtx, bool); > void aarch64_expand_call (rtx, rtx, bool); > -bool aarch64_expand_movmem (rtx *); > +bool aarch64_expand_cpymem (rtx *); > bool aarch64_float_const_zero_rtx_p (rtx); > bool aarch64_float_const_rtx_p (rtx); > bool aarch64_function_arg_regno_p (unsigned); > bool aarch64_fusion_enabled_p (enum aarch64_fusion_pairs); > -bool aarch64_gen_movmemqi (rtx *); > +bool aarch64_gen_cpymemqi (rtx *); > bool aarch64_gimple_fold_builtin (gimple_stmt_iterator *); > bool aarch64_is_extend_from_extract (scalar_int_mode, rtx, rtx); > bool aarch64_is_long_call_p (rtx); > diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c > index 285ae1c..5a923ca 100644 > --- a/gcc/config/aarch64/aarch64.c > +++ b/gcc/config/aarch64/aarch64.c > @@ -17386,11 +17386,11 @@ aarch64_copy_one_block_and_progress_pointers (rtx *src, rtx *dst, > *dst = aarch64_progress_pointer (*dst); > } > > -/* Expand movmem, as if from a __builtin_memcpy. Return true if +/* Expand cpymem, as if from a __builtin_memcpy. Return true if > we succeed, otherwise return false. */ > > bool > -aarch64_expand_movmem (rtx *operands) > +aarch64_expand_cpymem (rtx *operands) > { > int n, mode_bits; > rtx dst = operands[0]; > diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h > index bf06caa..92e38a8 100644 > --- a/gcc/config/aarch64/aarch64.h > +++ b/gcc/config/aarch64/aarch64.h > @@ -855,7 +855,7 @@ typedef struct > /* MOVE_RATIO dictates when we will use the move_by_pieces infrastructure. > move_by_pieces will continually copy the largest safe chunks. So a > 7-byte copy is a 4-byte + 2-byte + byte copy. This proves inefficient > - for both size and speed of copy, so we will instead use the "movmem" > + for both size and speed of copy, so we will instead use the "cpymem" > standard name to implement the copy. This logic does not apply when > targeting -mstrict-align, so keep a sensible default in that case. */ > #define MOVE_RATIO(speed) \ > diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md > index 91e46cf..7026b3a 100644 > --- a/gcc/config/aarch64/aarch64.md > +++ b/gcc/config/aarch64/aarch64.md > @@ -1375,17 +1375,17 @@ > > ;; 0 is dst > ;; 1 is src > -;; 2 is size of move in bytes > +;; 2 is size of copy in bytes > ;; 3 is alignment > > -(define_expand "movmemdi" > +(define_expand "cpymemdi" > [(match_operand:BLK 0 "memory_operand") > (match_operand:BLK 1 "memory_operand") > (match_operand:DI 2 "immediate_operand") > (match_operand:DI 3 "immediate_operand")] > "!STRICT_ALIGNMENT" > { > - if (aarch64_expand_movmem (operands)) > + if (aarch64_expand_cpymem (operands)) > DONE; > FAIL; > } >
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h index 4b20796..e2f4cc1 100644 --- a/gcc/config/aarch64/aarch64-protos.h +++ b/gcc/config/aarch64/aarch64-protos.h @@ -424,12 +424,12 @@ bool aarch64_constant_address_p (rtx); bool aarch64_emit_approx_div (rtx, rtx, rtx); bool aarch64_emit_approx_sqrt (rtx, rtx, bool); void aarch64_expand_call (rtx, rtx, bool); -bool aarch64_expand_movmem (rtx *); +bool aarch64_expand_cpymem (rtx *); bool aarch64_float_const_zero_rtx_p (rtx); bool aarch64_float_const_rtx_p (rtx); bool aarch64_function_arg_regno_p (unsigned); bool aarch64_fusion_enabled_p (enum aarch64_fusion_pairs); -bool aarch64_gen_movmemqi (rtx *); +bool aarch64_gen_cpymemqi (rtx *); bool aarch64_gimple_fold_builtin (gimple_stmt_iterator *); bool aarch64_is_extend_from_extract (scalar_int_mode, rtx, rtx); bool aarch64_is_long_call_p (rtx); diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index 285ae1c..5a923ca 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -17386,11 +17386,11 @@ aarch64_copy_one_block_and_progress_pointers (rtx *src, rtx *dst, *dst = aarch64_progress_pointer (*dst); } -/* Expand movmem, as if from a __builtin_memcpy. Return true if +/* Expand cpymem, as if from a __builtin_memcpy. Return true if we succeed, otherwise return false. */ bool -aarch64_expand_movmem (rtx *operands) +aarch64_expand_cpymem (rtx *operands) { int n, mode_bits; rtx dst = operands[0]; diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h index bf06caa..92e38a8 100644 --- a/gcc/config/aarch64/aarch64.h +++ b/gcc/config/aarch64/aarch64.h @@ -855,7 +855,7 @@ typedef struct /* MOVE_RATIO dictates when we will use the move_by_pieces infrastructure. move_by_pieces will continually copy the largest safe chunks. So a 7-byte copy is a 4-byte + 2-byte + byte copy. This proves inefficient - for both size and speed of copy, so we will instead use the "movmem" + for both size and speed of copy, so we will instead use the "cpymem" standard name to implement the copy. This logic does not apply when targeting -mstrict-align, so keep a sensible default in that case. */ #define MOVE_RATIO(speed) \ diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md index 91e46cf..7026b3a 100644 --- a/gcc/config/aarch64/aarch64.md +++ b/gcc/config/aarch64/aarch64.md @@ -1375,17 +1375,17 @@ ;; 0 is dst ;; 1 is src -;; 2 is size of move in bytes +;; 2 is size of copy in bytes ;; 3 is alignment -(define_expand "movmemdi" +(define_expand "cpymemdi" [(match_operand:BLK 0 "memory_operand") (match_operand:BLK 1 "memory_operand") (match_operand:DI 2 "immediate_operand") (match_operand:DI 3 "immediate_operand")] "!STRICT_ALIGNMENT" { - if (aarch64_expand_movmem (operands)) + if (aarch64_expand_cpymem (operands)) DONE; FAIL; }
From: Aaron Sawdey <acsawdey@linux.ibm.com> * config/aarch64/aarch64-protos.h: Change movmem to cpymem. * config/aarch64/aarch64.c (aarch64_expand_movmem): Change movmem to cpymem. * config/aarch64/aarch64.h: Change movmem to cpymem. * config/aarch64/aarch64.md (movmemdi): Change name to cpymemdi. --- gcc/config/aarch64/aarch64-protos.h | 4 ++-- gcc/config/aarch64/aarch64.c | 4 ++-- gcc/config/aarch64/aarch64.h | 2 +- gcc/config/aarch64/aarch64.md | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-)