
[2/6] RISC-V Port: gcc

Message ID 20170112023038.13449-3-palmer@dabbelt.com
State New

Commit Message

Palmer Dabbelt Jan. 12, 2017, 2:30 a.m. UTC
From: Andrew Waterman <andrew@sifive.com>

---
 gcc/common/config/riscv/riscv-common.c |  131 ++
 gcc/config.gcc                         |   93 ++
 gcc/config/riscv/constraints.md        |   79 ++
 gcc/config/riscv/elf.h                 |   35 +
 gcc/config/riscv/generic.md            |   78 ++
 gcc/config/riscv/linux.h               |   34 +
 gcc/config/riscv/peephole.md           |   85 ++
 gcc/config/riscv/pic.md                |   85 ++
 gcc/config/riscv/predicates.md         |  163 +++
 gcc/config/riscv/riscv-modes.def       |   22 +
 gcc/config/riscv/riscv-opts.h          |   41 +
 gcc/config/riscv/riscv-protos.h        |   72 +
 gcc/config/riscv/riscv.h               | 1015 ++++++++++++++
 gcc/config/riscv/riscv.md              | 2342 ++++++++++++++++++++++++++++++++
 gcc/config/riscv/riscv.opt             |  114 ++
 gcc/config/riscv/sync.md               |  207 +++
 gcc/config/riscv/t-elf-multilib        |    2 +
 gcc/config/riscv/t-elf-nomultilib      |    2 +
 gcc/config/riscv/t-linux-multilib      |   13 +
 gcc/config/riscv/t-linux-nomultilib    |   27 +
 gcc/configure.ac                       |   15 +-
 21 files changed, 4653 insertions(+), 2 deletions(-)
 create mode 100644 gcc/common/config/riscv/riscv-common.c
 create mode 100644 gcc/config/riscv/constraints.md
 create mode 100644 gcc/config/riscv/elf.h
 create mode 100644 gcc/config/riscv/generic.md
 create mode 100644 gcc/config/riscv/linux.h
 create mode 100644 gcc/config/riscv/peephole.md
 create mode 100644 gcc/config/riscv/pic.md
 create mode 100644 gcc/config/riscv/predicates.md
 create mode 100644 gcc/config/riscv/riscv-modes.def
 create mode 100644 gcc/config/riscv/riscv-opts.h
 create mode 100644 gcc/config/riscv/riscv-protos.h
 create mode 100644 gcc/config/riscv/riscv.h
 create mode 100644 gcc/config/riscv/riscv.md
 create mode 100644 gcc/config/riscv/riscv.opt
 create mode 100644 gcc/config/riscv/sync.md
 create mode 100644 gcc/config/riscv/t-elf-multilib
 create mode 100644 gcc/config/riscv/t-elf-nomultilib
 create mode 100644 gcc/config/riscv/t-linux-multilib
 create mode 100644 gcc/config/riscv/t-linux-nomultilib

Comments

Joseph Myers Jan. 12, 2017, 10:30 p.m. UTC | #1
On Wed, 11 Jan 2017, Palmer Dabbelt wrote:

> +static void
> +riscv_parse_arch_string (const char *isa, int *flags)

This should be passed the location from riscv_handle_option...

> +      error ("-march=%s: ISA string must begin with rv32 or rv64", isa);

 ... so you can use error_at with an explicit location (for all errors in 
this function).

> +static bool
> +riscv_handle_option (struct gcc_options *opts,
> +		     struct gcc_options *opts_set ATTRIBUTE_UNUSED,
> +		     const struct cl_decoded_option *decoded,
> +		     location_t loc ATTRIBUTE_UNUSED)

The location will then no longer be ATTRIBUTE_UNUSED.
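
Concretely, threading the location through would look roughly like the sketch
below.  The bodies are paraphrased from context rather than copied from the
patch; in particular, the OPT_march_ case label and the &opts->x_target_flags
argument are assumptions about how the handler is wired up.

  static void
  riscv_parse_arch_string (const char *isa, int *flags, location_t loc)
  {
    if (strncmp (isa, "rv32", 4) != 0 && strncmp (isa, "rv64", 4) != 0)
      {
        /* Report against the location of the -march= option itself.  */
        error_at (loc, "-march=%s: ISA string must begin with rv32 or rv64",
                  isa);
        return;
      }
    /* ... rest of the ISA-string parsing as in the patch ... */
  }

  static bool
  riscv_handle_option (struct gcc_options *opts,
                       struct gcc_options *opts_set ATTRIBUTE_UNUSED,
                       const struct cl_decoded_option *decoded,
                       location_t loc)
  {
    switch (decoded->opt_index)
      {
      case OPT_march_:
        /* Forward the option's location so every diagnostic in the
           parser can use error_at.  */
        riscv_parse_arch_string (decoded->arg, &opts->x_target_flags, loc);
        return true;

      default:
        return true;
      }
  }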

> +		supported_defaults="abi arch tune"

So you have three supported defaults here ...

> +/* Support for a compile-time default CPU, et cetera.  The rules are:
> +   --with-arch is ignored if -march is specified.
> +   --with-abi is ignored if -mabi is specified.
> +   --with-tune is ignored if -mtune is specified.  */
> +#define OPTION_DEFAULT_SPECS \
> +  {"march", "%{!march=*:-march=%(VALUE)}" }, \
> +  {"mabi", "%{!mabi=*:-mabi=%(VALUE)}" }, \
> +  {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
> +  {"arch", "%{!march=*:-march=%(VALUE)}" }, \
> +  {"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \

 ... why do you need the "march" and "mabi" entries here, when I'd expect 
three entries as well?
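
In other words, with arch, abi and tune as the three supported defaults, the
macro would shrink to one entry each, roughly as below (a sketch matching the
config.gcc line above, not necessarily the final committed text):

  /* Support for a compile-time default CPU, et cetera.  The rules are:
     --with-arch is ignored if -march is specified.
     --with-abi is ignored if -mabi is specified.
     --with-tune is ignored if -mtune is specified.  */
  #define OPTION_DEFAULT_SPECS \
    {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
    {"arch", "%{!march=*:-march=%(VALUE)}" }, \
    {"abi",  "%{!mabi=*:-mabi=%(VALUE)}" },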

> +#undef ASM_SPEC
> +#define ASM_SPEC "\
> +%(subtarget_asm_debugging_spec) \
> +%{fPIC|fpic|fPIE|fpie:-fpic} \

I'd expect you to use FPIE_OR_FPIC_SPEC here to enable 
--enable-default-pie.
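
Concretely, the PIC handling in ASM_SPEC would then be written in terms of that
helper, roughly as below (a sketch showing only the start of the spec string;
FPIE_OR_FPIC_SPEC is the macro gcc.c provides so that --enable-default-pie
builds handle the default-PIE case):

  #undef ASM_SPEC
  #define ASM_SPEC "\
  %(subtarget_asm_debugging_spec) \
  %{" FPIE_OR_FPIC_SPEC ":-fpic}"

with the remainder of the original spec string appended unchanged.
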
Andrew Waterman Jan. 13, 2017, 7:58 p.m. UTC | #2
On Thu, Jan 12, 2017 at 2:30 PM, Joseph Myers <joseph@codesourcery.com> wrote:
> On Wed, 11 Jan 2017, Palmer Dabbelt wrote:
>
>> +static void
>> +riscv_parse_arch_string (const char *isa, int *flags)
>
> This should be passed the location from riscv_handle_option...
>
>> +      error ("-march=%s: ISA string must begin with rv32 or rv64", isa);
>
>  ... so you can use error_at with an explicit location (for all errors in
> thsi function).
>
>> +static bool
>> +riscv_handle_option (struct gcc_options *opts,
>> +                  struct gcc_options *opts_set ATTRIBUTE_UNUSED,
>> +                  const struct cl_decoded_option *decoded,
>> +                  location_t loc ATTRIBUTE_UNUSED)
>
> The location will no longer then be ATTRIBUTE_UNUSED.

Will do.

>
>> +             supported_defaults="abi arch tune"
>
> So you have three supported defaults here ...
>
>> +/* Support for a compile-time default CPU, et cetera.  The rules are:
>> +   --with-arch is ignored if -march is specified.
>> +   --with-abi is ignored if -mabi is specified.
>> +   --with-tune is ignored if -mtune is specified.  */
>> +#define OPTION_DEFAULT_SPECS \
>> +  {"march", "%{!march=*:-march=%(VALUE)}" }, \
>> +  {"mabi", "%{!mabi=*:-mabi=%(VALUE)}" }, \
>> +  {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
>> +  {"arch", "%{!march=*:-march=%(VALUE)}" }, \
>> +  {"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \
>
>  ... why do you need the "march" and "mabi" entries here, when I'd expect
> three entries as well?

Indeed, we do not.

>
>> +#undef ASM_SPEC
>> +#define ASM_SPEC "\
>> +%(subtarget_asm_debugging_spec) \
>> +%{fPIC|fpic|fPIE|fpie:-fpic} \
>
> I'd expect you to use FPIE_OR_FPIC_SPEC here to enable
> --enable-default-pie.

Thanks for pointing this out.  We missed that this macro was
introduced after we started the port.

>
> --
> Joseph S. Myers
> joseph@codesourcery.com
Karsten Merker Jan. 14, 2017, 10:05 a.m. UTC | #3
Palmer Dabbelt wrote:

> diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h
> new file mode 100644
> index 0000000..045f6cc
> --- /dev/null
> +++ b/gcc/config/riscv/linux.h
> [...]
>  +#define GLIBC_DYNAMIC_LINKER "/lib" XLEN_SPEC "/" ABI_SPEC "/ld.so.1"

[with XLEN_SPEC being either 32 or 64 and ABI_SPEC being one of
 ilp32, ilp32f, ilp32d, lp64, lp64f, lp64d]

Hello everybody,

I am not fully happy with the way the dynamic linker path (which
gets embedded into every Linux executable built by gcc and
therefore cannot be changed later) is defined here.  The dynamic
linker path must be unique over all platforms for which a Linux
port exists to make multiarch installations (i.e. having
dynamically linked binaries for multiple architectures/ABIs in
the same root filesystem) possible.  The path specifier as cited
above contains nothing that makes the linker path inherently
specific to RISC-V.  While there is AFAIK no other architecture
that currently uses exactly this specific linker path model with
the ABI specifier as a separate subdirectory (instead of encoding
it into the filename), so that there technically isn't a naming
conflict, I think that we should follow the convention of the
other "modern" Linux architectures, which all include the
architecture name in their linker path:

  * arm64:    /lib/ld-linux-aarch64.so.1
  * armhf:    /lib/ld-linux-armhf.so.3
  * ia64:     /lib/ld-linux-ia64.so.2
  * mips n64: /lib64/ld-linux-mipsn8.so.1
  * nios2:    /lib/ld-linux-nios2.so.1
  * x86_64:   /lib64/ld-linux-x86-64.so.2 

So the actual ld.so binary should be called something like
"ld-linux-rv.so.1" instead of just "ld.so.1". With everything
else staying the same, that would give us a dynamic linker path
along the lines of "/lib64/lp64f/ld-linux-rv.so.1" for an RV64G
system.

Changing the linker path is of course an incompatible change, but
the RISC-V gcc support as submitted upstream already incorporates
a number of incompatible changes relative to the previous development
versions (including a different dynamic linker path than older
riscv-gcc versions), so moving from the previous development
versions to upstream results in incompatible binaries anyway;
therefore, IMHO, now is the time to get these things sorted out
once and for all.

Regards,
Karsten
Joseph Myers Jan. 15, 2017, 12:01 a.m. UTC | #4
On Sat, 14 Jan 2017, Karsten Merker wrote:

> So the actual ld.so binary should be called something like
> "ld-linux-rv.so.1" instead of just "ld.so.1". With everything
> else staying the same, that would give us a dynamic linker path
> along the lines of "/lib64/lp64f/ld-linux-rv.so.1" for an RV64G
> system.

For reference: a glibc port should include a proposed update to 
<https://sourceware.org/glibc/wiki/ABIList> listing all the supported ABI 
variants and corresponding dynamic linker paths.  (It should also include 
an update to glibc's build-many-glibcs.py to build at least one glibc 
configuration for each ABI variant.)

> Changing the linker path is of course an incompatible change, but
> RISC-V gcc support as submitted upstream now already incoporates
> a number of incompatible changes to the previous development
> versions (including a different dynamic linker path than older
> riscv-gcc versions), so moving from the previous development

And new glibc ports are expected to use the symbol version for the glibc 
release that actually gets the port (I'm supposing you might be aiming for 
2.26, although strictly the glibc release freeze needn't affect new ports 
that don't affect existing ports), not any older symbol version that might 
have been used in development.
Palmer Dabbelt Jan. 17, 2017, 5:37 a.m. UTC | #5
On Sat, 14 Jan 2017 02:05:27 PST (-0800), merker@debian.org wrote:
> Palmer Dabbelt wrote:
>
>> diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h
>> new file mode 100644
>> index 0000000..045f6cc
>> --- /dev/null
>> +++ b/gcc/config/riscv/linux.h
>> [...]
>>  +#define GLIBC_DYNAMIC_LINKER "/lib" XLEN_SPEC "/" ABI_SPEC "/ld.so.1"
>
> [with XLEN_SPEC being either 32 or 64 and ABI_SPEC being one of
>  ilp32, ilp32f, ilp32d, lp64, lp64f, lp64d]
>
> Hello everybody,
>
> I am not fully happy with the way the dynamic linker path (which
> gets embedded into every Linux executable built by gcc and
> therefore cannot be changed later) is defined here.  The dynamic
> linker path must be unique over all platforms for which a Linux
> port exists to make multiarch installations (i.e. having
> dynamically linked binaries for multiple architectures/ABIs in
> the same root filesystem) possible.  The path specifier as cited
> above contains nothing that makes the linker path inherently
> specific to RISC-V.  While there is AFAIK no other architecture
> that currently uses exactly this specific linker path model with
> the ABI specifier as a separate subdirectory (instead of encoding
> it into the filename), so that there technically isn't a naming
> conflict, I think that we should follow the convention of the
> other "modern" Linux architectures, which all include the
> architecture name in their linker path:
>
>   * arm64:    /lib/ld-linux-aarch64.so.1
>   * armhf:    /lib/ld-linux-armhf.so.3
>   * ia64:     /lib/ld-linux-ia64.so.2
>   * mips n64: /lib64/ld-linux-mipsn8.so.1
>   * nios2:    /lib/ld-linux-nios2.so.1
>   * x86_64:   /lib64/ld-linux-x86-64.so.2
>
> So the actual ld.so binary should be called something like
> "ld-linux-rv.so.1" instead of just "ld.so.1". With everything
> else staying the same, that would give us a dynamic linker path
> along the lines of "/lib64/lp64f/ld-linux-rv.so.1" for an RV64G
> system.
>
> Changing the linker path is of course an incompatible change, but
> RISC-V gcc support as submitted upstream now already incoporates
> a number of incompatible changes to the previous development
> versions (including a different dynamic linker path than older
> riscv-gcc versions), so moving from the previous development
> versions to upstream results in incompatible binaries anyway,
> therefore IMHO now is the time to get these things sorted out
> once for all.

I'm perfectly fine with this approach.  We have a policy of not maintaining ABI
compatibility until code is upstream and released (so we'll be ABI compatible
in binutils when 2.28 comes out soon) because we want to fix these sorts of
issues the right way rather than being stuck with something bad forever.

Just to be clear, the paths you'd like would look exactly like

  rv32-ilp32: /lib32/ilp32/ld-linux-rv.so.1
  rv64-lp64d: /lib64/lp64d/ld-linux-rv.so.1

?  If so, that should be a pretty straight-forward change.  I'll incorporate it
into our v2 patchset.  I'd also be OK with something like
"/lib64/lp64d/ld-linux-rv64imafd-lp64d.so.1", if for some reason that's better
(it looks a bit more like the other architectures to me).  I'm really OK with
pretty much anything here, so feel free to offer suggestions -- otherwise I'll
just go with what's listed above.

Thanks!
Karsten Merker Jan. 17, 2017, 8:48 p.m. UTC | #6
On Mon, Jan 16, 2017 at 09:37:15PM -0800, Palmer Dabbelt wrote:
> On Sat, 14 Jan 2017 02:05:27 PST (-0800), merker@debian.org wrote:
> > Palmer Dabbelt wrote:
> >
> >> diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h
> >> new file mode 100644
> >> index 0000000..045f6cc
> >> --- /dev/null
> >> +++ b/gcc/config/riscv/linux.h
> >> [...]
> >>  +#define GLIBC_DYNAMIC_LINKER "/lib" XLEN_SPEC "/" ABI_SPEC "/ld.so.1"
> >
> > [with XLEN_SPEC being either 32 or 64 and ABI_SPEC being one of
> >  ilp32, ilp32f, ilp32d, lp64, lp64f, lp64d]
[...]
> > I am not fully happy with the way the dynamic linker path (which
> > gets embedded into every Linux executable built by gcc and
> > therefore cannot be changed later) is defined here.  The dynamic
> > linker path must be unique over all platforms for which a Linux
> > port exists to make multiarch installations (i.e. having
> > dynamically linked binaries for multiple architectures/ABIs in
> > the same root filesystem) possible.  The path specifier as cited
> > above contains nothing that makes the linker path inherently
> > specific to RISC-V.  While there is AFAIK no other architecture
> > that currently uses exactly this specific linker path model with
> > the ABI specifier as a separate subdirectory (instead of encoding
> > it into the filename), so that there technically isn't a naming
> > conflict, I think that we should follow the convention of the
> > other "modern" Linux architectures, which all include the
> > architecture name in their linker path:
> >
> >   * arm64:    /lib/ld-linux-aarch64.so.1
> >   * armhf:    /lib/ld-linux-armhf.so.3
> >   * ia64:     /lib/ld-linux-ia64.so.2
> >   * mips n64: /lib64/ld-linux-mipsn8.so.1
> >   * nios2:    /lib/ld-linux-nios2.so.1
> >   * x86_64:   /lib64/ld-linux-x86-64.so.2
> >
> > So the actual ld.so binary should be called something like
> > "ld-linux-rv.so.1" instead of just "ld.so.1". With everything
> > else staying the same, that would give us a dynamic linker path
> > along the lines of "/lib64/lp64f/ld-linux-rv.so.1" for an RV64G
> > system.
[...]
> Just to be clear, the paths you'd like would look exactly like
> 
>   rv32-ilp32: /lib32/ilp32/ld-linux-rv.so.1
>   rv64-lp64d: /lib64/lp64d/ld-linux-rv.so.1
> 
> ?

Yes, that is what I had in mind.

> If so, that should be a pretty straight-forward change.  I'll
> incorporate it into our v2 patchset.  I'd also be OK with something
> like "/lib64/lp64d/ld-linux-rv64imafd-lp64d.so.1", if for some reason
> that's better (it looks a bit more like the other architectures to
> me).  I'm really OK with pretty much anything here, so feel free to
> offer suggestions -- otherwise I'll just go with what's listed above.

Including the ABI specifier twice, i.e. both as a subdirectory
name (.../lp64d/...) and as part of the ld.so filename
(ld-linux-rv64imafd-lp64d.so.1) doesn't seem useful to me. The
ABI specifier must be a part of the dynamic linker path, but
having it in there once should be enough :-).

Whether one encodes the ABI specifier inside the ld.so filename
or in the form of a subdirectory makes, AFAICS, no technical
difference and appears to me to be largely a matter of taste.  My
proposal above was just the smallest possible change against the
existing code that would fulfill my aim.

The other Linux platforms commonly don't use subdirectories and
instead encode the ABI specifier as part of the ld.so filename
(e.g. the "hf" part in /lib/ld-linux-armhf.so.3 specifies
hardfloat EABI, and the "n8" part in
/lib64/ld-linux-mipsn8.so.1 specifies a specific MIPS ABI variant),
while RISC-V currently encodes the ABI variant as a subdirectory name.

Stefan O'Rear wrote on the RISC-V sw-dev list that he would prefer to
encode the ABI specifier as part of the ld.so filename and put
everything in /lib instead of differentiating the directory by XLEN,
which would keep things largely similar to the other Linux platforms.
Based on your two examples above that would result in something like:

rv32-ilp32: /lib/ld-linux-rv32ilp32.so.1
rv64-lp64d: /lib/ld-linux-rv64lp64d.so.1

I am happy with any of these variants as long as the resulting
naming scheme encodes both platform and ABI and thereby makes
sure that the dynamic linker path is free of conflicts in a
multiarch installation.  Stefan's proposal is nearer to what
other Linux platforms do, but I assume that Andrew Waterman,
who has introduced the current RISC-V scheme with the ABI
subdirectories, has had a reason to do things the way they are.
Andrew, can you perhaps comment on this?



Regarding encoding the minimum ISA extensions into the ld.so
filename (i.e. the "64imafd" part in your example of
"ld-linux-rv64imafd-lp64d.so.1"): We had a disussion about that
on the RISC-V sw-dev list and the idea didn't find any
supporters.  The result of the discussion was to handle things
similar to other platforms and just define RV{32|64}IMA(C) as the
softfloat baseline and RV{32|64}G(C) as the hardfloat baseline
for running Linux and be done with it.

As some background information for people on the gcc lists who
don't follow the RISC-V development in detail: RISC-V uses a base
ISA (I=integer operations) plus a number of optional ISA
extensions (among them M=hardware multiply, A=atomic operations,
F=single precision float, D=double precision float, C=compressed
instruction set).  The combination of IMAFD is considered the
"general purpose" set and commonly abbreviated as "G".

The ABI identifier specifies the calling conventions, but not the
ISA extensions used by the code.  This means that e.g. binaries
for an RV32I-only CPU and binaries for an RV32IMA CPU that make
use of the additional M+A instructions would use the same ABI
identifier (ilp32), as the calling conventions are the same for
both.

Regards,
Karsten
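
For concreteness, the filename-encoding variant sketched above would amount to
something like the following in gcc/config/riscv/linux.h (illustrative only;
which spelling to use is exactly the open question in this thread):

  /* Encode both XLEN and ABI in the file name, giving e.g.
     /lib/ld-linux-rv32ilp32.so.1 and /lib/ld-linux-rv64lp64d.so.1,
     instead of using per-ABI subdirectories.  */
  #define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-rv" XLEN_SPEC ABI_SPEC ".so.1"
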
Andrew Waterman Jan. 17, 2017, 9:16 p.m. UTC | #7
On Tue, Jan 17, 2017 at 12:48 PM, Karsten Merker <merker@debian.org> wrote:
> On Mon, Jan 16, 2017 at 09:37:15PM -0800, Palmer Dabbelt wrote:
>> On Sat, 14 Jan 2017 02:05:27 PST (-0800), merker@debian.org wrote:
>> > Palmer Dabbelt wrote:
>> >
>> >> diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h
>> >> new file mode 100644
>> >> index 0000000..045f6cc
>> >> --- /dev/null
>> >> +++ b/gcc/config/riscv/linux.h
>> >> [...]
>> >>  +#define GLIBC_DYNAMIC_LINKER "/lib" XLEN_SPEC "/" ABI_SPEC "/ld.so.1"
>> >
>> > [with XLEN_SPEC being either 32 or 64 and ABI_SPEC being one of
>> >  ilp32, ilp32f, ilp32d, lp64, lp64f, lp64d]
> [...]
>> > I am not fully happy with the way the dynamic linker path (which
>> > gets embedded into every Linux executable built by gcc and
>> > therefore cannot be changed later) is defined here.  The dynamic
>> > linker path must be unique over all platforms for which a Linux
>> > port exists to make multiarch installations (i.e. having
>> > dynamically linked binaries for multiple architectures/ABIs in
>> > the same root filesystem) possible.  The path specifier as cited
>> > above contains nothing that makes the linker path inherently
>> > specific to RISC-V.  While there is AFAIK no other architecture
>> > that currently uses exactly this specific linker path model with
>> > the ABI specifier as a separate subdirectory (instead of encoding
>> > it into the filename), so that there technically isn't a naming
>> > conflict, I think that we should follow the convention of the
>> > other "modern" Linux architectures, which all include the
>> > architecture name in their linker path:
>> >
>> >   * arm64:    /lib/ld-linux-aarch64.so.1
>> >   * armhf:    /lib/ld-linux-armhf.so.3
>> >   * ia64:     /lib/ld-linux-ia64.so.2
>> >   * mips n64: /lib64/ld-linux-mipsn8.so.1
>> >   * nios2:    /lib/ld-linux-nios2.so.1
>> >   * x86_64:   /lib64/ld-linux-x86-64.so.2
>> >
>> > So the actual ld.so binary should be called something like
>> > "ld-linux-rv.so.1" instead of just "ld.so.1". With everything
>> > else staying the same, that would give us a dynamic linker path
>> > along the lines of "/lib64/lp64f/ld-linux-rv.so.1" for an RV64G
>> > system.
> [...]
>> Just to be clear, the paths you'd like would look exactly like
>>
>>   rv32-ilp32: /lib32/ilp32/ld-linux-rv.so.1
>>   rv64-lp64d: /lib64/lp64d/ld-linux-rv.so.1
>>
>> ?
>
> Yes, that is what I had in mind.
>
>> If so, that should be a pretty straight-forward change.  I'll
>> incorporate it into our v2 patchset.  I'd also be OK with something
>> like "/lib64/lp64d/ld-linux-rv64imafd-lp64d.so.1", if for some reason
>> that's better (it looks a bit more like the other architectures to
>> me).  I'm really OK with pretty much anything here, so feel free to
>> offer suggestions -- otherwise I'll just go with what's listed above.
>
> Including the ABI specifier twice, i.e. both as a subdirectory
> name (.../lp64d/...) and as part of the ld.so filename
> (ld-linux-rv64imafd-lp64d.so.1) doesn't seem useful to me. The
> ABI specifier must be a part of the dynamic linker path, but
> having it in there once should be enough :-).
>
> Whether one encodes the ABI specifier inside the ld.so filename
> or in the form of a subdirectory AFAICS doesn't make any
> technical difference and appears to me largely as a matter of
> taste.  My proposal above was just the minimalst-possible change
> against the existing code that would fullfill my aim.
>
> The other Linux platforms commonly don't use subdirectories and
> instead encode the ABI specifier as part of the ld.so filename
> (e.g. the "hf" part in /lib/ld-linux-armhf.so.3 specifies
> hardfloat EABI, and the "n8" part in
> /lib64/ld-linux-mipsn8.so.1 specifies a specific MIPS ABI variant),
> while RISC-V currently encodes the ABI variant as a subdirectory name.
>
> Stefan O'Rear wrote on the RISC-V sw-dev list that he would prefer to
> encode the ABI specifier as part of the ld.so filename and put
> everything in /lib instead of differentiating the directory by XLEN,
> which would keep things largely similar to the other Linux platforms.
> Based on your two examples above that would result in something like:
>
> rv32-ilp32: /lib/ld-linux-rv32ilp32.so.1
> rv64-lp64d: /lib/ld-linux-rv64lp64d.so.1
>
> I am happy with any of these variants as long as the resulting
> naming scheme encodes both platform and ABI and thereby makes
> sure that the dynamic linker path is free of conflicts in a
> multiarch installation.  Stefan's proposal is nearer to what
> other Linux platforms do, but I assume that Andrew Waterman,
> who has introduced the current RISC-V scheme with the ABI
> subdirectories, has had a reason to do things the way they are.
> Andrew, can you perhaps comment on this?

Thanks for taking the time to ponder this.  I agree that the important
point is that the ABI (hence XLEN) is encoded somewhere in the
filename--and that once is enough :-).

We went with the /libXX/YY/ approach because, on a multilib system,
the system libraries also need to be distinguished by ABI.  It seemed
most natural to us to handle ld.so and e.g. libc.so in a consistent
manner.  However, something along the lines of your proposal, like
/lib/ld-linux-riscvXX-YY.so.1, is also perfectly serviceable, and has
the advantage of looking more similar to other Linux systems.

>
>
>
> Regarding encoding the minimum ISA extensions into the ld.so
> filename (i.e. the "64imafd" part in your example of
> "ld-linux-rv64imafd-lp64d.so.1"): We had a disussion about that
> on the RISC-V sw-dev list and the idea didn't find any
> supporters.  The result of the discussion was to handle things
> similar to other platforms and just define RV{32|64}IMA(C) as the
> softfloat baseline and RV{32|64}G(C) as the hardfloat baseline
> for running Linux and be done with it.

Also, ld.so can be compiled to the least-common denominator ISA that
can implement the corresponding ABI.  lp64d implies that the F and D
extensions must be present, whereas C (and V and ...) can be omitted
altogether for ld.so's purposes.

>
> As some background information for people on the gcc lists who
> don't follow the RISC-V development in detail: RISC-V uses a base
> ISA (I=integer operations) plus a number of optional ISA
> extensions (among them M=hardware multiply, A=atomic operations,
> F=single precision float, D=double precision float, C=compressed
> instruction set).  The combination of IMAFD is considered the
> "general purpose" set and commonly abbreviated as "G".
>
> The ABI identifier specifies the calling conventions, but not the
> ISA extensions used by the code.  This means that e.g. binaries
> for an RV32I-only CPU and binaries for an RV32IMA CPU that make
> use of the additional M+A instructions would use the same ABI
> identifier (ilp32), as the calling conventions are the same for
> both.
>
> Regards,
> Karsten
Richard Henderson Jan. 21, 2017, 6:41 a.m. UTC | #8
On 01/11/2017 06:30 PM, Palmer Dabbelt wrote:
> +(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
> +  "A floating-point register (if available).")
> +

I know this is the Traditional Way, but I do wonder if using the new enable 
attribute on the alternatives would be better.  No need to change anything; just 
something that makes me wonder.

> +(define_constraint "Q"
> +  "@internal"
> +  (match_operand 0 "const_arith_operand"))

How does this differ from "I"?

> +
> +(define_memory_constraint "W"
> +  "@internal
> +   A memory address based on a member of @code{BASE_REG_CLASS}."
> +  (and (match_code "mem")
> +       (match_operand 0 "memory_operand")))

How does this differ (materially) from "m"?  Or from just

   (match_operand 0 "memory_operand")

for that matter?


> +;;........................
> +;; DI -> SI optimizations
> +;;........................
> +
> +;; Simplify (int)(a + 1), etc.
> +(define_peephole2
> +  [(set (match_operand:DI 0 "register_operand")
> +	(match_operator:DI 4 "modular_operator"
> +	  [(match_operand:DI 1 "register_operand")
> +	   (match_operand:DI 2 "arith_operand")]))
> +   (set (match_operand:SI 3 "register_operand")
> +	(truncate:SI (match_dup 0)))]
> +  "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))
> +   && (GET_CODE (operands[4]) != ASHIFT || (CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 32))"
> +  [(set (match_dup 3)
> +	  (truncate:SI
> +	     (match_op_dup:DI 4
> +	       [(match_operand:DI 1 "register_operand")
> +		(match_operand:DI 2 "arith_operand")])))])

I take from this that combine + ree fail to do the job?

I must say I'm less than thrilled about the use of truncate instead of simply 
properly representing the sign-extensions that are otherwise required by the 
ABI.  RISCV is unlike MIPS in that the 32-bit operations (addw et al) do not 
require sign-extended inputs in order to produce a correct output value.

Consider Alpha as a counter-example to MIPS, where the ABI requires 
sign-extensions at function edges and the ISA requires sign-extensions for 
comparisons.
>
> +(define_insn "*local_pic_storesi<mode>"
> +  [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
> +	(match_operand:ANYF 1 "register_operand" "f"))
> +   (clobber (reg:SI T0_REGNUM))]
> +  "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
> +  "<store>\t%1,%0,t0"
> +  [(set (attr "length") (const_int 8))])

Use match_scratch so that you need not hard-code T0.
And likewise for the other pic stores.

> +(define_predicate "const_0_operand"
> +  (and (match_code "const_int,const_double,const_vector")
> +       (match_test "op == CONST0_RTX (GET_MODE (op))")))

Probably want to support const_wide_int.

> +  /* Don't handle multi-word moves this way; we don't want to introduce
> +     the individual word-mode moves until after reload.  */
> +  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
> +    return false;

Why not?  Do the subreg passes not do a good job?  I would expect it to do well 
on a target like RISCV.

> +(define_predicate "move_operand"
> +  (match_operand 0 "general_operand")

Normally this is handled by TARGET_LEGITIMATE_CONSTANT_P.

> +/* Target CPU builtins.  */
> +#define TARGET_CPU_CPP_BUILTINS()					\
> +  do									\
> +    {									\
> +      builtin_define ("__riscv");					\

Perhaps better to move this to riscv-c.c?

> +      if (TARGET_MUL)							\
> +	builtin_define ("__riscv_mul");					\
> +      if (TARGET_DIV)							\
> +	builtin_define ("__riscv_div");					\
> +      if (TARGET_DIV && TARGET_MUL)					\
> +	builtin_define ("__riscv_muldiv");				\

Out of curiosity, why are these split when the M extension doesn't do so?

> +/* Define this macro if it is advisable to hold scalars in registers
> +   in a wider mode than that declared by the program.  In such cases,
> +   the value is constrained to be within the bounds of the declared
> +   type, but kept valid in the wider mode.  The signedness of the
> +   extension may differ from that of the type.  */
> +
> +#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE)	\
> +  if (GET_MODE_CLASS (MODE) == MODE_INT		\
> +      && GET_MODE_SIZE (MODE) < 4)		\
> +    {						\
> +      (MODE) = Pmode;				\
> +    }

I think you want to force unsignedp false for SImode when extending to DImode.
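
A sketch of one way to do that, extending the quoted macro so that SImode is
promoted to a signed word_mode value on RV64 (the < 4 test becomes a word-size
test; this is an illustration, not the committed definition):

  #define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE)           \
    if (GET_MODE_CLASS (MODE) == MODE_INT               \
        && GET_MODE_SIZE (MODE) < UNITS_PER_WORD)       \
      {                                                 \
        /* addw et al. sign-extend, as does the ABI.  */ \
        if ((MODE) == SImode)                           \
          (UNSIGNEDP) = 0;                              \
        (MODE) = word_mode;                             \
      }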

> +/* a0-a7, t0-a6, fa0-fa7, and ft0-ft11 are volatile across calls.
> +   The call RTLs themselves clobber ra.  */
> +
> +#define CALL_USED_REGISTERS						\
> +{ /* General registers.  */						\
> +  1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
> +  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
> +  /* Floating-point registers.  */					\
> +  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
> +  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
> +  /* Others.  */							\
> +  1, 1									\
> +}
> +
> +#define CALL_REALLY_USED_REGISTERS					\
> +{ /* General registers.  */						\
> +  1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
> +  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
> +  /* Floating-point registers.  */					\
> +  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
> +  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
> +  /* Others.  */							\
> +  1, 1									\
> +}

You shouldn't have to replicate these if they're the same.
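
If the two sets are intended to stay identical, one way to avoid maintaining
them twice is to define the second in terms of the first (a sketch; whether the
second macro can simply be dropped instead depends on GCC's default handling of
CALL_REALLY_USED_REGISTERS and is worth checking):

  /* The really-call-clobbered set is the same as the call-clobbered set,
     so mirror it instead of duplicating the table.  */
  #define CALL_REALLY_USED_REGISTERS CALL_USED_REGISTERS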

> +/* Return coprocessor number from register number.  */
> +
> +#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) 				\
> +  (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2'			\
> +   : COP3_REG_P (REGNO) ? '3' : '?')
> +

MIPS cruft.

> +typedef struct {
> +  /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
> +  unsigned int num_gprs;
> +
> +  /* Number of floating-point registers used so far, likewise.  */
> +  unsigned int num_fprs;
> +} CUMULATIVE_ARGS;

Related to something I mentioned for patch 1: why isn't this just a single int?

> +(define_c_enum "unspec" [
> +  ;; GP manipulation.
> +  UNSPEC_EH_RETURN
> +
> +  ;; Symbolic accesses.  The order of this list must match that of
> +  ;; enum riscv_symbol_type in riscv-protos.h.
> +  UNSPEC_ADDRESS_FIRST
> +  UNSPEC_PCREL
> +  UNSPEC_LOAD_GOT
> +  UNSPEC_TLS
> +  UNSPEC_TLS_LE
> +  UNSPEC_TLS_IE
> +  UNSPEC_TLS_GD
> +
> +  UNSPEC_AUIPC
> +
> +  ;; Register save and restore.
> +  UNSPEC_GPR_SAVE
> +  UNSPEC_GPR_RESTORE
> +
> +  ;; Blockage and synchronisation.
> +  UNSPEC_BLOCKAGE
> +  UNSPEC_FENCE
> +  UNSPEC_FENCE_I
> +])

Some of these are unspec_volatile, and so should be in define_c_enum "unspecv".

> +(define_expand "add<mode>3"
> +  [(set (match_operand:GPR 0 "register_operand")
> +	(plus:GPR (match_operand:GPR 1 "register_operand")
> +		  (match_operand:GPR 2 "arith_operand")))]
> +  "")
> +
> +(define_insn "*addsi3"
> +  [(set (match_operand:SI 0 "register_operand" "=r,r")
> +	(plus:SI (match_operand:GPR 1 "register_operand" "r,r")
> +		  (match_operand:GPR2 2 "arith_operand" "r,Q")))]
> +  ""
> +  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
> +  [(set_attr "type" "arith")
> +   (set_attr "mode" "SI")])

Err... mismatched input modes?  That's wrong.

> +
> +(define_insn "*adddi3"
> +  [(set (match_operand:DI 0 "register_operand" "=r,r")
> +	(plus:DI (match_operand:DI 1 "register_operand" "r,r")
> +		  (match_operand:DI 2 "arith_operand" "r,Q")))]
> +  "TARGET_64BIT"
> +  "add\t%0,%1,%2"
> +  [(set_attr "type" "arith")
> +   (set_attr "mode" "DI")])

Remove the addsi3 weirdness and it doesn't appear that you need the define_expand.

Likewise for sub and mul.

> +(define_expand "<u>mulditi3"
> +  [(set (match_operand:TI 0 "register_operand")
> +	(mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
> +		 (any_extend:TI (match_operand:DI 2 "register_operand"))))]
> +  "TARGET_MUL && TARGET_64BIT"
> +{
> +  rtx low = gen_reg_rtx (DImode);
> +  emit_insn (gen_muldi3 (low, operands[1], operands[2]));
> +
> +  rtx high = gen_reg_rtx (DImode);
> +  emit_insn (gen_<u>muldi3_highpart (high, operands[1], operands[2]));
> +
> +  emit_move_insn (gen_lowpart (DImode, operands[0]), low);
> +  emit_move_insn (gen_highpart (DImode, operands[0]), high);
> +  DONE;
> +})

FWIW, the manual recommends the sequence MULHU; MUL if you need both the high 
and low parts.  Whether that implies that you keep those together as a unit 
until final output, or whether you simply emit them in the other order, one 
would need to consult an actual hardware manual.
>
> +(define_insn "floatsidf2"
> +  [(set (match_operand:DF 0 "register_operand" "=f")
> +	(float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
> +  "TARGET_DOUBLE_FLOAT"
> +  "fcvt.d.w\t%0,%z1"
> +  [(set_attr "type"	"fcvt")
> +   (set_attr "mode"	"DF")
> +   (set_attr "cnv_mode"	"I2D")])
> +
> +
> +(define_insn "floatdidf2"
> +  [(set (match_operand:DF 0 "register_operand" "=f")
> +	(float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
> +  "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
> +  "fcvt.d.l\t%0,%z1"
> +  [(set_attr "type"	"fcvt")
> +   (set_attr "mode"	"DF")
> +   (set_attr "cnv_mode"	"I2D")])
> +

Merge with GPR?  And likewise for other fp conversions.

> +;; Instructions for adding the low 16 bits of an address to a register.

paste-o: s/16/12/

> +(define_insn "*low<mode>"
> +  [(set (match_operand:P 0 "register_operand" "=r")
> +	(lo_sum:P (match_operand:P 1 "register_operand" "r")
> +		  (match_operand:P 2 "symbolic_operand" "")))]
> +  ""
> +  "add\t%0,%1,%R2"

Really add, not addi?

> +;; Unlike most other insns, the move insns can't be split with '
> +;; different predicates, because register spilling and other parts of
> +;; the compiler, have memoized the insn number already.

This comment is incorrect.  In general you can't split non-moves this way either.

> +;; HImode constant generation; see riscv_move_integer for details.
> +;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION.
> +
> +(define_insn "add<mode>hi3"
> +  [(set (match_operand:HI 0 "register_operand" "=r,r")
> +	(plus:HI (match_operand:HISI 1 "register_operand" "r,r")
> +		  (match_operand:HISI 2 "arith_operand" "r,Q")))]
> +  ""
> +  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
> +  [(set_attr "type" "arith")
> +   (set_attr "mode" "HI")])
> +
> +(define_insn "xor<mode>hi3"
> +  [(set (match_operand:HI 0 "register_operand" "=r,r")
> +	(xor:HI (match_operand:HISI 1 "register_operand" "r,r")
> +		  (match_operand:HISI 2 "arith_operand" "r,Q")))]
> +  ""
> +  "xor\t%0,%1,%2"
> +  [(set_attr "type" "logical")
> +   (set_attr "mode" "HI")])

I see from the comment you want to use these for HImode constant generation. 
However, I do wonder if it wouldn't be better to name these such that they 
don't get used during normal rtl expansion.

Why do you support HI and QImode moves into FP registers?

> +;; Expand in-line code to clear the instruction cache between operand[0] and
> +;; operand[1].
> +(define_expand "clear_cache"
> +  [(match_operand 0 "pmode_register_operand")
> +   (match_operand 1 "pmode_register_operand")]
> +  ""
> +  "
> +{
> +  emit_insn (gen_fence_i ());
> +  DONE;
> +}")

Do you need a FENCE before the FENCE.I?

> +(define_insn "call_value_multiple_internal"
> +  [(set (match_operand 0 "register_operand" "")
> +	(call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
> +	      (match_operand 2 "" "")))
> +   (set (match_operand 3 "register_operand" "")
> +	(call (mem:SI (match_dup 1))
> +	      (match_dup 2)))

Any reason for this?  Your return value registers are sequential.  The normal 
thing to do is just use e.g. (reg:TI 10).

> +    case MEMMODEL_SEQ_CST:
> +    case MEMMODEL_SYNC_SEQ_CST:
> +    case MEMMODEL_ACQ_REL:
> +      return "fence rw,rw";
> +    case MEMMODEL_ACQUIRE:
> +    case MEMMODEL_SYNC_ACQUIRE:
> +    case MEMMODEL_CONSUME:
> +      return "fence r,rw";
> +    case MEMMODEL_RELEASE:
> +    case MEMMODEL_SYNC_RELEASE:
> +      return "fence rw,w";

The memory models say whether any memory operation can cross the barrier, not 
what kind of operations might cross.  I think you have to use rw,rw always.


r~
Richard Henderson Jan. 21, 2017, 6:45 a.m. UTC | #9
On 01/17/2017 01:16 PM, Andrew Waterman wrote:
> We went with the /libXX/YY/ approach because, on a multilib system,
> the system libraries also need to be distinguished by ABI.  It seemed
> most natural to us to handle ld.so and e.g. libc.so in a consistent
> manner.  However, something along the lines of your proposal, like
> /lib/ld-linux-riscvXX-YY.so.1, is also perfectly serviceable, and has
> the advantage of looking more similar to other Linux systems.

Other Linux systems really don't attempt to support more than 2 or 3 ABIs.  I 
would keep them separated by directory.


r~
Andrew Waterman Jan. 31, 2017, 1:10 a.m. UTC | #10
On Fri, Jan 20, 2017 at 10:41 PM, Richard Henderson <rth@redhat.com> wrote:
> On 01/11/2017 06:30 PM, Palmer Dabbelt wrote:
>>
>> +(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
>> +  "A floating-point register (if available).")
>> +
>
>
> I know this is the Traditional Way, but I do wonder if using the new enable
> attribute on the alternatives would be better.  No need to change change;
> just something that makes me wonder.

Yeah, we could look into that later.

>
>> +(define_constraint "Q"
>> +  "@internal"
>> +  (match_operand 0 "const_arith_operand"))
>
>
> How does this differ from "I"?
>
>> +
>> +(define_memory_constraint "W"
>> +  "@internal
>> +   A memory address based on a member of @code{BASE_REG_CLASS}."
>> +  (and (match_code "mem")
>> +       (match_operand 0 "memory_operand")))
>
>
> How does this differ (materially) from "m"?  Or from just
>
>   (match_operand 0 "memory_operand")
>
> for that matter?
>
>
>> +;;........................
>> +;; DI -> SI optimizations
>> +;;........................
>> +
>> +;; Simplify (int)(a + 1), etc.
>> +(define_peephole2
>> +  [(set (match_operand:DI 0 "register_operand")
>> +       (match_operator:DI 4 "modular_operator"
>> +         [(match_operand:DI 1 "register_operand")
>> +          (match_operand:DI 2 "arith_operand")]))
>> +   (set (match_operand:SI 3 "register_operand")
>> +       (truncate:SI (match_dup 0)))]
>> +  "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) ||
>> peep2_reg_dead_p (2, operands[0]))
>> +   && (GET_CODE (operands[4]) != ASHIFT || (CONST_INT_P (operands[2]) &&
>> INTVAL (operands[2]) < 32))"
>> +  [(set (match_dup 3)
>> +         (truncate:SI
>> +            (match_op_dup:DI 4
>> +              [(match_operand:DI 1 "register_operand")
>> +               (match_operand:DI 2 "arith_operand")])))])
>
>
> I take from this that combine + ree fail to do the job?
>
> I must say I'm less than thrilled about the use of truncate instead of
> simply properly representing the sign-extensions that are otherwise required
> by the ABI.  RISCV is unlike MIPS in that the 32-bit operations (addw et al)
> do not require sign-extended inputs in order to produce a correct output
> value.
>
> Consider Alpha as a counter-example to MIPS, where the ABI requires
> sign-extensions at function edges and the ISA requires sign-extensions for
> comparisons.

We've revamped the port to use TRULY_NOOP_TRUNCATION.

>>
>>
>> +(define_insn "*local_pic_storesi<mode>"
>> +  [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
>> +       (match_operand:ANYF 1 "register_operand" "f"))
>> +   (clobber (reg:SI T0_REGNUM))]
>> +  "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO
>> (operands[0])"
>> +  "<store>\t%1,%0,t0"
>> +  [(set (attr "length") (const_int 8))])
>
>
> Use match_scratch so that you need not hard-code T0.
> And likewise for the other pic stores.
>
>> +(define_predicate "const_0_operand"
>> +  (and (match_code "const_int,const_double,const_vector")
>> +       (match_test "op == CONST0_RTX (GET_MODE (op))")))
>
>
> Probably want to support const_wide_int.
>
>> +  /* Don't handle multi-word moves this way; we don't want to introduce
>> +     the individual word-mode moves until after reload.  */
>> +  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
>> +    return false;
>
>
> Why not?  Do the subreg passes not do a good job?  I would expect it to do
> well on a target like RISCV.
>
>> +(define_predicate "move_operand"
>> +  (match_operand 0 "general_operand")
>
>
> Normally this is handled by TARGET_LEGITIMATE_CONSTANT_P.
>
>> +/* Target CPU builtins.  */
>> +#define TARGET_CPU_CPP_BUILTINS()                                      \
>> +  do                                                                   \
>> +    {                                                                  \
>> +      builtin_define ("__riscv");                                      \
>
>
> Perhaps better to move this to riscv-c.c?
>
>> +      if (TARGET_MUL)                                                  \
>> +       builtin_define ("__riscv_mul");                                 \
>> +      if (TARGET_DIV)                                                  \
>> +       builtin_define ("__riscv_div");                                 \
>> +      if (TARGET_DIV && TARGET_MUL)                                    \
>> +       builtin_define ("__riscv_muldiv");                              \
>
>
> Out of curiosity, why are these split when the M extension doesn't do so?

Implementations that have MUL but not DIV cannot claim conformance to
the M extension, but they do exist, and it's easy enough for us to
support them.  AFAIK, they are all FPGA soft cores, where MUL maps
very cheaply to the DSP macros, but DIV is relatively more expensive.

>
>> +/* Define this macro if it is advisable to hold scalars in registers
>> +   in a wider mode than that declared by the program.  In such cases,
>> +   the value is constrained to be within the bounds of the declared
>> +   type, but kept valid in the wider mode.  The signedness of the
>> +   extension may differ from that of the type.  */
>> +
>> +#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE)    \
>> +  if (GET_MODE_CLASS (MODE) == MODE_INT                \
>> +      && GET_MODE_SIZE (MODE) < 4)             \
>> +    {                                          \
>> +      (MODE) = Pmode;                          \
>> +    }
>
>
> I think you want to force unsignedp false for SImode when extending to
> DImode.
>
>> +/* a0-a7, t0-a6, fa0-fa7, and ft0-ft11 are volatile across calls.
>> +   The call RTLs themselves clobber ra.  */
>> +
>> +#define CALL_USED_REGISTERS                                            \
>> +{ /* General registers.  */                                            \
>> +  1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,                      \
>> +  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,                      \
>> +  /* Floating-point registers.  */                                     \
>> +  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,                      \
>> +  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,                      \
>> +  /* Others.  */                                                       \
>> +  1, 1                                                                 \
>> +}
>> +
>> +#define CALL_REALLY_USED_REGISTERS                                     \
>> +{ /* General registers.  */                                            \
>> +  1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,                      \
>> +  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,                      \
>> +  /* Floating-point registers.  */                                     \
>> +  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,                      \
>> +  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,                      \
>> +  /* Others.  */                                                       \
>> +  1, 1                                                                 \
>> +}
>
>
> You shouldn't have to replicate these if they're the same.
>
>> +/* Return coprocessor number from register number.  */
>> +
>> +#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO)                              \
>> +  (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2'                 \
>> +   : COP3_REG_P (REGNO) ? '3' : '?')
>> +
>
>
> MIPS cruft.
>
>> +typedef struct {
>> +  /* Number of integer registers used so far, up to
>> MAX_ARGS_IN_REGISTERS. */
>> +  unsigned int num_gprs;
>> +
>> +  /* Number of floating-point registers used so far, likewise.  */
>> +  unsigned int num_fprs;
>> +} CUMULATIVE_ARGS;
>
>
> Related to something I mentioned for patch 1: why isn't this just a single
> int?
>
>
>> +(define_c_enum "unspec" [
>> +  ;; GP manipulation.
>> +  UNSPEC_EH_RETURN
>> +
>> +  ;; Symbolic accesses.  The order of this list must match that of
>> +  ;; enum riscv_symbol_type in riscv-protos.h.
>> +  UNSPEC_ADDRESS_FIRST
>> +  UNSPEC_PCREL
>> +  UNSPEC_LOAD_GOT
>> +  UNSPEC_TLS
>> +  UNSPEC_TLS_LE
>> +  UNSPEC_TLS_IE
>> +  UNSPEC_TLS_GD
>> +
>> +  UNSPEC_AUIPC
>> +
>> +  ;; Register save and restore.
>> +  UNSPEC_GPR_SAVE
>> +  UNSPEC_GPR_RESTORE
>> +
>> +  ;; Blockage and synchronisation.
>> +  UNSPEC_BLOCKAGE
>> +  UNSPEC_FENCE
>> +  UNSPEC_FENCE_I
>> +])
>
>
> Some of these are unspec_volatile, and so should be in define_c_enum
> "unspecv".
>
>> +(define_expand "add<mode>3"
>> +  [(set (match_operand:GPR 0 "register_operand")
>> +       (plus:GPR (match_operand:GPR 1 "register_operand")
>> +                 (match_operand:GPR 2 "arith_operand")))]
>> +  "")
>> +
>> +(define_insn "*addsi3"
>> +  [(set (match_operand:SI 0 "register_operand" "=r,r")
>> +       (plus:SI (match_operand:GPR 1 "register_operand" "r,r")
>> +                 (match_operand:GPR2 2 "arith_operand" "r,Q")))]
>> +  ""
>> +  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
>> +  [(set_attr "type" "arith")
>> +   (set_attr "mode" "SI")])
>
>
> Err... mismatched input modes?  That's wrong.
>
>> +
>> +(define_insn "*adddi3"
>> +  [(set (match_operand:DI 0 "register_operand" "=r,r")
>> +       (plus:DI (match_operand:DI 1 "register_operand" "r,r")
>> +                 (match_operand:DI 2 "arith_operand" "r,Q")))]
>> +  "TARGET_64BIT"
>> +  "add\t%0,%1,%2"
>> +  [(set_attr "type" "arith")
>> +   (set_attr "mode" "DI")])
>
>
> Remove the addsi3 weirdness and it doesn't appear that you need the
> define_expand.
>
> Likewise for sub and mul.
>
>> +(define_expand "<u>mulditi3"
>> +  [(set (match_operand:TI 0 "register_operand")
>> +       (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
>> +                (any_extend:TI (match_operand:DI 2
>> "register_operand"))))]
>> +  "TARGET_MUL && TARGET_64BIT"
>> +{
>> +  rtx low = gen_reg_rtx (DImode);
>> +  emit_insn (gen_muldi3 (low, operands[1], operands[2]));
>> +
>> +  rtx high = gen_reg_rtx (DImode);
>> +  emit_insn (gen_<u>muldi3_highpart (high, operands[1], operands[2]));
>> +
>> +  emit_move_insn (gen_lowpart (DImode, operands[0]), low);
>> +  emit_move_insn (gen_highpart (DImode, operands[0]), high);
>> +  DONE;
>> +})
>
>
> FWIW, the manual recommends the sequence MULHU; MUL if you need both the
> high and low parts.  Whether that implies that you keep those together as a
> unit until final output, or whether you simply emit them in the other order,
> one would need to consult an actual hardware manual.

The manual means to suggest they should be kept together.  However, no
implementations I know of take advantage of this fusion opportunity,
and so scheduling them apart is actually preferable.  Once such an
implementation exists, we can make this decision dependent on -mtune.

>>
>>
>> +(define_insn "floatsidf2"
>> +  [(set (match_operand:DF 0 "register_operand" "=f")
>> +       (float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
>> +  "TARGET_DOUBLE_FLOAT"
>> +  "fcvt.d.w\t%0,%z1"
>> +  [(set_attr "type"    "fcvt")
>> +   (set_attr "mode"    "DF")
>> +   (set_attr "cnv_mode"        "I2D")])
>> +
>> +
>> +(define_insn "floatdidf2"
>> +  [(set (match_operand:DF 0 "register_operand" "=f")
>> +       (float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
>> +  "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
>> +  "fcvt.d.l\t%0,%z1"
>> +  [(set_attr "type"    "fcvt")
>> +   (set_attr "mode"    "DF")
>> +   (set_attr "cnv_mode"        "I2D")])
>> +
>
>
> Merge with GPR?  And likewise for other fp conversions.
>
>> +;; Instructions for adding the low 16 bits of an address to a register.
>
>
> paste-o: s/16/12/
>
>> +(define_insn "*low<mode>"
>> +  [(set (match_operand:P 0 "register_operand" "=r")
>> +       (lo_sum:P (match_operand:P 1 "register_operand" "r")
>> +                 (match_operand:P 2 "symbolic_operand" "")))]
>> +  ""
>> +  "add\t%0,%1,%R2"
>
>
> Really add, not addi?
>
>> +;; Unlike most other insns, the move insns can't be split with '
>> +;; different predicates, because register spilling and other parts of
>> +;; the compiler, have memoized the insn number already.
>
>
> This comment is incorrect.  In general you can't split non-moves this way
> either.
>
>> +;; HImode constant generation; see riscv_move_integer for details.
>> +;; si+si->hi without truncation is legal because of
>> TRULY_NOOP_TRUNCATION.
>> +
>> +(define_insn "add<mode>hi3"
>> +  [(set (match_operand:HI 0 "register_operand" "=r,r")
>> +       (plus:HI (match_operand:HISI 1 "register_operand" "r,r")
>> +                 (match_operand:HISI 2 "arith_operand" "r,Q")))]
>> +  ""
>> +  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
>> +  [(set_attr "type" "arith")
>> +   (set_attr "mode" "HI")])
>> +
>> +(define_insn "xor<mode>hi3"
>> +  [(set (match_operand:HI 0 "register_operand" "=r,r")
>> +       (xor:HI (match_operand:HISI 1 "register_operand" "r,r")
>> +                 (match_operand:HISI 2 "arith_operand" "r,Q")))]
>> +  ""
>> +  "xor\t%0,%1,%2"
>> +  [(set_attr "type" "logical")
>> +   (set_attr "mode" "HI")])
>
>
> I see from the comment you want to use these for HImode constant generation.
> However, I do wonder if it wouldn't be better to name these such that they
> don't get used during normal rtl expansion.
>
> Why do you support HI and QImode moves into FP registers?
>
>> +;; Expand in-line code to clear the instruction cache between operand[0]
>> and
>> +;; operand[1].
>> +(define_expand "clear_cache"
>> +  [(match_operand 0 "pmode_register_operand")
>> +   (match_operand 1 "pmode_register_operand")]
>> +  ""
>> +  "
>> +{
>> +  emit_insn (gen_fence_i ());
>> +  DONE;
>> +}")
>
>
> Do you need a FENCE before the FENCE.I?

It's actually not clear to me what the semantics of clear_cache are
for multiprocessors.  Can you shed some light?

If thread A modifies code and then sets a flag, then thread B reads
the flag and executes a FENCE.I, then thread A needs a FENCE before
setting the flag and thread B needs a fence before the FENCE.I.  But,
is it not the software's responsibility to insert both fences, rather
than assuming one of the fences is folded into clear_cache?

>
>> +(define_insn "call_value_multiple_internal"
>> +  [(set (match_operand 0 "register_operand" "")
>> +       (call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
>> +             (match_operand 2 "" "")))
>> +   (set (match_operand 3 "register_operand" "")
>> +       (call (mem:SI (match_dup 1))
>> +             (match_dup 2)))
>
>
> Any reason for this?  Your return value registers are sequential.  The
> normal thing to do is just use e.g. (reg:TI 10).

I think we'd need different patterns for mixed int/FP struct returns
(which use a0 and fa0) if we took that approach.

>
>> +    case MEMMODEL_SEQ_CST:
>> +    case MEMMODEL_SYNC_SEQ_CST:
>> +    case MEMMODEL_ACQ_REL:
>> +      return "fence rw,rw";
>> +    case MEMMODEL_ACQUIRE:
>> +    case MEMMODEL_SYNC_ACQUIRE:
>> +    case MEMMODEL_CONSUME:
>> +      return "fence r,rw";
>> +    case MEMMODEL_RELEASE:
>> +    case MEMMODEL_SYNC_RELEASE:
>> +      return "fence rw,w";
>
>
> The memory models say whether any memory operation can cross the barrier,
> not what kind of operations might cross.  I think you have to use rw,rw
> always.
>
>
> r~
Richard Henderson Jan. 31, 2017, 6:32 p.m. UTC | #11
On 01/30/2017 05:10 PM, Andrew Waterman wrote:
>>> +(define_expand "clear_cache"
>>> +  [(match_operand 0 "pmode_register_operand")
>>> +   (match_operand 1 "pmode_register_operand")]
>>> +  ""
>>> +  "
>>> +{
>>> +  emit_insn (gen_fence_i ());
>>> +  DONE;
>>> +}")
>>
>>
>> Do you need a FENCE before the FENCE.I?
> 
> It's actually not clear to me what the semantics of clear_cache are
> for multiprocessors.  Can you shed some light?
> 
> If thread A does modifies code then sets a flag, then thread B reads
> the flag and executes a FENCE.I, then thread A needs a FENCE before
> setting the flag and thread B needs a fence before the FENCE.I.  But,
> is it not the software's responsibility to insert both fences, rather
> than assuming one of the fences is folded into clear_cache?

Your introduction of "flag" confuses the issue.

Having re-read the description in section 2.7, I see that FENCE.I is
thread-local and is all that is required for a single thread to sync its own I
and D caches.  I think perhaps I'd mis-read or mis-remembered before.

Practically speaking, I'm not sure we have put any real thought into what
needs to happen for threads using on-stack trampolines.  Certainly no other gcc
port attempts to broadcast the need for an icache flush to other cpus.

So just leave as-is -- __builtin_clear_cache works properly for the local thread.
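
As a usage-level illustration of that single-thread contract (a hypothetical
example, not taken from the patch or the testsuite):

  #include <stddef.h>
  #include <stdint.h>

  /* Write instructions into buf elsewhere, then make them visible to this
     hart's instruction fetch before jumping to them.  On RISC-V the builtin
     expands to the fence.i emitted by the clear_cache pattern above.  */
  static void
  run_generated_code (uint32_t *buf, size_t nwords)
  {
    __builtin___clear_cache ((char *) buf, (char *) (buf + nwords));
    /* Object-to-function pointer cast relies on the usual GCC/POSIX
       allowance.  */
    ((void (*) (void)) buf) ();
  }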


>>> +(define_insn "call_value_multiple_internal"
>>> +  [(set (match_operand 0 "register_operand" "")
>>> +       (call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
>>> +             (match_operand 2 "" "")))
>>> +   (set (match_operand 3 "register_operand" "")
>>> +       (call (mem:SI (match_dup 1))
>>> +             (match_dup 2)))
>>
>>
>> Any reason for this?  Your return value registers are sequential.  The
>> normal thing to do is just use e.g. (reg:TI 10).
> 
> I think we'd need different patterns for mixed int/FP struct returns
> (which use a0 and fa0) if we took that approach.

Ah.  Other targets get away with using a PARALLEL.

From sparc.c, function_arg_record_value:

  data.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (data.stack + nregs));

and sparc.md:

(define_insn "*call_value_symbolic_sp64"
  [(set (match_operand 0 "" "")
        (call (mem:DI (match_operand:DI 1 "symbolic_operand" "s"))
              (match_operand 2 "" "")))
   (clobber (reg:DI O7_REG))]

So you wind up with things like

  (set (parallel [
         (reg:DI o0)
         (reg:DF fp0)
         (reg:DI o1)
         (reg:DF fp1)
        ])
       (call (mem:DI (symbol_ref:DI "function"))
             (const_int 0)))

We do the same for x86_64 -- in function_value_64:

  ret = construct_container (mode, orig_mode, valtype, 1,
                             X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
                             x86_64_int_return_registers, 0);


r~
Andrew Waterman Jan. 31, 2017, 9:10 p.m. UTC | #12
On Tue, Jan 31, 2017 at 10:32 AM, Richard Henderson <rth@redhat.com> wrote:
> On 01/30/2017 05:10 PM, Andrew Waterman wrote:
>>>> +(define_expand "clear_cache"
>>>> +  [(match_operand 0 "pmode_register_operand")
>>>> +   (match_operand 1 "pmode_register_operand")]
>>>> +  ""
>>>> +  "
>>>> +{
>>>> +  emit_insn (gen_fence_i ());
>>>> +  DONE;
>>>> +}")
>>>
>>>
>>> Do you need a FENCE before the FENCE.I?
>>
>> It's actually not clear to me what the semantics of clear_cache are
>> for multiprocessors.  Can you shed some light?
>>
>> If thread A modifies code and then sets a flag, and thread B reads
>> the flag and executes a FENCE.I, then thread A needs a FENCE before
>> setting the flag and thread B needs a FENCE before the FENCE.I.  But
>> is it not the software's responsibility to insert both fences, rather
>> than assuming one of the fences is folded into clear_cache?
>
> Your introduction of "flag" confuses the issue.
>
> Having re-read the description in section 2.7, I see that FENCE.I is
> thread-local and is all that is required for a single thread to sync its own I
> and D caches.  I think perhaps I'd mis-read or mis-remembered before.
>
> Practically speaking, I'm not sure we have put any real thought into what
> needs to happen for threads using on-stack trampolines.  Certainly no other gcc
> port attempts to broadcast the need for an icache flush to other cpus.
>
> So just leave as-is -- __builtin_clear_cache works properly for the local thread.

Sorry, I jumped right to the assumption that you were suggesting a
FENCE for multiprocessor synchronization.  Indeed, it's not necessary
for enforcing ordering between local stores and FENCE.I.

>
>
>>>> +(define_insn "call_value_multiple_internal"
>>>> +  [(set (match_operand 0 "register_operand" "")
>>>> +       (call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
>>>> +             (match_operand 2 "" "")))
>>>> +   (set (match_operand 3 "register_operand" "")
>>>> +       (call (mem:SI (match_dup 1))
>>>> +             (match_dup 2)))
>>>
>>>
>>> Any reason for this?  Your return value registers are sequential.  The
>>> normal thing to do is just use e.g. (reg:TI 10).
>>
>> I think we'd need different patterns for mixed int/FP struct returns
>> (which use a0 and fa0) if we took that approach.
>
> Ah.  Other targets get away with using a PARALLEL.
>
> From sparc.c, function_arg_record_value:
>
>   data.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (data.stack + nregs));
>
> and sparc.md:
>
> (define_insn "*call_value_symbolic_sp64"
>   [(set (match_operand 0 "" "")
>         (call (mem:DI (match_operand:DI 1 "symbolic_operand" "s"))
>               (match_operand 2 "" "")))
>    (clobber (reg:DI O7_REG))]
>
> So you wind up with things like
>
>   (set (parallel [
>          (reg:DI o0)
>          (reg:DF fp0)
>          (reg:DI o1)
>          (reg:DF fp1)
>         ])
>        (call (mem:DI (symbol_ref:DI "function"))
>              (const_int 0)))
>
> We do the same for x86_64 -- in function_value_64:
>
>   ret = construct_container (mode, orig_mode, valtype, 1,
>                              X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
>                              x86_64_int_return_registers, 0);

Thanks--these pointers were quite helpful.

>
>
> r~
diff mbox

Patch

diff --git a/gcc/common/config/riscv/riscv-common.c b/gcc/common/config/riscv/riscv-common.c
new file mode 100644
index 0000000..4b9fad3
--- /dev/null
+++ b/gcc/common/config/riscv/riscv-common.c
@@ -0,0 +1,131 @@ 
+/* Common hooks for RISC-V.
+   Copyright (C) 2016 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "common/common-target.h"
+#include "common/common-target-def.h"
+#include "opts.h"
+#include "flags.h"
+#include "errors.h"
+
+/* Parse a RISC-V ISA string into an option mask.  */
+
+static void
+riscv_parse_arch_string (const char *isa, int *flags)
+{
+  const char *p = isa;
+
+  if (strncmp (p, "rv32", 4) == 0)
+    *flags &= ~MASK_64BIT, p += 4;
+  else if (strncmp (p, "rv64", 4) == 0)
+    *flags |= MASK_64BIT, p += 4;
+  else
+    {
+      error ("-march=%s: ISA string must begin with rv32 or rv64", isa);
+      return;
+    }
+
+  if (*p == 'g')
+    {
+      p++;
+
+      *flags |= MASK_MUL;
+      *flags |= MASK_ATOMIC;
+      *flags |= MASK_HARD_FLOAT;
+      *flags |= MASK_DOUBLE_FLOAT;
+    }
+  else if (*p == 'i')
+    {
+      p++;
+
+      *flags &= ~MASK_MUL;
+      if (*p == 'm')
+	*flags |= MASK_MUL, p++;
+
+      *flags &= ~MASK_ATOMIC;
+      if (*p == 'a')
+	*flags |= MASK_ATOMIC, p++;
+
+      *flags &= ~(MASK_HARD_FLOAT | MASK_DOUBLE_FLOAT);
+      if (*p == 'f')
+	{
+	  *flags |= MASK_HARD_FLOAT, p++;
+
+	  if (*p == 'd')
+	    {
+	      *flags |= MASK_DOUBLE_FLOAT;
+	      p++;
+	    }
+	}
+    }
+  else
+    {
+      error ("-march=%s: invalid ISA string", isa);
+      return;
+    }
+
+  *flags &= ~MASK_RVC;
+  if (*p == 'c')
+    *flags |= MASK_RVC, p++;
+
+  if (*p)
+    {
+      error ("-march=%s: unsupported ISA substring `%s'", isa, p);
+      return;
+    }
+}
+
+/* Implement TARGET_HANDLE_OPTION.  */
+
+static bool
+riscv_handle_option (struct gcc_options *opts,
+		     struct gcc_options *opts_set ATTRIBUTE_UNUSED,
+		     const struct cl_decoded_option *decoded,
+		     location_t loc ATTRIBUTE_UNUSED)
+{
+  switch (decoded->opt_index)
+    {
+    case OPT_march_:
+      riscv_parse_arch_string (decoded->arg, &opts->x_target_flags);
+      return true;
+
+    default:
+      return true;
+    }
+}
+
+/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
+static const struct default_options riscv_option_optimization_table[] =
+  {
+    { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
+    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
+    { OPT_LEVELS_2_PLUS, OPT_free, NULL, 1 },
+    { OPT_LEVELS_NONE, 0, NULL, 0 }
+  };
+
+#undef TARGET_OPTION_OPTIMIZATION_TABLE
+#define TARGET_OPTION_OPTIMIZATION_TABLE riscv_option_optimization_table
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION riscv_handle_option
+
+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 90308cd..c68560a 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -451,6 +451,9 @@  powerpc*-*-*)
 	esac
 	extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
 	;;
+riscv*)
+	cpu_type=riscv
+	;;
 rs6000*-*-*)
 	extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
 	;;
@@ -2016,6 +2019,32 @@  microblaze*-*-elf)
 	cxx_target_objs="${cxx_target_objs} microblaze-c.o"
 	tmake_file="${tmake_file} microblaze/t-microblaze"
         ;;
+riscv*-*-linux*)
+	tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} riscv/linux.h"
+	case "x${enable_multilib}" in
+	xno) tmake_file="${tmake_file} riscv/t-linux-nomultilib" ;;
+	xyes) tmake_file="${tmake_file} riscv/t-linux-multilib" ;;
+	*) echo "Unknown value for enable_multilib"; exit 1
+	esac
+	gnu_ld=yes
+	gas=yes
+	# Force .init_array support.  The configure script cannot always
+	# automatically detect that GAS supports it, yet we require it.
+	gcc_cv_initfini_array=yes
+	;;
+riscv*-*-elf*)
+	tm_file="elfos.h newlib-stdint.h ${tm_file} riscv/elf.h"
+	case "x${enable_multilib}" in
+	xno) tmake_file="${tmake_file} riscv/t-elf-nomultilib" ;;
+	xyes) tmake_file="${tmake_file} riscv/t-elf-multilib" ;;
+	*) echo "Unknown value for enable_multilib"; exit 1
+	esac
+	gnu_ld=yes
+	gas=yes
+	# Force .init_array support.  The configure script cannot always
+	# automatically detect that GAS supports it, yet we require it.
+	gcc_cv_initfini_array=yes
+	;;
 mips*-*-netbsd*)			# NetBSD/mips, either endian.
 	target_cpu_default="MASK_ABICALLS"
 	tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
@@ -3939,6 +3968,70 @@  case "${target}" in
 		done
 		;;
 
+	riscv*-*-*)
+		supported_defaults="abi arch tune"
+
+		case "${target}" in
+		riscv32*) xlen=32 ;;
+		riscv64*) xlen=64 ;;
+		*) echo "Unsupported RISC-V target ${target}" 1>&2; exit 1 ;;
+		esac
+
+		# Infer arch from --with-arch, --target, and --with-abi.
+		case "${with_arch}" in
+		rv32i* | rv32g* | rv64i* | rv64g*)
+			# OK.
+			;;
+		"")
+			# Infer XLEN, but otherwise assume GC.
+			case "${with_abi}" in
+			ilp32 | ilp32f | ilp32d) with_arch="rv32gc" ;;
+			lp64 | lp64f | lp64d) with_arch="rv64gc" ;;
+			*) with_arch="rv${xlen}gc" ;;
+			esac
+			;;
+		*)
+			echo "--with-arch=${with_arch} is not supported.  The argument must begin with rv32i, rv32g, rv64i, or rv64g." 1>&2
+			exit 1
+			;;
+		esac
+
+		# Make sure --with-abi is valid.  If it was not specified,
+		# pick a default based on the ISA, preferring soft-float
+		# unless the D extension is present.
+		case "${with_abi}" in
+		ilp32 | ilp32f | ilp32d | lp64 | lp64f | lp64d)
+			;;
+		"")
+			case "${with_arch}" in
+			rv32*d* | rv32g*) with_abi=ilp32d ;;
+			rv32*) with_abi=ilp32 ;;
+			rv64*d* | rv64g*) with_abi=lp64d ;;
+			rv64*) with_abi=lp64 ;;
+			esac
+			;;
+		*)
+			echo "--with-abi=${with_abi} is not supported" 1>&2
+			exit 1
+			;;
+		esac
+
+		# Make sure ABI and ISA are compatible.
+		case "${with_abi},${with_arch}" in
+		ilp32,rv32* \
+		| ilp32f,rv32*f* | ilp32f,rv32g* \
+		| ilp32d,rv32*d* | ilp32d,rv32g* \
+		| lp64,rv64* \
+		| lp64f,rv64*f* | lp64f,rv64g* \
+		| lp64d,rv64*d* | lp64d,rv64g*)
+			;;
+		*)
+			echo "--with-abi=${with_abi} is not supported for ISA ${with_arch}" 1>&2
+			exit 1
+			;;
+		esac
+		;;
+
 	mips*-*-*)
 		supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci"
 
diff --git a/gcc/config/riscv/constraints.md b/gcc/config/riscv/constraints.md
new file mode 100644
index 0000000..1a4f2bd
--- /dev/null
+++ b/gcc/config/riscv/constraints.md
@@ -0,0 +1,79 @@ 
+;; Constraint definitions for RISC-V target.
+;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (andrew@sifive.com).
+;; Based on MIPS target for GNU compiler.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Register constraints
+
+(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
+  "A floating-point register (if available).")
+
+(define_register_constraint "j" "SIBCALL_REGS"
+  "@internal")
+
+;; Avoid using register t0 for JALR's argument, because for some
+;; microarchitectures that is a return-address stack hint.
+(define_register_constraint "l" "JALR_REGS"
+  "@internal")
+
+;; General constraints
+
+(define_constraint "I"
+  "An I-type 12-bit signed immediate."
+  (and (match_code "const_int")
+       (match_test "SMALL_OPERAND (ival)")))
+
+(define_constraint "J"
+  "Integer zero."
+  (and (match_code "const_int")
+       (match_test "ival == 0")))
+
+;; Floating-point constant +0.0, used for FCVT-based moves when FMV is
+;; not available in RV32.
+(define_constraint "G"
+  "@internal"
+  (and (match_code "const_double")
+       (match_test "op == CONST0_RTX (mode)")))
+
+(define_constraint "Q"
+  "@internal"
+  (match_operand 0 "const_arith_operand"))
+
+(define_memory_constraint "A"
+  "An address that is held in a general-purpose register."
+  (and (match_code "mem")
+       (match_test "GET_CODE(XEXP(op,0)) == REG")))
+
+(define_constraint "S"
+  "@internal
+   A constant call address."
+  (and (match_operand 0 "call_insn_operand")
+       (match_test "CONSTANT_P (op)")))
+
+(define_constraint "T"
+  "@internal
+   A constant @code{move_operand}."
+  (and (match_operand 0 "move_operand")
+       (match_test "CONSTANT_P (op)")))
+
+(define_memory_constraint "W"
+  "@internal
+   A memory address based on a member of @code{BASE_REG_CLASS}."
+  (and (match_code "mem")
+       (match_operand 0 "memory_operand")))
diff --git a/gcc/config/riscv/elf.h b/gcc/config/riscv/elf.h
new file mode 100644
index 0000000..391e59f
--- /dev/null
+++ b/gcc/config/riscv/elf.h
@@ -0,0 +1,35 @@ 
+/* Target macros for riscv*-elf targets.
+   Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#define LINK_SPEC "\
+-melf" XLEN_SPEC "lriscv \
+%{shared}"
+
+/* Link against Newlib libraries, because the ELF backend assumes Newlib.
+   Handle the circular dependence between libc and libgloss. */
+#undef  LIB_SPEC
+#define LIB_SPEC "--start-group -lc -lgloss --end-group"
+
+#undef  STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s"
+
+#undef  ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s"
+
+#define NO_IMPLICIT_EXTERN_C 1
diff --git a/gcc/config/riscv/generic.md b/gcc/config/riscv/generic.md
new file mode 100644
index 0000000..294c7ef
--- /dev/null
+++ b/gcc/config/riscv/generic.md
@@ -0,0 +1,78 @@ 
+;; Generic DFA-based pipeline description for RISC-V targets.
+;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (andrew@sifive.com).
+;; Based on MIPS target for GNU compiler.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+(define_automaton "pipe0")
+(define_cpu_unit "alu" "pipe0")
+(define_cpu_unit "imuldiv" "pipe0")
+(define_cpu_unit "fdivsqrt" "pipe0")
+
+(define_insn_reservation "generic_alu" 1
+  (eq_attr "type" "unknown,const,arith,shift,slt,multi,nop,logical,move")
+  "alu")
+
+(define_insn_reservation "generic_load" 3
+  (eq_attr "type" "load,fpload")
+  "alu")
+
+(define_insn_reservation "generic_store" 1
+  (eq_attr "type" "store,fpstore")
+  "alu")
+
+(define_insn_reservation "generic_xfer" 3
+  (eq_attr "type" "mfc,mtc,fcvt,fmove,fcmp")
+  "alu")
+
+(define_insn_reservation "generic_branch" 1
+  (eq_attr "type" "branch,jump,call")
+  "alu")
+
+(define_insn_reservation "generic_imul" 10
+  (eq_attr "type" "imul")
+  "imuldiv*10")
+
+(define_insn_reservation "generic_idivsi" 34
+  (and (eq_attr "type" "idiv")
+       (eq_attr "mode" "SI"))
+  "imuldiv*34")
+
+(define_insn_reservation "generic_idivdi" 66
+  (and (eq_attr "type" "idiv")
+       (eq_attr "mode" "DI"))
+  "imuldiv*66")
+
+(define_insn_reservation "generic_fmul_single" 5
+  (and (eq_attr "type" "fadd,fmul,fmadd")
+       (eq_attr "mode" "SF"))
+  "alu")
+
+(define_insn_reservation "generic_fmul_double" 7
+  (and (eq_attr "type" "fadd,fmul,fmadd")
+       (eq_attr "mode" "DF"))
+  "alu")
+
+(define_insn_reservation "generic_fdiv" 20
+  (eq_attr "type" "fdiv")
+  "fdivsqrt*20")
+
+(define_insn_reservation "generic_fsqrt" 25
+  (eq_attr "type" "fsqrt")
+  "fdivsqrt*25")
diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h
new file mode 100644
index 0000000..045f6cc
--- /dev/null
+++ b/gcc/config/riscv/linux.h
@@ -0,0 +1,34 @@ 
+/* Definitions for RISC-V GNU/Linux systems with ELF format.
+   Copyright (C) 1998-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#define TARGET_OS_CPP_BUILTINS()				\
+  do {								\
+    GNU_USER_TARGET_OS_CPP_BUILTINS();				\
+  } while (0)
+
+#define GLIBC_DYNAMIC_LINKER "/lib" XLEN_SPEC "/" ABI_SPEC "/ld.so.1"
+
+#define LINK_SPEC "\
+-melf" XLEN_SPEC "lriscv \
+%{shared} \
+  %{!shared: \
+    %{!static: \
+      %{rdynamic:-export-dynamic} \
+      -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
+    %{static:-static}}"
diff --git a/gcc/config/riscv/peephole.md b/gcc/config/riscv/peephole.md
new file mode 100644
index 0000000..78db1d7
--- /dev/null
+++ b/gcc/config/riscv/peephole.md
@@ -0,0 +1,85 @@ 
+;; Peephole optimizations for RISC-V for GNU compiler.
+;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (andrew@sifive.com).
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;;........................
+;; DI -> SI optimizations
+;;........................
+
+;; Simplify (int)(a + 1), etc.
+(define_peephole2
+  [(set (match_operand:DI 0 "register_operand")
+	(match_operator:DI 4 "modular_operator"
+	  [(match_operand:DI 1 "register_operand")
+	   (match_operand:DI 2 "arith_operand")]))
+   (set (match_operand:SI 3 "register_operand")
+	(truncate:SI (match_dup 0)))]
+  "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))
+   && (GET_CODE (operands[4]) != ASHIFT || (CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 32))"
+  [(set (match_dup 3)
+	  (truncate:SI
+	     (match_op_dup:DI 4 
+	       [(match_operand:DI 1 "register_operand")
+		(match_operand:DI 2 "arith_operand")])))])
+
+;; Simplify (int)a + 1, etc.
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand")
+	(truncate:SI (match_operand:DI 1 "register_operand")))
+   (set (match_operand:SI 3 "register_operand")
+	(match_operator:SI 4 "modular_operator"
+	  [(match_dup 0)
+	   (match_operand:SI 2 "arith_operand")]))]
+  "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
+  [(set (match_dup 3)
+	(match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
+
+;; Simplify -(int)a, etc.
+(define_peephole2
+  [(set (match_operand:SI 0 "register_operand")
+	(truncate:SI (match_operand:DI 2 "register_operand")))
+   (set (match_operand:SI 3 "register_operand")
+	(match_operator:SI 4 "modular_operator"
+	  [(match_operand:SI 1 "reg_or_0_operand")
+	   (match_dup 0)]))]
+  "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
+  [(set (match_dup 3)
+	(match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
+
+;; Simplify (unsigned long)(unsigned int)a << const
+(define_peephole2
+  [(set (match_operand:DI 0 "register_operand")
+	(ashift:DI (match_operand:DI 1 "register_operand")
+		   (match_operand 2 "const_int_operand")))
+   (set (match_operand:DI 3 "register_operand")
+	(lshiftrt:DI (match_dup 0) (match_dup 2)))
+   (set (match_operand:DI 4 "register_operand")
+	(ashift:DI (match_dup 3) (match_operand 5 "const_int_operand")))]
+  "TARGET_64BIT
+   && INTVAL (operands[5]) < INTVAL (operands[2])
+   && (REGNO (operands[3]) == REGNO (operands[4])
+       || peep2_reg_dead_p (3, operands[3]))"
+  [(set (match_dup 0)
+	(ashift:DI (match_dup 1) (match_dup 2)))
+   (set (match_dup 4)
+	(lshiftrt:DI (match_dup 0) (match_operand 5)))]
+{
+  operands[5] = GEN_INT (INTVAL (operands[2]) - INTVAL (operands[5]));
+})
diff --git a/gcc/config/riscv/pic.md b/gcc/config/riscv/pic.md
new file mode 100644
index 0000000..bde9005
--- /dev/null
+++ b/gcc/config/riscv/pic.md
@@ -0,0 +1,85 @@ 
+;; PIC codegen for RISC-V for GNU compiler.
+;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (andrew@sifive.com).
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; Simplify PIC loads to static variables.
+;; These should go away once we figure out how to emit auipc discretely.
+
+(define_insn "*local_pic_load<mode>"
+  [(set (match_operand:ANYI 0 "register_operand" "=r")
+	(mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))]
+  "USE_LOAD_ADDRESS_MACRO (operands[1])"
+  "<load>\t%0,%1"
+  [(set (attr "length") (const_int 8))])
+
+(define_insn "*local_pic_load<mode>"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+	(mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
+   (clobber (reg:DI T0_REGNUM))]
+  "TARGET_HARD_FLOAT && TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[1])"
+  "<load>\t%0,%1,t0"
+  [(set (attr "length") (const_int 8))])
+
+(define_insn "*local_pic_load<mode>"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+	(mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
+   (clobber (reg:SI T0_REGNUM))]
+  "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[1])"
+  "<load>\t%0,%1,t0"
+  [(set (attr "length") (const_int 8))])
+
+(define_insn "*local_pic_loadu<mode>"
+  [(set (match_operand:SUPERQI 0 "register_operand" "=r")
+	(zero_extend:SUPERQI (mem:SUBDI (match_operand 1 "absolute_symbolic_operand" ""))))]
+  "USE_LOAD_ADDRESS_MACRO (operands[1])"
+  "<load>u\t%0,%1"
+  [(set (attr "length") (const_int 8))])
+
+(define_insn "*local_pic_storedi<mode>"
+  [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
+	(match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
+   (clobber (reg:DI T0_REGNUM))]
+  "TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
+  "<store>\t%z1,%0,t0"
+  [(set (attr "length") (const_int 8))])
+
+(define_insn "*local_pic_storesi<mode>"
+  [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
+	(match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
+   (clobber (reg:SI T0_REGNUM))]
+  "!TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
+  "<store>\t%z1,%0,t0"
+  [(set (attr "length") (const_int 8))])
+
+(define_insn "*local_pic_storedi<mode>"
+  [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
+	(match_operand:ANYF 1 "register_operand" "f"))
+   (clobber (reg:DI T0_REGNUM))]
+  "TARGET_HARD_FLOAT && TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
+  "<store>\t%1,%0,t0"
+  [(set (attr "length") (const_int 8))])
+
+(define_insn "*local_pic_storesi<mode>"
+  [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
+	(match_operand:ANYF 1 "register_operand" "f"))
+   (clobber (reg:SI T0_REGNUM))]
+  "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
+  "<store>\t%1,%0,t0"
+  [(set (attr "length") (const_int 8))])
diff --git a/gcc/config/riscv/predicates.md b/gcc/config/riscv/predicates.md
new file mode 100644
index 0000000..2258878
--- /dev/null
+++ b/gcc/config/riscv/predicates.md
@@ -0,0 +1,163 @@ 
+;; Predicate description for RISC-V target.
+;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (andrew@sifive.com).
+;; Based on MIPS target for GNU compiler.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_predicate "const_arith_operand"
+  (and (match_code "const_int")
+       (match_test "SMALL_OPERAND (INTVAL (op))")))
+
+(define_predicate "arith_operand"
+  (ior (match_operand 0 "const_arith_operand")
+       (match_operand 0 "register_operand")))
+
+(define_predicate "sle_operand"
+  (and (match_code "const_int")
+       (match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
+
+(define_predicate "sleu_operand"
+  (and (match_operand 0 "sle_operand")
+       (match_test "INTVAL (op) + 1 != 0")))
+
+(define_predicate "const_0_operand"
+  (and (match_code "const_int,const_double,const_vector")
+       (match_test "op == CONST0_RTX (GET_MODE (op))")))
+
+(define_predicate "reg_or_0_operand"
+  (ior (match_operand 0 "const_0_operand")
+       (match_operand 0 "register_operand")))
+
+;; Only use branch-on-bit sequences when the mask is not an ANDI immediate.
+(define_predicate "branch_on_bit_operand"
+  (and (match_code "const_int")
+       (match_test "INTVAL (op) >= IMM_BITS - 1")))
+
+;; A legitimate CONST_INT operand that takes more than one instruction
+;; to load.
+(define_predicate "splittable_const_int_operand"
+  (match_code "const_int")
+{
+  /* Don't handle multi-word moves this way; we don't want to introduce
+     the individual word-mode moves until after reload.  */
+  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+    return false;
+
+  /* Otherwise check whether the constant can be loaded in a single
+     instruction.  */
+  return !LUI_OPERAND (INTVAL (op)) && !SMALL_OPERAND (INTVAL (op));
+})
+
+(define_predicate "move_operand"
+  (match_operand 0 "general_operand")
+{
+  enum riscv_symbol_type symbol_type;
+
+  /* The thinking here is as follows:
+
+     (1) The move expanders should split complex load sequences into
+	 individual instructions.  Those individual instructions can
+	 then be optimized by all rtl passes.
+
+     (2) The target of pre-reload load sequences should not be used
+	 to store temporary results.  If the target register is only
+	 assigned one value, reload can rematerialize that value
+	 on demand, rather than spill it to the stack.
+
+     (3) If we allowed pre-reload passes like combine and cse to recreate
+	 complex load sequences, we would want to be able to split the
+	 sequences before reload as well, so that the pre-reload scheduler
+	 can see the individual instructions.  This falls foul of (2);
+	 the splitter would be forced to reuse the target register for
+	 intermediate results.
+
+     (4) We want to define complex load splitters for combine.  These
+	 splitters can request a temporary scratch register, which avoids
+	 the problem in (2).  They allow things like:
+
+	      (set (reg T1) (high SYM))
+	      (set (reg T2) (low (reg T1) SYM))
+	      (set (reg X) (plus (reg T2) (const_int OFFSET)))
+
+	 to be combined into:
+
+	      (set (reg T3) (high SYM+OFFSET))
+	      (set (reg X) (lo_sum (reg T3) SYM+OFFSET))
+
+	 if T2 is only used this once.  */
+  switch (GET_CODE (op))
+    {
+    case CONST_INT:
+      return !splittable_const_int_operand (op, mode);
+
+    case CONST:
+    case SYMBOL_REF:
+    case LABEL_REF:
+      return riscv_symbolic_constant_p (op, &symbol_type)
+	      && !riscv_split_symbol_type (symbol_type);
+
+    case HIGH:
+      op = XEXP (op, 0);
+      return riscv_symbolic_constant_p (op, &symbol_type)
+	      && riscv_split_symbol_type (symbol_type)
+	      && symbol_type != SYMBOL_PCREL;
+
+    default:
+      return true;
+    }
+})
+
+(define_predicate "symbolic_operand"
+  (match_code "const,symbol_ref,label_ref")
+{
+  enum riscv_symbol_type type;
+  return riscv_symbolic_constant_p (op, &type);
+})
+
+(define_predicate "absolute_symbolic_operand"
+  (match_code "const,symbol_ref,label_ref")
+{
+  enum riscv_symbol_type type;
+  return (riscv_symbolic_constant_p (op, &type)
+	  && (type == SYMBOL_ABSOLUTE || type == SYMBOL_PCREL));
+})
+
+(define_predicate "plt_symbolic_operand"
+  (match_code "const,symbol_ref,label_ref")
+{
+  enum riscv_symbol_type type;
+  return (riscv_symbolic_constant_p (op, &type)
+	  && type == SYMBOL_GOT_DISP && !SYMBOL_REF_WEAK (op) && TARGET_PLT);
+})
+
+(define_predicate "call_insn_operand"
+  (ior (match_operand 0 "absolute_symbolic_operand")
+       (match_operand 0 "plt_symbolic_operand")
+       (match_operand 0 "register_operand")))
+
+(define_predicate "modular_operator"
+  (match_code "plus,minus,mult,ashift"))
+
+(define_predicate "equality_operator"
+  (match_code "eq,ne"))
+
+(define_predicate "order_operator"
+  (match_code "eq,ne,lt,ltu,le,leu,ge,geu,gt,gtu"))
+
+(define_predicate "fp_order_operator"
+  (match_code "eq,ne,lt,le,gt,ge"))
diff --git a/gcc/config/riscv/riscv-modes.def b/gcc/config/riscv/riscv-modes.def
new file mode 100644
index 0000000..5c65667
--- /dev/null
+++ b/gcc/config/riscv/riscv-modes.def
@@ -0,0 +1,22 @@ 
+/* Extra machine modes for RISC-V target.
+   Copyright (C) 2011-2017 Free Software Foundation, Inc.
+   Contributed by Andrew Waterman (andrew@sifive.com).
+   Based on MIPS target for GNU compiler.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+FLOAT_MODE (TF, 16, ieee_quad_format);
diff --git a/gcc/config/riscv/riscv-opts.h b/gcc/config/riscv/riscv-opts.h
new file mode 100644
index 0000000..2b19233
--- /dev/null
+++ b/gcc/config/riscv/riscv-opts.h
@@ -0,0 +1,41 @@ 
+/* Definition of RISC-V target for GNU compiler.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   Contributed by Andrew Waterman (andrew@sifive.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#ifndef GCC_RISCV_OPTS_H
+#define GCC_RISCV_OPTS_H
+
+enum riscv_abi_type {
+  ABI_ILP32,
+  ABI_ILP32F,
+  ABI_ILP32D,
+  ABI_LP64,
+  ABI_LP64F,
+  ABI_LP64D
+};
+extern enum riscv_abi_type riscv_abi;
+
+enum riscv_code_model {
+  CM_MEDLOW,
+  CM_MEDANY,
+  CM_PIC
+};
+extern enum riscv_code_model riscv_cmodel;
+
+#endif /* ! GCC_RISCV_OPTS_H */
diff --git a/gcc/config/riscv/riscv-protos.h b/gcc/config/riscv/riscv-protos.h
new file mode 100644
index 0000000..c1ed00c
--- /dev/null
+++ b/gcc/config/riscv/riscv-protos.h
@@ -0,0 +1,72 @@ 
+/* Definition of RISC-V target for GNU compiler.
+   Copyright (C) 2011-2017 Free Software Foundation, Inc.
+   Contributed by Andrew Waterman (andrew@sifive.com).
+   Based on MIPS target for GNU compiler.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#ifndef GCC_RISCV_PROTOS_H
+#define GCC_RISCV_PROTOS_H
+
+/* Symbol types we understand.  The order of this list must match that of
+   the unspec enum in riscv.md, subsequent to UNSPEC_ADDRESS_FIRST.  */
+enum riscv_symbol_type {
+  SYMBOL_ABSOLUTE,
+  SYMBOL_PCREL,
+  SYMBOL_GOT_DISP,
+  SYMBOL_TLS,
+  SYMBOL_TLS_LE,
+  SYMBOL_TLS_IE,
+  SYMBOL_TLS_GD
+};
+#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)
+
+extern enum riscv_symbol_type riscv_classify_symbolic_expression (rtx);
+extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
+extern int riscv_regno_mode_ok_for_base_p (int, enum machine_mode, bool);
+extern bool riscv_hard_regno_mode_ok_p (unsigned int, enum machine_mode);
+extern int riscv_address_insns (rtx, enum machine_mode, bool);
+extern int riscv_const_insns (rtx);
+extern int riscv_split_const_insns (rtx);
+extern int riscv_load_store_insns (rtx, rtx_insn *);
+extern rtx riscv_emit_move (rtx, rtx);
+extern bool riscv_split_symbol (rtx, rtx, enum machine_mode, rtx *);
+extern bool riscv_split_symbol_type (enum riscv_symbol_type);
+extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
+extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT);
+extern bool riscv_legitimize_move (enum machine_mode, rtx, rtx);
+extern rtx riscv_subword (rtx, bool);
+extern bool riscv_split_64bit_move_p (rtx, rtx);
+extern void riscv_split_doubleword_move (rtx, rtx);
+extern const char *riscv_output_move (rtx, rtx);
+extern const char *riscv_output_gpr_save (unsigned);
+#ifdef RTX_CODE
+extern void riscv_expand_scc (rtx *);
+extern void riscv_expand_conditional_branch (rtx *);
+#endif
+extern rtx_insn *riscv_expand_call (bool, rtx, rtx, rtx);
+extern void riscv_set_return_address (rtx, rtx);
+extern bool riscv_expand_block_move (rtx, rtx, rtx);
+extern rtx riscv_return_addr (int, rtx);
+extern HOST_WIDE_INT riscv_initial_elimination_offset (int, int);
+extern void riscv_expand_prologue (void);
+extern void riscv_expand_epilogue (bool);
+extern bool riscv_can_use_return_insn (void);
+extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
+extern unsigned int riscv_hard_regno_nregs (int, enum machine_mode);
+
+#endif /* ! GCC_RISCV_PROTOS_H */
diff --git a/gcc/config/riscv/riscv.h b/gcc/config/riscv/riscv.h
new file mode 100644
index 0000000..076663e
--- /dev/null
+++ b/gcc/config/riscv/riscv.h
@@ -0,0 +1,1015 @@ 
+/* Definition of RISC-V target for GNU compiler.
+   Copyright (C) 2011-2017 Free Software Foundation, Inc.
+   Contributed by Andrew Waterman (andrew@sifive.com).
+   Based on MIPS target for GNU compiler.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#ifndef GCC_RISCV_H
+#define GCC_RISCV_H
+
+#include "config/riscv/riscv-opts.h"
+
+/* Target CPU builtins.  */
+#define TARGET_CPU_CPP_BUILTINS()					\
+  do									\
+    {									\
+      builtin_define ("__riscv");					\
+									\
+      if (TARGET_RVC)							\
+	builtin_define ("__riscv_compressed");				\
+									\
+      if (TARGET_ATOMIC)						\
+	builtin_define ("__riscv_atomic");				\
+									\
+      if (TARGET_MUL)							\
+	builtin_define ("__riscv_mul");					\
+      if (TARGET_DIV)							\
+	builtin_define ("__riscv_div");					\
+      if (TARGET_DIV && TARGET_MUL)					\
+	builtin_define ("__riscv_muldiv");				\
+									\
+      builtin_define_with_int_value ("__riscv_xlen",			\
+				     UNITS_PER_WORD * 8);		\
+      if (TARGET_HARD_FLOAT)						\
+	builtin_define_with_int_value ("__riscv_flen",			\
+				       UNITS_PER_FP_REG * 8);		\
+									\
+      if (TARGET_HARD_FLOAT && TARGET_FDIV)				\
+	{								\
+	  builtin_define ("__riscv_fdiv");				\
+	  builtin_define ("__riscv_fsqrt");				\
+	}								\
+									\
+      switch (riscv_abi)						\
+	{								\
+	case ABI_ILP32:							\
+	case ABI_LP64:							\
+	  builtin_define ("__riscv_float_abi_soft");			\
+	  break;							\
+									\
+	case ABI_ILP32F:						\
+	case ABI_LP64F:							\
+	  builtin_define ("__riscv_float_abi_single");			\
+	  break;							\
+									\
+	case ABI_ILP32D:						\
+	case ABI_LP64D:							\
+	  builtin_define ("__riscv_float_abi_double");			\
+	  break;							\
+	}								\
+									\
+      switch (riscv_cmodel)						\
+	{								\
+	case CM_MEDLOW:							\
+	  builtin_define ("__riscv_cmodel_medlow");			\
+	  break;							\
+									\
+	case CM_MEDANY:							\
+	  builtin_define ("__riscv_cmodel_medany");			\
+	  break;							\
+									\
+	case CM_PIC:							\
+	  builtin_define ("__riscv_cmodel_pic");			\
+	  break;							\
+	}								\
+    }									\
+  while (0)
+
+/* Default target_flags if no switches are specified  */
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+#ifndef RISCV_TUNE_STRING_DEFAULT
+#define RISCV_TUNE_STRING_DEFAULT "rocket"
+#endif
+
+/* Support for a compile-time default CPU, et cetera.  The rules are:
+   --with-arch is ignored if -march is specified.
+   --with-abi is ignored if -mabi is specified.
+   --with-tune is ignored if -mtune is specified.  */
+#define OPTION_DEFAULT_SPECS \
+  {"march", "%{!march=*:-march=%(VALUE)}" }, \
+  {"mabi", "%{!mabi=*:-mabi=%(VALUE)}" }, \
+  {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
+  {"arch", "%{!march=*:-march=%(VALUE)}" }, \
+  {"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \
+
+#ifdef IN_LIBGCC2
+#undef TARGET_64BIT
+/* Make this compile time constant for libgcc2 */
+#define TARGET_64BIT           (__riscv_xlen == 64)
+#endif /* IN_LIBGCC2 */
+
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%(subtarget_asm_debugging_spec) \
+%{fPIC|fpic|fPIE|fpie:-fpic} \
+%{march=*} \
+%{mabi=*} \
+%(subtarget_asm_spec)"
+
+#define TARGET_DEFAULT_CMODEL CM_MEDLOW
+
+#define LOCAL_LABEL_PREFIX	"."
+#define USER_LABEL_PREFIX	""
+
+/* Offsets recorded in opcodes are a multiple of this alignment factor.
+   The default for this in 64-bit mode is 8, which causes problems with
+   SFmode register saves.  */
+#define DWARF_CIE_DATA_ALIGNMENT -4
+
+/* The mapping from gcc register number to DWARF 2 CFA column number.  */
+#define DWARF_FRAME_REGNUM(REGNO) \
+  (GP_REG_P (REGNO) || FP_REG_P (REGNO) ? REGNO : INVALID_REGNUM)
+
+/* The DWARF 2 CFA column which tracks the return address.  */
+#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
+
+/* Describe how we implement __builtin_eh_return.  */
+#define EH_RETURN_DATA_REGNO(N) \
+  ((N) < 4 ? (N) + GP_ARG_FIRST : INVALID_REGNUM)
+
+#define EH_RETURN_STACKADJ_RTX  gen_rtx_REG (Pmode, GP_ARG_FIRST + 4)
+
+/* Target machine storage layout */
+
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN 0
+#define WORDS_BIG_ENDIAN 0
+
+#define MAX_BITS_PER_WORD 64
+
+/* Width of a word, in units (bytes).  */
+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
+#ifndef IN_LIBGCC2
+#define MIN_UNITS_PER_WORD 4
+#endif
+
+/* The `Q' extension is not yet supported.  */
+#define UNITS_PER_FP_REG (TARGET_DOUBLE_FLOAT ? 8 : 4)
+
+/* The largest type that can be passed in floating-point registers.  */
+#define UNITS_PER_FP_ARG					\
+  (riscv_abi == ABI_ILP32 || riscv_abi == ABI_LP64 ? 0 :	\
+   riscv_abi == ABI_ILP32F || riscv_abi == ABI_LP64F ? 4 : 8)	\
+
+/* Set the sizes of the core types.  */
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+#define POINTER_SIZE (riscv_abi >= ABI_LP64 ? 64 : 32)
+#define LONG_TYPE_SIZE POINTER_SIZE
+
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 128
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list.  */
+#define PARM_BOUNDARY BITS_PER_WORD
+
+/* Allocation boundary (in *bits*) for the code of a function.  */
+#define FUNCTION_BOUNDARY (TARGET_RVC ? 16 : 32)
+
+/* There is no point aligning anything to a rounder boundary than this.  */
+#define BIGGEST_ALIGNMENT 128
+
+/* The user-level ISA permits misaligned accesses, but they may execute
+   extremely slowly and non-atomically.  Some privileged architectures
+   do not permit them at all.  It is best to enforce strict alignment.  */
+#define STRICT_ALIGNMENT 1
+
+/* Define this if you wish to imitate the way many other C compilers
+   handle alignment of bitfields and the structures that contain
+   them.
+
+   The behavior is that the type written for a bit-field (`int',
+   `short', or other integer type) imposes an alignment for the
+   entire structure, as if the structure really did contain an
+   ordinary field of that type.  In addition, the bit-field is placed
+   within the structure so that it would fit within such a field,
+   not crossing a boundary for it.
+
+   Thus, on most machines, a bit-field whose type is written as `int'
+   would not cross a four-byte boundary, and would force four-byte
+   alignment for the whole structure.  (The alignment used may not
+   be four bytes; it is controlled by the other alignment
+   parameters.)
+
+   If the macro is defined, its definition should be a C expression;
+   a nonzero value for the expression enables this behavior.  */
+
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* If defined, a C expression to compute the alignment given to a
+   constant that is being placed in memory.  CONSTANT is the constant
+   and ALIGN is the alignment that the object would ordinarily have.
+   The value of this macro is used instead of that alignment to align
+   the object.
+
+   If this macro is not defined, then ALIGN is used.
+
+   The typical use of this macro is to increase alignment for string
+   constants to be word aligned so that `strcpy' calls that copy
+   constants can be done inline.  */
+
+#define CONSTANT_ALIGNMENT(EXP, ALIGN)					\
+  ((TREE_CODE (EXP) == STRING_CST  || TREE_CODE (EXP) == CONSTRUCTOR)	\
+   && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* If defined, a C expression to compute the alignment for a static
+   variable.  TYPE is the data type, and ALIGN is the alignment that
+   the object would ordinarily have.  The value of this macro is used
+   instead of that alignment to align the object.
+
+   If this macro is not defined, then ALIGN is used.
+
+   One use of this macro is to increase alignment of medium-size
+   data to make it all fit in fewer cache lines.  Another is to
+   cause character arrays to be word-aligned so that `strcpy' calls
+   that copy constants to character arrays can be done inline.  */
+
+#define DATA_ALIGNMENT(TYPE, ALIGN)					\
+  ((((ALIGN) < BITS_PER_WORD)						\
+    && (TREE_CODE (TYPE) == ARRAY_TYPE					\
+	|| TREE_CODE (TYPE) == UNION_TYPE				\
+	|| TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
+
+/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
+   character arrays to be word-aligned so that `strcpy' calls that copy
+   constants to character arrays can be done inline, and `strcmp' can be
+   optimised to use word loads.  */
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
+  DATA_ALIGNMENT (TYPE, ALIGN)
+
+/* Define if operations between registers always perform the operation
+   on the full register even if a narrower mode is specified.  */
+#define WORD_REGISTER_OPERATIONS 1
+
+/* When in 64-bit mode, move insns will sign extend SImode and CCmode
+   moves.  All other references are zero extended.  */
+#define LOAD_EXTEND_OP(MODE) \
+  (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \
+   ? SIGN_EXTEND : ZERO_EXTEND)
+
+/* Define this macro if it is advisable to hold scalars in registers
+   in a wider mode than that declared by the program.  In such cases,
+   the value is constrained to be within the bounds of the declared
+   type, but kept valid in the wider mode.  The signedness of the
+   extension may differ from that of the type.  */
+
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE)	\
+  if (GET_MODE_CLASS (MODE) == MODE_INT		\
+      && GET_MODE_SIZE (MODE) < 4)		\
+    {						\
+      (MODE) = Pmode;				\
+    }
+
+/* Pmode is always the same as ptr_mode, but not always the same as word_mode.
+   Extensions of pointers to word_mode must be signed.  */
+#define POINTERS_EXTEND_UNSIGNED false
+
+/* When floating-point registers are wider than integer ones, moves between
+   them must go through memory.  */
+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE)	\
+  (GET_MODE_SIZE (MODE) > UNITS_PER_WORD		\
+   && ((CLASS1) == FP_REGS) != ((CLASS2) == FP_REGS))
+
+/* Define if loading short immediate values into registers sign extends.  */
+#define SHORT_IMMEDIATES_SIGN_EXTEND 1
+
+/* Standard register usage.  */
+
+/* Number of hardware registers.  We have:
+
+   - 32 integer registers
+   - 32 floating point registers
+   - 2 fake registers:
+	- ARG_POINTER_REGNUM
+	- FRAME_POINTER_REGNUM */
+
+#define FIRST_PSEUDO_REGISTER 66
+
+/* x0, sp, gp, and tp are fixed. */
+
+#define FIXED_REGISTERS							\
+{ /* General registers.  */						\
+  1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
+  /* Floating-point registers.  */					\
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
+  /* Others.  */							\
+  1, 1									\
+}
+
+
+/* a0-a7, t0-t6, fa0-fa7, and ft0-ft11 are volatile across calls.
+   The call RTLs themselves clobber ra.  */
+
+#define CALL_USED_REGISTERS						\
+{ /* General registers.  */						\
+  1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
+  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
+  /* Floating-point registers.  */					\
+  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
+  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
+  /* Others.  */							\
+  1, 1									\
+}
+
+#define CALL_REALLY_USED_REGISTERS					\
+{ /* General registers.  */						\
+  1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
+  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
+  /* Floating-point registers.  */					\
+  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
+  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
+  /* Others.  */							\
+  1, 1									\
+}
+
+/* Internal macros to classify an ISA register's type.  */
+
+#define GP_REG_FIRST 0
+#define GP_REG_LAST  31
+#define GP_REG_NUM   (GP_REG_LAST - GP_REG_FIRST + 1)
+
+#define FP_REG_FIRST 32
+#define FP_REG_LAST  63
+#define FP_REG_NUM   (FP_REG_LAST - FP_REG_FIRST + 1)
+
+/* The DWARF 2 CFA column which tracks the return address from a
+   signal handler context.  This means that to maintain backwards
+   compatibility, no hard register can be assigned this column if it
+   would need to be handled by the DWARF unwinder.  */
+#define DWARF_ALT_FRAME_RETURN_COLUMN 64
+
+#define GP_REG_P(REGNO)	\
+  ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
+#define FP_REG_P(REGNO)  \
+  ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
+
+#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
+
+/* Return coprocessor number from register number.  */
+
+#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) 				\
+  (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2'			\
+   : COP3_REG_P (REGNO) ? '3' : '?')
+
+
+#define HARD_REGNO_NREGS(REGNO, MODE) riscv_hard_regno_nregs (REGNO, MODE)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE)					\
+  riscv_hard_regno_mode_ok_p (REGNO, MODE)
+
+/* Don't allow floating-point modes to be tied, since type punning of
+   single-precision and double-precision is implementation defined.  */
+#define MODES_TIEABLE_P(MODE1, MODE2)			\
+  ((MODE1) == (MODE2)					\
+   || !(GET_MODE_CLASS (MODE1) == MODE_FLOAT		\
+	&& GET_MODE_CLASS (MODE2) == MODE_FLOAT))
+
+/* Use s0 as the frame pointer if it is so requested.  */
+#define HARD_FRAME_POINTER_REGNUM 8
+#define STACK_POINTER_REGNUM 2
+#define THREAD_POINTER_REGNUM 4
+
+/* These two registers don't really exist: they get eliminated to either
+   the stack or hard frame pointer.  */
+#define ARG_POINTER_REGNUM 64
+#define FRAME_POINTER_REGNUM 65
+
+/* Register in which static-chain is passed to a function.  */
+#define STATIC_CHAIN_REGNUM GP_TEMP_FIRST
+
+/* Registers used as temporaries in prologue/epilogue code.
+
+   The prologue registers mustn't conflict with any
+   incoming arguments, the static chain pointer, or the frame pointer.
+   The epilogue temporary mustn't conflict with the return registers,
+   the frame pointer, the EH stack adjustment, or the EH data registers. */
+
+#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1)
+#define RISCV_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP_REGNUM)
+
+#define MCOUNT_NAME "_mcount"
+
+#define NO_PROFILE_COUNTERS 1
+
+/* Emit rtl for profiling.  Output assembler code to FILE
+   to call "_mcount" for profiling a function entry.  */
+#define PROFILE_HOOK(LABEL)						\
+  {									\
+    rtx fun, ra;							\
+    ra = get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);		\
+    fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME);			\
+    emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, ra, Pmode);	\
+  }
+
+/* All the work done in PROFILE_HOOK, but still required.  */
+#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0)
+
+/* Define this macro if it is as good or better to call a constant
+   function address than to call an address kept in a register.  */
+#define NO_FUNCTION_CSE 1
+
+/* Define the classes of registers for register constraints in the
+   machine description.  Also define ranges of constants.
+
+   One of the classes must always be named ALL_REGS and include all hard regs.
+   If there is more than one class, another class must be named NO_REGS
+   and contain no registers.
+
+   The name GENERAL_REGS must be the name of a class (or an alias for
+   another name such as ALL_REGS).  This is the class of registers
+   that is allowed by "g" or "r" in a register constraint.
+   Also, registers outside this class are allocated only when
+   instructions express preferences for them.
+
+   The classes must be numbered in nondecreasing order; that is,
+   a larger-numbered class must never be contained completely
+   in a smaller-numbered class.
+
+   For any two classes, it is very desirable that there be another
+   class that represents their union.  */
+
+enum reg_class
+{
+  NO_REGS,			/* no registers in set */
+  SIBCALL_REGS,			/* registers used by indirect sibcalls */
+  JALR_REGS,			/* registers used by indirect calls */
+  GR_REGS,			/* integer registers */
+  FP_REGS,			/* floating-point registers */
+  FRAME_REGS,			/* arg pointer and frame pointer */
+  ALL_REGS,			/* all registers */
+  LIM_REG_CLASSES		/* max value + 1 */
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define GENERAL_REGS GR_REGS
+
+/* An initializer containing the names of the register classes as C
+   string constants.  These names are used in writing some of the
+   debugging dumps.  */
+
+#define REG_CLASS_NAMES							\
+{									\
+  "NO_REGS",								\
+  "SIBCALL_REGS",							\
+  "JALR_REGS",								\
+  "GR_REGS",								\
+  "FP_REGS",								\
+  "FRAME_REGS",								\
+  "ALL_REGS"								\
+}
+
+/* An initializer containing the contents of the register classes,
+   as integers which are bit masks.  The Nth integer specifies the
+   contents of class N.  The way the integer MASK is interpreted is
+   that register R is in the class if `MASK & (1 << R)' is 1.
+
+   When the machine has more than 32 registers, an integer does not
+   suffice.  Then the integers are replaced by sub-initializers,
+   braced groupings containing several integers.  Each
+   sub-initializer must be suitable as an initializer for the type
+   `HARD_REG_SET' which is defined in `hard-reg-set.h'.  */
+
+#define REG_CLASS_CONTENTS						\
+{									\
+  { 0x00000000, 0x00000000, 0x00000000 },	/* NO_REGS */		\
+  { 0xf00000c0, 0x00000000, 0x00000000 },	/* SIBCALL_REGS */	\
+  { 0xffffffc0, 0x00000000, 0x00000000 },	/* JALR_REGS */		\
+  { 0xffffffff, 0x00000000, 0x00000000 },	/* GR_REGS */		\
+  { 0x00000000, 0xffffffff, 0x00000000 },	/* FP_REGS */		\
+  { 0x00000000, 0x00000000, 0x00000003 },	/* FRAME_REGS */	\
+  { 0xffffffff, 0xffffffff, 0x00000003 }	/* ALL_REGS */		\
+}
+
+/* A C expression whose value is a register class containing hard
+   register REGNO.  In general there is more than one such class;
+   choose a class which is "minimal", meaning that no smaller class
+   also contains the register.  */
+
+#define REGNO_REG_CLASS(REGNO) riscv_regno_to_class[ (REGNO) ]
+
+/* A macro whose definition is the name of the class to which a
+   valid base register must belong.  A base register is one used in
+   an address which is the register value plus a displacement.  */
+
+#define BASE_REG_CLASS GR_REGS
+
+/* A macro whose definition is the name of the class to which a
+   valid index register must belong.  An index register is one used
+   in an address where its value is either multiplied by a scale
+   factor or added to another register (as well as added to a
+   displacement).  */
+
+#define INDEX_REG_CLASS NO_REGS
+
+/* We generally want to put call-clobbered registers ahead of
+   call-saved ones.  (IRA expects this.)  */
+
+#define REG_ALLOC_ORDER							\
+{ \
+  /* Call-clobbered GPRs.  */						\
+  15, 14, 13, 12, 11, 10, 16, 17, 6, 28, 29, 30, 31, 5, 7, 1,		\
+  /* Call-saved GPRs.  */						\
+  8, 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,	       			\
+  /* GPRs that can never be exposed to the register allocator.  */	\
+  0, 2, 3, 4,								\
+  /* Call-clobbered FPRs.  */						\
+  47, 46, 45, 44, 43, 42, 32, 33, 34, 35, 36, 37, 38, 39, 48, 49,	\
+  60, 61, 62, 63,							\
+  /* Call-saved FPRs.  */						\
+  40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,			\
+  /* None of the remaining classes have defined call-saved		\
+     registers.  */							\
+  64, 65								\
+}
+
+/* True if VALUE is a signed 12-bit number.  */
+
+#define SMALL_OPERAND(VALUE) \
+  ((unsigned HOST_WIDE_INT) (VALUE) + IMM_REACH/2 < IMM_REACH)
+
+/* True if VALUE can be loaded into a register using LUI.  */
+
+#define LUI_OPERAND(VALUE)						\
+  (((VALUE) | ((1UL<<31) - IMM_REACH)) == ((1UL<<31) - IMM_REACH)	\
+   || ((VALUE) | ((1UL<<31) - IMM_REACH)) + IMM_REACH == 0)
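+/* A worked sketch of the two predicates above (illustrative only):
+   SMALL_OPERAND accepts -2048 .. 2047, i.e. anything an ADDI immediate
+   can encode, so SMALL_OPERAND (2047) holds but SMALL_OPERAND (2048)
+   does not.  LUI_OPERAND accepts values whose low 12 bits are clear
+   and which fit in a sign-extended 32-bit immediate, e.g. 0x12345000
+   or -4096, but not 0x12345678.  */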
+
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+  reg_classes_intersect_p (FP_REGS, CLASS)
+
+/* Stack layout; function entry, exit and calling.  */
+
+#define STACK_GROWS_DOWNWARD 1
+
+#define FRAME_GROWS_DOWNWARD 1
+
+#define STARTING_FRAME_OFFSET 0
+
+#define RETURN_ADDR_RTX riscv_return_addr
+
+#define ELIMINABLE_REGS							\
+{{ ARG_POINTER_REGNUM,   STACK_POINTER_REGNUM},				\
+ { ARG_POINTER_REGNUM,   HARD_FRAME_POINTER_REGNUM},			\
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM},				\
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+  (OFFSET) = riscv_initial_elimination_offset (FROM, TO)
+
+/* Allocate stack space for arguments at the beginning of each function.  */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* The argument pointer always points to the first argument.  */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+#define REG_PARM_STACK_SPACE(FNDECL) 0
+
+/* Define this if it is the responsibility of the caller to
+   allocate the area reserved for arguments passed in registers.
+   If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
+   of this macro is to determine whether the space is included in
+   `crtl->outgoing_args_size'.  */
+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
+
+#define STACK_BOUNDARY 128
+
+/* Symbolic macros for the registers used to return integer and floating
+   point values.  */
+
+#define GP_RETURN GP_ARG_FIRST
+#define FP_RETURN (UNITS_PER_FP_ARG == 0 ? GP_RETURN : FP_ARG_FIRST)
+
+#define MAX_ARGS_IN_REGISTERS 8
+
+/* Symbolic macros for the first/last argument registers.  */
+
+#define GP_ARG_FIRST (GP_REG_FIRST + 10)
+#define GP_ARG_LAST  (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
+#define GP_TEMP_FIRST (GP_REG_FIRST + 5)
+#define FP_ARG_FIRST (FP_REG_FIRST + 10)
+#define FP_ARG_LAST  (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
+
+#define CALLEE_SAVED_REG_NUMBER(REGNO)			\
+  ((REGNO) >= 8 && (REGNO) <= 9 ? (REGNO) - 8 :		\
+   (REGNO) >= 18 && (REGNO) <= 27 ? (REGNO) - 16 : -1)
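+/* For illustration: the mapping above gives s0/s1 (x8/x9) the slots 0
+   and 1 and s2-s11 (x18-x27) the slots 2 through 11; any other
+   register yields -1.  */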
+
+#define LIBCALL_VALUE(MODE) \
+  riscv_function_value (NULL_TREE, NULL_TREE, MODE)
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+  riscv_function_value (VALTYPE, FUNC, VOIDmode)
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN)
+
+/* 1 if N is a possible register number for function argument passing.
+   We have no FP argument registers when soft-float.  When FP registers
+   are 32 bits, we can't directly reference the odd numbered ones.  */
+
+/* Accept arguments in a0-a7, and in fa0-fa7 if permitted by the ABI.  */
+#define FUNCTION_ARG_REGNO_P(N)						\
+  (IN_RANGE ((N), GP_ARG_FIRST, GP_ARG_LAST)				\
+   || (UNITS_PER_FP_ARG && IN_RANGE ((N), FP_ARG_FIRST, FP_ARG_LAST)))
+
+typedef struct {
+  /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
+  unsigned int num_gprs;
+
+  /* Number of floating-point registers used so far, likewise.  */
+  unsigned int num_fprs;
+} CUMULATIVE_ARGS;
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+   for a call to a function whose data type is FNTYPE.
+   For a library call, FNTYPE is 0.  */
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+  memset (&(CUM), 0, sizeof (CUM))
+
+#define EPILOGUE_USES(REGNO)	((REGNO) == RETURN_ADDR_REGNUM)
+
+/* ABI requires 16-byte alignment, even on RV32. */
+#define RISCV_STACK_ALIGN(LOC) (((LOC) + 15) & -16)
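+/* For example, RISCV_STACK_ALIGN (1) and RISCV_STACK_ALIGN (16) both
+   evaluate to 16, while RISCV_STACK_ALIGN (17) evaluates to 32.  */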
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+   the stack pointer does not matter.  The value is tested only in
+   functions that have frame pointers.
+   No definition is equivalent to always zero.  */
+
+#define EXIT_IGNORE_STACK 1
+
+
+/* Trampolines are a block of code followed by two pointers.  */
+
+#define TRAMPOLINE_CODE_SIZE 16
+#define TRAMPOLINE_SIZE (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2)
+#define TRAMPOLINE_ALIGNMENT POINTER_SIZE
+
+/* Addressing modes, and classification of registers for them.  */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+  riscv_regno_mode_ok_for_base_p (REGNO, MODE, 1)
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+   and check its validity for a certain class.
+   We have two alternate definitions for each of them.
+   The usual definition accepts all pseudo regs; the other rejects them all.
+   The symbol REG_OK_STRICT causes the latter definition to be used.
+
+   Most source files want to accept pseudo regs in the hope that
+   they will get allocated to the class that the insn wants them to be in.
+   Some source files that are used after register allocation
+   need to be strict.  */
+
+#ifndef REG_OK_STRICT
+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
+  riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 0)
+#else
+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
+  riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 1)
+#endif
+
+#define REG_OK_FOR_INDEX_P(X) 0
+
+/* Maximum number of registers that can appear in a valid memory address.  */
+
+#define MAX_REGS_PER_ADDRESS 1
+
+#define CONSTANT_ADDRESS_P(X) \
+  (CONSTANT_P (X) && memory_address_p (SImode, X))
+
+/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
+   'the start of the function that this code is output in'.  */
+
+#define ASM_OUTPUT_LABELREF(FILE,NAME)  \
+  if (strcmp (NAME, "..CURRENT_FUNCTION") == 0)				\
+    asm_fprintf ((FILE), "%U%s",					\
+		 XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));	\
+  else									\
+    asm_fprintf ((FILE), "%U%s", (NAME))
+
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+#define CASE_VECTOR_MODE SImode
+#define CASE_VECTOR_PC_RELATIVE (riscv_cmodel != CM_MEDLOW)
+
+/* The load-address macro is used for PC-relative addressing of symbols
+   that bind locally.  Don't use it for symbols that should be addressed
+   via the GOT.  Also, avoid it for CM_MEDLOW, where LUI addressing
+   currently results in more opportunities for linker relaxation.  */
+#define USE_LOAD_ADDRESS_MACRO(sym)					\
+  (!TARGET_EXPLICIT_RELOCS &&						\
+   ((flag_pic								\
+     && ((SYMBOL_REF_P (sym) && SYMBOL_REF_LOCAL_P (sym))		\
+	 || ((GET_CODE (sym) == CONST)					\
+	     && SYMBOL_REF_P (XEXP (XEXP (sym, 0),0))			\
+	     && SYMBOL_REF_LOCAL_P (XEXP (XEXP (sym, 0),0)))))		\
+     || riscv_cmodel == CM_MEDANY))
+
+/* Define this as 1 if `char' should by default be signed; else as 0.  */
+#define DEFAULT_SIGNED_CHAR 0
+
+#define MOVE_MAX UNITS_PER_WORD
+#define MAX_MOVE_MAX 8
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+   is done just by pretending it is already truncated.  */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) \
+  (TARGET_64BIT ? ((INPREC) <= 32 || (OUTPREC) < 32) : 1)
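+/* For instance, on RV64 truncating DImode to SImode is not a no-op (a
+   sext.w is needed to restore the canonical sign-extended form),
+   whereas truncating DImode to HImode or QImode is.  */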
+
+/* Specify the machine mode that pointers have.
+   After generation of rtl, the compiler makes no further distinction
+   between pointers and any other objects of this machine mode.  */
+
+#ifndef Pmode
+#define Pmode (TARGET_64BIT ? DImode : SImode)
+#endif
+
+/* Give call MEMs SImode since it is the "most permissive" mode
+   for both 32-bit and 64-bit targets.  */
+
+#define FUNCTION_MODE SImode
+
+/* A C expression for the cost of a branch instruction.  A value of 2
+   seems to minimize code size.  */
+
+#define BRANCH_COST(speed_p, predictable_p) \
+  ((!(speed_p) || (predictable_p)) ? 2 : riscv_branch_cost)
+
+#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
+
+/* Control the assembler format that we output.  */
+
+/* Output to assembler file text saying following lines
+   may contain character constants, extra white space, comments, etc.  */
+
+#ifndef ASM_APP_ON
+#define ASM_APP_ON " #APP\n"
+#endif
+
+/* Output to assembler file text saying following lines
+   no longer contain unusual constructs.  */
+
+#ifndef ASM_APP_OFF
+#define ASM_APP_OFF " #NO_APP\n"
+#endif
+
+#define REGISTER_NAMES						\
+{ "zero","ra",  "sp",  "gp",  "tp",  "t0",  "t1",  "t2",	\
+  "s0",  "s1",  "a0",  "a1",  "a2",  "a3",  "a4",  "a5",	\
+  "a6",  "a7",  "s2",  "s3",  "s4",  "s5",  "s6",  "s7",	\
+  "s8",  "s9",  "s10", "s11", "t3",  "t4",  "t5",  "t6",	\
+  "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7",	\
+  "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5",	\
+  "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7",	\
+  "fs8", "fs9", "fs10","fs11","ft8", "ft9", "ft10","ft11",	\
+  "arg", "frame", }
+
+#define ADDITIONAL_REGISTER_NAMES					\
+{									\
+  { "x0",	 0 + GP_REG_FIRST },					\
+  { "x1",	 1 + GP_REG_FIRST },					\
+  { "x2",	 2 + GP_REG_FIRST },					\
+  { "x3",	 3 + GP_REG_FIRST },					\
+  { "x4",	 4 + GP_REG_FIRST },					\
+  { "x5",	 5 + GP_REG_FIRST },					\
+  { "x6",	 6 + GP_REG_FIRST },					\
+  { "x7",	 7 + GP_REG_FIRST },					\
+  { "x8",	 8 + GP_REG_FIRST },					\
+  { "x9",	 9 + GP_REG_FIRST },					\
+  { "x10",	10 + GP_REG_FIRST },					\
+  { "x11",	11 + GP_REG_FIRST },					\
+  { "x12",	12 + GP_REG_FIRST },					\
+  { "x13",	13 + GP_REG_FIRST },					\
+  { "x14",	14 + GP_REG_FIRST },					\
+  { "x15",	15 + GP_REG_FIRST },					\
+  { "x16",	16 + GP_REG_FIRST },					\
+  { "x17",	17 + GP_REG_FIRST },					\
+  { "x18",	18 + GP_REG_FIRST },					\
+  { "x19",	19 + GP_REG_FIRST },					\
+  { "x20",	20 + GP_REG_FIRST },					\
+  { "x21",	21 + GP_REG_FIRST },					\
+  { "x22",	22 + GP_REG_FIRST },					\
+  { "x23",	23 + GP_REG_FIRST },					\
+  { "x24",	24 + GP_REG_FIRST },					\
+  { "x25",	25 + GP_REG_FIRST },					\
+  { "x26",	26 + GP_REG_FIRST },					\
+  { "x27",	27 + GP_REG_FIRST },					\
+  { "x28",	28 + GP_REG_FIRST },					\
+  { "x29",	29 + GP_REG_FIRST },					\
+  { "x30",	30 + GP_REG_FIRST },					\
+  { "x31",	31 + GP_REG_FIRST },					\
+  { "f0",	 0 + FP_REG_FIRST },					\
+  { "f1",	 1 + FP_REG_FIRST },					\
+  { "f2",	 2 + FP_REG_FIRST },					\
+  { "f3",	 3 + FP_REG_FIRST },					\
+  { "f4",	 4 + FP_REG_FIRST },					\
+  { "f5",	 5 + FP_REG_FIRST },					\
+  { "f6",	 6 + FP_REG_FIRST },					\
+  { "f7",	 7 + FP_REG_FIRST },					\
+  { "f8",	 8 + FP_REG_FIRST },					\
+  { "f9",	 9 + FP_REG_FIRST },					\
+  { "f10",	10 + FP_REG_FIRST },					\
+  { "f11",	11 + FP_REG_FIRST },					\
+  { "f12",	12 + FP_REG_FIRST },					\
+  { "f13",	13 + FP_REG_FIRST },					\
+  { "f14",	14 + FP_REG_FIRST },					\
+  { "f15",	15 + FP_REG_FIRST },					\
+  { "f16",	16 + FP_REG_FIRST },					\
+  { "f17",	17 + FP_REG_FIRST },					\
+  { "f18",	18 + FP_REG_FIRST },					\
+  { "f19",	19 + FP_REG_FIRST },					\
+  { "f20",	20 + FP_REG_FIRST },					\
+  { "f21",	21 + FP_REG_FIRST },					\
+  { "f22",	22 + FP_REG_FIRST },					\
+  { "f23",	23 + FP_REG_FIRST },					\
+  { "f24",	24 + FP_REG_FIRST },					\
+  { "f25",	25 + FP_REG_FIRST },					\
+  { "f26",	26 + FP_REG_FIRST },					\
+  { "f27",	27 + FP_REG_FIRST },					\
+  { "f28",	28 + FP_REG_FIRST },					\
+  { "f29",	29 + FP_REG_FIRST },					\
+  { "f30",	30 + FP_REG_FIRST },					\
+  { "f31",	31 + FP_REG_FIRST },					\
+}
+
+/* Globalizing directive for a label.  */
+#define GLOBAL_ASM_OP "\t.globl\t"
+
+/* This is how to store into the string LABEL
+   the symbol_ref name of an internal numbered label where
+   PREFIX is the class of label and NUM is the number within the class.
+   This is suitable for output with `assemble_name'.  */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM)			\
+  sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
+
+/* This is how to output an element of a case-vector that is absolute.  */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE)				\
+  fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
+
+/* This is how to output an element of a PIC case-vector. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL)		\
+  fprintf (STREAM, "\t.word\t%sL%d-%sL%d\n",				\
+	   LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL)
+
+/* This is how to output an assembler line
+   that says to advance the location counter
+   to a multiple of 2**LOG bytes.  */
+
+#define ASM_OUTPUT_ALIGN(STREAM,LOG)					\
+  fprintf (STREAM, "\t.align\t%d\n", (LOG))
+
+/* Define the strings to put out for each section in the object file.  */
+#define TEXT_SECTION_ASM_OP	"\t.text"	/* instructions */
+#define DATA_SECTION_ASM_OP	"\t.data"	/* large data */
+#define READONLY_DATA_SECTION_ASM_OP	"\t.section\t.rodata"
+#define BSS_SECTION_ASM_OP	"\t.bss"
+#define SBSS_SECTION_ASM_OP	"\t.section\t.sbss,\"aw\",@nobits"
+#define SDATA_SECTION_ASM_OP	"\t.section\t.sdata,\"aw\",@progbits"
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO)				\
+do									\
+  {									\
+    fprintf (STREAM, "\taddi\t%s,%s,-8\n\t%s\t%s,0(%s)\n",		\
+	     reg_names[STACK_POINTER_REGNUM],				\
+	     reg_names[STACK_POINTER_REGNUM],				\
+	     TARGET_64BIT ? "sd" : "sw",				\
+	     reg_names[REGNO],						\
+	     reg_names[STACK_POINTER_REGNUM]);				\
+  }									\
+while (0)
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO)				\
+do									\
+  {									\
+    fprintf (STREAM, "\t%s\t%s,0(%s)\n\taddi\t%s,%s,8\n",		\
+	     TARGET_64BIT ? "ld" : "lw",				\
+	     reg_names[REGNO],						\
+	     reg_names[STACK_POINTER_REGNUM],				\
+	     reg_names[STACK_POINTER_REGNUM],				\
+	     reg_names[STACK_POINTER_REGNUM]);				\
+  }									\
+while (0)
+
+#define ASM_COMMENT_START "#"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
+
+/* The maximum number of bytes that can be copied by one iteration of
+   a movmemsi loop; see riscv_block_move_loop.  */
+#define RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER 32
+
+/* The maximum number of bytes that can be copied by a straight-line
+   implementation of movmemsi; see riscv_block_move_straight.  We want
+   to make sure that any loop-based implementation will iterate at
+   least twice.  */
+#define RISCV_MAX_MOVE_BYTES_STRAIGHT (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
+
+/* The base cost of a memcpy call, for MOVE_RATIO and friends. */
+
+#define RISCV_CALL_RATIO 6
+
+/* Any loop-based implementation of movmemsi will have at least
+   RISCV_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
+   moves, so allow individual copies of fewer elements.
+
+   When movmemsi is not available, use a value approximating
+   the length of a memcpy call sequence, so that move_by_pieces
+   will generate inline code if it is shorter than a function call.
+   Since move_by_pieces_ninsns counts memory-to-memory moves, but
+   we'll have to generate a load/store pair for each, halve the
+   value of RISCV_CALL_RATIO to take that into account.  */
+
+#define MOVE_RATIO(speed)				\
+  (HAVE_movmemsi					\
+   ? RISCV_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX		\
+   : RISCV_CALL_RATIO / 2)
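+/* As a rough worked example, assuming UNITS_PER_WORD is 8 on RV64:
+   MOVE_RATIO is then 64 / 8 = 8 when movmemsi is available and
+   RISCV_CALL_RATIO / 2 = 3 otherwise.  */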
+
+/* For CLEAR_RATIO, when optimizing for size, give a better estimate
+   of the length of a memset call, but use the default otherwise.  */
+
+#define CLEAR_RATIO(speed)\
+  ((speed) ? 15 : RISCV_CALL_RATIO)
+
+/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when
+   optimizing for size adjust the ratio to account for the overhead of
+   loading the constant and replicating it across the word.  */
+
+#define SET_RATIO(speed) \
+  ((speed) ? 15 : RISCV_CALL_RATIO - 2)
+
+#ifndef USED_FOR_TARGET
+
+extern const enum reg_class riscv_regno_to_class[];
+extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
+#endif
+
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
+  (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
+
+#define XLEN_SPEC \
+  "%{march=rv32*:32}" \
+  "%{march=rv64*:64}" \
+
+#define ABI_SPEC \
+  "%{mabi=ilp32:ilp32}" \
+  "%{mabi=ilp32f:ilp32f}" \
+  "%{mabi=ilp32d:ilp32d}" \
+  "%{mabi=lp64:lp64}" \
+  "%{mabi=lp64f:lp64f}" \
+  "%{mabi=lp64d:lp64d}" \
+
+#define STARTFILE_PREFIX_SPEC 			\
+   "/lib" XLEN_SPEC "/" ABI_SPEC "/ "		\
+   "/usr/lib" XLEN_SPEC "/" ABI_SPEC "/ "
+
+/* ISA constants needed for code generation.  */
+#define OPCODE_LW    0x2003
+#define OPCODE_LD    0x3003
+#define OPCODE_AUIPC 0x17
+#define OPCODE_JALR  0x67
+#define SHIFT_RD  7
+#define SHIFT_RS1 15
+#define SHIFT_IMM 20
+#define IMM_BITS 12
+
+#define IMM_REACH (1LL << IMM_BITS)
+#define CONST_HIGH_PART(VALUE) (((VALUE) + (IMM_REACH/2)) & ~(IMM_REACH-1))
+#define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE))
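+/* A worked example of the split above: 0x12345abc has CONST_HIGH_PART
+   0x12346000 and CONST_LOW_PART -0x544, matching a LUI/ADDI pair in
+   which the ADDI immediate is sign-extended.  */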
+
+#endif /* ! GCC_RISCV_H */
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
new file mode 100644
index 0000000..17bed16
--- /dev/null
+++ b/gcc/config/riscv/riscv.md
@@ -0,0 +1,2342 @@ 
+;; Machine description for RISC-V for GNU compiler.
+;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (andrew@sifive.com).
+;; Based on MIPS target for GNU compiler.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspec" [
+  ;; GP manipulation.
+  UNSPEC_EH_RETURN
+
+  ;; Symbolic accesses.  The order of this list must match that of
+  ;; enum riscv_symbol_type in riscv-protos.h.
+  UNSPEC_ADDRESS_FIRST
+  UNSPEC_PCREL
+  UNSPEC_LOAD_GOT
+  UNSPEC_TLS
+  UNSPEC_TLS_LE
+  UNSPEC_TLS_IE
+  UNSPEC_TLS_GD
+
+  UNSPEC_AUIPC
+
+  ;; Register save and restore.
+  UNSPEC_GPR_SAVE
+  UNSPEC_GPR_RESTORE
+
+  ;; Blockage and synchronisation.
+  UNSPEC_BLOCKAGE
+  UNSPEC_FENCE
+  UNSPEC_FENCE_I
+])
+
+(define_constants
+  [(RETURN_ADDR_REGNUM		1)
+   (T0_REGNUM			5)
+   (T1_REGNUM			6)
+])
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; ....................
+;;
+;;	Attributes
+;;
+;; ....................
+
+(define_attr "got" "unset,xgot_high,load"
+  (const_string "unset"))
+
+;; Classification of moves, extensions and truncations.  Most values
+;; are as for "type" (see below) but there are also the following
+;; move-specific values:
+;;
+;; andi		a single ANDI instruction
+;; shift_shift	a shift left followed by a shift right
+;;
+;; This attribute is used to determine the instruction's length and
+;; scheduling type.  For doubleword moves, the attribute always describes
+;; the split instructions; in some cases, it is more appropriate for the
+;; scheduling type to be "multi" instead.
+(define_attr "move_type"
+  "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
+   const,logical,arith,andi,shift_shift"
+  (const_string "unknown"))
+
+;; Main data type used by the insn
+(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FPSW"
+  (const_string "unknown"))
+
+;; True if the main data type is twice the size of a word.
+(define_attr "dword_mode" "no,yes"
+  (cond [(and (eq_attr "mode" "DI,DF")
+	      (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
+	 (const_string "yes")
+
+	 (and (eq_attr "mode" "TI,TF")
+	      (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
+	 (const_string "yes")]
+	(const_string "no")))
+
+;; Classification of each insn.
+;; branch	conditional branch
+;; jump		unconditional jump
+;; call		unconditional call
+;; load		load instruction(s)
+;; fpload	floating point load
+;; store	store instruction(s)
+;; fpstore	floating point store
+;; mtc		transfer to coprocessor
+;; mfc		transfer from coprocessor
+;; const	load constant
+;; arith	integer arithmetic instructions
+;; logical      integer logical instructions
+;; shift	integer shift instructions
+;; slt		set less than instructions
+;; imul		integer multiply
+;; idiv		integer divide
+;; move		integer register move (addi rd, rs1, 0)
+;; fmove	floating point register move
+;; fadd		floating point add/subtract
+;; fmul		floating point multiply
+;; fmadd	floating point multiply-add
+;; fdiv		floating point divide
+;; fcmp		floating point compare
+;; fcvt		floating point convert
+;; fsqrt	floating point square root
+;; multi	multiword sequence (or user asm statements)
+;; nop		no operation
+;; ghost	an instruction that produces no real code
+(define_attr "type"
+  "unknown,branch,jump,call,load,fpload,store,fpstore,
+   mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
+   fmadd,fdiv,fcmp,fcvt,fsqrt,multi,nop,ghost"
+  (cond [(eq_attr "got" "load") (const_string "load")
+
+	 ;; If a doubleword move uses these expensive instructions,
+	 ;; it is usually better to schedule them in the same way
+	 ;; as the singleword form, rather than as "multi".
+	 (eq_attr "move_type" "load") (const_string "load")
+	 (eq_attr "move_type" "fpload") (const_string "fpload")
+	 (eq_attr "move_type" "store") (const_string "store")
+	 (eq_attr "move_type" "fpstore") (const_string "fpstore")
+	 (eq_attr "move_type" "mtc") (const_string "mtc")
+	 (eq_attr "move_type" "mfc") (const_string "mfc")
+
+	 ;; These types of move are always single insns.
+	 (eq_attr "move_type" "fmove") (const_string "fmove")
+	 (eq_attr "move_type" "arith") (const_string "arith")
+	 (eq_attr "move_type" "logical") (const_string "logical")
+	 (eq_attr "move_type" "andi") (const_string "logical")
+
+	 ;; These types of move are always split.
+	 (eq_attr "move_type" "shift_shift")
+	   (const_string "multi")
+
+	 ;; These types of move are split for doubleword modes only.
+	 (and (eq_attr "move_type" "move,const")
+	      (eq_attr "dword_mode" "yes"))
+	   (const_string "multi")
+	 (eq_attr "move_type" "move") (const_string "move")
+	 (eq_attr "move_type" "const") (const_string "const")]
+	(const_string "unknown")))
+
+;; Mode for conversion types (fcvt)
+;; I2S          integer to float single (SI/DI to SF)
+;; I2D          integer to float double (SI/DI to DF)
+;; S2I          float to integer (SF to SI/DI)
+;; D2I          float to integer (DF to SI/DI)
+;; D2S          double to float single
+;; S2D          float single to double
+
+(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D" 
+  (const_string "unknown"))
+
+;; Length of instruction in bytes.
+(define_attr "length" ""
+   (cond [
+	  ;; Direct branch instructions have a range of [-0x1000,0xffc],
+	  ;; relative to the address of the delay slot.  If a branch is
+	  ;; outside this range, convert a branch like:
+	  ;;
+	  ;;	bne	r1,r2,target
+	  ;;
+	  ;; to:
+	  ;;
+	  ;;	beq	r1,r2,1f
+	  ;;  j target
+	  ;; 1:
+	  ;;
+	  (eq_attr "type" "branch")
+	  (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
+				  (le (minus (pc) (match_dup 0)) (const_int 4092)))
+	  (const_int 4)
+	  (const_int 8))
+
+	  ;; Conservatively assume calls take two instructions (AUIPC + JALR).
+	  ;; The linker will opportunistically relax the sequence to JAL.
+	  (eq_attr "type" "call") (const_int 8)
+
+	  ;; "Ghost" instructions occupy no space.
+	  (eq_attr "type" "ghost") (const_int 0)
+
+	  (eq_attr "got" "load") (const_int 8)
+
+	  (eq_attr "type" "fcmp") (const_int 8)
+
+	  ;; SHIFT_SHIFTs are decomposed into two separate instructions.
+	  (eq_attr "move_type" "shift_shift")
+		(const_int 8)
+
+	  ;; Check for doubleword moves that are decomposed into two
+	  ;; instructions.
+	  (and (eq_attr "move_type" "mtc,mfc,move")
+	       (eq_attr "dword_mode" "yes"))
+	  (const_int 8)
+
+	  ;; Doubleword CONST{,N} moves are split into two word
+	  ;; CONST{,N} moves.
+	  (and (eq_attr "move_type" "const")
+	       (eq_attr "dword_mode" "yes"))
+	  (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
+
+	  ;; Otherwise, constants, loads and stores are handled by external
+	  ;; routines.
+	  (eq_attr "move_type" "load,fpload")
+	  (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
+	  (eq_attr "move_type" "store,fpstore")
+	  (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
+	  ] (const_int 4)))
+
+;; Is copying of this instruction disallowed?
+(define_attr "cannot_copy" "no,yes" (const_string "no"))
+
+;; Describe a user's asm statement.
+(define_asm_attributes
+  [(set_attr "type" "multi")])
+
+;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
+;; from the same template.
+(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
+(define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")])
+
+;; A copy of GPR that can be used when a pattern has two independent
+;; modes.
+(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
+
+;; This mode iterator allows :P to be used for patterns that operate on
+;; pointer-sized quantities.  Exactly one of the two alternatives will match.
+(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
+
+;; 32-bit integer moves for which we provide move patterns.
+(define_mode_iterator IMOVE32 [SI])
+
+;; 64-bit modes for which we provide move patterns.
+(define_mode_iterator MOVE64 [DI DF])
+
+;; This mode iterator allows the QI and HI extension patterns to be
+;; defined from the same template.
+(define_mode_iterator SHORT [QI HI])
+
+;; Likewise the 64-bit truncate-and-shift patterns.
+(define_mode_iterator SUBDI [QI HI SI])
+(define_mode_iterator HISI [HI SI])
+(define_mode_iterator ANYI [QI HI SI (DI "TARGET_64BIT")])
+
+;; This mode iterator allows :ANYF to be used where SF or DF is allowed.
+(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
+			    (DF "TARGET_DOUBLE_FLOAT")])
+(define_mode_iterator ANYIF [QI HI SI (DI "TARGET_64BIT")
+			     (SF "TARGET_HARD_FLOAT")
+			     (DF "TARGET_DOUBLE_FLOAT")])
+
+;; This attribute gives the length suffix for a sign- or zero-extension
+;; instruction.
+(define_mode_attr size [(QI "b") (HI "h")])
+
+;; Mode attributes for loads.
+(define_mode_attr load [(QI "lb") (HI "lh") (SI "lw") (DI "ld") (SF "flw") (DF "fld")])
+
+;; Instruction names for stores.
+(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd") (SF "fsw") (DF "fsd")])
+
+;; This attribute gives the best constraint to use for registers of
+;; a given mode.
+(define_mode_attr reg [(SI "d") (DI "d") (CC "d")])
+
+;; This attribute gives the format suffix for floating-point operations.
+(define_mode_attr fmt [(SF "s") (DF "d")])
+
+;; This attribute gives the format suffix for atomic memory operations.
+(define_mode_attr amo [(SI "w") (DI "d")])
+
+;; This attribute gives the upper-case mode name for one unit of a
+;; floating-point mode.
+(define_mode_attr UNITMODE [(SF "SF") (DF "DF")])
+
+;; This attribute gives the integer mode that has half the size of
+;; the controlling mode.
+(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
+
+;; This code iterator allows signed and unsigned widening multiplications
+;; to use the same template.
+(define_code_iterator any_extend [sign_extend zero_extend])
+
+;; This code iterator allows the two right shift instructions to be
+;; generated from the same template.
+(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
+
+;; This code iterator allows the three shift instructions to be generated
+;; from the same template.
+(define_code_iterator any_shift [ashift ashiftrt lshiftrt])
+
+;; This code iterator supports emitting signaling FP comparisons.
+(define_code_iterator fp_scmp [ge gt le lt])
+
+;; This code iterator allows unsigned and signed division to be generated
+;; from the same template.
+(define_code_iterator any_div [div udiv])
+
+;; This code iterator allows unsigned and signed modulus to be generated
+;; from the same template.
+(define_code_iterator any_mod [mod umod])
+
+;; These code iterators allow the signed and unsigned scc operations to use
+;; the same template.
+(define_code_iterator any_gt [gt gtu])
+(define_code_iterator any_ge [ge geu])
+(define_code_iterator any_lt [lt ltu])
+(define_code_iterator any_le [le leu])
+
+;; <u> expands to an empty string when doing a signed operation and
+;; "u" when doing an unsigned operation.
+(define_code_attr u [(sign_extend "") (zero_extend "u")
+		     (div "") (udiv "u")
+		     (mod "") (umod "u")
+		     (gt "") (gtu "u")
+		     (ge "") (geu "u")
+		     (lt "") (ltu "u")
+		     (le "") (leu "u")])
+
+;; <su> is like <u>, but the signed form expands to "s" rather than "".
+(define_code_attr su [(sign_extend "s") (zero_extend "u")])
+
+;; <optab> expands to the name of the optab for a particular code.
+(define_code_attr optab [(ashift "ashl")
+			 (ashiftrt "ashr")
+			 (lshiftrt "lshr")
+			 (ge "ge")
+			 (le "le")
+			 (gt "gt")
+			 (lt "lt")
+			 (ior "ior")
+			 (xor "xor")
+			 (and "and")
+			 (plus "add")
+			 (minus "sub")])
+
+;; <insn> expands to the name of the insn that implements a particular code.
+(define_code_attr insn [(ashift "sll")
+			(ashiftrt "sra")
+			(lshiftrt "srl")
+			(ior "or")
+			(xor "xor")
+			(and "and")
+			(plus "add")
+			(minus "sub")])
+
+;; Ghost instructions produce no real code and introduce no hazards.
+;; They exist purely to express an effect on dataflow.
+(define_insn_reservation "ghost" 0
+  (eq_attr "type" "ghost")
+  "nothing")
+
+;;
+;;  ....................
+;;
+;;	ADDITION
+;;
+;;  ....................
+;;
+
+(define_insn "add<mode>3"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+	(plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
+		   (match_operand:ANYF 2 "register_operand" "f")))]
+  ""
+  "fadd.<fmt>\t%0,%1,%2"
+  [(set_attr "type" "fadd")
+   (set_attr "mode" "<UNITMODE>")])
+
+(define_expand "add<mode>3"
+  [(set (match_operand:GPR 0 "register_operand")
+	(plus:GPR (match_operand:GPR 1 "register_operand")
+		  (match_operand:GPR 2 "arith_operand")))]
+  "")
+
+(define_insn "*addsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+	(plus:SI (match_operand:GPR 1 "register_operand" "r,r")
+		  (match_operand:GPR2 2 "arith_operand" "r,Q")))]
+  ""
+  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+(define_insn "*adddi3"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+	(plus:DI (match_operand:DI 1 "register_operand" "r,r")
+		  (match_operand:DI 2 "arith_operand" "r,Q")))]
+  "TARGET_64BIT"
+  "add\t%0,%1,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "DI")])
+
+(define_insn "*addsi3_extended"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+	(sign_extend:DI
+	     (plus:SI (match_operand:SI 1 "register_operand" "r,r")
+		      (match_operand:SI 2 "arith_operand" "r,Q"))))]
+  "TARGET_64BIT"
+  "addw\t%0,%1,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+(define_insn "*adddisi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+	     (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
+		      (truncate:SI (match_operand:DI 2 "arith_operand" "r,Q"))))]
+  "TARGET_64BIT"
+  "addw\t%0,%1,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+(define_insn "*adddisisi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+	     (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
+		      (match_operand:SI 2 "arith_operand" "r,Q")))]
+  "TARGET_64BIT"
+  "addw\t%0,%1,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+(define_insn "*adddi3_truncsi"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+	  (truncate:SI
+	     (plus:DI (match_operand:DI 1 "register_operand" "r,r")
+		      (match_operand:DI 2 "arith_operand" "r,Q"))))]
+  "TARGET_64BIT"
+  "addw\t%0,%1,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+;;
+;;  ....................
+;;
+;;	SUBTRACTION
+;;
+;;  ....................
+;;
+
+(define_insn "sub<mode>3"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+	(minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
+		    (match_operand:ANYF 2 "register_operand" "f")))]
+  ""
+  "fsub.<fmt>\t%0,%1,%2"
+  [(set_attr "type" "fadd")
+   (set_attr "mode" "<UNITMODE>")])
+
+(define_expand "sub<mode>3"
+  [(set (match_operand:GPR 0 "register_operand")
+	(minus:GPR (match_operand:GPR 1 "reg_or_0_operand")
+		   (match_operand:GPR 2 "register_operand")))]
+  "")
+
+(define_insn "*subdi3"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+		   (match_operand:DI 2 "register_operand" "r")))]
+  "TARGET_64BIT"
+  "sub\t%0,%z1,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "DI")])
+
+(define_insn "*subsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(minus:SI (match_operand:GPR 1 "reg_or_0_operand" "rJ")
+		   (match_operand:GPR2 2 "register_operand" "r")))]
+  ""
+  { return TARGET_64BIT ? "subw\t%0,%z1,%2" : "sub\t%0,%z1,%2"; }
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+(define_insn "*subsi3_extended"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(sign_extend:DI
+	    (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+		      (match_operand:SI 2 "register_operand" "r"))))]
+  "TARGET_64BIT"
+  "subw\t%0,%z1,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "DI")])
+
+(define_insn "*subdisi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	     (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
+		      (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
+  "TARGET_64BIT"
+  "subw\t%0,%z1,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+(define_insn "*subdisisi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	     (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
+		      (match_operand:SI 2 "register_operand" "r")))]
+  "TARGET_64BIT"
+  "subw\t%0,%z1,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+(define_insn "*subsidisi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	     (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+		      (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
+  "TARGET_64BIT"
+  "subw\t%0,%z1,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+(define_insn "*subdi3_truncsi"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+	  (truncate:SI
+	     (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,r")
+		      (match_operand:DI 2 "arith_operand" "r,Q"))))]
+  "TARGET_64BIT"
+  "subw\t%0,%z1,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+;;
+;;  ....................
+;;
+;;	MULTIPLICATION
+;;
+;;  ....................
+;;
+
+(define_insn "mul<mode>3"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+	(mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
+		      (match_operand:ANYF 2 "register_operand" "f")))]
+  ""
+  "fmul.<fmt>\t%0,%1,%2"
+  [(set_attr "type" "fmul")
+   (set_attr "mode" "<UNITMODE>")])
+
+(define_expand "mul<mode>3"
+  [(set (match_operand:GPR 0 "register_operand")
+	(mult:GPR (match_operand:GPR 1 "reg_or_0_operand")
+		   (match_operand:GPR 2 "register_operand")))]
+  "TARGET_MUL")
+
+(define_insn "*mulsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(mult:SI (match_operand:GPR 1 "register_operand" "r")
+		  (match_operand:GPR2 2 "register_operand" "r")))]
+  "TARGET_MUL"
+  { return TARGET_64BIT ? "mulw\t%0,%1,%2" : "mul\t%0,%1,%2"; }
+  [(set_attr "type" "imul")
+   (set_attr "mode" "SI")])
+
+(define_insn "*muldisi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	     (mult:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
+		      (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
+  "TARGET_MUL && TARGET_64BIT"
+  "mulw\t%0,%1,%2"
+  [(set_attr "type" "imul")
+   (set_attr "mode" "SI")])
+
+(define_insn "*muldi3_truncsi"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	  (truncate:SI
+	     (mult:DI (match_operand:DI 1 "register_operand" "r")
+		      (match_operand:DI 2 "register_operand" "r"))))]
+  "TARGET_MUL && TARGET_64BIT"
+  "mulw\t%0,%1,%2"
+  [(set_attr "type" "imul")
+   (set_attr "mode" "SI")])
+
+(define_insn "*muldi3"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(mult:DI (match_operand:DI 1 "register_operand" "r")
+		  (match_operand:DI 2 "register_operand" "r")))]
+  "TARGET_MUL && TARGET_64BIT"
+  "mul\t%0,%1,%2"
+  [(set_attr "type" "imul")
+   (set_attr "mode" "DI")])
+
+;;
+;;  ........................
+;;
+;;	MULTIPLICATION HIGH-PART
+;;
+;;  ........................
+;;
+
+
+(define_expand "<u>mulditi3"
+  [(set (match_operand:TI 0 "register_operand")
+	(mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
+		 (any_extend:TI (match_operand:DI 2 "register_operand"))))]
+  "TARGET_MUL && TARGET_64BIT"
+{
+  rtx low = gen_reg_rtx (DImode);
+  emit_insn (gen_muldi3 (low, operands[1], operands[2]));
+
+  rtx high = gen_reg_rtx (DImode);
+  emit_insn (gen_<u>muldi3_highpart (high, operands[1], operands[2]));
+
+  emit_move_insn (gen_lowpart (DImode, operands[0]), low);
+  emit_move_insn (gen_highpart (DImode, operands[0]), high);
+  DONE;
+})
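+;; As an illustration, the expander above turns a full 64x64->128-bit
+;; multiply into a MUL for the low doubleword plus a MULH/MULHU for
+;; the high doubleword, then stitches the two halves into the TImode
+;; destination.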
+
+(define_insn "<u>muldi3_highpart"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(truncate:DI
+	  (lshiftrt:TI
+	    (mult:TI (any_extend:TI
+		       (match_operand:DI 1 "register_operand" "r"))
+		     (any_extend:TI
+		       (match_operand:DI 2 "register_operand" "r")))
+	    (const_int 64))))]
+  "TARGET_MUL && TARGET_64BIT"
+  "mulh<u>\t%0,%1,%2"
+  [(set_attr "type" "imul")
+   (set_attr "mode" "DI")])
+
+(define_expand "usmulditi3"
+  [(set (match_operand:TI 0 "register_operand")
+	(mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand"))
+		 (sign_extend:TI (match_operand:DI 2 "register_operand"))))]
+  "TARGET_MUL && TARGET_64BIT"
+{
+  rtx low = gen_reg_rtx (DImode);
+  emit_insn (gen_muldi3 (low, operands[1], operands[2]));
+
+  rtx high = gen_reg_rtx (DImode);
+  emit_insn (gen_usmuldi3_highpart (high, operands[1], operands[2]));
+
+  emit_move_insn (gen_lowpart (DImode, operands[0]), low);
+  emit_move_insn (gen_highpart (DImode, operands[0]), high);
+  DONE;
+})
+
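+;; MULHSU treats rs1 as signed and rs2 as unsigned, so the output
+;; template below swaps the operands relative to the RTL pattern.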
+(define_insn "usmuldi3_highpart"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(truncate:DI
+	  (lshiftrt:TI
+	    (mult:TI (zero_extend:TI
+		       (match_operand:DI 1 "register_operand" "r"))
+		     (sign_extend:TI
+		       (match_operand:DI 2 "register_operand" "r")))
+	    (const_int 64))))]
+  "TARGET_MUL && TARGET_64BIT"
+  "mulhsu\t%0,%2,%1"
+  [(set_attr "type" "imul")
+   (set_attr "mode" "DI")])
+
+(define_expand "<u>mulsidi3"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(mult:DI (any_extend:DI
+		   (match_operand:SI 1 "register_operand" "r"))
+		 (any_extend:DI
+		   (match_operand:SI 2 "register_operand" "r"))))]
+  "TARGET_MUL && !TARGET_64BIT"
+{
+  rtx temp = gen_reg_rtx (SImode);
+  emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
+  emit_insn (gen_<u>mulsi3_highpart (riscv_subword (operands[0], true),
+				     operands[1], operands[2]));
+  emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
+  DONE;
+})
+
+(define_insn "<u>mulsi3_highpart"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(truncate:SI
+	  (lshiftrt:DI
+	    (mult:DI (any_extend:DI
+		       (match_operand:SI 1 "register_operand" "r"))
+		     (any_extend:DI
+		       (match_operand:SI 2 "register_operand" "r")))
+	    (const_int 32))))]
+  "TARGET_MUL && !TARGET_64BIT"
+  "mulh<u>\t%0,%1,%2"
+  [(set_attr "type" "imul")
+   (set_attr "mode" "SI")])
+
+
+(define_expand "usmulsidi3"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(mult:DI (zero_extend:DI
+		   (match_operand:SI 1 "register_operand" "r"))
+		 (sign_extend:DI
+		   (match_operand:SI 2 "register_operand" "r"))))]
+  "TARGET_MUL && !TARGET_64BIT"
+{
+  rtx temp = gen_reg_rtx (SImode);
+  emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
+  emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
+				     operands[1], operands[2]));
+  emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
+  DONE;
+})
+
+(define_insn "usmulsi3_highpart"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(truncate:SI
+	  (lshiftrt:DI
+	    (mult:DI (zero_extend:DI
+		       (match_operand:SI 1 "register_operand" "r"))
+		     (sign_extend:DI
+		       (match_operand:SI 2 "register_operand" "r")))
+	    (const_int 32))))]
+  "TARGET_MUL && !TARGET_64BIT"
+  "mulhsu\t%0,%2,%1"
+  [(set_attr "type" "imul")
+   (set_attr "mode" "SI")])
+
+;;
+;;  ....................
+;;
+;;	DIVISION and REMAINDER
+;;
+;;  ....................
+;;
+
+(define_insn "<u>divsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(any_div:SI (match_operand:SI 1 "register_operand" "r")
+		  (match_operand:SI 2 "register_operand" "r")))]
+  "TARGET_DIV"
+  { return TARGET_64BIT ? "div<u>w\t%0,%1,%2" : "div<u>\t%0,%1,%2"; }
+  [(set_attr "type" "idiv")
+   (set_attr "mode" "SI")])
+
+(define_insn "<u>divdi3"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(any_div:DI (match_operand:DI 1 "register_operand" "r")
+		  (match_operand:DI 2 "register_operand" "r")))]
+  "TARGET_DIV && TARGET_64BIT"
+  "div<u>\t%0,%1,%2"
+  [(set_attr "type" "idiv")
+   (set_attr "mode" "DI")])
+
+(define_insn "<u>modsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(any_mod:SI (match_operand:SI 1 "register_operand" "r")
+		  (match_operand:SI 2 "register_operand" "r")))]
+  "TARGET_DIV"
+  { return TARGET_64BIT ? "rem<u>w\t%0,%1,%2" : "rem<u>\t%0,%1,%2"; }
+  [(set_attr "type" "idiv")
+   (set_attr "mode" "SI")])
+
+(define_insn "<u>moddi3"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(any_mod:DI (match_operand:DI 1 "register_operand" "r")
+		  (match_operand:DI 2 "register_operand" "r")))]
+  "TARGET_DIV && TARGET_64BIT"
+  "rem<u>\t%0,%1,%2"
+  [(set_attr "type" "idiv")
+   (set_attr "mode" "DI")])
+
+(define_insn "div<mode>3"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+	(div:ANYF (match_operand:ANYF 1 "register_operand" "f")
+		  (match_operand:ANYF 2 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_FDIV"
+  "fdiv.<fmt>\t%0,%1,%2"
+  [(set_attr "type" "fdiv")
+   (set_attr "mode" "<UNITMODE>")])
+
+;;
+;;  ....................
+;;
+;;	SQUARE ROOT
+;;
+;;  ....................
+
+(define_insn "sqrt<mode>2"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+	(sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_FDIV"
+  "fsqrt.<fmt>\t%0,%1"
+  [(set_attr "type" "fsqrt")
+   (set_attr "mode" "<UNITMODE>")])
+
+;; Floating point multiply accumulate instructions.
+
+(define_insn "fma<mode>4"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+    (fma:ANYF
+      (match_operand:ANYF 1 "register_operand" "f")
+      (match_operand:ANYF 2 "register_operand" "f")
+      (match_operand:ANYF 3 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fmadd.<fmt>\t%0,%1,%2,%3"
+  [(set_attr "type" "fmadd")
+   (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "fms<mode>4"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+    (fma:ANYF
+      (match_operand:ANYF 1 "register_operand" "f")
+      (match_operand:ANYF 2 "register_operand" "f")
+      (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
+  "TARGET_HARD_FLOAT"
+  "fmsub.<fmt>\t%0,%1,%2,%3"
+  [(set_attr "type" "fmadd")
+   (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "nfma<mode>4"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+    (neg:ANYF
+      (fma:ANYF
+	(match_operand:ANYF 1 "register_operand" "f")
+	(match_operand:ANYF 2 "register_operand" "f")
+	(match_operand:ANYF 3 "register_operand" "f"))))]
+  "TARGET_HARD_FLOAT"
+  "fnmadd.<fmt>\t%0,%1,%2,%3"
+  [(set_attr "type" "fmadd")
+   (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "nfms<mode>4"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+    (neg:ANYF
+      (fma:ANYF
+	(match_operand:ANYF 1 "register_operand" "f")
+	(match_operand:ANYF 2 "register_operand" "f")
+	(neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))]
+  "TARGET_HARD_FLOAT"
+  "fnmsub.<fmt>\t%0,%1,%2,%3"
+  [(set_attr "type" "fmadd")
+   (set_attr "mode" "<UNITMODE>")])
+
+;; modulo signed zeros, -(a*b+c) == -c-a*b
+(define_insn "*nfma<mode>4_fastmath"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+    (minus:ANYF
+      (match_operand:ANYF 3 "register_operand" "f")
+      (mult:ANYF
+	(neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
+	(match_operand:ANYF 2 "register_operand" "f"))))]
+  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
+  "fnmadd.<fmt>\t%0,%1,%2,%3"
+  [(set_attr "type" "fmadd")
+   (set_attr "mode" "<UNITMODE>")])
+
+;; modulo signed zeros, -(a*b-c) == c-a*b
+(define_insn "*nfms<mode>4_fastmath"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+    (minus:ANYF
+      (match_operand:ANYF 3 "register_operand" "f")
+      (mult:ANYF
+	(match_operand:ANYF 1 "register_operand" "f")
+	(match_operand:ANYF 2 "register_operand" "f"))))]
+  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
+  "fnmsub.<fmt>\t%0,%1,%2,%3"
+  [(set_attr "type" "fmadd")
+   (set_attr "mode" "<UNITMODE>")])
+
+;;
+;;  ....................
+;;
+;;	ABSOLUTE VALUE
+;;
+;;  ....................
+
+(define_insn "abs<mode>2"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+	(abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fabs.<fmt>\t%0,%1"
+  [(set_attr "type" "fmove")
+   (set_attr "mode" "<UNITMODE>")])
+
+
+;;
+;;  ....................
+;;
+;;	MIN/MAX
+;;
+;;  ....................
+
+(define_insn "smin<mode>3"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+		   (smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
+			    (match_operand:ANYF 2 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fmin.<fmt>\t%0,%1,%2"
+  [(set_attr "type" "fmove")
+   (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "smax<mode>3"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+		   (smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
+			    (match_operand:ANYF 2 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fmax.<fmt>\t%0,%1,%2"
+  [(set_attr "type" "fmove")
+   (set_attr "mode" "<UNITMODE>")])
+
+
+;;
+;;  ....................
+;;
+;;	NEGATION and ONE'S COMPLEMENT '
+;;
+;;  ....................
+
+(define_insn "neg<mode>2"
+  [(set (match_operand:ANYF 0 "register_operand" "=f")
+	(neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fneg.<fmt>\t%0,%1"
+  [(set_attr "type" "fmove")
+   (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "one_cmpl<mode>2"
+  [(set (match_operand:GPR 0 "register_operand" "=r")
+	(not:GPR (match_operand:GPR 1 "register_operand" "r")))]
+  ""
+  "not\t%0,%1"
+  [(set_attr "type" "logical")
+   (set_attr "mode" "<MODE>")])
+
+;;
+;;  ....................
+;;
+;;	LOGICAL
+;;
+;;  ....................
+;;
+
+(define_insn "and<mode>3"
+  [(set (match_operand:GPR 0 "register_operand" "=r,r")
+	(and:GPR (match_operand:GPR 1 "register_operand" "%r,r")
+		 (match_operand:GPR 2 "arith_operand" "r,Q")))]
+  ""
+  "and\t%0,%1,%2"
+  [(set_attr "type" "logical")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "ior<mode>3"
+  [(set (match_operand:GPR 0 "register_operand" "=r,r")
+	(ior:GPR (match_operand:GPR 1 "register_operand" "%r,r")
+		 (match_operand:GPR 2 "arith_operand" "r,Q")))]
+  ""
+  "or\t%0,%1,%2"
+  [(set_attr "type" "logical")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "xor<mode>3"
+  [(set (match_operand:GPR 0 "register_operand" "=r,r")
+	(xor:GPR (match_operand:GPR 1 "register_operand" "%r,r")
+		 (match_operand:GPR 2 "arith_operand" "r,Q")))]
+  ""
+  "xor\t%0,%1,%2"
+  [(set_attr "type" "logical")
+   (set_attr "mode" "<MODE>")])
+
+;;
+;;  ....................
+;;
+;;	TRUNCATION
+;;
+;;  ....................
+
+(define_insn "truncdfsf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+	(float_truncate:SF (match_operand:DF 1 "register_operand" "f")))]
+  "TARGET_DOUBLE_FLOAT"
+  "fcvt.s.d\t%0,%1"
+  [(set_attr "type"	"fcvt")
+   (set_attr "cnv_mode"	"D2S")   
+   (set_attr "mode"	"SF")])
+
+;; Integer truncation patterns.  Truncating to HImode/QImode is a no-op.
+;; Truncating from DImode to SImode is not, because we always keep SImode
+;; values sign-extended in a register so we can safely use DImode branches
+;; and comparisons on SImode values.
+
+(define_insn "truncdisi2"
+  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
+	(truncate:SI (match_operand:DI 1 "register_operand" "r,r")))]
+  "TARGET_64BIT"
+  "@
+    sext.w\t%0,%1
+    sw\t%1,%0"
+  [(set_attr "move_type" "arith,store")
+   (set_attr "mode" "SI")])
+
+;; Combiner patterns to optimize shift/truncate combinations.
+
+(define_insn "*ashr_trunc<mode>"
+  [(set (match_operand:SUBDI 0 "register_operand" "=r")
+	(truncate:SUBDI
+	  (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+		       (match_operand:DI 2 "const_arith_operand" ""))))]
+  "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)"
+  "sra\t%0,%1,%2"
+  [(set_attr "type" "shift")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*lshr32_trunc<mode>"
+  [(set (match_operand:SUBDI 0 "register_operand" "=r")
+	(truncate:SUBDI
+	  (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+		       (const_int 32))))]
+  "TARGET_64BIT"
+  "sra\t%0,%1,32"
+  [(set_attr "type" "shift")
+   (set_attr "mode" "<MODE>")])
+
+;;
+;;  ....................
+;;
+;;	ZERO EXTENSION
+;;
+;;  ....................
+
+;; Extension insns.
+
+(define_insn_and_split "zero_extendsidi2"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+	(zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,W")))]
+  "TARGET_64BIT"
+  "@
+   #
+   lwu\t%0,%1"
+  "&& reload_completed && REG_P (operands[1])"
+  [(set (match_dup 0)
+	(ashift:DI (match_dup 1) (const_int 32)))
+   (set (match_dup 0)
+	(lshiftrt:DI (match_dup 0) (const_int 32)))]
+  { operands[1] = gen_lowpart (DImode, operands[1]); }
+  [(set_attr "move_type" "shift_shift,load")
+   (set_attr "mode" "DI")])
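+;; For example, the register alternative above is split after reload
+;; into a left shift by 32 followed by a logical right shift by 32,
+;; i.e. roughly "slli rd,rs,32; srli rd,rd,32".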
+
+;; Combine is not allowed to convert this insn into a zero_extendsidi2
+;; because of TRULY_NOOP_TRUNCATION.
+
+(define_insn_and_split "*clear_upper32"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+	(and:DI (match_operand:DI 1 "nonimmediate_operand" "r,W")
+		(const_int 4294967295)))]
+  "TARGET_64BIT"
+{
+  if (which_alternative == 0)
+    return "#";
+
+  operands[1] = gen_lowpart (SImode, operands[1]);
+  return "lwu\t%0,%1";
+}
+  "&& reload_completed && REG_P (operands[1])"
+  [(set (match_dup 0)
+	(ashift:DI (match_dup 1) (const_int 32)))
+   (set (match_dup 0)
+	(lshiftrt:DI (match_dup 0) (const_int 32)))]
+  ""
+  [(set_attr "move_type" "shift_shift,load")
+   (set_attr "mode" "DI")])
+
+(define_insn_and_split "zero_extendhi<GPR:mode>2"
+  [(set (match_operand:GPR 0 "register_operand" "=r,r")
+	(zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+  ""
+  "@
+   #
+   lhu\t%0,%1"
+  "&& reload_completed && REG_P (operands[1])"
+  [(set (match_dup 0)
+	(ashift:GPR (match_dup 1) (match_dup 2)))
+   (set (match_dup 0)
+	(lshiftrt:GPR (match_dup 0) (match_dup 2)))]
+  {
+    operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
+    operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
+  }
+  [(set_attr "move_type" "shift_shift,load")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "zero_extendqi<SUPERQI:mode>2"
+  [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
+	(zero_extend:SUPERQI
+	     (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+  ""
+  "@
+   and\t%0,%1,0xff
+   lbu\t%0,%1"
+  [(set_attr "move_type" "andi,load")
+   (set_attr "mode" "<SUPERQI:MODE>")])
+
+;;
+;;  ....................
+;;
+;;	SIGN EXTENSION
+;;
+;;  ....................
+
+;; Extension insns.
+;; Those for integer source operand are ordered widest source type first.
+
+;; When TARGET_64BIT, all SImode integer registers should already be in
+;; sign-extended form (see TRULY_NOOP_TRUNCATION and truncdisi2).  We can
+;; therefore get rid of register->register instructions if we constrain
+;; the source to be in the same register as the destination.
+;;
+;; The register alternative has type "arith" so that the pre-reload
+;; scheduler will treat it as a move.  This reflects what happens if
+;; the register alternative needs a reload.
+(define_insn_and_split "extendsidi2"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+	(sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
+  "TARGET_64BIT"
+  "@
+   #
+   lw\t%0,%1"
+  "&& reload_completed && register_operand (operands[1], VOIDmode)"
+  [(set (match_dup 0) (match_dup 1))]
+{
+  if (REGNO (operands[0]) == REGNO (operands[1]))
+    {
+      emit_note (NOTE_INSN_DELETED);
+      DONE;
+    }
+  operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
+}
+  [(set_attr "move_type" "move,load")
+   (set_attr "mode" "DI")])
+
+(define_insn_and_split "extend<SHORT:mode><SUPERQI:mode>2"
+  [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
+	(sign_extend:SUPERQI
+	     (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
+  ""
+  "@
+   #
+   l<SHORT:size>\t%0,%1"
+  "&& reload_completed && REG_P (operands[1])"
+  [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
+   (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
+{
+  operands[0] = gen_lowpart (SImode, operands[0]);
+  operands[1] = gen_lowpart (SImode, operands[1]);
+  operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
+			 - GET_MODE_BITSIZE (<SHORT:MODE>mode));
+}
+  [(set_attr "move_type" "shift_shift,load")
+   (set_attr "mode" "SI")])
+
+(define_insn "extendsfdf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+	(float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
+  "TARGET_DOUBLE_FLOAT"
+  "fcvt.d.s\t%0,%1"
+  [(set_attr "type"	"fcvt")
+   (set_attr "cnv_mode"	"S2D")   
+   (set_attr "mode"	"DF")])
+
+;;
+;;  ....................
+;;
+;;	CONVERSIONS
+;;
+;;  ....................
+
+(define_insn "fix_truncdfsi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(fix:SI (match_operand:DF 1 "register_operand" "f")))]
+  "TARGET_DOUBLE_FLOAT"
+  "fcvt.w.d %0,%1,rtz"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"DF")
+   (set_attr "cnv_mode"	"D2I")])
+
+
+(define_insn "fix_truncsfsi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(fix:SI (match_operand:SF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fcvt.w.s %0,%1,rtz"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"SF")
+   (set_attr "cnv_mode"	"S2I")])
+
+
+(define_insn "fix_truncdfdi2"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(fix:DI (match_operand:DF 1 "register_operand" "f")))]
+  "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
+  "fcvt.l.d %0,%1,rtz"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"DF")
+   (set_attr "cnv_mode"	"D2I")])
+
+
+(define_insn "fix_truncsfdi2"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(fix:DI (match_operand:SF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_64BIT"
+  "fcvt.l.s %0,%1,rtz"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"SF")
+   (set_attr "cnv_mode"	"S2I")])
+
+
+(define_insn "floatsidf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+	(float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
+  "TARGET_DOUBLE_FLOAT"
+  "fcvt.d.w\t%0,%z1"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"DF")
+   (set_attr "cnv_mode"	"I2D")])
+
+
+(define_insn "floatdidf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+	(float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+  "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
+  "fcvt.d.l\t%0,%z1"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"DF")
+   (set_attr "cnv_mode"	"I2D")])
+
+
+(define_insn "floatsisf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+	(float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
+  "TARGET_HARD_FLOAT"
+  "fcvt.s.w\t%0,%z1"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"SF")
+   (set_attr "cnv_mode"	"I2S")])
+
+
+(define_insn "floatdisf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+	(float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+  "TARGET_HARD_FLOAT && TARGET_64BIT"
+  "fcvt.s.l\t%0,%z1"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"SF")
+   (set_attr "cnv_mode"	"I2S")])
+
+
+(define_insn "floatunssidf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+	(unsigned_float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
+  "TARGET_DOUBLE_FLOAT"
+  "fcvt.d.wu\t%0,%z1"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"DF")
+   (set_attr "cnv_mode"	"I2D")])
+
+
+(define_insn "floatunsdidf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+	(unsigned_float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+  "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
+  "fcvt.d.lu\t%0,%z1"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"DF")
+   (set_attr "cnv_mode"	"I2D")])
+
+
+(define_insn "floatunssisf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+	(unsigned_float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
+  "TARGET_HARD_FLOAT"
+  "fcvt.s.wu\t%0,%z1"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"SF")
+   (set_attr "cnv_mode"	"I2S")])
+
+
+(define_insn "floatunsdisf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+	(unsigned_float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+  "TARGET_HARD_FLOAT && TARGET_64BIT"
+  "fcvt.s.lu\t%0,%z1"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"SF")
+   (set_attr "cnv_mode"	"I2S")])
+
+
+(define_insn "fixuns_truncdfsi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(unsigned_fix:SI (match_operand:DF 1 "register_operand" "f")))]
+  "TARGET_DOUBLE_FLOAT"
+  "fcvt.wu.d %0,%1,rtz"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"DF")
+   (set_attr "cnv_mode"	"D2I")])
+
+
+(define_insn "fixuns_truncsfsi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(unsigned_fix:SI (match_operand:SF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "fcvt.wu.s %0,%1,rtz"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"SF")
+   (set_attr "cnv_mode"	"S2I")])
+
+
+(define_insn "fixuns_truncdfdi2"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(unsigned_fix:DI (match_operand:DF 1 "register_operand" "f")))]
+  "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
+  "fcvt.lu.d %0,%1,rtz"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"DF")
+   (set_attr "cnv_mode"	"D2I")])
+
+
+(define_insn "fixuns_truncsfdi2"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(unsigned_fix:DI (match_operand:SF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_64BIT"
+  "fcvt.lu.s %0,%1,rtz"
+  [(set_attr "type"	"fcvt")
+   (set_attr "mode"	"SF")
+   (set_attr "cnv_mode"	"S2I")])
+
+;;
+;;  ....................
+;;
+;;	DATA MOVEMENT
+;;
+;;  ....................
+
+;; Lower-level instructions for loading an address from the GOT.
+;; We could use MEMs, but an unspec gives more optimization
+;; opportunities.
+
+(define_insn "got_load<mode>"
+   [(set (match_operand:P 0 "register_operand" "=r")
+       (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
+		 UNSPEC_LOAD_GOT))]
+  ""
+  "la\t%0,%1"
+   [(set_attr "got" "load")
+    (set_attr "mode" "<MODE>")])
+
+(define_insn "tls_add_tp_le<mode>"
+  [(set (match_operand:P 0 "register_operand" "=r")
+	(unspec:P [(match_operand:P 1 "register_operand" "r")
+		   (match_operand:P 2 "register_operand" "r")
+		   (match_operand:P 3 "symbolic_operand" "")]
+		  UNSPEC_TLS_LE))]
+  ""
+  "add\t%0,%1,%2,%%tprel_add(%3)"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "got_load_tls_gd<mode>"
+  [(set (match_operand:P 0 "register_operand" "=r")
+       (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
+		 UNSPEC_TLS_GD))]
+  ""
+  "la.tls.gd\t%0,%1"
+  [(set_attr "got" "load")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "got_load_tls_ie<mode>"
+  [(set (match_operand:P 0 "register_operand" "=r")
+       (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
+		 UNSPEC_TLS_IE))]
+  ""
+  "la.tls.ie\t%0,%1"
+  [(set_attr "got" "load")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "auipc<mode>"
+  [(set (match_operand:P 0 "register_operand" "=r")
+       (unspec:P [(match_operand:P 1 "symbolic_operand" "")
+		  (match_operand:P 2 "const_int_operand")
+		  (pc)]
+		 UNSPEC_AUIPC))]
+  ""
+  ".LA%2: auipc\t%0,%h1"
+  [(set_attr "type" "arith")
+   (set_attr "cannot_copy" "yes")])
+
+;; Instructions for adding the low 12 bits of an address to a register.
+;; Operand 2 is the address: riscv_print_operand works out which relocation
+;; should be applied.
+
+(define_insn "*low<mode>"
+  [(set (match_operand:P 0 "register_operand" "=r")
+	(lo_sum:P (match_operand:P 1 "register_operand" "r")
+		  (match_operand:P 2 "symbolic_operand" "")))]
+  ""
+  "add\t%0,%1,%R2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "<MODE>")])
+
+;; Allow combine to split complex const_int load sequences, using operand 2
+;; to store the intermediate results.  See move_operand for details.
+(define_split
+  [(set (match_operand:GPR 0 "register_operand")
+	(match_operand:GPR 1 "splittable_const_int_operand"))
+   (clobber (match_operand:GPR 2 "register_operand"))]
+  ""
+  [(const_int 0)]
+{
+  riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]));
+  DONE;
+})
+
+;; Likewise, for symbolic operands.
+(define_split
+  [(set (match_operand:P 0 "register_operand")
+	(match_operand:P 1))
+   (clobber (match_operand:P 2 "register_operand"))]
+  "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
+  [(set (match_dup 0) (match_dup 3))]
+{
+  riscv_split_symbol (operands[2], operands[1],
+		     MAX_MACHINE_MODE, &operands[3]);
+})
+
+;; 64-bit integer moves
+
+;; Unlike most other insns, the move insns can't be split with
+;; different predicates, because register spilling and other parts of
+;; the compiler have memoized the insn number already.
+
+(define_expand "movdi"
+  [(set (match_operand:DI 0 "")
+	(match_operand:DI 1 ""))]
+  ""
+{
+  if (riscv_legitimize_move (DImode, operands[0], operands[1]))
+    DONE;
+})
+
+(define_insn "*movdi_32bit"
+  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,  *f,*f,*r,*f,*m")
+	(match_operand:DI 1 "move_operand" " r,i,m,r,*J*r,*m,*f,*f,*f"))]
+  "!TARGET_64BIT
+   && (register_operand (operands[0], DImode)
+       || reg_or_0_operand (operands[1], DImode))"
+  { return riscv_output_move (operands[0], operands[1]); }
+  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore")
+   (set_attr "mode" "DI")])
+
+(define_insn "*movdi_64bit"
+  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r, m,*f,*f,*r,*f,*m")
+	(match_operand:DI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,*f"))]
+  "TARGET_64BIT
+   && (register_operand (operands[0], DImode)
+       || reg_or_0_operand (operands[1], DImode))"
+  { return riscv_output_move (operands[0], operands[1]); }
+  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore")
+   (set_attr "mode" "DI")])
+
+;; 32-bit Integer moves
+
+;; Unlike most other insns, the move insns can't be split with
+;; different predicates, because register spilling and other parts of
+;; the compiler have memoized the insn number already.
+
+(define_expand "mov<mode>"
+  [(set (match_operand:IMOVE32 0 "")
+	(match_operand:IMOVE32 1 ""))]
+  ""
+{
+  if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
+    DONE;
+})
+
+(define_insn "*mov<mode>_internal"
+  [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
+	(match_operand:IMOVE32 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
+  "(register_operand (operands[0], <MODE>mode)
+    || reg_or_0_operand (operands[1], <MODE>mode))"
+  { return riscv_output_move (operands[0], operands[1]); }
+  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
+   (set_attr "mode" "SI")])
+
+;; 16-bit Integer moves
+
+;; Unlike most other insns, the move insns can't be split with
+;; different predicates, because register spilling and other parts of
+;; the compiler have memoized the insn number already.
+;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
+
+(define_expand "movhi"
+  [(set (match_operand:HI 0 "")
+	(match_operand:HI 1 ""))]
+  ""
+{
+  if (riscv_legitimize_move (HImode, operands[0], operands[1]))
+    DONE;
+})
+
+(define_insn "*movhi_internal"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
+	(match_operand:HI 1 "move_operand"	 "r,T,m,rJ,*r*J,*f"))]
+  "(register_operand (operands[0], HImode)
+    || reg_or_0_operand (operands[1], HImode))"
+  { return riscv_output_move (operands[0], operands[1]); }
+  [(set_attr "move_type" "move,const,load,store,mtc,mfc")
+   (set_attr "mode" "HI")])
+
+;; HImode constant generation; see riscv_move_integer for details.
+;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION.
+
+(define_insn "add<mode>hi3"
+  [(set (match_operand:HI 0 "register_operand" "=r,r")
+	(plus:HI (match_operand:HISI 1 "register_operand" "r,r")
+		  (match_operand:HISI 2 "arith_operand" "r,Q")))]
+  ""
+  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
+  [(set_attr "type" "arith")
+   (set_attr "mode" "HI")])
+
+(define_insn "xor<mode>hi3"
+  [(set (match_operand:HI 0 "register_operand" "=r,r")
+	(xor:HI (match_operand:HISI 1 "register_operand" "r,r")
+		  (match_operand:HISI 2 "arith_operand" "r,Q")))]
+  ""
+  "xor\t%0,%1,%2"
+  [(set_attr "type" "logical")
+   (set_attr "mode" "HI")])
+
+;; 8-bit Integer moves
+
+(define_expand "movqi"
+  [(set (match_operand:QI 0 "")
+	(match_operand:QI 1 ""))]
+  ""
+{
+  if (riscv_legitimize_move (QImode, operands[0], operands[1]))
+    DONE;
+})
+
+(define_insn "*movqi_internal"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
+	(match_operand:QI 1 "move_operand"	 "r,I,m,rJ,*r*J,*f"))]
+  "(register_operand (operands[0], QImode)
+    || reg_or_0_operand (operands[1], QImode))"
+  { return riscv_output_move (operands[0], operands[1]); }
+  [(set_attr "move_type" "move,const,load,store,mtc,mfc")
+   (set_attr "mode" "QI")])
+
+;; 32-bit floating point moves
+
+(define_expand "movsf"
+  [(set (match_operand:SF 0 "")
+	(match_operand:SF 1 ""))]
+  ""
+{
+  if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
+    DONE;
+})
+
+(define_insn "*movsf_hardfloat"
+  [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
+	(match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
+  "TARGET_HARD_FLOAT
+   && (register_operand (operands[0], SFmode)
+       || reg_or_0_operand (operands[1], SFmode))"
+  { return riscv_output_move (operands[0], operands[1]); }
+  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
+   (set_attr "mode" "SF")])
+
+(define_insn "*movsf_softfloat"
+  [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
+	(match_operand:SF 1 "move_operand" "Gr,m,r"))]
+  "!TARGET_HARD_FLOAT
+   && (register_operand (operands[0], SFmode)
+       || reg_or_0_operand (operands[1], SFmode))"
+  { return riscv_output_move (operands[0], operands[1]); }
+  [(set_attr "move_type" "move,load,store")
+   (set_attr "mode" "SF")])
+
+;; 64-bit floating point moves
+
+(define_expand "movdf"
+  [(set (match_operand:DF 0 "")
+	(match_operand:DF 1 ""))]
+  ""
+{
+  if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
+    DONE;
+})
+
+;; In RV32, we lack fmv.x.d and fmv.d.x.  Go through memory instead.
+;; (However, we can still use fcvt.d.w to zero a floating-point register.)
+(define_insn "*movdf_hardfloat_rv32"
+  [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*r,*r,*m")
+	(match_operand:DF 1 "move_operand" "f,G,m,f,G,*r*G,*m,*r"))]
+  "!TARGET_64BIT && TARGET_DOUBLE_FLOAT
+   && (register_operand (operands[0], DFmode)
+       || reg_or_0_operand (operands[1], DFmode))"
+  { return riscv_output_move (operands[0], operands[1]); }
+  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,move,load,store")
+   (set_attr "mode" "DF")])
+
+(define_insn "*movdf_hardfloat_rv64"
+  [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
+	(match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))]
+  "TARGET_64BIT && TARGET_DOUBLE_FLOAT
+   && (register_operand (operands[0], DFmode)
+       || reg_or_0_operand (operands[1], DFmode))"
+  { return riscv_output_move (operands[0], operands[1]); }
+  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
+   (set_attr "mode" "DF")])
+
+(define_insn "*movdf_softfloat"
+  [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
+	(match_operand:DF 1 "move_operand" "rG,m,rG"))]
+  "!TARGET_DOUBLE_FLOAT
+   && (register_operand (operands[0], DFmode)
+       || reg_or_0_operand (operands[1], DFmode))"
+  { return riscv_output_move (operands[0], operands[1]); }
+  [(set_attr "move_type" "move,load,store")
+   (set_attr "mode" "DF")])
+
+(define_split
+  [(set (match_operand:MOVE64 0 "nonimmediate_operand")
+	(match_operand:MOVE64 1 "move_operand"))]
+  "reload_completed
+   && riscv_split_64bit_move_p (operands[0], operands[1])"
+  [(const_int 0)]
+{
+  riscv_split_doubleword_move (operands[0], operands[1]);
+  DONE;
+})
+
+;; Expand in-line code to clear the instruction cache between operand[0] and
+;; operand[1].
+(define_expand "clear_cache"
+  [(match_operand 0 "pmode_register_operand")
+   (match_operand 1 "pmode_register_operand")]
+  ""
+  "
+{
+  emit_insn (gen_fence_i ());
+  DONE;
+}")
+
+(define_insn "fence"
+  [(unspec_volatile [(const_int 0)] UNSPEC_FENCE)]
+  ""
+  "%|fence%-")
+
+(define_insn "fence_i"
+  [(unspec_volatile [(const_int 0)] UNSPEC_FENCE_I)]
+  ""
+  "fence.i")
+
+;; Block moves; see riscv.c for more details.
+;; Argument 0 is the destination
+;; Argument 1 is the source
+;; Argument 2 is the length
+;; Argument 3 is the alignment
+
+(define_expand "movmemsi"
+  [(parallel [(set (match_operand:BLK 0 "general_operand")
+		   (match_operand:BLK 1 "general_operand"))
+	      (use (match_operand:SI 2 ""))
+	      (use (match_operand:SI 3 "const_int_operand"))])]
+  "!TARGET_MEMCPY"
+{
+  if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
+    DONE;
+  else
+    FAIL;
+})
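+
+;; For illustration (a sketch; struct buf and copy_buf are made up): a plain
+;; structure copy like the one below is a candidate for this expander.
+;; riscv_expand_block_move may open-code it; with -mmemcpy, or when the
+;; expander FAILs, the block move becomes a memcpy call instead.
+;;
+;;   struct buf { char data[64]; };
+;;
+;;   void
+;;   copy_buf (struct buf *dst, const struct buf *src)
+;;   {
+;;     *dst = *src;   /* BLKmode block move */
+;;   }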
+
+;;
+;;  ....................
+;;
+;;	SHIFTS
+;;
+;;  ....................
+
+(define_insn "<optab>si3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(any_shift:SI (match_operand:SI 1 "register_operand" "r")
+		       (match_operand:SI 2 "arith_operand" "rI")))]
+  ""
+{
+  if (GET_CODE (operands[2]) == CONST_INT)
+    operands[2] = GEN_INT (INTVAL (operands[2])
+			   & (GET_MODE_BITSIZE (SImode) - 1));
+
+  return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2";
+}
+  [(set_attr "type" "shift")
+   (set_attr "mode" "SI")])
+
+(define_insn "*<optab>disi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	     (any_shift:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
+		      (truncate:SI (match_operand:DI 2 "arith_operand" "rI"))))]
+  "TARGET_64BIT"
+  "<insn>w\t%0,%1,%2"
+  [(set_attr "type" "shift")
+   (set_attr "mode" "SI")])
+
+(define_insn "*ashldi3_truncsi"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	  (truncate:SI
+	     (ashift:DI (match_operand:DI 1 "register_operand" "r")
+		      (match_operand:DI 2 "const_arith_operand" "I"))))]
+  "TARGET_64BIT && INTVAL (operands[2]) < 32"
+  "sllw\t%0,%1,%2"
+  [(set_attr "type" "shift")
+   (set_attr "mode" "SI")])
+
+(define_insn "*ashldisi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	  (ashift:SI (match_operand:GPR 1 "register_operand" "r")
+		      (match_operand:GPR2 2 "arith_operand" "rI")))]
+  "TARGET_64BIT && (GET_CODE (operands[2]) == CONST_INT ? INTVAL (operands[2]) < 32 : 1)"
+  "sllw\t%0,%1,%2"
+  [(set_attr "type" "shift")
+   (set_attr "mode" "SI")])
+
+(define_insn "<optab>di3"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(any_shift:DI (match_operand:DI 1 "register_operand" "r")
+		       (match_operand:DI 2 "arith_operand" "rI")))]
+  "TARGET_64BIT"
+{
+  if (GET_CODE (operands[2]) == CONST_INT)
+    operands[2] = GEN_INT (INTVAL (operands[2])
+			   & (GET_MODE_BITSIZE (DImode) - 1));
+
+  return "<insn>\t%0,%1,%2";
+}
+  [(set_attr "type" "shift")
+   (set_attr "mode" "DI")])
+
+(define_insn "<optab>si3_extend"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(sign_extend:DI
+	   (any_shift:SI (match_operand:SI 1 "register_operand" "r")
+			 (match_operand:SI 2 "arith_operand" "rI"))))]
+  "TARGET_64BIT"
+{
+  if (GET_CODE (operands[2]) == CONST_INT)
+    operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+
+  return "<insn>w\t%0,%1,%2";
+}
+  [(set_attr "type" "shift")
+   (set_attr "mode" "SI")])
+
+;;
+;;  ....................
+;;
+;;	CONDITIONAL BRANCHES
+;;
+;;  ....................
+
+;; Conditional branches
+
+(define_insn "*branch_order<mode>"
+  [(set (pc)
+	(if_then_else
+	 (match_operator 1 "order_operator"
+			 [(match_operand:GPR 2 "register_operand" "r")
+			  (match_operand:GPR 3 "reg_or_0_operand" "rJ")])
+	 (label_ref (match_operand 0 "" ""))
+	 (pc)))]
+  ""
+{
+  if (GET_CODE (operands[3]) == CONST_INT)
+    return "b%C1z\t%2,%0";
+  return "b%C1\t%2,%3,%0";
+}
+  [(set_attr "type" "branch")
+   (set_attr "mode" "none")])
+
+;; Used to implement built-in functions.
+(define_expand "condjump"
+  [(set (pc)
+	(if_then_else (match_operand 0)
+		      (label_ref (match_operand 1))
+		      (pc)))])
+
+(define_expand "cbranch<mode>4"
+  [(set (pc)
+	(if_then_else (match_operator 0 "comparison_operator"
+		       [(match_operand:GPR 1 "register_operand")
+			(match_operand:GPR 2 "nonmemory_operand")])
+		      (label_ref (match_operand 3 ""))
+		      (pc)))]
+  ""
+{
+  riscv_expand_conditional_branch (operands);
+  DONE;
+})
+
+(define_expand "cbranch<mode>4"
+  [(set (pc)
+	(if_then_else (match_operator 0 "comparison_operator"
+		       [(match_operand:ANYF 1 "register_operand")
+			(match_operand:ANYF 2 "register_operand")])
+		      (label_ref (match_operand 3 ""))
+		      (pc)))]
+  ""
+{
+  riscv_expand_conditional_branch (operands);
+  DONE;
+})
+
+(define_insn_and_split "*branch_on_bit<GPR:mode>"
+  [(set (pc)
+	(if_then_else
+	 (match_operator 0 "equality_operator"
+	  [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
+		 (const_int 1)
+		 (match_operand 3 "branch_on_bit_operand"))
+		 (const_int 0)])
+	 (label_ref (match_operand 1))
+	 (pc)))
+   (clobber (match_scratch:GPR 4 "=&r"))]
+  ""
+  "#"
+  "reload_completed"
+  [(set (match_dup 4)
+	(ashift:GPR (match_dup 2) (match_dup 3)))
+   (set (pc)
+	(if_then_else
+	 (match_op_dup 0 [(match_dup 4) (const_int 0)])
+	 (label_ref (match_operand 1))
+	 (pc)))]
+{
+  int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
+  operands[3] = GEN_INT (shift);
+
+  if (GET_CODE (operands[0]) == EQ)
+    operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
+  else
+    operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
+})
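+
+;; For example (illustrative): a branch on "if (x & 0x10)" tests a single
+;; bit; the split above shifts that bit into the sign position and branches
+;; with bgez/bltz instead of materializing the mask.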
+
+(define_insn_and_split "*branch_on_bit_range<GPR:mode>"
+  [(set (pc)
+	(if_then_else
+	 (match_operator 0 "equality_operator"
+	  [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
+		 (match_operand 3 "branch_on_bit_operand")
+		 (const_int 0))
+		 (const_int 0)])
+	 (label_ref (match_operand 1))
+	 (pc)))
+   (clobber (match_scratch:GPR 4 "=&r"))]
+  ""
+  "#"
+  "reload_completed"
+  [(set (match_dup 4)
+	(ashift:GPR (match_dup 2) (match_dup 3)))
+   (set (pc)
+	(if_then_else
+	 (match_op_dup 0 [(match_dup 4) (const_int 0)])
+	 (label_ref (match_operand 1))
+	 (pc)))]
+{
+  operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
+})
+
+;;
+;;  ....................
+;;
+;;	SETTING A REGISTER FROM A COMPARISON
+;;
+;;  ....................
+
+;; Destination is always set in SI mode.
+
+(define_expand "cstore<mode>4"
+  [(set (match_operand:SI 0 "register_operand")
+	(match_operator:SI 1 "order_operator"
+	 [(match_operand:GPR 2 "register_operand")
+	  (match_operand:GPR 3 "nonmemory_operand")]))]
+  ""
+{
+  riscv_expand_scc (operands);
+  DONE;
+})
+
+(define_insn "cstore<mode>4"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+	(match_operator:SI 1 "fp_order_operator"
+	      [(match_operand:ANYF 2 "register_operand" "f")
+	       (match_operand:ANYF 3 "register_operand" "f")]))]
+  "TARGET_HARD_FLOAT"
+{
+  if (GET_CODE (operands[1]) == NE)
+    return "feq.<fmt>\t%0,%2,%3; seqz %0, %0";
+  return "f%C1.<fmt>\t%0,%2,%3";
+}
+  [(set_attr "type" "fcmp")
+   (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "f<optab>_guarded<ANYF:mode>4"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+	(fp_scmp:SI
+	      (match_operand:ANYF 1 "register_operand" "f")
+	      (match_operand:ANYF 2 "register_operand" "f")))
+    (clobber (match_scratch:SI 3 "=&r"))]
+  "TARGET_HARD_FLOAT"
+  "frflags\t%3\n\tf<optab>.<fmt>\t%0,%1,%2\n\tfsflags %3"
+  [(set_attr "type" "fcmp")
+   (set_attr "mode" "<UNITMODE>")
+   (set (attr "length") (const_int 12))])
+
+(define_insn "*seq_zero_<GPR:mode><GPR2:mode>"
+  [(set (match_operand:GPR2 0 "register_operand" "=r")
+	(eq:GPR2 (match_operand:GPR 1 "register_operand" "r")
+		 (const_int 0)))]
+  ""
+  "seqz\t%0,%1"
+  [(set_attr "type" "slt")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*sne_zero_<GPR:mode><GPR2:mode>"
+  [(set (match_operand:GPR2 0 "register_operand" "=r")
+	(ne:GPR2 (match_operand:GPR 1 "register_operand" "r")
+		 (const_int 0)))]
+  ""
+  "snez\t%0,%1"
+  [(set_attr "type" "slt")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>"
+  [(set (match_operand:GPR2 0 "register_operand" "=r")
+	(any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r")
+		     (match_operand:GPR 2 "reg_or_0_operand" "rJ")))]
+  ""
+  "slt<u>\t%0,%z2,%1"
+  [(set_attr "type" "slt")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*sge<u>_<GPR:mode><GPR2:mode>"
+  [(set (match_operand:GPR2 0 "register_operand" "=r")
+	(any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r")
+		     (const_int 1)))]
+  ""
+  "slt<u>\t%0,zero,%1"
+  [(set_attr "type" "slt")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*slt<u>_<GPR:mode><GPR2:mode>"
+  [(set (match_operand:GPR2 0 "register_operand" "=r")
+	(any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r")
+		     (match_operand:GPR 2 "arith_operand" "rI")))]
+  ""
+  "slt<u>\t%0,%1,%2"
+  [(set_attr "type" "slt")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*sle<u>_<GPR:mode><GPR2:mode>"
+  [(set (match_operand:GPR2 0 "register_operand" "=r")
+	(any_le:GPR2 (match_operand:GPR 1 "register_operand" "r")
+		     (match_operand:GPR 2 "sle_operand" "")))]
+  ""
+{
+  operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
+  return "slt<u>\t%0,%1,%2";
+}
+  [(set_attr "type" "slt")
+   (set_attr "mode" "<GPR:MODE>")])
+
+;;
+;;  ....................
+;;
+;;	UNCONDITIONAL BRANCHES
+;;
+;;  ....................
+
+;; Unconditional branches.
+
+(define_insn "jump"
+  [(set (pc)
+	(label_ref (match_operand 0 "" "")))]
+  ""
+  "j\t%l0"
+  [(set_attr "type"	"jump")
+   (set_attr "mode"	"none")])
+
+(define_expand "indirect_jump"
+  [(set (pc) (match_operand 0 "register_operand"))]
+  ""
+{
+  operands[0] = force_reg (Pmode, operands[0]);
+  if (Pmode == SImode)
+    emit_jump_insn (gen_indirect_jumpsi (operands[0]));
+  else
+    emit_jump_insn (gen_indirect_jumpdi (operands[0]));
+  DONE;
+})
+
+(define_insn "indirect_jump<mode>"
+  [(set (pc) (match_operand:P 0 "register_operand" "l"))]
+  ""
+  "jr\t%0"
+  [(set_attr "type" "jump")
+   (set_attr "mode" "none")])
+
+(define_expand "tablejump"
+  [(set (pc) (match_operand 0 "register_operand" ""))
+	      (use (label_ref (match_operand 1 "" "")))]
+  ""
+{
+  if (CASE_VECTOR_PC_RELATIVE)
+      operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
+					 gen_rtx_LABEL_REF (Pmode, operands[1]),
+					 NULL_RTX, 0, OPTAB_DIRECT);
+
+  if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
+    emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
+  else
+    emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
+  DONE;
+})
+
+(define_insn "tablejump<mode>"
+  [(set (pc) (match_operand:GPR 0 "register_operand" "l"))
+   (use (label_ref (match_operand 1 "" "")))]
+  ""
+  "jr\t%0"
+  [(set_attr "type" "jump")
+   (set_attr "mode" "none")])
+
+;;
+;;  ....................
+;;
+;;	Function prologue/epilogue
+;;
+;;  ....................
+;;
+
+(define_expand "prologue"
+  [(const_int 1)]
+  ""
+{
+  riscv_expand_prologue ();
+  DONE;
+})
+
+;; Block any insns from being moved before this point, since the
+;; profiling call to mcount can use various registers that aren't
+;; saved or used to pass arguments.
+
+(define_insn "blockage"
+  [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)]
+  ""
+  ""
+  [(set_attr "type" "ghost")
+   (set_attr "mode" "none")])
+
+(define_expand "epilogue"
+  [(const_int 2)]
+  ""
+{
+  riscv_expand_epilogue (false);
+  DONE;
+})
+
+(define_expand "sibcall_epilogue"
+  [(const_int 2)]
+  ""
+{
+  riscv_expand_epilogue (true);
+  DONE;
+})
+
+;; Trivial return.  Make it look like a normal return insn as that
+;; allows jump optimizations to work better.
+
+(define_expand "return"
+  [(simple_return)]
+  "riscv_can_use_return_insn ()"
+  "")
+
+(define_insn "simple_return"
+  [(simple_return)]
+  ""
+  "ret"
+  [(set_attr "type"	"jump")
+   (set_attr "mode"	"none")])
+
+;; Normal return.
+
+(define_insn "simple_return_internal"
+  [(simple_return)
+   (use (match_operand 0 "pmode_register_operand" ""))]
+  ""
+  "jr\t%0"
+  [(set_attr "type"	"jump")
+   (set_attr "mode"	"none")])
+
+;; This is used in compiling the unwind routines.
+(define_expand "eh_return"
+  [(use (match_operand 0 "general_operand"))]
+  ""
+{
+  if (GET_MODE (operands[0]) != word_mode)
+    operands[0] = convert_to_mode (word_mode, operands[0], 0);
+  if (TARGET_64BIT)
+    emit_insn (gen_eh_set_lr_di (operands[0]));
+  else
+    emit_insn (gen_eh_set_lr_si (operands[0]));
+  DONE;
+})
+
+;; Clobber the return address on the stack.  We can't expand this
+;; until we know where it will be put in the stack frame.
+
+(define_insn "eh_set_lr_si"
+  [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
+   (clobber (match_scratch:SI 1 "=&r"))]
+  "! TARGET_64BIT"
+  "#")
+
+(define_insn "eh_set_lr_di"
+  [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
+   (clobber (match_scratch:DI 1 "=&r"))]
+  "TARGET_64BIT"
+  "#")
+
+(define_split
+  [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
+   (clobber (match_scratch 1))]
+  "reload_completed"
+  [(const_int 0)]
+{
+  riscv_set_return_address (operands[0], operands[1]);
+  DONE;
+})
+
+;;
+;;  ....................
+;;
+;;	FUNCTION CALLS
+;;
+;;  ....................
+
+;; Sibling calls.  All these patterns use jump instructions.
+
+;; call_insn_operand will only accept constant
+;; addresses if a direct jump is acceptable.  Since the 'S' constraint
+;; is defined in terms of call_insn_operand, the same is true of the
+;; constraints.
+
+;; When we use an indirect jump, we need a register that will be
+;; preserved by the epilogue (constraint j).
+
+(define_expand "sibcall"
+  [(parallel [(call (match_operand 0 "")
+		    (match_operand 1 ""))
+	      (use (match_operand 2 ""))	;; next_arg_reg
+	      (use (match_operand 3 ""))])]	;; struct_value_size_rtx
+  ""
+{
+  riscv_expand_call (true, NULL_RTX, XEXP (operands[0], 0), operands[1]);
+  DONE;
+})
+
+(define_insn "sibcall_internal"
+  [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S"))
+	 (match_operand 1 "" ""))]
+  "SIBLING_CALL_P (insn)"
+  { return REG_P (operands[0]) ? "jr\t%0"
+	   : absolute_symbolic_operand (operands[0], VOIDmode) ? "tail\t%0"
+	   : "tail\t%0@plt"; }
+  [(set_attr "type" "call")])
+
+(define_expand "sibcall_value"
+  [(parallel [(set (match_operand 0 "")
+		   (call (match_operand 1 "")
+			 (match_operand 2 "")))
+	      (use (match_operand 3 ""))])]		;; next_arg_reg
+  ""
+{
+  riscv_expand_call (true, operands[0], XEXP (operands[1], 0), operands[2]);
+  DONE;
+})
+
+(define_insn "sibcall_value_internal"
+  [(set (match_operand 0 "register_operand" "")
+	(call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
+	      (match_operand 2 "" "")))]
+  "SIBLING_CALL_P (insn)"
+  { return REG_P (operands[1]) ? "jr\t%1"
+	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
+	   : "tail\t%1@plt"; }
+  [(set_attr "type" "call")])
+
+(define_insn "sibcall_value_multiple_internal"
+  [(set (match_operand 0 "register_operand" "")
+	(call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
+	      (match_operand 2 "" "")))
+   (set (match_operand 3 "register_operand" "")
+	(call (mem:SI (match_dup 1))
+	      (match_dup 2)))]
+  "SIBLING_CALL_P (insn)"
+  { return REG_P (operands[1]) ? "jr\t%1"
+	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
+	   : "tail\t%1@plt"; }
+  [(set_attr "type" "call")])
+
+(define_expand "call"
+  [(parallel [(call (match_operand 0 "")
+		    (match_operand 1 ""))
+	      (use (match_operand 2 ""))	;; next_arg_reg
+	      (use (match_operand 3 ""))])]	;; struct_value_size_rtx
+  ""
+{
+  riscv_expand_call (false, NULL_RTX, XEXP (operands[0], 0), operands[1]);
+  DONE;
+})
+
+(define_insn "call_internal"
+  [(call (mem:SI (match_operand 0 "call_insn_operand" "l,S"))
+	 (match_operand 1 "" ""))
+   (clobber (reg:SI RETURN_ADDR_REGNUM))]
+  ""
+  { return REG_P (operands[0]) ? "jalr\t%0"
+	   : absolute_symbolic_operand (operands[0], VOIDmode) ? "call\t%0"
+	   : "call\t%0@plt"; }
+  [(set_attr "type" "call")])
+
+(define_expand "call_value"
+  [(parallel [(set (match_operand 0 "")
+		   (call (match_operand 1 "")
+			 (match_operand 2 "")))
+	      (use (match_operand 3 ""))])]		;; next_arg_reg
+  ""
+{
+  riscv_expand_call (false, operands[0], XEXP (operands[1], 0), operands[2]);
+  DONE;
+})
+
+;; See comment for call_internal.
+(define_insn "call_value_internal"
+  [(set (match_operand 0 "register_operand" "")
+	(call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
+	      (match_operand 2 "" "")))
+   (clobber (reg:SI RETURN_ADDR_REGNUM))]
+  ""
+  { return REG_P (operands[1]) ? "jalr\t%1"
+	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
+	   : "call\t%1@plt"; }
+  [(set_attr "type" "call")])
+
+;; See comment for call_internal.
+(define_insn "call_value_multiple_internal"
+  [(set (match_operand 0 "register_operand" "")
+	(call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
+	      (match_operand 2 "" "")))
+   (set (match_operand 3 "register_operand" "")
+	(call (mem:SI (match_dup 1))
+	      (match_dup 2)))
+   (clobber (reg:SI RETURN_ADDR_REGNUM))]
+  ""
+  { return REG_P (operands[1]) ? "jalr\t%1"
+	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
+	   : "call\t%1@plt"; }
+  [(set_attr "type" "call")])
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+  [(parallel [(call (match_operand 0 "")
+		    (const_int 0))
+	      (match_operand 1 "")
+	      (match_operand 2 "")])]
+  ""
+{
+  int i;
+
+  emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
+
+  for (i = 0; i < XVECLEN (operands[2], 0); i++)
+    {
+      rtx set = XVECEXP (operands[2], 0, i);
+      riscv_emit_move (SET_DEST (set), SET_SRC (set));
+    }
+
+  emit_insn (gen_blockage ());
+  DONE;
+})
+
+(define_insn "nop"
+  [(const_int 0)]
+  ""
+  "nop"
+  [(set_attr "type"	"nop")
+   (set_attr "mode"	"none")])
+
+(define_insn "trap"
+  [(trap_if (const_int 1) (const_int 0))]
+  ""
+  "ebreak")
+
+(define_insn "gpr_save"
+  [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_SAVE)
+   (clobber (reg:SI T0_REGNUM))
+   (clobber (reg:SI T1_REGNUM))]
+  ""
+  { return riscv_output_gpr_save (INTVAL (operands[0])); })
+
+(define_insn "gpr_restore"
+  [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_RESTORE)]
+  ""
+  "tail\t__riscv_restore_%0")
+
+(define_insn "gpr_restore_return"
+  [(return)
+   (use (match_operand 0 "pmode_register_operand" ""))
+   (const_int 0)]
+  ""
+  "")
+
+(include "sync.md")
+(include "peephole.md")
+(include "pic.md")
+(include "generic.md")
diff --git a/gcc/config/riscv/riscv.opt b/gcc/config/riscv/riscv.opt
new file mode 100644
index 0000000..97169a2
--- /dev/null
+++ b/gcc/config/riscv/riscv.opt
@@ -0,0 +1,114 @@ 
+; Options for the RISC-V port of the compiler
+;
+; Copyright (C) 2011-2017 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3.  If not see
+; <http://www.gnu.org/licenses/>.
+
+HeaderInclude
+config/riscv/riscv-opts.h
+
+mbranch-cost=
+Target RejectNegative Joined UInteger Var(riscv_branch_cost)
+-mbranch-cost=N	Set the cost of branches to roughly N instructions.
+
+mmemcpy
+Target Report Mask(MEMCPY)
+Don't optimize block moves.
+
+mplt
+Target Report Var(TARGET_PLT) Init(1)
+When generating -fpic code, allow the use of PLTs. Ignored for -fno-pic.
+
+mabi=
+Target Report RejectNegative Joined Enum(abi_type) Var(riscv_abi) Init(ABI_ILP32)
+Specify floating-point calling convention.
+
+Enum
+Name(abi_type) Type(enum riscv_abi_type)
+Supported ABIs (for use with the -mabi= option):
+
+EnumValue
+Enum(abi_type) String(ilp32) Value(ABI_ILP32)
+
+EnumValue
+Enum(abi_type) String(ilp32f) Value(ABI_ILP32F)
+
+EnumValue
+Enum(abi_type) String(ilp32d) Value(ABI_ILP32D)
+
+EnumValue
+Enum(abi_type) String(lp64) Value(ABI_LP64)
+
+EnumValue
+Enum(abi_type) String(lp64f) Value(ABI_LP64F)
+
+EnumValue
+Enum(abi_type) String(lp64d) Value(ABI_LP64D)
+
+mfdiv
+Target Report Mask(FDIV)
+Use hardware floating-point divide and square root instructions.
+
+mdiv
+Target Report Mask(DIV)
+Use hardware instructions for integer division.
+
+march=
+Target Report RejectNegative Joined
+-march=	Generate code for the given RISC-V ISA (e.g. RV64IM).
+
+mtune=
+Target RejectNegative Joined Var(riscv_tune_string)
+-mtune=PROCESSOR	Optimize the output for PROCESSOR.
+
+msmall-data-limit=
+Target Joined Separate UInteger Var(g_switch_value) Init(8)
+-msmall-data-limit=N	Put global and static data smaller than N bytes into a special section (on some targets).
+
+msave-restore
+Target Report Mask(SAVE_RESTORE)
+Use smaller but slower prologue and epilogue code.
+
+mcmodel=
+Target Report RejectNegative Joined Enum(code_model) Var(riscv_cmodel) Init(TARGET_DEFAULT_CMODEL)
+Specify the code model.
+
+Enum
+Name(code_model) Type(enum riscv_code_model)
+Known code models (for use with the -mcmodel= option):
+
+EnumValue
+Enum(code_model) String(medlow) Value(CM_MEDLOW)
+
+EnumValue
+Enum(code_model) String(medany) Value(CM_MEDANY)
+
+mexplicit-relocs
+Target Report Mask(EXPLICIT_RELOCS)
+Use %reloc() operators, rather than assembly macros, to load addresses.
+
+Mask(64BIT)
+
+Mask(MUL)
+
+Mask(ATOMIC)
+
+Mask(HARD_FLOAT)
+
+Mask(DOUBLE_FLOAT)
+
+Mask(RVC)
diff --git a/gcc/config/riscv/sync.md b/gcc/config/riscv/sync.md
new file mode 100644
index 0000000..429c8fe
--- /dev/null
+++ b/gcc/config/riscv/sync.md
@@ -0,0 +1,207 @@ 
+;; Machine description for RISC-V atomic operations.
+;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (andrew@sifive.com).
+;; Based on MIPS target for GNU compiler.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspec" [
+  UNSPEC_COMPARE_AND_SWAP
+  UNSPEC_SYNC_OLD_OP
+  UNSPEC_SYNC_EXCHANGE
+  UNSPEC_ATOMIC_STORE
+  UNSPEC_MEMORY_BARRIER
+])
+
+(define_code_iterator any_atomic [plus ior xor and])
+(define_code_attr atomic_optab
+  [(plus "add") (ior "or") (xor "xor") (and "and")])
+
+;; Memory barriers.
+
+(define_expand "mem_thread_fence"
+  [(match_operand:SI 0 "const_int_operand" "")] ;; model
+  ""
+{
+  if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
+    {
+      rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+      MEM_VOLATILE_P (mem) = 1;
+      emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
+    }
+  DONE;
+})
+
+(define_insn "mem_thread_fence_1"
+  [(set (match_operand:BLK 0 "" "")
+	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
+   (match_operand:SI 1 "const_int_operand" "")] ;; model
+  ""
+{
+  long model = INTVAL (operands[1]);
+
+  switch (model)
+    {
+    case MEMMODEL_SEQ_CST:
+    case MEMMODEL_SYNC_SEQ_CST:
+    case MEMMODEL_ACQ_REL:
+      return "fence rw,rw";
+    case MEMMODEL_ACQUIRE:
+    case MEMMODEL_SYNC_ACQUIRE:
+    case MEMMODEL_CONSUME:
+      return "fence r,rw";
+    case MEMMODEL_RELEASE:
+    case MEMMODEL_SYNC_RELEASE:
+      return "fence rw,w";
+    default:
+      gcc_unreachable ();
+    }
+})
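+
+;; For reference (illustrative, derived from the switch above): at the C
+;; level these barriers correspond to __atomic_thread_fence, e.g.
+;;
+;;   __atomic_thread_fence (__ATOMIC_SEQ_CST);  /* fence rw,rw */
+;;   __atomic_thread_fence (__ATOMIC_ACQUIRE);  /* fence r,rw  */
+;;   __atomic_thread_fence (__ATOMIC_RELEASE);  /* fence rw,w  */
+;;   __atomic_thread_fence (__ATOMIC_RELAXED);  /* no fence emitted */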
+
+;; Atomic memory operations.
+
+;; Implement atomic stores with amoswap.  Fall back to fences for atomic loads.
+(define_insn "atomic_store<mode>"
+  [(set (match_operand:GPR 0 "memory_operand" "=A")
+    (unspec_volatile:GPR
+      [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
+       (match_operand:SI 2 "const_int_operand")]      ;; model
+      UNSPEC_ATOMIC_STORE))]
+  "TARGET_ATOMIC"
+  "%F2amoswap.<amo>%A2 zero,%z1,%0"
+  [(set (attr "length") (const_int 8))])
+
+(define_insn "atomic_<atomic_optab><mode>"
+  [(set (match_operand:GPR 0 "memory_operand" "+A")
+	(unspec_volatile:GPR
+	  [(any_atomic:GPR (match_dup 0)
+		     (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
+	   (match_operand:SI 2 "const_int_operand")] ;; model
+	 UNSPEC_SYNC_OLD_OP))]
+  "TARGET_ATOMIC"
+  "%F2amo<insn>.<amo>%A2 zero,%z1,%0"
+  [(set (attr "length") (const_int 8))])
+
+(define_insn "atomic_fetch_<atomic_optab><mode>"
+  [(set (match_operand:GPR 0 "register_operand" "=&r")
+	(match_operand:GPR 1 "memory_operand" "+A"))
+   (set (match_dup 1)
+	(unspec_volatile:GPR
+	  [(any_atomic:GPR (match_dup 1)
+		     (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
+	   (match_operand:SI 3 "const_int_operand")] ;; model
+	 UNSPEC_SYNC_OLD_OP))]
+  "TARGET_ATOMIC"
+  "%F3amo<insn>.<amo>%A3 %0,%z2,%1"
+  [(set (attr "length") (const_int 8))])
+
+(define_insn "atomic_exchange<mode>"
+  [(set (match_operand:GPR 0 "register_operand" "=&r")
+	(unspec_volatile:GPR
+	  [(match_operand:GPR 1 "memory_operand" "+A")
+	   (match_operand:SI 3 "const_int_operand")] ;; model
+	  UNSPEC_SYNC_EXCHANGE))
+   (set (match_dup 1)
+	(match_operand:GPR 2 "register_operand" "0"))]
+  "TARGET_ATOMIC"
+  "%F3amoswap.<amo>%A3 %0,%z2,%1"
+  [(set (attr "length") (const_int 8))])
+
+(define_insn "atomic_cas_value_strong<mode>"
+  [(set (match_operand:GPR 0 "register_operand" "=&r")
+	(match_operand:GPR 1 "memory_operand" "+A"))
+   (set (match_dup 1)
+	(unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
+			      (match_operand:GPR 3 "reg_or_0_operand" "rJ")
+			      (match_operand:SI 4 "const_int_operand")  ;; mod_s
+			      (match_operand:SI 5 "const_int_operand")] ;; mod_f
+	 UNSPEC_COMPARE_AND_SWAP))
+   (clobber (match_scratch:GPR 6 "=&r"))]
+  "TARGET_ATOMIC"
+  "%F5 1: lr.<amo>%A5 %0,%1; bne %0,%z2,1f; sc.<amo>%A4 %6,%z3,%1; bnez %6,1b; 1:"
+  [(set (attr "length") (const_int 20))])
+
+(define_expand "atomic_compare_and_swap<mode>"
+  [(match_operand:SI 0 "register_operand" "")   ;; bool output
+   (match_operand:GPR 1 "register_operand" "")  ;; val output
+   (match_operand:GPR 2 "memory_operand" "")    ;; memory
+   (match_operand:GPR 3 "reg_or_0_operand" "")  ;; expected value
+   (match_operand:GPR 4 "reg_or_0_operand" "")  ;; desired value
+   (match_operand:SI 5 "const_int_operand" "")  ;; is_weak
+   (match_operand:SI 6 "const_int_operand" "")  ;; mod_s
+   (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
+  "TARGET_ATOMIC"
+{
+  emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
+						operands[3], operands[4],
+						operands[6], operands[7]));
+
+  rtx compare = operands[1];
+  if (operands[3] != const0_rtx)
+    {
+      rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
+      compare = gen_reg_rtx (<MODE>mode);
+      emit_insn (gen_rtx_SET (compare, difference));
+    }
+
+  rtx eq = gen_rtx_EQ (<MODE>mode, compare, const0_rtx);
+  rtx result = gen_reg_rtx (<MODE>mode);
+  emit_insn (gen_rtx_SET (result, eq));
+  emit_insn (gen_rtx_SET (operands[0], gen_lowpart (SImode, result)));
+  DONE;
+})
+
+(define_expand "atomic_test_and_set"
+  [(match_operand:QI 0 "register_operand" "")     ;; bool output
+   (match_operand:QI 1 "memory_operand" "+A")    ;; memory
+   (match_operand:SI 2 "const_int_operand" "")]   ;; model
+  "TARGET_ATOMIC"
+{
+  /* We have no QImode atomics, so use the address LSBs to form a mask,
+     then use an aligned SImode atomic. */
+  rtx result = operands[0];
+  rtx mem = operands[1];
+  rtx model = operands[2];
+  rtx addr = force_reg (Pmode, XEXP (mem, 0));
+
+  rtx aligned_addr = gen_reg_rtx (Pmode);
+  emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));
+
+  rtx aligned_mem = change_address (mem, SImode, aligned_addr);
+  set_mem_alias_set (aligned_mem, 0);
+
+  rtx offset = gen_reg_rtx (SImode);
+  emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
+				       GEN_INT (3)));
+
+  rtx tmp = gen_reg_rtx (SImode);
+  emit_move_insn (tmp, GEN_INT (1));
+
+  rtx shmt = gen_reg_rtx (SImode);
+  emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));
+
+  rtx word = gen_reg_rtx (SImode);
+  emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt));
+
+  tmp = gen_reg_rtx (SImode);
+  emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));
+
+  emit_move_insn (gen_lowpart (SImode, result),
+		  gen_rtx_LSHIFTRT (SImode, tmp,
+				    gen_lowpart (SImode, shmt)));
+  DONE;
+})
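+
+;; The expander above is, roughly, the following C (a sketch only; the
+;; helper name is made up and the fixed SEQ_CST model stands in for
+;; whatever model the caller passed):
+;;
+;;   unsigned char
+;;   test_and_set_byte (unsigned char *p)
+;;   {
+;;     uintptr_t a = (uintptr_t) p;                      /* needs <stdint.h> */
+;;     unsigned int *word = (unsigned int *) (a & -4);   /* aligned word */
+;;     unsigned int shift = (a & 3) * 8;                 /* byte's bit offset */
+;;     unsigned int old
+;;       = __atomic_fetch_or (word, 1u << shift, __ATOMIC_SEQ_CST);
+;;     return old >> shift;                              /* previous byte */
+;;   }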
diff --git a/gcc/config/riscv/t-elf-multilib b/gcc/config/riscv/t-elf-multilib
new file mode 100644
index 0000000..b7a41ff
--- /dev/null
+++ b/gcc/config/riscv/t-elf-multilib
@@ -0,0 +1,2 @@ 
+# For the time being, Linux and Newlib have the same multilib settings.
+include $(srcdir)/config/riscv/t-linux-multilib
diff --git a/gcc/config/riscv/t-elf-nomultilib b/gcc/config/riscv/t-elf-nomultilib
new file mode 100644
index 0000000..3ce76a0
--- /dev/null
+++ b/gcc/config/riscv/t-elf-nomultilib
@@ -0,0 +1,2 @@ 
+# For the time being, Linux and Newlib have the same multilib settings.
+include $(srcdir)/config/riscv/t-linux-nomultilib
diff --git a/gcc/config/riscv/t-linux-multilib b/gcc/config/riscv/t-linux-multilib
new file mode 100644
index 0000000..8a23189
--- /dev/null
+++ b/gcc/config/riscv/t-linux-multilib
@@ -0,0 +1,13 @@ 
+# This is a bit odd: the "nomultilib" configuration defines all the multilibs,
+# and this multilib configuration just whitelists the ones we want to build.
+include $(srcdir)/config/riscv/t-linux-nomultilib
+
+# We don't actually support all the possible multilib configurations on RISC-V.
+# Here's a blessed list of the interesting ones.  Some will never be supported
+# (rv32imafd/lp64), some are impossible (rv64ima/lp64d) and some will eventually be
+# supported (rv64imafd/ilp32).
+MULTILIB_REQUIRED  =
+MULTILIB_REQUIRED += march=rv32ima/mabi=ilp32
+MULTILIB_REQUIRED += march=rv32imafd/mabi=ilp32d
+MULTILIB_REQUIRED += march=rv64ima/mabi=lp64
+MULTILIB_REQUIRED += march=rv64imafd/mabi=lp64d
diff --git a/gcc/config/riscv/t-linux-nomultilib b/gcc/config/riscv/t-linux-nomultilib
new file mode 100644
index 0000000..b7c7fae
--- /dev/null
+++ b/gcc/config/riscv/t-linux-nomultilib
@@ -0,0 +1,27 @@ 
+# Build the libraries for both hard and soft floating point
+# This specifies the possible RISC-V multilib names, along with the directory
+# structures that will be created.  MULTILIB_OPTIONS defines which flags can be
+# passed, with options separated by a "/" being mutually exclusive.  GCC's
+# build infrastructure will append no flag as one of the targets, which is why
+# MULTILIB_REQUIRED is defined.  The directory names line up with the various
+# options, ignoring space vs slash distinctions.  The full directory name for a
+# multilib is a slash-joined string of every present option, with the DIRNAME
+# being the directory inside GCC (for libgcc and friends) and the OSDIRNAME
+# setting the default library search path for the linker.  OSDIRNAME starts
+# relative to lib, which is why there's a ".." before some of them.  As an
+# example, one of the generated multilibs will be:
+#  CFLAGS:    -march=rv32imafd -mabi=ilp32d
+#  DIRNAME:   GCC_HOME/rv32imafd/ilp32d
+#  OSDIRNAME: /lib32/ilp32d
+MULTILIB_OPTIONS    = march=rv32g/march=rv64g/march=rv32imafd/march=rv64imafd/march=rv32ima/march=rv64ima/march=rv32imaf/march=rv64imaf mabi=ilp32/mabi=ilp32f/mabi=ilp32d/mabi=lp64/mabi=lp64f/mabi=lp64d
+MULTILIB_DIRNAMES   =       lib32       lib64           lib32           lib64         lib32         lib64          lib32          lib64      ilp32      ilp32f      ilp32d      lp64      lp64f      lp64d
+MULTILIB_OSDIRNAMES =    ../lib32    ../lib64        ../lib32        ../lib64      ../lib32      ../lib64       ../lib32    ../lib64      ilp32      ilp32f      ilp32d      lp64      lp64f      lp64d
+
+# There's a hack in the multilibs we build: it appears that if I tell GCC to
+# build a multilib for rv64g, it builds just one of them and ignores the ABI
+# flags.  To get around this, I provide "rv{32,64}g" as the "default
+# multilib", but I don't want to also build a libc for them, so instead I
+# inform GCC that the G ISAs are actually IMAFD.
+MULTILIB_REUSE  =
+MULTILIB_REUSE += march.rv32imafd/mabi.ilp32d=march.rv32g/mabi.ilp32d
+MULTILIB_REUSE += march.rv64imafd/mabi.lp64d=march.rv64g/mabi.lp64d
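+
+# As an illustration (not used by the build itself), these reuse rules mean
+# that an invocation along the lines of
+#   riscv64-unknown-linux-gnu-gcc -march=rv64g -mabi=lp64d -print-multi-directory
+# should resolve to the rv64imafd/lp64d multilib rather than requiring a
+# separately built rv64g library set.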
diff --git a/gcc/configure.ac b/gcc/configure.ac
index b73a807..645c71d 100644
--- a/gcc/configure.ac
+++ b/gcc/configure.ac
@@ -3390,6 +3390,17 @@  x3:	.space 4
 	tls_first_minor=14
 	tls_as_opt="-a32 --fatal-warnings"
 	;;
+  riscv*-*-*)
+    conftest_s='
+	.section .tdata,"awT",@progbits
+x:	.word 2
+	.text
+	la.tls.gd a0,x
+	call __tls_get_addr'
+	tls_first_major=2
+	tls_first_minor=21
+	tls_as_opt='--fatal-warnings'
+	;;
   s390-*-*)
     conftest_s='
 	.section ".tdata","awT",@progbits
@@ -4741,8 +4752,8 @@  esac
 # version to the per-target configury.
 case "$cpu_type" in
   aarch64 | alpha | arm | avr | bfin | cris | i386 | m32c | m68k | microblaze \
-  | mips | nios2 | pa | rs6000 | score | sparc | spu | tilegx | tilepro \
-  | visium | xstormy16 | xtensa)
+  | mips | nios2 | pa | riscv | rs6000 | score | sparc | spu | tilegx \
+  | tilepro | visium | xstormy16 | xtensa)
     insn="nop"
     ;;
   ia64 | s390)