diff mbox series

[V2,2/8] bpf: new GCC port

Message ID 87d0h4y5c1.fsf@gnu.org
State New
Headers show
Series eBPF support for GCC | expand

Commit Message

Jose E. Marchesi Aug. 17, 2019, 12:59 a.m. UTC
This patch adds a port for the Linux kernel eBPF architecture to GCC.

ChangeLog:

  * configure.ac: Support for bpf-*-* targets.
  * configure: Regenerate.

contrib/ChangeLog:

  * config-list.mk (LIST): Disable go in bpf-*-* targets.

gcc/ChangeLog:

  * config.gcc: Support for bpf-*-* targets.
  * common/config/bpf/bpf-common.c: New file.
  * config/bpf/t-bpf: Likewise.
  * config/bpf/predicates.md: Likewise.
  * config/bpf/constraints.md: Likewise.
  * config/bpf/bpf.opt: Likewise.
  * config/bpf/bpf.md: Likewise.
  * config/bpf/bpf.h: Likewise.
  * config/bpf/bpf.c: Likewise.
  * config/bpf/bpf-protos.h: Likewise.
  * config/bpf/bpf-opts.h: Likewise.
  * config/bpf/bpf-helpers.h: Likewise.
  * config/bpf/bpf-helpers.def: Likewise.
---
 ChangeLog                          |    5 +
 configure                          |   68 ++-
 configure.ac                       |   54 +-
 contrib/ChangeLog                  |    4 +
 contrib/config-list.mk             |    2 +-
 gcc/ChangeLog                      |   16 +
 gcc/common/config/bpf/bpf-common.c |   57 ++
 gcc/config.gcc                     |    9 +
 gcc/config/bpf/bpf-helpers.def     |  194 ++++++
 gcc/config/bpf/bpf-helpers.h       |  324 ++++++++++
 gcc/config/bpf/bpf-opts.h          |   56 ++
 gcc/config/bpf/bpf-protos.h        |   33 ++
 gcc/config/bpf/bpf.c               | 1136 ++++++++++++++++++++++++++++++++++++
 gcc/config/bpf/bpf.h               |  565 ++++++++++++++++++
 gcc/config/bpf/bpf.md              |  528 +++++++++++++++++
 gcc/config/bpf/bpf.opt             |  119 ++++
 gcc/config/bpf/constraints.md      |   29 +
 gcc/config/bpf/predicates.md       |  105 ++++
 gcc/config/bpf/t-bpf               |    0
 19 files changed, 3300 insertions(+), 4 deletions(-)
 create mode 100644 gcc/common/config/bpf/bpf-common.c
 create mode 100644 gcc/config/bpf/bpf-helpers.def
 create mode 100644 gcc/config/bpf/bpf-helpers.h
 create mode 100644 gcc/config/bpf/bpf-opts.h
 create mode 100644 gcc/config/bpf/bpf-protos.h
 create mode 100644 gcc/config/bpf/bpf.c
 create mode 100644 gcc/config/bpf/bpf.h
 create mode 100644 gcc/config/bpf/bpf.md
 create mode 100644 gcc/config/bpf/bpf.opt
 create mode 100644 gcc/config/bpf/constraints.md
 create mode 100644 gcc/config/bpf/predicates.md
 create mode 100644 gcc/config/bpf/t-bpf

Comments

Richard Sandiford Aug. 19, 2019, 7:57 p.m. UTC | #1
In addition to Segher's comments:

jemarch@gnu.org (Jose E. Marchesi) writes:
> [...]
> +/* This file contains the definition of the kernel helpers that are
> +   available to eBPF programs.
> +
> +   The primary source for information on kernel helpers is the
> +   linux/include/uapi/linux/bpf.h file in the Linux source tree.
> +   Please keep this database in sync.
> +
> +   The first column is the first kernel version featuring the helper
> +   function.  This should be an enumerate from bpf_kernel_version,
> +   defined in bpf-opts.h.  Note that the backend assumes that helpers
> +   never get deprecated in the kernel.  If that eventually happens,
> +   then we will need to use a bitmask here instead of an enumerate.
> +
> +   The second column is the constant-name for the helper.
> +   The third column is the program-name of the helper.
> +
> +   The fourth column is a list of names describing the types of the
> +   values returned and accepted by the helper, in one of these forms:
> +
> +     TYPES (type1, type2, ..., 0)
> +     VTYPES (type1, type2, ..., 0)
> +
> +   VTYPES should be used should the helper accept a variable number of
> +   arguments, TYPES otherwise.  The valid type names are:
> +
> +     `vt' for void.
> +     `it' for signed int.
> +     `ut' for unsigned int.
> +     `pt' for *void.
> +     `cpt' for const *void.

"*" after "void" in both cases.

> +     `st' for short int.
> +     `ust' for unsigned short int.
> +     `cst' for const char *.

Very minor, but it might be less confusing to pick something other than "s"
for "cst" given the above.

> [...]
> +/* Functions to emit BPF_LD_ABS and BPF_LD_IND instructions.  We
> +   provide the "standard" names as synonyms of the corresponding GCC
> +   builtins.  Note how the SKB argument is ignored.  */
> +
> +static inline long long
> +load_byte (void *skb, unsigned long long off)
> +{
> +  return __builtin_bpf_load_byte (off);
> +}
> [etc]

It might be worth adding __attribute__((unused)) to them, in case
anyone compiles with -Wsystem-headers.

> [...]
> +/* Supported versions of the Linux kernel.  */
> +enum bpf_kernel_version
> +{
> + /* Linux 4.x */
> + LINUX_V4_0,
> [etc.]

The contents should be indented by two spaces.

> [...]
> +enum bpf_builtins
> +{
> + BPF_BUILTIN_UNUSED = 0,
> + /* Built-ins for kernel helpers.  */
> +#define DEF_HELPER(V,D,N,T) BPF_BUILTIN_HELPER_##D,
> +#  include "bpf-helpers.def"
> +#undef DEF_HELPER
> + BPF_BUILTIN_HELPER_MAX,
> + /* Built-ins for non-generic loads and stores.  */
> + BPF_BUILTIN_LOAD_BYTE = BPF_BUILTIN_HELPER_MAX,
> + BPF_BUILTIN_LOAD_HALF,
> + BPF_BUILTIN_LOAD_WORD,
> + BPF_BUILTIN_MAX,
> +};
> +
> +/* This table is indexed by an enum bpf_builtin.  */
> +static const char *bpf_helper_names[] =
> +{
> + NULL,
> +#define DEF_HELPER(V,D,N,T) #N,
> +#  include "bpf-helpers.def"
> +#undef DEF_HELPER
> + NULL,
> + NULL,
> + NULL,
> + NULL
> +};

Same for these two.

> [...]
> +#define INCLUDE_STRING

You didn't seem to rely on this (i.e. std::string).

> [...]
> +/* Override options and do some other initialization.  */
> +
> +static void
> +bpf_option_override (void)
> +{
> +  /* Set the default target kernel if no -mkernel was specified.  */
> +  if (!global_options_set.x_bpf_kernel)
> +    bpf_kernel = LINUX_LATEST;

LINUX_LATEST is the default in the .opt file, so when is this needed?

> [...]
> +  /* Define BPF_KERNEL_VERSION_CODE */
> +  {
> +    const char *version_code;
> +    char *kernel_version_code;
> +
> +    switch (bpf_kernel)
> +      {
> +      case LINUX_V4_0: version_code = "0x40000"; break;
> +      case LINUX_V4_1: version_code = "0x40100"; break;
> +      case LINUX_V4_2: version_code = "0x40200"; break;
> +      case LINUX_V4_3: version_code = "0x40300"; break;
> +      case LINUX_V4_4: version_code = "0x40400"; break;
> +      case LINUX_V4_5: version_code = "0x40500"; break;
> +      case LINUX_V4_6: version_code = "0x40600"; break;
> +      case LINUX_V4_7: version_code = "0x40700"; break;
> +      case LINUX_V4_8: version_code = "0x40800"; break;
> +      case LINUX_V4_9: version_code = "0x40900"; break;
> +      case LINUX_V4_10: version_code = "0x40a00"; break;
> +      case LINUX_V4_11: version_code = "0x40b00"; break;
> +      case LINUX_V4_12: version_code = "0x40c00"; break;
> +      case LINUX_V4_13: version_code = "0x40d00"; break;
> +      case LINUX_V4_14: version_code = "0x40e00"; break;
> +      case LINUX_V4_15: version_code = "0x40f00"; break;
> +      case LINUX_V4_16: version_code = "0x41000"; break;
> +      case LINUX_V4_17: version_code = "0x42000"; break;
> +      case LINUX_V4_18: version_code = "0x43000"; break;
> +      case LINUX_V4_19: version_code = "0x44000"; break;
> +      case LINUX_V4_20: version_code = "0x45000"; break;
> +      case LINUX_V5_0: version_code = "0x50000"; break;
> +      case LINUX_V5_1: version_code = "0x50100"; break;
> +      case LINUX_V5_2: version_code = "0x50200"; break;
> +      default:
> +	gcc_unreachable ();      
> +      }
> +
> +#define KERNEL_VERSION_CODE "__BPF_KERNEL_VERSION_CODE__="    
> +    kernel_version_code
> +      = (char *) alloca (strlen (KERNEL_VERSION_CODE) + 7 + 1);
> +    strcpy (kernel_version_code, KERNEL_VERSION_CODE);
> +#undef KERNEL_VERSION_CODE
> +    strcat (kernel_version_code, version_code);
> +    builtin_define (kernel_version_code);

FWIW, a slightly easier way of writing this is:

    kernel_version_code = ACONCAT (("__BPF_KERNEL_VERSION_CODE__=",
				    version_code, NULL));

> [...]
> +/* Determine whether the port is prepared to handle insns involving
> +   scalar mode MODE.  For a scalar mode to be considered supported,
> +   all the basic arithmetic and comparisons must work.  */
> +
> +static bool
> +bpf_scalar_mode_supported_p (scalar_mode mode)
> +{
> +  switch (mode)
> +    {
> +    case E_QImode:
> +    case E_HImode:
> +    case E_SImode:
> +    case E_DImode:
> +    case E_TImode:
> +      return true;
> +
> +    default:
> +      return false;
> +    }
> +
> +  return false;
> +}

Are you overriding this just to exclude floating-point modes?
If so, what specifically doesn't work?

Would be worth a comment.

> [...]
> +/* Return true if REGNO is th enumber of a hard register in which the

typo: "th enumber".

> [...]
> +/* Compute the size of the function's stack frame, including the local
> +   area and the register-save area.  */
> +
> +static void
> +bpf_compute_frame (void)
> +{
> +  int stack_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
> +  int padding_locals, regno;
> +
> +  /* Set the space used in the stack by local variables.  This is
> +     rounded up to respect the minimum stack alignment.  */
> +  cfun->machine->local_vars_size = get_frame_size ();
> +
> +  padding_locals = cfun->machine->local_vars_size % stack_alignment;
> +  if (padding_locals)
> +    padding_locals = stack_alignment - padding_locals;
> +
> +  cfun->machine->local_vars_size += padding_locals;
> +
> +  /* Set the space used in the stack by callee-saved used registers in
> +     the current function.  There is no need to round up, since the
> +     registers are all 8 bytes wide.  */
> +  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
> +    if ((!fixed_regs[regno]
> +	 && df_regs_ever_live_p (regno)
> +	 && !call_used_regs[regno])
> +	|| (cfun->calls_alloca
> +	    && regno == STACK_POINTER_REGNUM))
> +      cfun->machine->callee_saved_reg_size += 8;
> +
> +  /* Check that the total size of the frame doesn't exceed the limit
> +     imposed by eBPF: currently 512 bytes.  */
> +  if ((cfun->machine->local_vars_size
> +       + cfun->machine->callee_saved_reg_size) > 512)
> +    {
> +      static int stack_limit_exceeded = 0;
> +
> +      if (!stack_limit_exceeded)
> +	error ("eBPF stack limit of 512 bytes exceeded");
> +      stack_limit_exceeded = 1;
> +    }
> +}

I think this does what TARGET_COMPUTE_FRAME_LAYOUT expects.
It'd be good to define the hook to bpf_compute_frame and avoid calling
it explicitly in the prologue, epilogue and elimination routines.

(The documentation says the hook's optional, but when it's such a
natural fit...)

> [...]
> +  /* Save callee-saved hard registes.  The register-save-area starts
> +     right after the local variables.  */
> +  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
> +    {
> +      if ((!fixed_regs[regno]
> +	   && df_regs_ever_live_p (regno)
> +	   && !call_used_regs[regno])
> +	  || (cfun->calls_alloca
> +	      && regno == STACK_POINTER_REGNUM))
> +	{
> +	  rtx mem;
> +
> +	  if (!IN_RANGE (fp_offset, -1 - 0x7fff, 0x7fff))
> +	    /* This has been already reported as an error in
> +	       bpf_compute_frame. */
> +	    break;
> +	  else
> +	    {
> +	      mem = gen_frame_mem (DImode,
> +				   plus_constant (DImode,
> +						  gen_rtx_REG (DImode, FRAME_POINTER_REGNUM),

hard_frame_pointer_rtx here and elsewhere.

> [...]
> +/* Return true if a value of mode MODE1 is accessible in mode MODE2
> +   without copying.  */
> +
> +static bool
> +bpf_modes_tieable_p (enum machine_mode mode1,
> +		     enum machine_mode mode2)
> +{
> +  return (mode1 == mode2
> +	  || GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2));
> +}

The second part makes the first part redundant.  But why do you
need to restrict it based on classes?  It seems like eBPF is an
example of an architecture where any modes are tieable, so using
the default would be better if possible.

If the restriction is needed, it would be worth having a comment
explaining which case you're excluding and why.

> [...]
> +    case PLUS:
> +      {
> +	/* The valid patterns here are:
> +	   
> +	   (PLUS ADDR_BASE CONST_INT)
> +	   (PLUS CONST_INT ADDR_BASE)

The second one isn't canonical rtl, so you shouldn't (need to) handle it.
Please raise a bug if you find a case where it's being generated. :-)

> [...]
> +/* Split an out-of-range address displacement into hi and lo parts.
> +   The hi part will have to be loaded into a register separately, but
> +   the low part will be folded into the memory operand.  */
> +
> +static bool
> +bpf_legitimize_address_displacement (rtx *off1, rtx *off2,
> +				     poly_int64 poly_offset, machine_mode)
> +{
> +  HOST_WIDE_INT orig_offset = poly_offset;
> +
> +  /* Our case is very easy: the REG part of an indirect address is
> +     64-bit wide, so it can hold any address.  This always leads to
> +     REG+0 */
> +
> +  *off1 = GEN_INT (orig_offset);
> +  *off2 = GEN_INT (0);
> +  return true;
> +}
> +
> +#undef TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT
> +#define TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT bpf_legitimize_address_displacement

But then do you need to define the hook at all?  I think this is what
LRA does by default.  The hook is only really there to get better spill
code (via common anchor points) on targets with limited offset ranges.

> [...]
> +/* Return true if memory address ADDR in address space AS can have
> +   different meanings depending on the machine mode of the memory
> +   reference it is used for or if the address is valid for some modes
> +   but not others.  */
> +
> +static bool
> +bpf_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED,
> +                              addr_space_t as ATTRIBUTE_UNUSED)
> +{
> +  return true;
> +}
> +
> +#undef TARGET_MODE_DEPENDENT_ADDRESS_P
> +#define TARGET_MODE_DEPENDENT_ADDRESS_P bpf_mode_dependent_address_p

Why does this need to be true?  False is the better answer if you can
give it. :-)  And it looks like the set of legitimate addresses doesn't
really care about modes.

> [...]
> +/* Return true if X is a legitimate constant for a MODE-mode immediate
> +   operand on the target machine.  */
> +
> +static bool
> +bpf_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
> +			   rtx x ATTRIBUTE_UNUSED)
> +{
> +  return true;
> +}
> +
> +#undef TARGET_LEGITIMATE_CONSTANT_P
> +#define TARGET_LEGITIMATE_CONSTANT_P bpf_legitimate_constant_p

This is the default, no real need to define it.

> [...]
> +/* Return a RTX indicating whether a function argument is passed in a
> +   register and if so, which register.  */
> +
> +static rtx
> +bpf_function_arg (cumulative_args_t ca, enum machine_mode mode ATTRIBUTE_UNUSED,
> +                  const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED)
> +{
> +  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
> +
> +  if (*cum < 5)
> +    return gen_rtx_REG (mode, *cum + 1);
> +  else
> +    /* An error have been emitted for this in
> +       bpf_function_arg_advance.  */
> +    return NULL_RTX;

This hook is called first, so "will be" rather than "has been".

(BTW, I just submitted a series of patches to change this interface,
but it should be a trivial change for whichever of us gets to make it.)

> [...]
> +/* Update the summarizer variable pointed by CA to advance past an
> +   argument in the argument list.  */
> +
> +static void
> +bpf_function_arg_advance (cumulative_args_t ca, enum machine_mode mode ATTRIBUTE_UNUSED,
> +                          const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED)
> +{
> +  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
> +
> +  if (*cum > 4)
> +    error ("eBPF doesn't support functions with more than 5 arguments");
> +  (*cum)++;
> +}

You allow TImode (int128_t) support, and arguments might be aggregates,
so shouldn't this be incrementing by the number of words rather than 1?

I guess the error logic also needs to be tweaked to report an error for
(say) a TImode argument passed after 4 DImode arguments, which would
need 6 registers in total.  It would also be good to avoid multiple
errors for the same argument list.

> [...]
> +/* Return true if an argument at the position indicated by CUM should
> +   be passed by reference.  If the hook returns true, a copy of that
> +   argument is made in memory and a pointer to the argument is passed
> +   instead of the argument itself.  */
> +
> +static bool
> +bpf_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
> +		       enum machine_mode mode, const_tree type,
> +		       bool named ATTRIBUTE_UNUSED)
> +{
> +  unsigned HOST_WIDE_INT size;
> +
> +  if (type)
> +    {
> +      if (AGGREGATE_TYPE_P (type))
> +	return true;
> +      size = int_size_in_bytes (type);
> +    }
> +  else
> +    size = GET_MODE_SIZE (mode);
> +
> +  return (size > 8*5);
> +}
> +
> +#undef TARGET_PASS_BY_REFERENCE
> +#define TARGET_PASS_BY_REFERENCE bpf_pass_by_reference

I might have misunderstood, but I thought from an earlier (IRC?)
message, it wasn't possible for the callee to access the caller's
frame, which was why you had the error about running out of argument
registers.  If so, won't passing by reference make the argument
inaccessible in practice?  I don't see what you gain by defining
the hook, since I'd have assumed (at least after the fix above)
that it would be better to pass by value and get an error about
having no argument registers left.

> [...]
> +/* Diagnostics on function contents.  */
> +
> +static void
> +bpf_set_current_function (tree decl)
> +{
> +  if (decl == NULL_TREE
> +      || current_function_decl == NULL_TREE
> +      || current_function_decl == error_mark_node
> +      || !cfun->machine
> +      || cfun->machine->diagnostics_checked_p)
> +    return;
> +
> +  /* Currently we don't do anything meaningful here.  To be
> +     changed.  */
> +
> +  /* Don't print the above diagnostics more than once.  */
> +  cfun->machine->diagnostics_checked_p = 1;
> +}
> +
> +#undef TARGET_SET_CURRENT_FUNCTION
> +#define TARGET_SET_CURRENT_FUNCTION bpf_set_current_function

IMO it'd be better to leave this undefined until it needs to do
something.  (Same for diagnostics_checked_p itself.)

> [...]
> +/* Output the assembly code for a constructor.  Since eBPF doesn't
> +   support indirect calls, constructors are not supported.  */
> +
> +static void
> +bpf_output_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
> +{
> +  fatal_insn ("no support for constructors sorry", symbol);

Would be better to use:

  sorry ("no support for constructors");

Even better, when SYMBOL_REF_DECL is nonnull, use it to provide an
alternative, more helpful message :-)

> [...]
> +/* Output the assembly code for a destructor.  Since eBPF doesn't
> +   support indirect calls, destructors are not supported.  */
> +
> +static void
> +bpf_output_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
> +{
> +  fatal_insn ("no support for destructors sorry", symbol);

Same idea here.

> [...]
> +/* Return the appropriate instruction to CALL to a function.  TARGET
> +   is a `mem' RTX denoting the address of the called function.
> +
> +   The main purposes of this function are:
> +   - To reject indirect CALL instructions, which are not supported by
> +     eBPf.

typo: eBPF

> +   - To recognize calls to kernel helper functions and emit the
> +     corresponding CALL N instruction.
> +
> +   This function is called from the expansion of the 'call' pattern in
> +   bpf.md.  */
> +
> +const char *
> +bpf_output_call (rtx target)
> +{
> +  static char *insn;
> +  rtx op;
> +
> +  op = XEXP (target, 0);
> +  switch (GET_CODE (op))
> +    {
> +    case CONST_INT:
> +      insn = (char *) xmalloc (5 + 6 + 1);
> +      sprintf (insn, "call\t%ld", INTVAL (op));

Would be good to avoid the memory leak.  Two ways of doing that are:

(1) output the instructions via output_asm_insn here and return "".

(2) strip the MEM wrapper from operands[0] before printing.  You can
    then return "call %0" for the above.  For...

> +      break;
> +    case SYMBOL_REF:
> +      {
> +	const char *function_name = XSTR (op, 0);
> +	int code;
> +      
> +	if (strncmp (function_name, "__builtin_bpf_helper_", 21) == 0
> +	    && ((code = bpf_helper_code (function_name + 21)) != 0))
> +	  {
> +	    insn = (char *) xmalloc (5 + 6 + 1);
> +	    sprintf (insn, "call\t%d", code);
> +	  }
> +	else
> +	  {	  
> +	    insn = (char *) xmalloc (strlen (function_name) + 5 + 1);
> +	    sprintf (insn, "call\t%s", function_name);
> +	  }
> +	break;

...this you could define a new prefix letter for printing the SYMBOL_REF
operand appropriately ("f" say) and return "call %f0".

But (1) is easiest. :-)

> +      }
> +    default:
> +      error ("indirect call in function, which are not supported by eBPF");
> +      insn = xstrdup ("call 0");

Can just return "call 0" without the xstrdup.

> [...]
> +/* Print an instruction operand.  This function is called in the macro
> +   PRINT_OPERAND defined in bpf.h */
> +
> +void
> +bpf_print_operand (FILE *file, rtx op, int code ATTRIBUTE_UNUSED)
> +{
> +  switch (GET_CODE (op))
> +    {
> +    case REG:
> +      fprintf (file, "%s", reg_names[REGNO (op)]);
> +      break;
> +    case MEM:
> +      output_address (GET_MODE (op), XEXP (op, 0));
> +      break;
> +    case CONST_DOUBLE:
> +      if (CONST_DOUBLE_HIGH (op))
> +	fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
> +		 CONST_DOUBLE_HIGH (op), CONST_DOUBLE_LOW (op));
> +      else if (CONST_DOUBLE_LOW (op) < 0)
> +	fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (op));
> +      else
> +	fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (op));
> +      break;
> +    case LABEL_REF:
> +      /* This is for label values.  */
> +      /* Fallthrough. */
> +    default:
> +      output_addr_const (file, op);
> +    }
> +}

The three lines above the "default:" look redundant.

> [...]
> +/* Print an operand which is an address.  This function should handle
> +   any legit address, as accepted by bpf_legitimate_address_p.
> +
> +   This function is called in the PRINT_OPERAND_ADDRESS macro defined
> +   in bpf.h */
> +
> +void
> +bpf_print_operand_address (FILE *file, rtx addr)
> +{
> +  switch (GET_CODE (addr))
> +    {
> +    case REG:
> +      fprintf (file, "[%s+0]", reg_names[REGNO (addr)]);
> +      break;
> +    case PLUS:
> +      {
> +	rtx op0 = XEXP (addr, 0);
> +	rtx op1 = XEXP (addr, 1);
> +
> +	if (GET_CODE (op0) == REG && CONSTANT_ADDRESS_P (op1))
> +	  {
> +	    fprintf (file, "[%s+", reg_names[REGNO (op0)]);
> +	    output_addr_const (file, op1);
> +	    fputs ("]", file);
> +	  }
> +	else if (GET_CODE (op1) == REG && CONSTANT_ADDRESS_P (op0))
> +	  {
> +	    fprintf (file, "[%s+", reg_names[REGNO (op1)]);
> +	    output_addr_const (file, op0);
> +	    fputs ("]", file);
> +	  }

As above, you shouldn't (need to) handle the case in which the constant
comes before the register.

> [...]
> +/* Add a BPF builtin function with NAME, CODE and TYPE.  Return
> +   the function decl or NULL_TREE if the builtin was not added.  */
> +
> +static tree
> +def_builtin (const char *name, enum bpf_builtins code, tree type)
> +{
> +  tree t
> +    = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
> +
> +  if (t)
> +    bpf_builtins[code] = t;

I don't think add_builtin_function is allowed to return null,
and assigning null would just reassert the initial value anyway.

> [...]
> +      tree offset_arg = CALL_EXPR_ARG (exp, 0);
> +      struct expand_operand ops[2];
> +
> +      create_input_operand (&ops[0], expand_normal (offset_arg),
> +			    TYPE_MODE (TREE_TYPE (offset_arg)));
> +      create_input_operand (&ops[1], gen_rtx_CONST_INT (SImode, 0),
> +			    SImode);

const0_rtx (or use create_integer_operand).

> [...]
> +/* Always promote arguments and return values in function calls.  */
> +
> +#undef TARGET_PROMOTE_FUNCTION_MODE
> +#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

Not a review comment, just curious: why did you go for this choice?
Is it already enshrined in the ABI?

One of the historical problems with caller promotion from a security
perspective is that the callee then trusts the caller to do the right
thing.  See e.g. the code in combine.c that optimises away "redundant"
extensions that the caller is assumed to have done for us.

E.g. a rogue caller could induce an out-of-bounds access for:

  unsigned int a[256];
  unsigned int f (unsigned char c) { return a[c]; }

because the zero-extension of "c" in the address calculation might be
optimised away.  This might not matter in an eBPF context though...

> [...]
> +/* This should not be needed, because ptr_mode, Pmode and word_mode
> +   are all the same width.  */
> +#define POINTERS_EXTEND_UNSIGNED 1

Yeah, IMO it would be better not to define it.

> [...]
> +/* The widest floating-point format supported by the hardware is
> +   64-bit.  */
> +#define WIDEST_HARDWARE_FP_SIZE 64

Normally soft-fp targets don't need to define this.  Is this related
to the special conversion libcalls you install for eBPF?

> [...]
> +/*** Order of Allocation of Registers.  */
> +
> +/* We generally want to put call-clobbered registers ahead of
> +   call-saved ones.  (IRA expects this.)  */
> +#define REG_ALLOC_ORDER					\
> +  {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}

Do you gain much by defining this?  I would have expected better
code without, given the architecture is so regular.

> [...]
> +/**** Register Classes.  */
> +
> +enum reg_class
> +{
> +  NO_REGS,		/* no registers in set.  */
> +  GR_REGS,		/* general-purpose integer registers.  */
> +  ALL_REGS,		/* all registers.  */
> +  LIM_REG_CLASSES	/* max value + 1.  */
> +};
> +
> +#define N_REG_CLASSES (int) LIM_REG_CLASSES
> +#define GENERAL_REGS GR_REGS
> +
> +/* An initializer containing the names of the register classes as C
> +   string constants.  These names are used in writing some of the
> +   debugging dumps.  */
> +#define REG_CLASS_NAMES				\
> +{						\
> +  "NO_REGS",					\
> +  "GR_REGS",					\
> +  "ALL_REGS"					\
> +}
> +
> +/* An initializer containing the contents of the register classes, as
> +   integers which are bit masks.  The Nth integer specifies the
> +   contents of class N.  The way the integer MASK is interpreted is
> +   that register R is in the class if `MASK & (1 << R)' is 1.  */
> +#define REG_CLASS_CONTENTS			\
> +{						\
> +   0x00000000, /* NO_REGS */			\
> +   0x000007ff, /* GR_REGS */			\
> +   0x000007ff, /* ALL_REGS */		        \
> +}
> +
> +/* A C expression whose value is a register class containing hard
> +   register REGNO.  In general there is more that one such class;
> +   choose a class which is "minimal", meaning that no smaller class
> +   also contains the register.  */
> +#define REGNO_REG_CLASS(REGNO) ((REGNO) < 11 ? GR_REGS : ALL_REGS)

Did you mean to include register 11 in ALL_REGS in REG_CLASS_CONTENTS?
If not, then there doesn't seem to be any distinction between ALL_REGS
and GR_REGS, and it'd be better to make one the alias of the other
(and make REGNO_REG_CLASS return NO_REGS for 11).

> [...]
> +/* A macro whose definition is the name of the class to which a
> +   valid index register must belong.  An index register is one used
> +   in an address where its value is either multiplied by a scale
> +   factor or added to another register (as well as added to a
> +   displacement).  */
> +#define INDEX_REG_CLASS GR_REGS

It looked like you didn't support register-indexed addressing,
so NO_REGS would be better.

> [...]
> +/* C expression which is nonzero if register number REGNO is suitable
> +   for use as a base register in operand addresses.  In eBPF every
> +   hard register can be used for this purpose.  */
> +#define REGNO_OK_FOR_BASE_P(REGNO) 			\
> +  ((REGNO) < FIRST_PSEUDO_REGISTER			\
> +   || (unsigned)reg_renumber[REGNO] < FIRST_PSEUDO_REGISTER)

The reg_regnumber stuff isn't needed for modern (LRA) targets.

> [...]
> +/* C expression which is nonzero if register number REGNO is suitable
> +   for use as an index register in operand addresses.  */
> +#define REGNO_OK_FOR_INDEX_P(REGNO)		\
> +  REGNO_OK_FOR_BASE_P(REGNO)

As above, this should be false if you don't support register-indexed
addressing.

> [...]
> +/* It is safe to return CLASS here.  No more restrictive class is
> +   needed.  */
> +#define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS

This is a legacy macro.  The associated target hook is
TARGET_PREFERRED_RELOAD_CLASS, which defaults to the above,
so I think you can just delete this.

> [...]
> +/* Maximum number of consecutive registers of class CLASS needed to
> +   hold a value of mode MODE.  */
> +#define CLASS_MAX_NREGS(CLASS, MODE) \
> +  (((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))

Same here for TARGET_CLASS_MAX_NREGS.

> [...]
> +/* We cannot support DWARF2 because of the limitations of eBPF.  */
> +#define DBX_DEBUGGING_INFO

(I shed tears at this point, but still I continued...)

> [...]
> +/* Cost of moving data of mode MODE from a register in class FROM to a
> +   register in class TO.  Note that 2 is the default.  */
> +#define REGISTER_MOVE_COST(MODE,FROM,TO) 2
> +
> +/* Cost of moving data of mode MODE between a register of class CLASS
> +   and memory. IN is zero if the value is to be written to memory,
> +   nonzero if it is to be read in.  */
> +#define MEMORY_MOVE_COST(MODE,CLASS,IN) 4

These two are now target hooks.

> [...]
> +;;; Subtraction
> +(define_insn "sub<AM:mode>3"
> +  [(set (match_operand:AM          0 "register_operand"   "=r,r")
> +        (plus:AM (match_operand:AM 1 "register_operand"   " 0,0")
> +                 (match_operand:AM 2 "reg_or_imm_operand" " r,I")))]
> +  "1"
> +  "sub<msuffix>\t%0,%2"
> +  [(set_attr "type" "<mtype>")])

This should only (need to) handle subtractions of registers.
Subtractions of constants become additions.

> [...]
> +(define_insn "*mulsi3_extended"
> +  [(set (match_operand:DI	   0 "register_operand" "=r,r")
> +        (sign_extend:DI
> +         (mult:SI (match_operand:SI 1 "register_operand" "0,0")
> +                  (match_operand:SI 2 "reg_or_imm_operand" "r,I"))))]
> +  ""
> +  "mul32\t%0,%2"
> +  [(set_attr "type" "alu32")])

There's a named pattern for this: mulsidi3.  You might get better
code by using that name instead.

> [...]
> +;; Division
> +(define_insn "div<AM:mode>3"
> +  [(set (match_operand:AM 0 "register_operand" "=r,r")
> +        (div:AM (match_operand:AM 1 "register_operand" " 0,0")
> +                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
> +  ""
> +  "div<msuffix>\t%0,%2"
> +  [(set_attr "type" "<mtype>")])
> +
> +(define_insn "udiv<AM:mode>3"
> +  [(set (match_operand:AM 0 "register_operand" "=r,r")
> +        (div:AM (match_operand:AM 1 "register_operand" " 0,0")
> +                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
> +  ""
> +  "div<msuffix>\t%0,%2"
> +  [(set_attr "type" "<mtype>")])

div and udiv are two different operations.  I don't see how we can
use the same eBPF instruction for both.  The rtl for udiv should also
use the udiv rtx code.

> +;;; Modulus
> +(define_insn "mod<AM:mode>3"
> +  [(set (match_operand:AM 0 "register_operand" "=r,r")
> +        (mod:AM (match_operand:AM 1 "register_operand" " 0,0")
> +                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
> +  ""
> +  "mod<msuffix>\t%0,%2"
> +  [(set_attr "type" "<mtype>")])
> +
> +(define_insn "umod<AM:mode>3"
> +  [(set (match_operand:AM 0 "register_operand" "=r,r")
> +        (mod:AM (match_operand:AM 1 "register_operand" " 0,0")
> +                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
> +  ""
> +  "mod<msuffix>\t%0,%2"
> +  [(set_attr "type" "<mtype>")])

Same here, with umod for the rtx code.

> [...]
> +;;; Sign-extension
> +
> +;; Sign-extending a 32-bit value into a 64-bit value is achieved using
> +;; shifting, with instructions generated by the expand below.
> +
> +(define_expand "extendsidi2"
> +  [(set (match_operand:DI 0 "register_operand" "=r")
> +	(sign_extend:DI (match_operand:SI 1 "register_operand" "r")))]

define_expands shouldn't have constraints.  (Same for the rest of the file.)

> [...]
> +(define_expand "mov<AMM:mode>"
> +  [(set (match_operand:AMM 0 "general_operand" "")
> +        (match_operand:AMM 1 "general_operand" ""))]
> +        ""
> +        "
> +{
> +    if (!register_operand(operands[0], <AMM:MODE>mode)
> +        && !register_operand(operands[1], <AMM:MODE>mode))
> +         operands[1] = force_reg (<AMM:MODE>mode, operands[1]); 

Some odd indentation here.  The code should be indented in the same
way as for .c files.

> +    /* In cases where the moved entity is a constant address, we
> +       need to emit an extra mov and modify the second operand to
> +       obtain something like:
> +
> +         lddw %T, %1
> +         ldxw %0, [%T+0]
> +
> +       Ditto for stores.  */
> +
> +    if (MEM_P (operands[1])
> +        && CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
> +      {
> +         rtx tmp = gen_reg_rtx (DImode);
> +
> +         emit_move_insn (tmp, XEXP (operands[1], 0));
> +         operands[1] = gen_rtx_MEM (<AMM:MODE>mode, tmp);
> +      }
> +
> +    if (MEM_P (operands[0])
> +        && CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
> +      {
> +         rtx tmp = gen_reg_rtx (DImode);
> +  
> +         emit_move_insn (tmp, XEXP (operands[0], 0));
> +         operands[0] = gen_rtx_MEM (<AMM:MODE>mode, tmp);
> +      }

But in that case, why not just say that constant addresses aren't
legitimate?  That will make it easier for the optimisers to do the
right thing and (hopefully) generate better code.

> +(define_constraint "B"
> +  "A constant argument for LDDW."
> +  (ior (match_code "const,symbol_ref,label_ref,const_double")
> +       (and (match_code "const_int")
> +            (match_test "IN_RANGE (ival, -1 - 0x7fffffffffffffff, 0x7fffffffffffffff)"))))

Like Segher says, the IN_RANGE check is always true.  So just:

  (match_code "const,symbol_ref,label_ref,const_double,const_int")

should be enough.

> +(define_predicate "imm32_operand"
> +  (ior (and (match_code "const_int")
> +            (match_test "IN_RANGE (INTVAL (op), 0, 0xffffffff)"))
> +       (match_code "symbol_ref,label_ref,const")))

This is only used with SImode.  const_ints are represented in
sign-extended form, so the INTVAL will have the range of int32_t
rather than uint32_t.  I.e.:

   IN_RANGE (INTVAL (op), 0x80000000, 0xffffffff)

is always false for const_ints interpreted as SImode.

> +(define_predicate "lddw_operand"
> +  (ior (and (match_code "const_int")
> +            (match_test "IN_RANGE (INTVAL (op), 0, 0xffffffffffffffff)"))
> +       (match_code "symbol_ref,label_ref,const,const_double")))

Same point here: the INTVAL will have the range of int64_t rather
than uint64_t.

> [...]
> +(define_predicate "call_operand"
> +  (match_code "mem")
> +{
> +  if (GET_CODE (op) != MEM)
> +    return 0;
> +
> +  op = XEXP (op, 0);
> +
> +  if (GET_MODE (op) != mode
> +      && GET_MODE (op) != VOIDmode
> +      && mode != VOIDmode)
> +    return 0;

No need for the check against "mode".  That logically applies to "op" rather
than the address, and is generated automatically.

> +
> +  switch (GET_CODE (op))
> +  {
> +  case REG:
> +  case CONST_INT:
> +  case SYMBOL_REF:
> +  case LABEL_REF:
> +    return 1;
> +    break;
> +  case CONST:
> +    {
> +      switch (GET_CODE (XEXP (op, 0)))
> +	{
> +	case SYMBOL_REF:
> +	case LABEL_REF:
> +	case CONST_INT:
> +	  return 1;
> +	default:
> +	  break;
> +	}
> +      break;
> +    }
> +  default:
> +    break;
> +  }

Stripping CONST via:

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

would be simpler.  Please also use true/false these days (realise a lot
of existing code doesn't).

It really would be nice to avoid the imports of elfos.h stuff,
but you know that already. :-)

Generally looks really good, thanks.

Richard
Jose E. Marchesi Aug. 20, 2019, 2:20 p.m. UTC | #2
Hi Richard!

Many thanks for the deep review.  I'm addressing some of your questions
below.

    > [...]
    > +/* Override options and do some other initialization.  */
    > +
    > +static void
    > +bpf_option_override (void)
    > +{
    > +  /* Set the default target kernel if no -mkernel was specified.  */
    > +  if (!global_options_set.x_bpf_kernel)
    > +    bpf_kernel = LINUX_LATEST;
    
    LINUX_LATEST is the default in the .opt file, so when is this needed?

It is an idiom I got from sparc.c:

  /* Set the default CPU if no -mcpu option was specified.  */
  if (!global_options_set.x_sparc_cpu_and_features)
    {
      ...
    }

Maybe the code in sparc.c predates the Init() directive in sparc.opt
file?
    
    > +/* Determine whether the port is prepared to handle insns involving
    > +   scalar mode MODE.  For a scalar mode to be considered supported,
    > +   all the basic arithmetic and comparisons must work.  */
    > +
    > +static bool
    > +bpf_scalar_mode_supported_p (scalar_mode mode)
    > +{
    > +  switch (mode)
    > +    {
    > +    case E_QImode:
    > +    case E_HImode:
    > +    case E_SImode:
    > +    case E_DImode:
    > +    case E_TImode:
    > +      return true;
    > +
    > +    default:
    > +      return false;
    > +    }
    > +
    > +  return false;
    > +}
    
    Are you overriding this just to exclude floating-point modes?
    If so, what specifically doesn't work?
    
    Would be worth a comment.

Reminiscence of not having support for TImodes at some stage.  I'm
removing the target hook.
    
    > [...]
    > +/* Return true if a value of mode MODE1 is accessible in mode MODE2
    > +   without copying.  */
    > +
    > +static bool
    > +bpf_modes_tieable_p (enum machine_mode mode1,
    > +		     enum machine_mode mode2)
    > +{
    > +  return (mode1 == mode2
    > +	  || GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2));
    > +}
    
    The second part makes the first part redundant.  But why do you
    need to restrict it based on classes?  It seems like eBPF is an
    example of an architecture where any modes are tieable, so using
    the default would be better if possible.
    
    If the restriction is needed, it would be worth having a comment
    explaining which case you're excluding and why.

Hm yes, you are right.  TARGET_HARD_REGNO_MODE_OK (R,MODE1) ==
TARGET_HARD_REGNO_MODE_OK (R,MODE2) for all supported modes...  I'm
removing the target hook.
    
    > [...]
    > +    case PLUS:
    > +      {
    > +	/* The valid patterns here are:
    > +	   
    > +	   (PLUS ADDR_BASE CONST_INT)
    > +	   (PLUS CONST_INT ADDR_BASE)
    
    The second one isn't canonical rtl, so you shouldn't (need to) handle it.
    Please raise a bug if you find a case where it's being generated. :-)
    
Oooh, didn't know that, that's actually very handy :)

Do you know if this is documented anywhere?  I don't recall seeing this
in the internals manual, but maybe I missed it.

    > [...]
    > +/* Split an out-of-range address displacement into hi and lo parts.
    > +   The hi part will have to be loaded into a register separately, but
    > +   the low part will be folded into the memory operand.  */
    > +
    > +static bool
    > +bpf_legitimize_address_displacement (rtx *off1, rtx *off2,
    > +				     poly_int64 poly_offset, machine_mode)
    > +{
    > +  HOST_WIDE_INT orig_offset = poly_offset;
    > +
    > +  /* Our case is very easy: the REG part of an indirect address is
    > +     64-bit wide, so it can hold any address.  This always leads to
    > +     REG+0 */
    > +
    > +  *off1 = GEN_INT (orig_offset);
    > +  *off2 = GEN_INT (0);
    > +  return true;
    > +}
    > +
    > +#undef TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT
    > +#define TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT bpf_legitimize_address_displacement
    
    But then do you need to define the hook at all?  I think this is what
    LRA does by default.  The hook is only really there to get better spill
    code (via common anchor points) on targets with limited offset ranges.

Hm I think I wrote this hook to fix some invalid addresses being
generated at some point... but I don't recall the details.  It looks
like this hook is no longer necessary with the current implementation of
addresses (legitimize etc) so I'm removing it.
    
    > [...]
    > +/* Return true if memory address ADDR in address space AS can have
    > +   different meanings depending on the machine mode of the memory
    > +   reference it is used for or if the address is valid for some modes
    > +   but not others.  */
    > +
    > +static bool
    > +bpf_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED,
    > +                              addr_space_t as ATTRIBUTE_UNUSED)
    > +{
    > +  return true;
    > +}
    > +
    > +#undef TARGET_MODE_DEPENDENT_ADDRESS_P
    > +#define TARGET_MODE_DEPENDENT_ADDRESS_P bpf_mode_dependent_address_p
    
    Why does this need to be true?  False is the better answer if you can
    give it. :-)  And it looks like the set of legitimate addresses doesn't
    really care about modes.

That true was supposed to be false! :)
I'm removing the hook, as the default returns false anyway.
    
    > [...]
    > +/* Return a RTX indicating whether a function argument is passed in a
    > +   register and if so, which register.  */
    > +
    > +static rtx
    > +bpf_function_arg (cumulative_args_t ca, enum machine_mode mode ATTRIBUTE_UNUSED,
    > +                  const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED)
    > +{
    > +  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
    > +
    > +  if (*cum < 5)
    > +    return gen_rtx_REG (mode, *cum + 1);
    > +  else
    > +    /* An error have been emitted for this in
    > +       bpf_function_arg_advance.  */
    > +    return NULL_RTX;
    
    This hook is called first, so "will be" rather than "has been".

    (BTW, I just submitted a series of patches to change this interface,
    but it should be a trivial change for whichever of us gets to make it.)

I see your interface change was approved yesterday, so I will just adapt
in my next rebase :)
        
    > [...]
    > +/* Return true if an argument at the position indicated by CUM should
    > +   be passed by reference.  If the hook returns true, a copy of that
    > +   argument is made in memory and a pointer to the argument is passed
    > +   instead of the argument itself.  */
    > +
    > +static bool
    > +bpf_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
    > +		       enum machine_mode mode, const_tree type,
    > +		       bool named ATTRIBUTE_UNUSED)
    > +{
    > +  unsigned HOST_WIDE_INT size;
    > +
    > +  if (type)
    > +    {
    > +      if (AGGREGATE_TYPE_P (type))
    > +	return true;
    > +      size = int_size_in_bytes (type);
    > +    }
    > +  else
    > +    size = GET_MODE_SIZE (mode);
    > +
    > +  return (size > 8*5);
    > +}
    > +
    > +#undef TARGET_PASS_BY_REFERENCE
    > +#define TARGET_PASS_BY_REFERENCE bpf_pass_by_reference
    
    I might have misunderstood, but I thought from an earlier (IRC?)
    message, it wasn't possible for the callee to access the caller's
    frame, which was why you had the error about running out of argument
    registers.  If so, won't passing by reference make the argument
    inaccessible in practice?  I don't see what you gain by defining
    the hook, since I'd have assumed (at least after the fix above)
    that it would be better to pass by value and get an error about
    having no argument registers left.

Yes.  I added that hook before I had the restriction on the number of
arguments in place.  Removing it.
    
    > [...]
    > +/* Always promote arguments and return values in function calls.  */
    > +
    > +#undef TARGET_PROMOTE_FUNCTION_MODE
    > +#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
    
    Not a review comment, just curious: why did you go for this choice?
    Is it already enshrined in the ABI?
    
    One of the historical problems with caller promotion from a security
    perspective is that the callee then trusts the caller to do the right
    thing.  See e.g. the code in combine.c that optimises away "redundant"
    extensions that the caller is assumed to have done for us.
    
    E.g. a rogue caller could induce an out-of-bounds access for:
    
      unsigned int a[256];
      unsigned int f (unsigned char c) { return a[c]; }
    
    because the zero-extension of "c" in the address calculation might be
    optimised away.  This might not matter in an eBPF context though...
    
Interesting.  I have to think about this, and also check whether llvm is
doing caller argument promotion or not.

    > [...]
    > +/* The widest floating-point format supported by the hardware is
    > +   64-bit.  */
    > +#define WIDEST_HARDWARE_FP_SIZE 64
    
    Normally soft-fp targets don't need to define this.  Is this related
    to the special conversion libcalls you install for eBPF?

No.  I didn't realize its value defaults to LONG_DOUBLE_TYPE_SIZE.
Removing the definition.
Segher Boessenkool Aug. 20, 2019, 2:42 p.m. UTC | #3
On Mon, Aug 19, 2019 at 08:57:22PM +0100, Richard Sandiford wrote:
> > +/*** Order of Allocation of Registers.  */
> > +
> > +/* We generally want to put call-clobbered registers ahead of
> > +   call-saved ones.  (IRA expects this.)  */
> > +#define REG_ALLOC_ORDER					\
> > +  {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
> 
> Do you gain much by defining this?  I would have expected better
> code without, given the architecture is so regular.

It does exactly the same as not defining REG_ALLOC_ORDER, just a tiny
bit less efficient.

> > +#define REG_CLASS_CONTENTS			\
> > +{						\
> > +   0x00000000, /* NO_REGS */			\
> > +   0x000007ff, /* GR_REGS */			\
> > +   0x000007ff, /* ALL_REGS */		        \
> > +}
> > +
> > +/* A C expression whose value is a register class containing hard
> > +   register REGNO.  In general there is more that one such class;
> > +   choose a class which is "minimal", meaning that no smaller class
> > +   also contains the register.  */
> > +#define REGNO_REG_CLASS(REGNO) ((REGNO) < 11 ? GR_REGS : ALL_REGS)
> 
> Did you mean to include register 11 in ALL_REGS in REG_CLASS_CONTENTS?
> If not, then there doesn't seem to be any distinction between ALL_REGS
> and GR_REGS, and it'd be better to make one the alias of the other
> (and make REGNO_REG_CLASS return NO_REGS for 11).

ALL_REGS is required to contain all (hard) registers, too.  I wonder what
will go wrong this way...  Well nothing too obvious, apparently!  :-)

> > +(define_insn "*mulsi3_extended"
> > +  [(set (match_operand:DI	   0 "register_operand" "=r,r")
> > +        (sign_extend:DI
> > +         (mult:SI (match_operand:SI 1 "register_operand" "0,0")
> > +                  (match_operand:SI 2 "reg_or_imm_operand" "r,I"))))]
> > +  ""
> > +  "mul32\t%0,%2"
> > +  [(set_attr "type" "alu32")])
> 
> There's a named pattern for this: mulsidi3.  You might get better
> code by using that name instead.

mulsidi3 is something else (it extends the operands before the mult).

> > +(define_expand "extendsidi2"
> > +  [(set (match_operand:DI 0 "register_operand" "=r")
> > +	(sign_extend:DI (match_operand:SI 1 "register_operand" "r")))]
> 
> define_expands shouldn't have constraints.  (Same for the rest of the file.)
> 
> > [...]
> > +(define_expand "mov<AMM:mode>"
> > +  [(set (match_operand:AMM 0 "general_operand" "")
> > +        (match_operand:AMM 1 "general_operand" ""))]

Not empty constraints, either...  That is, you do not need to write them,
and internally it will be the same thing.


Segher
Jose E. Marchesi Aug. 20, 2019, 2:47 p.m. UTC | #4
> [...]
    > +;; Division
    > +(define_insn "div<AM:mode>3"
    > +  [(set (match_operand:AM 0 "register_operand" "=r,r")
    > +        (div:AM (match_operand:AM 1 "register_operand" " 0,0")
    > +                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
    > +  ""
    > +  "div<msuffix>\t%0,%2"
    > +  [(set_attr "type" "<mtype>")])
    > +
    > +(define_insn "udiv<AM:mode>3"
    > +  [(set (match_operand:AM 0 "register_operand" "=r,r")
    > +        (div:AM (match_operand:AM 1 "register_operand" " 0,0")
    > +                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
    > +  ""
    > +  "div<msuffix>\t%0,%2"
    > +  [(set_attr "type" "<mtype>")])
    
    div and udiv are two different operations.  I don't see how we can
    use the same eBPF instruction for both.  The rtl for udiv should also
    use the udiv rtx code.

    > +;;; Modulus
    > +(define_insn "mod<AM:mode>3"
    > +  [(set (match_operand:AM 0 "register_operand" "=r,r")
    > +        (mod:AM (match_operand:AM 1 "register_operand" " 0,0")
    > +                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
    > +  ""
    > +  "mod<msuffix>\t%0,%2"
    > +  [(set_attr "type" "<mtype>")])
    > +
    > +(define_insn "umod<AM:mode>3"
    > +  [(set (match_operand:AM 0 "register_operand" "=r,r")
    > +        (mod:AM (match_operand:AM 1 "register_operand" " 0,0")
    > +                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
    > +  ""
    > +  "mod<msuffix>\t%0,%2"
    > +  [(set_attr "type" "<mtype>")])
    
    Same here, with umod for the rtx code.

Oh dear, the signed division...  during development I just made both
signed and unsigned flavors use the same instructions, then forgot to
change it.

Why did I do that?  Because eBPF does not provide instructions for doing
_signed_ division, nor signed remainder: both `div' and `mod' perform
unsigned arithmetic.

clang/llvm ICEs whenever it finds signed division in a C program:

   $ clang -target bpf foo.c
   Error: Unsupport signed division for DAG: t17: i64 = sdiv t15, t16Please convert to unsigned div/mod.
   fatal error: error in backend: Cannot select: t17: i64 = sdiv t15, t16

For GCC I much prefer for the compiler to generate funcalls instead, to
__divdi3/__divsi3/__moddi3/__modsi3 or the like, even if nothing is
providing implementations for these functions (yet.)

So I just defined the u{div,mod}MODE3 patterns in bpf.md (yes this time
using the right rtl opcode :P) and removed the insns for signed
operations.

Thanks for noticing this!
Jose E. Marchesi Aug. 20, 2019, 2:56 p.m. UTC | #5
> > +/*** Order of Allocation of Registers.  */
    > > +
    > > +/* We generally want to put call-clobbered registers ahead of
    > > +   call-saved ones.  (IRA expects this.)  */
    > > +#define REG_ALLOC_ORDER					\
    > > +  {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
    > 
    > Do you gain much by defining this?  I would have expected better
    > code without, given the architecture is so regular.
    
    It does exactly the same as not defining REG_ALLOC_ORDER, just a tiny
    bit less efficient.

Yeah I just removed the definition from the port.
Segher Boessenkool Aug. 20, 2019, 3:13 p.m. UTC | #6
On Tue, Aug 20, 2019 at 04:20:03PM +0200, Jose E. Marchesi wrote:
>     > +	   (PLUS ADDR_BASE CONST_INT)
>     > +	   (PLUS CONST_INT ADDR_BASE)
>     
>     The second one isn't canonical rtl, so you shouldn't (need to) handle it.
>     Please raise a bug if you find a case where it's being generated. :-)
>     
> Oooh, didn't know that, that's actually very handy :)
> 
> Do you know if this is documented anywhere?  I don't recall seeing this
> in the internals manual, but maybe I missed it.

@node Insn Canonicalizations
...
@itemize @bullet
@item
For commutative and comparison operators, a constant is always made the
second operand.  If a machine only supports a constant as the second
operand, only patterns that match a constant in the second operand need
be supplied.

The whole section is worth reading :-)


Segher
Richard Sandiford Aug. 20, 2019, 4:03 p.m. UTC | #7
jose.marchesi@oracle.com (Jose E. Marchesi) writes:
> Hi Richard!
>
> Many thanks for the deep review.  I'm addressing some of your questions
> below.
>
>     > [...]
>     > +/* Override options and do some other initialization.  */
>     > +
>     > +static void
>     > +bpf_option_override (void)
>     > +{
>     > +  /* Set the default target kernel if no -mkernel was specified.  */
>     > +  if (!global_options_set.x_bpf_kernel)
>     > +    bpf_kernel = LINUX_LATEST;
>     
>     LINUX_LATEST is the default in the .opt file, so when is this needed?
>
> It is an idiom I got from sparc.c:
>
>   /* Set the default CPU if no -mcpu option was specified.  */
>   if (!global_options_set.x_sparc_cpu_and_features)
>     {
>       ...
>     }
>
> Maybe the code in sparc.c predates the Init() directive in sparc.opt
> file?

Might be wrong, but it looks like the SPARC code is overriding the .opt
default with the configure-time one.  I don't think it's needed when
the default is fixed.

>     > [...]
>     > +/* Return a RTX indicating whether a function argument is passed in a
>     > +   register and if so, which register.  */
>     > +
>     > +static rtx
>     > +bpf_function_arg (cumulative_args_t ca, enum machine_mode mode ATTRIBUTE_UNUSED,
>     > +                  const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED)
>     > +{
>     > +  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
>     > +
>     > +  if (*cum < 5)
>     > +    return gen_rtx_REG (mode, *cum + 1);
>     > +  else
>     > +    /* An error have been emitted for this in
>     > +       bpf_function_arg_advance.  */
>     > +    return NULL_RTX;
>    
>     This hook is called first, so "will be" rather than "has been".
>
>     (BTW, I just submitted a series of patches to change this interface,
>     but it should be a trivial change for whichever of us gets to make it.)
>
> I see your interface change was approved yesterday, so I will just adapt
> in my next rebase :)

Thanks!

Richard
Jeff Law Aug. 20, 2019, 6:49 p.m. UTC | #8
On 8/16/19 6:59 PM, Jose E. Marchesi wrote:
> This patch adds a port for the Linux kernel eBPF architecture to GCC.
> 
> ChangeLog:
> 
>   * configure.ac: Support for bpf-*-* targets.
>   * configure: Regenerate.
> 
> contrib/ChangeLog:
> 
>   * config-list.mk (LIST): Disable go in bpf-*-* targets.
> 
> gcc/ChangeLog:
> 
>   * config.gcc: Support for bpf-*-* targets.
>   * common/config/bpf/bpf-common.c: New file.
>   * config/bpf/t-bpf: Likewise.
>   * config/bpf/predicates.md: Likewise.
>   * config/bpf/constraints.md: Likewise.
>   * config/bpf/bpf.opt: Likewise.
>   * config/bpf/bpf.md: Likewise.
>   * config/bpf/bpf.h: Likewise.
>   * config/bpf/bpf.c: Likewise.
>   * config/bpf/bpf-protos.h: Likewise.
>   * config/bpf/bpf-opts.h: Likewise.
>   * config/bpf/bpf-helpers.h: Likewise.
>   * config/bpf/bpf-helpers.def: Likewise.
So I think various folks have already mentioned the configure rebuild
issues, formatting and other stuff.  I'm going to try to keep them all
in mind so that I don't duplicate anything.  If I do duplicate someone's
comment, apologies in advance.

At a high level I realize there's lots of things not supported due to
the restricted environment it'll ultimately be used in.  However, you
might want to consider extensions that would allow larger portions of
the gcc testsuite to run and some kind of user mode simulator so that
you can reasonably test the target.  Not a requirement, but could be
useful (from experience :-)


> ---

> diff --git a/contrib/config-list.mk b/contrib/config-list.mk
> index 69c826e649a..aa9fdb64eaf 100644
> --- a/contrib/config-list.mk
> +++ b/contrib/config-list.mk
> @@ -123,7 +123,7 @@ $(LIST): make-log-dir
>  		TGT=`echo $@ | awk 'BEGIN { FS = "OPT" }; { print $$1 }'` &&			\
>  		TGT=`$(GCC_SRC_DIR)/config.sub $$TGT` &&					\
>  		case $$TGT in									\
> -			*-*-darwin* | *-*-cygwin* | *-*-mingw* | *-*-aix*)			\
> +			*-*-darwin* | *-*-cygwin* | *-*-mingw* | *-*-aix* | bpf-*-* )			\
>  				ADDITIONAL_LANGUAGES="";					\
>  				;;								\
>  			*)									\
So I've got no problem disabling Go for BPF, but I don't see bpf added
to LIST, which it should be.


> diff --git a/gcc/common/config/bpf/bpf-common.c b/gcc/common/config/bpf/bpf-common.c
> new file mode 100644
> index 00000000000..a68feb62897
> --- /dev/null
> +++ b/gcc/common/config/bpf/bpf-common.c
[ snip ]
> +/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
> +static const struct default_options bpf_option_optimization_table[] =
> +  {
> +    /* Enable -funroll-all-loops by default.  */
> +    { OPT_LEVELS_ALL, OPT_funroll_all_loops, NULL, 1 },
> +    /* Disable -fomit-frame-pointer by default.  */
> +    { OPT_LEVELS_ALL, OPT_fomit_frame_pointer, NULL, 0 },
> +    { OPT_LEVELS_NONE, 0, NULL, 0 }
> +  };
Curious about the motivation on the loop unrolling stuff.  In general we
discourage targets from mucking around with the default
flags/optimizations, but it is sometimes the right thing to do.

Rather than -fomit-frame-pointer, I think you can use the
FRAME_POINTER_REQUIRED hook if you always want a frame pointer.


> +
> +#undef TARGET_OPTION_OPTIMIZATION_TABLE
> +#define TARGET_OPTION_OPTIMIZATION_TABLE bpf_option_optimization_table
> +
> +/* Implement TARGET_OPTION_DEFAULT_PARAMS.  */
> +
> +static void
> +bpf_option_default_params (void)
> +{
> +  /* XXX large-stack-frame = 512 bytes */
> +  /* XXX max-unrolled-insns */
> +  /* XXX max-unroll-times */
> +}
> +
> +#undef TARGET_OPTION_DEFAULT_PARAMS
> +#define TARGET_OPTION_DEFAULT_PARAMS bpf_option_default_params
I'd generally discourage twiddling the params like this, at least the
ones for the unroller.



> diff --git a/gcc/config/bpf/bpf-helpers.h b/gcc/config/bpf/bpf-helpers.h
> new file mode 100644
> index 00000000000..2fe96be7637
> --- /dev/null
> +++ b/gcc/config/bpf/bpf-helpers.h
I can't remember, is this an installed header that consumers are
expected to use?  If so you might want to be careful with polluting user
code with BPF #defines such as BPF_ANY, BPF_NOEXIST, BPF_EXIST, etc.
The #defines for mapping to the builtins are probably OK though.




> diff --git a/gcc/config/bpf/bpf.c b/gcc/config/bpf/bpf.c
> new file mode 100644
> index 00000000000..4a42259a9c3
> --- /dev/null
> +++ b/gcc/config/bpf/bpf.c
> @@ -0,0 +1,1136 @@
[ ... ]
> +
> +/* Return the builtin code corresponding to the kernel helper builtin
> +   __builtin_NAME, or 0 if the name doesn't correspond to a kernel
> +   helper builtin.  */
> +
> +static inline int
> +bpf_helper_code (const char *name)
> +{
> +  int i;
> +
> +  for (i = 1; i < BPF_BUILTIN_HELPER_MAX; ++i)
> +    {
> +      if (strcmp (name, bpf_helper_names[i]) == 0)
> +	return i;
> +    }
> +
> +  return 0;
> +}
Does this get called often?  If so the linear search could end up being
expensive from a compile-time standpoint.



> +#define KERNEL_VERSION_CODE "__BPF_KERNEL_VERSION_CODE__="    
> +    kernel_version_code
> +      = (char *) alloca (strlen (KERNEL_VERSION_CODE) + 7 + 1);
> +    strcpy (kernel_version_code, KERNEL_VERSION_CODE);
> +#undef KERNEL_VERSION_CODE
> +    strcat (kernel_version_code, version_code);
> +    builtin_define (kernel_version_code);
> +  }
> +}
Does builtin_define copy its argument?  If not, then I'd expect this to
be problematical as the alloca'd space will be reclaimed.


> +static rtx
> +bpf_function_value (const_tree ret_type,
> +		    const_tree fntype_or_decl ATTRIBUTE_UNUSED,
> +		    bool outgoing ATTRIBUTE_UNUSED)
> +{
> +  enum machine_mode mode;
> +  int unsignedp;
> +
> +  mode = TYPE_MODE (ret_type);
> +  if (INTEGRAL_TYPE_P (ret_type))
> +    mode = promote_function_mode (ret_type, mode, &unsignedp, fntype_or_decl, 1);
> +
> +  return gen_rtx_REG (mode, 0);
Rather than using "0" for the register number, consider using its name
from bpf.h.

> +}
> +
> +#undef TARGET_FUNCTION_VALUE
> +#define TARGET_FUNCTION_VALUE bpf_function_value
> +
> +/* Return true if REGNO is th enumber of a hard register in which the
> +   values of called function may come back.  */
> +
> +static bool
> +bpf_function_value_regno_p (const unsigned int regno)
> +{
> +  return (regno == 0);
> +}
Similarly.

> +
> +static void
> +bpf_compute_frame (void)
> +{
> +  int stack_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
> +  int padding_locals, regno;
> +
> +  /* Set the space used in the stack by local variables.  This is
> +     rounded up to respect the minimum stack alignment.  */
> +  cfun->machine->local_vars_size = get_frame_size ();
> +
> +  padding_locals = cfun->machine->local_vars_size % stack_alignment;
> +  if (padding_locals)
> +    padding_locals = stack_alignment - padding_locals;
> +
> +  cfun->machine->local_vars_size += padding_locals;
> +
> +  /* Set the space used in the stack by callee-saved used registers in
> +     the current function.  There is no need to round up, since the
> +     registers are all 8 bytes wide.  */
> +  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
> +    if ((!fixed_regs[regno]
> +	 && df_regs_ever_live_p (regno)
> +	 && !call_used_regs[regno])
> +	|| (cfun->calls_alloca
> +	    && regno == STACK_POINTER_REGNUM))
> +      cfun->machine->callee_saved_reg_size += 8;
> +
> +  /* Check that the total size of the frame doesn't exceed the limit
> +     imposed by eBPF: currently 512 bytes.  */
> +  if ((cfun->machine->local_vars_size
> +       + cfun->machine->callee_saved_reg_size) > 512)
> +    {
> +      static int stack_limit_exceeded = 0;
> +
> +      if (!stack_limit_exceeded)
> +	error ("eBPF stack limit of 512 bytes exceeded");
> +      stack_limit_exceeded = 1;
> +    }
Is the stack limit likely to change?  Would a param work better here
which would allow us to accommodate such a change without having to
re-release GCC?



> +
> +/* Expand to the instructions in a function epilogue.  This function
> +   is called when expanding the 'prologue' pattern in bpf.md.  */
> +
> +void
> +bpf_expand_epilogue (void)
> +{
> +  int regno, fp_offset;
> +  rtx insn;
> +
> +  bpf_compute_frame ();
> +  fp_offset = -cfun->machine->local_vars_size;
> +
> +  /* Restore callee-saved hard registes from the stack.  */
> +  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
> +    {
> +      if ((!fixed_regs[regno]
> +	   && df_regs_ever_live_p (regno)
> +	   && !call_used_regs[regno])
> +	  || (cfun->calls_alloca
> +	      && regno == STACK_POINTER_REGNUM))
> +	{
> +	  rtx mem;
> +
> +	  if (!IN_RANGE (fp_offset, -1 - 0x7fff, 0x7fff))
> +	    /* This has been already reported as an error in
> +	       bpf_compute_frame. */
> +	    break;
> +	  else
> +	    {
> +	      mem = gen_frame_mem (DImode,
> +				   plus_constant (DImode,
> +						  gen_rtx_REG (DImode, FRAME_POINTER_REGNUM),
> +						  fp_offset - 8));
> +	      insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem);
> +	      RTX_FRAME_RELATED_P (insn) = 1;
> +	      fp_offset -= 8;
> +	    }
> +	}
> +    }
> +
> +  emit_jump_insn (gen_exit ());
So ebpf doesn't need to do instruction scheduling, but even so it's
probably safest to emit a scheduling barrier before cutting back the stack.



> +HOST_WIDE_INT
> +bpf_initial_elimination_offset (int from,
> +				int to)
> +{
> +  HOST_WIDE_INT ret;
> +
> +  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
> +    {
> +      bpf_compute_frame ();
> +      ret = (cfun->machine->local_vars_size
> +	     + cfun->machine->callee_saved_reg_size);
> +    }
> +  else if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
> +    ret = 0;
> +  else
> +    abort ();
Rather than abort() these days gcc_unreachable () is preferred.

I'm going to skip over all the function argument passing stuff as that
all needs updating after Richard S's changes.

> diff --git a/gcc/config/bpf/bpf.h b/gcc/config/bpf/bpf.h
> +
> +/**** Debugging Info ****/
> +
> +/* We cannot support DWARF2 because of the limitations of eBPF.  */
> +#define DBX_DEBUGGING_INFO
Umm, we're trying to get rid of DBX_DEBUGGING_INFO.  I'd rather not add
another user at this point.  How tough would it be to support dwarf?

> +
> +/* Define how to find the value returned by a library function
> +   assuming the value has mode MODE.  This is always %r0 for eBPF.  */
> +#define LIBCALL_VALUE(MODE)  \
> +  gen_rtx_REG ((MODE), 0)
Consider using BPF_R0

> +
> +/* The maximum number of bytes that a signle instruction can move
s/signle/single/



> +
> +(define_insn "*mulsi3_extended"
> +  [(set (match_operand:DI	   0 "register_operand" "=r,r")
> +        (sign_extend:DI
> +         (mult:SI (match_operand:SI 1 "register_operand" "0,0")
> +                  (match_operand:SI 2 "reg_or_imm_operand" "r,I"))))]
> +  ""
> +  "mul32\t%0,%2"
> +  [(set_attr "type" "alu32")])
mulsidi3?  or umulsidi3 for the name?

I believe you already commented on the need to address the div vs udiv
problem.  Similarly for mod vs umod.

> +;;;; Data movement
> +
> +(define_mode_iterator AMM [QI HI SI DI SF DF])
> +
> +(define_expand "mov<AMM:mode>"
> +  [(set (match_operand:AMM 0 "general_operand" "")
> +        (match_operand:AMM 1 "general_operand" ""))]
> +        ""
> +        "
> +{
> +    if (!register_operand(operands[0], <AMM:MODE>mode)
> +        && !register_operand(operands[1], <AMM:MODE>mode))
> +         operands[1] = force_reg (<AMM:MODE>mode, operands[1]); 
> +
> +    /* In cases where the moved entity is a constant address, we
> +       need to emit an extra mov and modify the second operand to
> +       obtain something like:
> +
> +         lddw %T, %1
> +         ldxw %0, [%T+0]
> +
> +       Ditto for stores.  */
> +
> +    if (MEM_P (operands[1])
> +        && CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
> +      {
> +         rtx tmp = gen_reg_rtx (DImode);
> +
> +         emit_move_insn (tmp, XEXP (operands[1], 0));
> +         operands[1] = gen_rtx_MEM (<AMM:MODE>mode, tmp);
> +      }
> +
> +    if (MEM_P (operands[0])
> +        && CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
> +      {
> +         rtx tmp = gen_reg_rtx (DImode);
> +  
> +         emit_move_insn (tmp, XEXP (operands[0], 0));
> +         operands[0] = gen_rtx_MEM (<AMM:MODE>mode, tmp);
> +      }
> +
> +}")
Hmm, what happens if you need to reload something from a constant
address?  You can't call gen_reg_rtx once register allocation has
started.  The case where you need a scratch register really feels like
you need to be defining secondary reloads.


Generally it looks pretty good.  I'd like to take one more looksie at
patch #2 of the series after you've addressed the comments you've
received so far.

jeff
Jose E. Marchesi Aug. 20, 2019, 9:14 p.m. UTC | #9
Hi Jeff.

    > This patch adds a port for the Linux kernel eBPF architecture to GCC.
    > 
    > ChangeLog:
    > 
    >   * configure.ac: Support for bpf-*-* targets.
    >   * configure: Regenerate.
    > 
    > contrib/ChangeLog:
    > 
    >   * config-list.mk (LIST): Disable go in bpf-*-* targets.
    > 
    > gcc/ChangeLog:
    > 
    >   * config.gcc: Support for bpf-*-* targets.
    >   * common/config/bpf/bpf-common.c: New file.
    >   * config/bpf/t-bpf: Likewise.
    >   * config/bpf/predicates.md: Likewise.
    >   * config/bpf/constraints.md: Likewise.
    >   * config/bpf/bpf.opt: Likewise.
    >   * config/bpf/bpf.md: Likewise.
    >   * config/bpf/bpf.h: Likewise.
    >   * config/bpf/bpf.c: Likewise.
    >   * config/bpf/bpf-protos.h: Likewise.
    >   * config/bpf/bpf-opts.h: Likewise.
    >   * config/bpf/bpf-helpers.h: Likewise.
    >   * config/bpf/bpf-helpers.def: Likewise.
    So I think various folks have already mentioned the configure rebuild
    issues, formatting and other stuff.  I'm going to try to keep them all
    in mind so that I don't duplicate anything.  If I do duplicate someone's
    comment, apologies in advance.
    
    At a high level I realize there's lots of things not supported due to
    the restricted environment it'll ultimately be used in.  However, you
    might want to consider extensions that would allow larger portions of
    the gcc testsuite to run and some kind of user mode simulator so that
    you can reasonably test the target.  Not a requirement, but could be
    useful (from experience :-)

I agree to both regards.

I have been thinking about Segher's suggestion on providing options to
lift some of the limitations, for compiler testing.  Unfortunately, many
of the restrictions are deeply rooted in the design of the
architecture... or the other way around.  Finding sane ways to implement
these extensions will be fun :)

As for the simulator, I have one, along with an initial GDB port... but
it doesn't work very well due to a particularly nasty bug in CGEN.  I
have a patch that seems to fix it but, as everything that touches cgen's
ifield handling code, it is difficult to be 100% sure about that, and I
also need to adapt some of the other existing cgen-based ports...  so it
will take a while before I have something that can run the GCC
testsuite.
    
    > diff --git a/gcc/common/config/bpf/bpf-common.c b/gcc/common/config/bpf/bpf-common.c
    > new file mode 100644
    > index 00000000000..a68feb62897
    > --- /dev/null
    > +++ b/gcc/common/config/bpf/bpf-common.c
    [ snip ]
    > +/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
    > +static const struct default_options bpf_option_optimization_table[] =
    > +  {
    > +    /* Enable -funroll-all-loops by default.  */
    > +    { OPT_LEVELS_ALL, OPT_funroll_all_loops, NULL, 1 },
    > +    /* Disable -fomit-frame-pointer by default.  */
    > +    { OPT_LEVELS_ALL, OPT_fomit_frame_pointer, NULL, 0 },
    > +    { OPT_LEVELS_NONE, 0, NULL, 0 }
    > +  };
    Curious about the motivation on the loop unrolling stuff.  In general we
    discourage targets from mucking around with the default
    flags/optimizations, but it is sometimes the right thing to do.

The kernel verifier doesn't allow backward jumps.

This may change at some point.  There is much discussion among the
kernel hackers in whether it is possible to allow bounded loops in a
safe way.  In that case, some of the restrictions may be lifted.

For now, only loops that can be peeled/massaged and then fully unrolled
are supported.

    Rather than -fomit-frame-pointer, I think you can use the
    FRAME_POINTER_REQUIRED hook if you always want a frame pointer.

Oh so specifying -fomit-frame-pointer there is redundant... good to
know.  Will remove it.
    
    > diff --git a/gcc/config/bpf/bpf-helpers.h b/gcc/config/bpf/bpf-helpers.h
    > new file mode 100644
    > index 00000000000..2fe96be7637
    > --- /dev/null
    > +++ b/gcc/config/bpf/bpf-helpers.h
    I can't remember, is this an installed header that consumers are
    expected to use?  If so you might want to be careful with polluting user
    code with BPF #defines such as BPF_ANY, BPF_NOEXIST, BPF_EXIST, etc.
    The #defines for mapping to the builtins are probably OK though.
    
Yes, it is a header file for consumers.  Unfortunately, the whole
purpose of the header is to provide an interface that is compatible with
the kernel's bpf_helpers.h (which at the moment is llvm-specific).  The
API is given :(

This is a point I plan to raise with the eBPF developers in a few weeks,
at the Linux Plumbers conference in Lisbon.
    
    > diff --git a/gcc/config/bpf/bpf.c b/gcc/config/bpf/bpf.c
    > new file mode 100644
    > index 00000000000..4a42259a9c3
    > --- /dev/null
    > +++ b/gcc/config/bpf/bpf.c
    > @@ -0,0 +1,1136 @@
    [ ... ]
    > +
    > +/* Return the builtin code corresponding to the kernel helper builtin
    > +   __builtin_NAME, or 0 if the name doesn't correspond to a kernel
    > +   helper builtin.  */
    > +
    > +static inline int
    > +bpf_helper_code (const char *name)
    > +{
    > +  int i;
    > +
    > +  for (i = 1; i < BPF_BUILTIN_HELPER_MAX; ++i)
    > +    {
    > +      if (strcmp (name, bpf_helper_names[i]) == 0)
    > +	return i;
    > +    }
    > +
    > +  return 0;
    > +}
    Does this get called often?  If so the linear search could end up being
    expensive from a compile-time standpoint.

It gets called per function call to a symbol with the form
__builtin_bpf_helper_*...  you think it is worth a hash?

    > +#define KERNEL_VERSION_CODE "__BPF_KERNEL_VERSION_CODE__="    
    > +    kernel_version_code
    > +      = (char *) alloca (strlen (KERNEL_VERSION_CODE) + 7 + 1);
    > +    strcpy (kernel_version_code, KERNEL_VERSION_CODE);
    > +#undef KERNEL_VERSION_CODE
    > +    strcat (kernel_version_code, version_code);
    > +    builtin_define (kernel_version_code);
    > +  }
    > +}
    Does builtin_define copy its argument?  If not, then I'd expect this to
    be problematical as the alloca'd space will be reclaimed.

Yes it does.
        
    > +
    > +static void
    > +bpf_compute_frame (void)
    > +{
    > +  int stack_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
    > +  int padding_locals, regno;
    > +
    > +  /* Set the space used in the stack by local variables.  This is
    > +     rounded up to respect the minimum stack alignment.  */
    > +  cfun->machine->local_vars_size = get_frame_size ();
    > +
    > +  padding_locals = cfun->machine->local_vars_size % stack_alignment;
    > +  if (padding_locals)
    > +    padding_locals = stack_alignment - padding_locals;
    > +
    > +  cfun->machine->local_vars_size += padding_locals;
    > +
    > +  /* Set the space used in the stack by callee-saved used registers in
    > +     the current function.  There is no need to round up, since the
    > +     registers are all 8 bytes wide.  */
    > +  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    > +    if ((!fixed_regs[regno]
    > +	 && df_regs_ever_live_p (regno)
    > +	 && !call_used_regs[regno])
    > +	|| (cfun->calls_alloca
    > +	    && regno == STACK_POINTER_REGNUM))
    > +      cfun->machine->callee_saved_reg_size += 8;
    > +
    > +  /* Check that the total size of the frame doesn't exceed the limit
    > +     imposed by eBPF: currently 512 bytes.  */
    > +  if ((cfun->machine->local_vars_size
    > +       + cfun->machine->callee_saved_reg_size) > 512)
    > +    {
    > +      static int stack_limit_exceeded = 0;
    > +
    > +      if (!stack_limit_exceeded)
    > +	error ("eBPF stack limit of 512 bytes exceeded");
    > +      stack_limit_exceeded = 1;
    > +    }
    Is the stack limit likely to change?  Would a param work better here
    which would allow us to accommodate such a change without having to
    re-release GCC?
    
It will probably be increased at some point.  Using a param sounds like
a good idea.  However...

The stack limit is associated with kernel version.  I guess we can just
set the appropriate defaults in bpf_option_override if we make it
variable, in case the user didn't specify a --param for it, so no
problem.

Also, if we allow the user to specify a stack frame bigger than 0x7fff,
bpf_expand_prologue will break.  Probably in that case we want to detect
this, warn and truncate to the -mkernel's default, also in
bpf_option_override.

Does that sound reasonable?

    > diff --git a/gcc/config/bpf/bpf.h b/gcc/config/bpf/bpf.h
    > +
    > +/**** Debugging Info ****/
    > +
    > +/* We cannot support DWARF2 because of the limitations of eBPF.  */
    > +#define DBX_DEBUGGING_INFO
    Umm, we're trying to get rid of DBX_DEBUGGING_INFO.  I'd rather not add
    another user at this point.  How tough would it be to support dwarf?

Yes relying on stabs sucks.

The main problem I found is that it is not possible to define a CFA, nor
to unwind frames in any way.  Given these limitations, is it still
possible to make GCC emit minimally useful DWARF, with locations and
such?  That would be great.
    
    > +;;;; Data movement
    > +
    > +(define_mode_iterator AMM [QI HI SI DI SF DF])
    > +
    > +(define_expand "mov<AMM:mode>"
    > +  [(set (match_operand:AMM 0 "general_operand" "")
    > +        (match_operand:AMM 1 "general_operand" ""))]
    > +        ""
    > +        "
    > +{
    > +    if (!register_operand(operands[0], <AMM:MODE>mode)
    > +        && !register_operand(operands[1], <AMM:MODE>mode))
    > +         operands[1] = force_reg (<AMM:MODE>mode, operands[1]); 
    > +
    > +    /* In cases where the moved entity is a constant address, we
    > +       need to emit an extra mov and modify the second operand to
    > +       obtain something like:
    > +
    > +         lddw %T, %1
    > +         ldxw %0, [%T+0]
    > +
    > +       Ditto for stores.  */
    > +
    > +    if (MEM_P (operands[1])
    > +        && CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
    > +      {
    > +         rtx tmp = gen_reg_rtx (DImode);
    > +
    > +         emit_move_insn (tmp, XEXP (operands[1], 0));
    > +         operands[1] = gen_rtx_MEM (<AMM:MODE>mode, tmp);
    > +      }
    > +
    > +    if (MEM_P (operands[0])
    > +        && CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
    > +      {
    > +         rtx tmp = gen_reg_rtx (DImode);
    > +  
    > +         emit_move_insn (tmp, XEXP (operands[0], 0));
    > +         operands[0] = gen_rtx_MEM (<AMM:MODE>mode, tmp);
    > +      }
    > +
    > +}")
    Hmm, what happens if you need to reload something from a constant
    address?  You can't call gen_reg_rtx once register allocation has
    started.  THe case where you need a scratch register really feels like
    you need to be defining secondary reloads.

I really have to think about this.  Richard's comment about the
possibility of not considering constant addresses legit already made me
ponder whether it would be better to use a different strategy here.

    Generally it looks pretty good.  I'd like to take more more looksie at
    patch #2 of the series after you've addressed the comments you've
    received so far.

Thanks, I appreciate.
Expect a V3 of the series soon :)
Jeff Law Aug. 20, 2019, 9:42 p.m. UTC | #10
On 8/20/19 3:14 PM, Jose E. Marchesi wrote:
> 
> Hi Jeff.
> 
>     > This patch adds a port for the Linux kernel eBPF architecture to GCC.
>     > 
>     > ChangeLog:
>     > 
>     >   * configure.ac: Support for bpf-*-* targets.
>     >   * configure: Regenerate.
>     > 
>     > contrib/ChangeLog:
>     > 
>     >   * config-list.mk (LIST): Disable go in bpf-*-* targets.
>     > 
>     > gcc/ChangeLog:
>     > 
>     >   * config.gcc: Support for bpf-*-* targets.
>     >   * common/config/bpf/bpf-common.c: New file.
>     >   * config/bpf/t-bpf: Likewise.
>     >   * config/bpf/predicates.md: Likewise.
>     >   * config/bpf/constraints.md: Likewise.
>     >   * config/bpf/bpf.opt: Likewise.
>     >   * config/bpf/bpf.md: Likewise.
>     >   * config/bpf/bpf.h: Likewise.
>     >   * config/bpf/bpf.c: Likewise.
>     >   * config/bpf/bpf-protos.h: Likewise.
>     >   * config/bpf/bpf-opts.h: Likewise.
>     >   * config/bpf/bpf-helpers.h: Likewise.
>     >   * config/bpf/bpf-helpers.def: Likewise.
>     So I think various folks have already mentioned the configure rebuild
>     issues, formatting and other stuff.  I'm going to try to keep them all
>     in mind so that I don't duplicate anything.  If I do duplicate someone's
>     comment, apologies in advance.
>     
>     At a high level I realize there's lots of things not supported due to
>     the restricted environment it'll ultimately be used in.  However, you
>     might want to consider extensions that would allow larger portions of
>     the gcc testsuite to run and some kind of user mode simulator so that
>     you can reasonably test the target.  Not a requirement, but could be
>     useful (from experience :-)
> 
> I agree to both regards.
> 
> I have been thinking about Segher's suggestion on providing options to
> lift some of the limitations, for compiler testing.  Unfortunately, many
> of the restrictions are deeply rooted in the design of the
> architecture... or the other way around.  Finding sane ways to implement
> these extensions will be fun :)
Hell, it's a virtual architecture.  I'd just make up new instructions
for the missing functionality, make them dependent on a flag.  I think
the PRU is in a similar position and uses that approach.  PTX might have
as well.


> 
> As for the simulator, I have one, along with an initial GDB port... but
> it doesn't work very well due to a particularly nasty bug in CGEN.  I
> have a patch that seems to fix it but, as everything that touches cgen's
> ifield handling code, it is difficult to be 100% sure about that, and I
> also need to adapt some of the other existing cgen-based ports...  so it
> will take a while before I have something that can run the GCC
> testsuite.
ACK.

>     Curious about the motivation on the loop unrolling stuff.  In general we
>     discourage targets from mucking around with the default
>     flags/optimizations, but it is sometimes the right thing to do.
> 
> The kernel verifier doesn't allow backward jumps.
Oh yea, I should have remembered that.  I think it came up in a
systemtap and/or ebpf+systemtap discussion at some point.

> 
> This may change at some point.  There is much discussion among the
> kernel hackers in whether it is possible to allow bounded loops in a
> safe way.  In that case, some of the restrictions may be lifted.
ACK.  It's an interesting problem.  Would it help if we could annotate
loops with bound information?  Not sure how to preserve that from gimple
down to assembly, but it's worth pondering.

>     Does this get called often?  If so the linear search could end up being
>     expensive from a compile-time standpoint.
> 
> It gets called per function call to a symbol with the form
> __builtin_bpf_helper_*...  you think it is worth of a hash?
Hard to tell.  Maybe leave it for now and revisit post integration and
real world feedback.

>     Is the stack limit likely to change?  Would a param work better here
>     which would allow us to accommodate such a change without having to
>     re-release GCC?
>     
> It will probably be increased at some point.  Using a param sounds like
> a good idea.  However...
> 
> The stack limit is associated with kernel version.  I guess we can just
> set the appropriate defaults in bpf_option_override if we make it
> variable, in case the user didn't specify a --param for it, so no
> problem.
> 
> Also, if we allow the user to specify a stack frame bigger than 0x7fff,
> bpf_expand_prologue will break.  Probably in that case we want to detect
> this, warn and truncate to the -mkernel's default, also in
> bpf_option_override.
> 
> Does that sound reasonable?
It does.  I think PARAMS have the ability to enforce a min/max and
specify a default.  So set a default to 512 since that's what works
everywhere and if the kernel bumps up, folks can just use the param to
allow more stack space.

> 
>     > diff --git a/gcc/config/bpf/bpf.h b/gcc/config/bpf/bpf.h
>     > +
>     > +/**** Debugging Info ****/
>     > +
>     > +/* We cannot support DWARF2 because of the limitations of eBPF.  */
>     > +#define DBX_DEBUGGING_INFO
>     Umm, we're trying to get rid of DBX_DEBUGGING_INFO.  I'd rather not add
>     another user at this point.  How tough would it be to support dwarf?
> 
> Yes relying on stabs sucks.
> 
> The main problem I found is that it is not possible to define a CFA, nor
> to unwind frames in any way.  Given these limitations, is it still
> possible to make GCC emit minimally useful DWARF, with locations and
> such?  That would be great.
I thought we had that ability in the past.  It may have bitrotted since
most of our targets have moved to supporting CFA and dwarf2.


>     Hmm, what happens if you need to reload something from a constant
>     address?  You can't call gen_reg_rtx once register allocation has
>     started.  THe case where you need a scratch register really feels like
>     you need to be defining secondary reloads.
> 
> I really have to think about this.  Richard's comment about the
> possibility of not considering constant addresses legit already made me
> ponder whether it would be better to use a different strategy here.
One approach would be to not allow them initially.  You can then add
them back with the necessary secondary reload support at a later time.

This is one of those areas where being able to run the testsuite really
helps :-)


Jeff
Segher Boessenkool Aug. 21, 2019, 11:13 a.m. UTC | #11
On Tue, Aug 20, 2019 at 11:14:35PM +0200, Jose E. Marchesi wrote:
> The kernel verifier doesn't allow backward jumps.
> 
> This may change at some point.  There is much discussion among the
> kernel hackers in whether it is possible to allow bounded loops in a
> safe way.  In that case, some of the restrictions may be lifted.
> 
> For now, only loops that can be peeled/massaged and then fully unrolled
> are supported.

You can also generate code like

x5:	call x4
	jump x1
x4:	call x2
x2:	call x1
x1:	do things once here
	ret

to do fixed number of iteration loops.  Disgusting?  You decide :-)

(Or is something in that not allowed by the verifier?)


Segher
Segher Boessenkool Aug. 21, 2019, 11:25 a.m. UTC | #12
On Tue, Aug 20, 2019 at 03:42:53PM -0600, Jeff Law wrote:
> > I have been thinking about Segher's suggestion on providing options to
> > lift some of the limitations, for compiler testing.  Unfortunately, many
> > of the restrictions are deeply rooted in the design of the
> > architecture... or the other way around.  Finding sane ways to implement
> > these extensions will be fun :)
> Hell, it's a virtual architecture.  I'd just make up new instructions
> for the missing functionality, make them dependent on an flag.  I think
> the PRU is in a similar position and uses that approach.  PTX might have
> as well.

This approach works well for simulators for physical architectures, too.

> > This may change at some point.  There is much discussion among the
> > kernel hackers in whether it is possible to allow bounded loops in a
> > safe way.  In that case, some of the restrictions may be lifted.
> ACK.  It's an interesting problem.  Would it help if we could annotate
> loops with bound information?  Not sure how to preserve that from gimple
> down to assembly, but it's worth pondering.

You probably should have machine insns that iterate a loop some number of
times given when you first start the loop (and cannot be changed later,
except maybe exiting from the loop).  Like "doloop" in GCC.  Maybe only
allow a constant number of times, if the verifier want to see that?

The only thing the verifier should be concerned with is how long the code
takes to run, or am I missing something?


Segher
Jonathan Corbet Aug. 21, 2019, 2:30 p.m. UTC | #13
On Tue, 20 Aug 2019 23:14:35 +0200
jose.marchesi@oracle.com (Jose E. Marchesi) wrote:

> The kernel verifier doesn't allow backward jumps.
> 
> This may change at some point.  There is much discussion among the
> kernel hackers in whether it is possible to allow bounded loops in a
> safe way.  In that case, some of the restrictions may be lifted.

Actually, bounded loops are supported and allowed in the 5.3 kernel.

	https://lwn.net/Articles/794934/

jon
Jose E. Marchesi Aug. 22, 2019, 2:11 a.m. UTC | #14
> +#undef TARGET_PASS_BY_REFERENCE
        > +#define TARGET_PASS_BY_REFERENCE bpf_pass_by_reference
        
        I might have misunderstood, but I thought from an earlier (IRC?)
        message, it wasn't possible for the callee to access the caller's
        frame, which was why you had the error about running out of argument
        registers.  If so, won't passing by reference make the argument
        inaccessible in practice?  I don't see what you gain by defining
        the hook, since I'd have assumed (at least after the fix above)
        that it would be better to pass by value and get an error about
        having no argument registers left.
    
    Yes.  I added that hook before I had the restriction of number of
    arguments in place.  Removing it.

Happy auto correction :)

A colleague (who actually _uses_ eBPF extensively, ahem) tells me that
the kernel verifier allows to pass addresses of the caller's stack
frame, tracking that it is a ptr to a stack location, and it knows which
stack it came from.  So it is indeed possible for the callee to access
the caller's frame, and therefore to pass arguments by reference.

On the downside, it is not possible for a callee to access the caller's
frame applying an offset to its frame pointer, because the stacks are
disjoint.  This means that most probably I will have to dedicate a real,
not eliminable register to act as the arg pointer, if I want to get rid
of the annoying limitation on the number of arguments...  and in order
to keep ABI compatibility with llvm built objects, this register is
gonna have to be %r5, i.e. the last register usable to pass arguments,
but it should be only used for that purpose if the function gets more
than 5 arguments...  sounds messy, but there is hope, yay!

However, unless someone comes with a magical macro to define or an
existing target doing the same thing, I am deferring attacking this
problem for later (TM) and for the time being I will keep both the
ability of passing aggregates and other big arguments by reference, and
the limit on number of arguments (this is what clang does.)

I hope that's ok for you people.
Salud!
Segher Boessenkool Aug. 22, 2019, 7:50 a.m. UTC | #15
Hi!

On Thu, Aug 22, 2019 at 04:11:46AM +0200, Jose E. Marchesi wrote:
> A colleague (who actually _uses_ eBPF extensively, ahem) tells me that
> the kernel verifier allows to pass addresses of the caller's stack
> frame, tracking that it is a ptr to a stack location, and it knows which
> stack it came from.  So it is indeed possible for the callee to access
> the caller's frame, and therefore to pass arguments by reference.

Good news for testability of the GCC port, and also good news for users,
who will have one less (HUGE) arbitrary restriction to deal with :-)

> On the downside, it is not possible for a callee to access the caller's
> frame applying an offset to its frame pointer,

That is true for many targets.

> because the stacks are disjoint.

And even that sometimes.

> This means that most probably I will have to dedicate a real,
> not eliminable register to act as the arg pointer, if I want to get rid
> of the annoying limitation on the number of arguments...  and in order
> to keep ABI compatibility with llvm built objects, this register is
> gonna have to be %r5, i.e. the last register usable to pass arguments,
> but it should be only used for that purpose if the function gets more
> than 5 arguments...  sounds messy, but there is hope, yay!

At *function entry* it is in %r5, but you can immediately copy that
elsewhere, at function start; there is no need to dedicate a hard
register to it.


Segher
Richard Sandiford Aug. 23, 2019, 2:19 p.m. UTC | #16
jose.marchesi@oracle.com (Jose E. Marchesi) writes:
>         > +#undef TARGET_PASS_BY_REFERENCE
>         > +#define TARGET_PASS_BY_REFERENCE bpf_pass_by_reference
>         
>         I might have misunderstood, but I thought from an earlier (IRC?)
>         message, it wasn't possible for the callee to access the caller's
>         frame, which was why you had the error about running out of argument
>         registers.  If so, won't passing by reference make the argument
>         inaccessible in practice?  I don't see what you gain by defining
>         the hook, since I'd have assumed (at least after the fix above)
>         that it would be better to pass by value and get an error about
>         having no argument registers left.
>     
>     Yes.  I added that hook before I had the restriction of number of
>     arguments in place.  Removing it.
>
> Happy auto correction :)
>
> A colleague (who actually _uses_ eBPF extensively, ahem) tells me that
> the kernel verifier allows to pass addresses of the caller's stack
> frame, tracking that it is a ptr to a stack location, and it knows which
> stack it came from.  So it is indeed possible for the callee to access
> the caller's frame, and therefore to pass arguments by reference.
>
> On the downside, it is not possible for a callee to access the caller's
> frame applying an offset to its frame pointer, because the stacks are
> disjoint.  This means that most probably I will have to dedicate a real,
> not eliminable register to act as the arg pointer, if I want to get rid
> of the annoying limitation on the number of arguments...  and in order
> to keep ABI compatibility with llvm built objects, this register is
> gonna have to be %r5, i.e. the last register usable to pass arguments,
> but it should be only used for that purpose if the function gets more
> than 5 arguments...  sounds messy, but there is hope, yay!
>
> However, unless someone comes with a magical macro to define or an
> existing target doing the same thing, I am deferring attacking this
> problem for later (TM) and for the time being I will keep both the
> ability of passing aggregates and other big arguments by reference, and
> the limit on number of arguments (this is what clang does.)
>
> I hope that's ok for you people.

Sounds good :-)
Joseph Myers Aug. 23, 2019, 5:17 p.m. UTC | #17
On Sat, 17 Aug 2019, Jose E. Marchesi wrote:

> +(define_insn "*branch_on_di"
> +  [(set (pc)
> +	(if_then_else (match_operator 3 "ordered_comparison_operator"
> +			 [(match_operand:DI 0 "register_operand" "r")
> +			  (match_operand:DI 1 "reg_or_imm_operand" "rI")])
> +		      (label_ref (match_operand 2 "" ""))
> +		      (pc)))]
> +  ""
> +{
> +  int code = GET_CODE (operands[3]);
> +
> +  switch (code)
> +  {
> +  case EQ: return "jeq\t%0,%1,%2"; break;
> +  case NE: return "jne\t%0,%1,%2"; break;
> +  case LT: return "jslt\t%0,%1,%2"; break;
> +  case LE: return "jsle\t%0,%1,%2"; break;
> +  case GT: return "jsgt\t%0,%1,%2"; break;
> +  case GE: return "jsge\t%0,%1,%2"; break;
> +  case LTU: return "jlt\t%0,%1,%2"; break;
> +  case LEU: return "jle\t%0,%1,%2"; break;
> +  case GTU: return "jgt\t%0,%1,%2"; break;
> +  case GEU: return "jge\t%0,%1,%2"; break;
> +  default:
> +    error ("unknown comparison code %d in *branch_on_di\n", code);

This error message isn't written in terms meaningful to users (that is, 
saying what is wrong with their source code).  Calls to error () should 
only be for errors meaningful to users (and shouldn't appear in .md files 
at all, because exgettext doesn't extract messages from .md files for 
translation, and any error that late in compilation is liable not to have 
a very meaningful location either).  Internal errors - errors indicating a 
bug in the compiler rather than a problem with the user's program - need 
to use other functions such as internal_error.
Jose E. Marchesi Aug. 23, 2019, 7:18 p.m. UTC | #18
> +(define_insn "*branch_on_di"
    > +  [(set (pc)
    > +	(if_then_else (match_operator 3 "ordered_comparison_operator"
    > +			 [(match_operand:DI 0 "register_operand" "r")
    > +			  (match_operand:DI 1 "reg_or_imm_operand" "rI")])
    > +		      (label_ref (match_operand 2 "" ""))
    > +		      (pc)))]
    > +  ""
    > +{
    > +  int code = GET_CODE (operands[3]);
    > +
    > +  switch (code)
    > +  {
    > +  case EQ: return "jeq\t%0,%1,%2"; break;
    > +  case NE: return "jne\t%0,%1,%2"; break;
    > +  case LT: return "jslt\t%0,%1,%2"; break;
    > +  case LE: return "jsle\t%0,%1,%2"; break;
    > +  case GT: return "jsgt\t%0,%1,%2"; break;
    > +  case GE: return "jsge\t%0,%1,%2"; break;
    > +  case LTU: return "jlt\t%0,%1,%2"; break;
    > +  case LEU: return "jle\t%0,%1,%2"; break;
    > +  case GTU: return "jgt\t%0,%1,%2"; break;
    > +  case GEU: return "jge\t%0,%1,%2"; break;
    > +  default:
    > +    error ("unknown comparison code %d in *branch_on_di\n", code);
    
    This error message isn't written in terms meaningful to users (that is, 
    saying what is wrong with their source code).  Calls to error () should 
    only be for errors meaningful to users (and shouldn't appear in .md files 
    at all, because exgettext doesn't extract messages from .md files for 
    translation, and any error that late in compilation is liable not to have 
    a very meaningful location either).  Internal errors - errors indicating a 
    bug in the compiler rather than a problem with the user's program - need 
    to use other functions such as internal_error.

Yeah that's actually a gcc_unreachable, or fatal error.
Thanks for noticing!
diff mbox series

Patch

diff --git a/configure b/configure
index 63b1e33f41c..4f8e68a4085 100755
--- a/configure
+++ b/configure
@@ -754,6 +754,7 @@  infodir
 docdir
 oldincludedir
 includedir
+runstatedir
 localstatedir
 sharedstatedir
 sysconfdir
@@ -919,6 +920,7 @@  datadir='${datarootdir}'
 sysconfdir='${prefix}/etc'
 sharedstatedir='${prefix}/com'
 localstatedir='${prefix}/var'
+runstatedir='${localstatedir}/run'
 includedir='${prefix}/include'
 oldincludedir='/usr/include'
 docdir='${datarootdir}/doc/${PACKAGE}'
@@ -1171,6 +1173,15 @@  do
   | -silent | --silent | --silen | --sile | --sil)
     silent=yes ;;
 
+  -runstatedir | --runstatedir | --runstatedi | --runstated \
+  | --runstate | --runstat | --runsta | --runst | --runs \
+  | --run | --ru | --r)
+    ac_prev=runstatedir ;;
+  -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
+  | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
+  | --run=* | --ru=* | --r=*)
+    runstatedir=$ac_optarg ;;
+
   -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
     ac_prev=sbindir ;;
   -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@@ -1308,7 +1319,7 @@  fi
 for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
 		datadir sysconfdir sharedstatedir localstatedir includedir \
 		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
-		libdir localedir mandir
+		libdir localedir mandir runstatedir
 do
   eval ac_val=\$$ac_var
   # Remove trailing slashes.
@@ -1468,6 +1479,7 @@  Fine tuning of the installation directories:
   --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
   --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
   --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
   --libdir=DIR            object code libraries [EPREFIX/lib]
   --includedir=DIR        C header files [PREFIX/include]
   --oldincludedir=DIR     C header files for non-gcc [/usr/include]
@@ -3353,6 +3365,9 @@  case "${target}" in
     # No hosted I/O support.
     noconfigdirs="$noconfigdirs target-libssp"
     ;;
+  bpf-*-*)
+    noconfigdirs="$noconfigdirs target-libssp"
+    ;;
   powerpc-*-aix* | rs6000-*-aix*)
     noconfigdirs="$noconfigdirs target-libssp"
     ;;
@@ -3387,12 +3402,43 @@  if test "${ENABLE_LIBSTDCXX}" = "default" ; then
     avr-*-*)
       noconfigdirs="$noconfigdirs target-libstdc++-v3"
       ;;
+    bpf-*-*)
+      noconfigdirs="$noconfigdirs target-libstdc++-v3"
+      ;;
     ft32-*-*)
       noconfigdirs="$noconfigdirs target-libstdc++-v3"
       ;;
   esac
 fi
 
+# Disable C++ on systems where it is known to not work.
+# For testing, you can override this with --enable-languages=c++.
+case ,${enable_languages}, in
+  *,c++,*)
+    ;;
+  *)
+      case "${target}" in
+        bpf-*-*)
+          unsupported_languages="$unsupported_languages c++"
+          ;;
+      esac
+      ;;
+esac
+
+# Disable Objc on systems where it is known to not work.
+# For testing, you can override this with --enable-languages=objc.
+case ,${enable_languages}, in
+  *,objc,*)
+    ;;
+  *)
+      case "${target}" in
+        bpf-*-*)
+          unsupported_languages="$unsupported_languages objc"
+          ;;
+      esac
+      ;;
+esac
+
 # Disable D on systems where it is known to not work.
 # For testing, you can override this with --enable-languages=d.
 case ,${enable_languages}, in
@@ -3402,6 +3448,9 @@  case ,${enable_languages}, in
     case "${target}" in
       *-*-darwin*)
 	unsupported_languages="$unsupported_languages d"
+        ;;
+      bpf-*-*)
+	unsupported_languages="$unsupported_languages d"
 	;;
     esac
     ;;
@@ -3433,6 +3482,9 @@  case "${target}" in
     # See <http://gcc.gnu.org/ml/gcc-patches/2004-11/msg00572.html>.
     unsupported_languages="$unsupported_languages fortran"
     ;;
+  bpf-*-*)
+    unsupported_languages="$unsupported_languages fortran"
+    ;;
 esac
 
 # Disable libffi for some systems.
@@ -3479,6 +3531,9 @@  case "${target}" in
   arm*-*-symbianelf*)
     noconfigdirs="$noconfigdirs target-libffi"
     ;;
+  bpf-*-*)
+    noconfigdirs="$noconfigdirs target-libffi"
+    ;;
   cris-*-* | crisv32-*-*)
     case "${target}" in
       *-*-linux*)
@@ -3525,7 +3580,7 @@  esac
 # Disable the go frontend on systems where it is known to not work. Please keep
 # this in sync with contrib/config-list.mk.
 case "${target}" in
-*-*-darwin* | *-*-cygwin* | *-*-mingw*)
+*-*-darwin* | *-*-cygwin* | *-*-mingw* | bpf-* )
     unsupported_languages="$unsupported_languages go"
     ;;
 esac
@@ -3541,6 +3596,9 @@  if test x$enable_libgo = x; then
     *-*-cygwin* | *-*-mingw*)
 	noconfigdirs="$noconfigdirs target-libgo"
 	;;
+    bpf-*-*)
+        noconfigdirs="$noconfigdirs target-libgo"
+        ;;
     esac
 fi
 
@@ -3612,6 +3670,9 @@  case "${target}" in
   sparc-*-sunos4*)
     noconfigdirs="$noconfigdirs target-newlib target-libgloss"
     ;;
+  bpf-*-*)
+    noconfigdirs="$noconfigdirs target-newlib target-libgloss"
+    ;;
   *-*-aix*)
     noconfigdirs="$noconfigdirs target-newlib target-libgloss"
     ;;
@@ -3725,6 +3786,9 @@  case "${target}" in
     # newlib is not 64 bit ready
     noconfigdirs="$noconfigdirs target-newlib target-libgloss"
     ;;
+  bpf-*-*)
+    noconfigdirs="$noconfigdirs target-libobjc target-libbacktrace"
+    ;;
   sh*-*-pe|mips*-*-pe|*arm-wince-pe)
     noconfigdirs="$noconfigdirs tcl tk itcl libgui sim"
     ;;
diff --git a/configure.ac b/configure.ac
index dcc89fbdde1..b7ca25c04c1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -638,6 +638,9 @@  case "${target}" in
     # No hosted I/O support.
     noconfigdirs="$noconfigdirs target-libssp"
     ;;
+  bpf-*-*)
+    noconfigdirs="$noconfigdirs target-libssp"
+    ;;
   powerpc-*-aix* | rs6000-*-aix*)
     noconfigdirs="$noconfigdirs target-libssp"
     ;;
@@ -672,12 +675,43 @@  if test "${ENABLE_LIBSTDCXX}" = "default" ; then
     avr-*-*)
       noconfigdirs="$noconfigdirs target-libstdc++-v3"
       ;;
+    bpf-*-*)
+      noconfigdirs="$noconfigdirs target-libstdc++-v3"
+      ;;
     ft32-*-*)
       noconfigdirs="$noconfigdirs target-libstdc++-v3"
       ;;
   esac
 fi
 
+# Disable C++ on systems where it is known to not work.
+# For testing, you can override this with --enable-languages=c++.
+case ,${enable_languages}, in
+  *,c++,*)
+    ;;
+  *)
+      case "${target}" in
+        bpf-*-*)
+          unsupported_languages="$unsupported_languages c++"
+          ;;
+      esac
+      ;;
+esac
+
+# Disable Objc on systems where it is known to not work.
+# For testing, you can override this with --enable-languages=objc.
+case ,${enable_languages}, in
+  *,objc,*)
+    ;;
+  *)
+      case "${target}" in
+        bpf-*-*)
+          unsupported_languages="$unsupported_languages objc"
+          ;;
+      esac
+      ;;
+esac
+
 # Disable D on systems where it is known to not work.
 # For testing, you can override this with --enable-languages=d.
 case ,${enable_languages}, in
@@ -687,6 +721,9 @@  case ,${enable_languages}, in
     case "${target}" in
       *-*-darwin*)
 	unsupported_languages="$unsupported_languages d"
+        ;;
+      bpf-*-*)
+	unsupported_languages="$unsupported_languages d"
 	;;
     esac
     ;;
@@ -715,6 +752,9 @@  case "${target}" in
     # See <http://gcc.gnu.org/ml/gcc-patches/2004-11/msg00572.html>.
     unsupported_languages="$unsupported_languages fortran"
     ;;
+  bpf-*-*)
+    unsupported_languages="$unsupported_languages fortran"
+    ;;
 esac
 
 # Disable libffi for some systems.
@@ -761,6 +801,9 @@  case "${target}" in
   arm*-*-symbianelf*)
     noconfigdirs="$noconfigdirs target-libffi"
     ;;
+  bpf-*-*)
+    noconfigdirs="$noconfigdirs target-libffi"
+    ;;
   cris-*-* | crisv32-*-*)
     case "${target}" in
       *-*-linux*)
@@ -807,7 +850,7 @@  esac
 # Disable the go frontend on systems where it is known to not work. Please keep
 # this in sync with contrib/config-list.mk.
 case "${target}" in
-*-*-darwin* | *-*-cygwin* | *-*-mingw*)
+*-*-darwin* | *-*-cygwin* | *-*-mingw* | bpf-* )
     unsupported_languages="$unsupported_languages go"
     ;;
 esac
@@ -823,6 +866,9 @@  if test x$enable_libgo = x; then
     *-*-cygwin* | *-*-mingw*)
 	noconfigdirs="$noconfigdirs target-libgo"
 	;;
+    bpf-*-*)
+        noconfigdirs="$noconfigdirs target-libgo"
+        ;;
     esac
 fi
 
@@ -894,6 +940,9 @@  case "${target}" in
   sparc-*-sunos4*)
     noconfigdirs="$noconfigdirs target-newlib target-libgloss"
     ;;
+  bpf-*-*)
+    noconfigdirs="$noconfigdirs target-newlib target-libgloss"
+    ;;
   *-*-aix*)
     noconfigdirs="$noconfigdirs target-newlib target-libgloss"
     ;;
@@ -1007,6 +1056,9 @@  case "${target}" in
     # newlib is not 64 bit ready
     noconfigdirs="$noconfigdirs target-newlib target-libgloss"
     ;;
+  bpf-*-*)
+    noconfigdirs="$noconfigdirs target-libobjc target-libbacktrace"
+    ;;
   sh*-*-pe|mips*-*-pe|*arm-wince-pe)
     noconfigdirs="$noconfigdirs tcl tk itcl libgui sim"
     ;;
diff --git a/contrib/config-list.mk b/contrib/config-list.mk
index 69c826e649a..aa9fdb64eaf 100644
--- a/contrib/config-list.mk
+++ b/contrib/config-list.mk
@@ -123,7 +123,7 @@  $(LIST): make-log-dir
 		TGT=`echo $@ | awk 'BEGIN { FS = "OPT" }; { print $$1 }'` &&			\
 		TGT=`$(GCC_SRC_DIR)/config.sub $$TGT` &&					\
 		case $$TGT in									\
-			*-*-darwin* | *-*-cygwin* | *-*-mingw* | *-*-aix*)			\
+			*-*-darwin* | *-*-cygwin* | *-*-mingw* | *-*-aix* | bpf-*-* )			\
 				ADDITIONAL_LANGUAGES="";					\
 				;;								\
 			*)									\
diff --git a/gcc/common/config/bpf/bpf-common.c b/gcc/common/config/bpf/bpf-common.c
new file mode 100644
index 00000000000..a68feb62897
--- /dev/null
+++ b/gcc/common/config/bpf/bpf-common.c
@@ -0,0 +1,57 @@ 
+/* Common hooks for eBPF.
+   Copyright (C) 2019 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "common/common-target.h"
+#include "common/common-target-def.h"
+#include "config/bpf/bpf-protos.h"
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS 0
+
+/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
+static const struct default_options bpf_option_optimization_table[] =
+  {
+    /* Enable -funroll-all-loops by default.  */
+    { OPT_LEVELS_ALL, OPT_funroll_all_loops, NULL, 1 },
+    /* Disable -fomit-frame-pointer by default.  */
+    { OPT_LEVELS_ALL, OPT_fomit_frame_pointer, NULL, 0 },
+    { OPT_LEVELS_NONE, 0, NULL, 0 }
+  };
+
+#undef TARGET_OPTION_OPTIMIZATION_TABLE
+#define TARGET_OPTION_OPTIMIZATION_TABLE bpf_option_optimization_table
+
+/* Implement TARGET_OPTION_DEFAULT_PARAMS.  */
+
+static void
+bpf_option_default_params (void)
+{
+  /* XXX large-stack-frame = 512 bytes */
+  /* XXX max-unrolled-insns */
+  /* XXX max-unroll-times */
+}
+
+#undef TARGET_OPTION_DEFAULT_PARAMS
+#define TARGET_OPTION_DEFAULT_PARAMS bpf_option_default_params
+
+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 40cbc52dc99..60673a422a7 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -360,6 +360,9 @@  avr-*-*)
 bfin*-*)
 	cpu_type=bfin
 	;;
+bpf-*-*)
+	cpu_type=bpf
+	;;
 crisv32-*)
 	cpu_type=cris
 	;;
@@ -1310,6 +1313,12 @@  bfin*-*)
 	use_collect2=no
 	use_gcc_stdint=wrap
 	;;
+bpf-*-*)
+        tmake_file="${tmake_file} bpf/t-bpf"
+        use_collect2=no
+        extra_headers="bpf-helpers.h"
+        use_gcc_stdint=provide
+        ;;
 cr16-*-elf)
         tm_file="elfos.h ${tm_file} newlib-stdint.h"
         tmake_file="${tmake_file} cr16/t-cr16 "
diff --git a/gcc/config/bpf/bpf-helpers.def b/gcc/config/bpf/bpf-helpers.def
new file mode 100644
index 00000000000..4f6b8eb0892
--- /dev/null
+++ b/gcc/config/bpf/bpf-helpers.def
@@ -0,0 +1,194 @@ 
+/* Kernel helpers database.
+   Copyright (C) 2019 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+/* This file contains the definition of the kernel helpers that are
+   available to eBPF programs.
+
+   The primary source for information on kernel helpers is the
+   linux/include/uapi/linux/bpf.h file in the Linux source tree.
+   Please keep this database in sync.
+
+   The first column is the first kernel version featuring the helper
+   function.  This should be an enumerate from bpf_kernel_version,
+   defined in bpf-opts.h.  Note that the backend assumes that helpers
+   never get deprecated in the kernel.  If that eventually happens,
+   then we will need to use a bitmask here instead of an enumerate.
+
+   The second column is the constant-name for the helper.
+   The third column is the program-name of the helper.
+
+   The fourth column is a list of names describing the types of the
+   values returned and accepted by the helper, in one of these forms:
+
+     TYPES (type1, type2, ..., 0)
+     VTYPES (type1, type2, ..., 0)
+
+   VTYPES should be used should the helper accept a variable number of
+   arguments, TYPES otherwise.  The valid type names are:
+
+     `vt' for void.
+     `it' for signed int.
+     `ut' for unsigned int.
+     `pt' for *void.
+     `cpt' for const *void.
+     `st' for short int.
+     `ust' for unsigned short int.
+     `cst' for const char *.
+     `ullt' for unsigned long long.
+     `llt' for long long.
+     `u32t' for uint32.
+     `u64t' for uint64.
+  
+   In type descriptions, the first entry corresponds to the value
+   returned by the helper.  Subsequent names correspond to the helper
+   arguments.  Finally, a 0 should close the list.
+
+   VERY IMPORTANT: the helper entries should be listed in the same
+   order as in the definition of __BPF_FUNC_MAPPER in
+   linux/include/uapi/linux/bpf.h!  */
+
+DEF_HELPER (LINUX_V4_0, MAP_LOOKUP_ELEM, map_lookup_elem, TYPES (pt, pt, pt, 0))
+DEF_HELPER (LINUX_V4_0, MAP_UPDATE_ELEM, map_update_elem, TYPES (it, pt, pt, pt, ullt, 0))
+DEF_HELPER (LINUX_V4_0, MAP_DELETE_ELEM, map_delete_elem, TYPES (it, pt, pt, 0))
+DEF_HELPER (LINUX_V4_1, PROBE_READ, probe_read, TYPES (it, pt, ut, cpt, 0))
+DEF_HELPER (LINUX_V4_1, KTIME_GET_NS, ktime_get_ns, TYPES (ullt, 0))
+DEF_HELPER (LINUX_V4_1, TRACE_PRINTK, trace_printk, VTYPES (it, cst, it, 0))
+DEF_HELPER (LINUX_V4_1, GET_PRANDOM_U32, get_prandom_u32, TYPES (ullt, 0))
+DEF_HELPER (LINUX_V4_1, GET_SMP_PROCESSOR_ID, get_smp_processor_id, TYPES (ullt, 0))
+DEF_HELPER (LINUX_V4_1, SKB_STORE_BYTES, skb_store_bytes, TYPES (it, pt, it, pt, it, it, 0))
+DEF_HELPER (LINUX_V4_1, L3_CSUM_REPLACE, l3_csum_replace, TYPES (it, pt, it, it ,it ,it, 0))
+DEF_HELPER (LINUX_V4_1, L4_CSUM_REPLACE, l4_csum_replace, TYPES (it, pt, it, it, it, it, 0))
+DEF_HELPER (LINUX_V4_2, TAIL_CALL, tail_call, TYPES (vt, pt, pt, it, 0))
+DEF_HELPER (LINUX_V4_2, CLONE_REDIRECT, clone_redirect, TYPES (it, pt, it, it, 0))
+DEF_HELPER (LINUX_V4_2, GET_CURRENT_PID_TGID, get_current_pid_tgid, TYPES (ullt, 0))
+DEF_HELPER (LINUX_V4_2, GET_CURRENT_UID_GID, get_current_uid_gid, TYPES (ullt, 0))
+DEF_HELPER (LINUX_V4_2, GET_CURRENT_COMM, get_current_comm, TYPES (it, pt, it, 0))
+DEF_HELPER (LINUX_V4_3, GET_CGROUP_CLASSID, get_cgroup_classid, TYPES (it, pt, 0))
+DEF_HELPER (LINUX_V4_3, SKB_VLAN_PUSH, skb_vlan_push, TYPES (it, pt, st, ust, 0))
+DEF_HELPER (LINUX_V4_3, SKB_VLAN_POP, skb_vlan_pop, TYPES (it, pt, 0))
+DEF_HELPER (LINUX_V4_3, SKB_GET_TUNNEL_KEY, skb_get_tunnel_key, TYPES (it, pt, pt, it, it, 0))
+DEF_HELPER (LINUX_V4_3, SKB_SET_TUNNEL_KEY, skb_set_tunnel_key, TYPES (it, pt, pt, it, it, 0))
+DEF_HELPER (LINUX_V4_3, PERF_EVENT_READ, perf_event_read, TYPES (ullt, pt, ullt, 0))
+DEF_HELPER (LINUX_V4_4, REDIRECT, redirect, TYPES (it, it, it, 0))
+DEF_HELPER (LINUX_V4_4, GET_ROUTE_REALM, get_route_realm, TYPES (ut, pt, 0))
+DEF_HELPER (LINUX_V4_4, PERF_EVENT_OUTPUT, perf_event_output, \
+	    TYPES (it, pt, pt, ullt, pt, it, 0))
+DEF_HELPER (LINUX_V4_5, SKB_LOAD_BYTES, skb_load_bytes, TYPES (it, pt, it, pt, it, 0))
+DEF_HELPER (LINUX_V4_6, GET_STACKID, get_stackid, TYPES (it, pt, pt, it, 0))
+DEF_HELPER (LINUX_V4_6, CSUM_DIFF, csum_diff, TYPES (it, pt, it, pt, it, it, 0))
+DEF_HELPER (LINUX_V4_6, SKB_GET_TUNNEL_OPT, skb_get_tunnel_opt, TYPES (it, pt, pt, it, 0))
+DEF_HELPER (LINUX_V4_6, SKB_SET_TUNNEL_OPT, skb_set_tunnel_opt, TYPES (it, pt, pt, it, 0))
+DEF_HELPER (LINUX_V4_8, SKB_CHANGE_PROTO, skb_change_proto, TYPES (it, pt, st, u64t, 0))
+DEF_HELPER (LINUX_V4_8, SKB_CHANGE_TYPE, skb_change_type, TYPES (it, pt, u32t, 0))
+DEF_HELPER (LINUX_V4_8, SKB_UNDER_CGROUP, skb_under_cgroup, TYPES (it, pt, pt, it, 0))
+DEF_HELPER (LINUX_V4_8, GET_HASH_RECALC, get_hash_recalc, TYPES (ut, pt, 0))
+DEF_HELPER (LINUX_V4_8, GET_CURRENT_TASK, get_current_task, TYPES (ullt, pt, 0))
+DEF_HELPER (LINUX_V4_8, PROBE_WRITE_USER, probe_write_user, TYPES (it, pt, cpt, ut, 0))
+DEF_HELPER (LINUX_V4_9, CURRENT_TASK_UNDER_CGROUP, current_task_under_cgroup, \
+	    TYPES (it, pt, it, 0))
+DEF_HELPER (LINUX_V4_9, SKB_CHANGE_TAIL, skb_change_tail, TYPES (it, pt, ut, u64t, 0))
+DEF_HELPER (LINUX_V4_9, SKB_PULL_DATA, skb_pull_data, TYPES (it, pt, it, 0))
+DEF_HELPER (LINUX_V4_9, CSUM_UPDATE, csum_update, TYPES (llt, pt, u32t, 0))
+DEF_HELPER (LINUX_V4_9, SET_HASH_INVALID, set_hash_invalid, TYPES (vt, pt, 0))
+DEF_HELPER (LINUX_V4_10, GET_NUMA_NODE_ID, get_numa_node_id, TYPES (it, 0))
+DEF_HELPER (LINUX_V4_10, SKB_CHANGE_HEAD, skb_change_head, TYPES (it, pt, it, it, 0))
+DEF_HELPER (LINUX_V4_10, XDP_ADJUST_HEAD, xdp_adjust_head, TYPES (it, pt, it, 0))
+DEF_HELPER (LINUX_V4_11, PROBE_READ_STR, probe_read_str, TYPES (it, pt, u32t, cpt, 0))
+DEF_HELPER (LINUX_V4_12, GET_SOCKET_COOKIE, get_socket_cookie, TYPES (it, pt, 0))
+DEF_HELPER (LINUX_V4_12, GET_SOCKET_UID, get_socket_uid, TYPES (ut, pt, 0))
+DEF_HELPER (LINUX_V4_13, SET_HASH, set_hash, TYPES (ut, pt, u32t, 0))
+DEF_HELPER (LINUX_V4_13, SETSOCKOPT, setsockopt, TYPES (it, pt, it, it, pt, it, 0))
+DEF_HELPER (LINUX_V4_13, SKB_ADJUST_ROOM, skb_adjust_room, TYPES (it, pt, st, u32t, ullt, 0))
+DEF_HELPER (LINUX_V4_14, REDIRECT_MAP, redirect_map, TYPES (it, pt, it, it, 0))
+DEF_HELPER (LINUX_V4_14, SK_REDIRECT_MAP, sk_redirect_map, TYPES (it, pt, pt, it, it, 0))
+DEF_HELPER (LINUX_V4_14, SOCK_MAP_UPDATE, sock_map_update, TYPES (it, pt, pt, pt, ullt, 0))
+DEF_HELPER (LINUX_V4_15, XDP_ADJUST_META, xdp_adjust_meta, TYPES (it, pt, it, 0))
+DEF_HELPER (LINUX_V4_15, PERF_EVENT_READ_VALUE, perf_event_read_value,
+	    TYPES (it, pt, ullt, pt, ut, 0))
+DEF_HELPER (LINUX_V4_15, PERF_PROG_READ_VALUE, perf_prog_read_value,
+	    TYPES (it, pt, pt, ut, 0))
+DEF_HELPER (LINUX_V4_15, GETSOCKOPT, getsockopt, TYPES (it, pt, it, it, pt, it, 0))
+
+DEF_HELPER (LINUX_V4_16, OVERRIDE_RETURN, override_return, TYPES (it, pt, ult, 0))
+DEF_HELPER (LINUX_V4_16, SOCK_OPS_CB_FLAGS_SET, sock_ops_cb_flags_set, TYPES (it, pt, it, 0))
+DEF_HELPER (LINUX_V4_17, MSG_REDIRECT_MAP, msg_redirect_map, TYPES (it, pt, pt, it, it, 0))
+DEF_HELPER (LINUX_V4_17, MSG_APPLY_BYTES, msg_apply_bytes, TYPES (it, pt, it, 0))
+DEF_HELPER (LINUX_V4_17, MSG_CORK_BYTES, msg_cork_bytes, TYPES (it, pt, it, 0))
+DEF_HELPER (LINUX_V4_17, MSG_PULL_DATA, msg_pull_data, TYPES (it, pt, it, it, it, 0))
+DEF_HELPER (LINUX_V4_17, BIND, bind, TYPES (it, pt, pt, it, 0))
+DEF_HELPER (LINUX_V4_18, XDP_ADJUST_TAIL, xdp_adjust_tail, TYPES (it, pt, it, 0))
+DEF_HELPER (LINUX_V4_18, SKB_GET_XFRM_STATE,
+	    skb_get_xfrm_state, TYPES (it, pt, it, pt, it, it, 0))
+DEF_HELPER (LINUX_V4_18, GET_STACK, get_stack, TYPES (it, pt, pt, it, it, 0))
+DEF_HELPER (LINUX_V4_18, SKB_LOAD_BYTES_RELATIVE, skb_load_bytes_relative,
+	    TYPES (it, pt, it, pt, it, ut, 0))
+DEF_HELPER (LINUX_V4_18, FIB_LOOKUP, fib_lookup, TYPES (it, pt, pt, it, ut, 0))
+DEF_HELPER (LINUX_V4_18, SOCK_HASH_UPDATE, sock_hash_update, TYPES (it, pt, pt, pt, ullt, 0))
+DEF_HELPER (LINUX_V4_18, MSG_REDIRECT_HASH, msg_redirect_hash, TYPES (it, pt, pt, pt, it, 0))
+DEF_HELPER (LINUX_V4_18, SK_REDIRECT_HASH, sk_redirect_hash, TYPES (it, pt, pt, pt, it, 0))
+DEF_HELPER (LINUX_V4_18, LWT_PUSH_ENCAP, lwt_push_encap, TYPES (it, pt, ut, pt, ut, 0))
+DEF_HELPER (LINUX_V4_18, LWT_SEG6_STORE_BYTES, lwt_seg6_store_bytes,
+	    TYPES (it, pt, ut, pt, ut, 0))
+DEF_HELPER (LINUX_V4_18, LWT_SEG6_ADJUST_SRH, lwt_seg6_adjust_srh, TYPES (it, pt, ut, ut, 0))
+DEF_HELPER (LINUX_V4_18, LWT_SEG6_ACTION, lwt_seg6_action, TYPES (it, pt, ut, pt, ut, 0))
+DEF_HELPER (LINUX_V4_18, RC_REPEAT, rc_repeat, TYPES (it, pt, 0))
+DEF_HELPER (LINUX_V4_18, RC_KEYDOWN, rc_keydown, TYPES (it, pt, ut, ullt, ut, 0))
+DEF_HELPER (LINUX_V4_18, SKB_CGROUP_ID, skb_cgroup_id, TYPES (ullt, pt, 0))
+DEF_HELPER (LINUX_V4_18, GET_CURRENT_CGROUP_ID, get_current_cgroup_id, TYPES (ullt, 0))
+DEF_HELPER (LINUX_V4_19, GET_LOCAL_STORAGE, get_local_storage, TYPES (pt, pt, ullt, 0))
+DEF_HELPER (LINUX_V4_19, SK_SELECT_REUSEPORT, sk_select_reuseport,
+	    TYPES (it, pt, pt, pt, ut, 0))
+DEF_HELPER (LINUX_V4_19, SKB_ANCESTOR_CGROUP_ID, skb_ancestor_cgroup_id,
+	    TYPES (ullt, pt, it, 0))
+DEF_HELPER (LINUX_V4_20, SK_LOOKUP_TCP, sk_lookup_tcp, TYPES (pt, pt, pt, it, ullt, ullt, 0))
+DEF_HELPER (LINUX_V4_20, SK_LOOKUP_UDP, sk_lookup_udp, TYPES (pt, pt, pt, it, ullt, ullt, 0))
+DEF_HELPER (LINUX_V4_20, SK_RELEASE, sk_release, TYPES (it, pt, 0))
+DEF_HELPER (LINUX_V4_20, MAP_PUSH_ELEM, map_push_elem, TYPES (it, pt, pt, ullt, 0))
+DEF_HELPER (LINUX_V4_20, MAP_POP_ELEM, map_pop_elem, TYPES (it, pt, pt, 0))
+DEF_HELPER (LINUX_V4_20, MAP_PEEK_ELEM, map_peek_elem, TYPES (it, pt, pt, 0))
+DEF_HELPER (LINUX_V4_20, MSG_PUSH_DATA, msg_push_data, TYPES (it, pt, it, it, it, 0))
+DEF_HELPER (LINUX_V5_0, MSG_POP_DATA, msg_pop_data, TYPES (it, pt, it, it, it, 0))
+DEF_HELPER (LINUX_V5_0, RC_POINTER_REL, rc_pointer_rel, TYPES (it, pt, it, it, 0))
+DEF_HELPER (LINUX_V5_1, SPIN_LOCK, spin_lock, TYPES (vt, pt, 0))
+DEF_HELPER (LINUX_V5_1, SPIN_UNLOCK, spin_unlock, TYPES (vt, pt, 0))
+DEF_HELPER (LINUX_V5_1, SK_FULLSOCK, sk_fullsock, TYPES (pt, pt, 0))
+DEF_HELPER (LINUX_V5_1, TCP_SOCK, tcp_sock, TYPES (pt, pt, 0))
+DEF_HELPER (LINUX_V5_1, SKB_ECN_SET_CE, skb_ecn_set_ce, TYPES (it, pt, 0))
+DEF_HELPER (LINUX_V5_1, GET_LISTENER_SOCK, get_listener_sock, TYPES (pt, pt, 0))
+DEF_HELPER (LINUX_V5_2, SKC_LOOKUP_TCP, skc_lookup_tcp,
+	    TYPES (pt, pt, pt, u32t, u64t, u64t, 0))
+DEF_HELPER (LINUX_V5_2, TCP_CHECK_SYNCOOKIE, tcp_check_syncookie,
+	    TYPES (it, pt, pt, u32t, pt, u32t, 0))
+DEF_HELPER (LINUX_V5_2, SYSCTL_GET_NAME, sysctl_get_name, TYPES (it, pt, pt, ullt, u64t, 0))
+DEF_HELPER (LINUX_V5_2, SYSCTL_GET_CURRENT_VALUE, sysctl_get_current_value,
+	    TYPES (it, pt, pt, ullt, 0))
+DEF_HELPER (LINUX_V5_2, SYSCTL_GET_NEW_VALUE, sysctl_get_new_value,
+	    TYPES (it, pt, pt, ullt, 0))
+DEF_HELPER (LINUX_V5_2, SYSCTL_SET_NEW_VALUE, sysctl_set_new_value,
+	    TYPES (it, pt, pt, ullt, 0))
+DEF_HELPER (LINUX_V5_2, STRTOL, strtol, TYPES (it, cst, ullt, u64t, pt, 0))
+DEF_HELPER (LINUX_V5_2, STRTOUL, strtoul, TYPES (it, pt, ullt, u64t, pt, 0))
+DEF_HELPER (LINUX_V5_2, SK_STORAGE_GET, sk_storage_get, TYPES (pt, pt, pt, pt, u64t, 0))
+DEF_HELPER (LINUX_V5_2, SK_STORAGE_DELETE, sk_storage_delete, TYPES (it, pt, pt, 0))
+
+/*
+Local variables:
+mode:c
+End:
+*/
diff --git a/gcc/config/bpf/bpf-helpers.h b/gcc/config/bpf/bpf-helpers.h
new file mode 100644
index 00000000000..2fe96be7637
--- /dev/null
+++ b/gcc/config/bpf/bpf-helpers.h
@@ -0,0 +1,324 @@ 
+/* Copyright (C) 2019 Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* The purpose of this file is to provide a compatiblity layer with
+   the Linux kernel bpf_helpers.h header that is located in
+   linux/tools/testing/selftests/bpf/bpf_helpers.h.  That file is
+   currently llvm-specific.  */
+
+#ifndef __BPF_HELPERS_H
+#define __BPF_HELPERS_H
+
+#define SEC(NAME) __attribute__((section(NAME), used))
+
+/* Flags used in some kernel helpers.  */
+
+#define BPF_ANY     0
+#define BPF_NOEXIST 1
+#define BPF_EXIST   2
+
+#define BPF_F_LOCK 4
+#define BPF_F_NO_COMMON_LRU (1U << 1)
+#define BPF_F_NUMA_NODE (1U << 2)
+
+/* Functions to call kernel helpers.  We provide the "standard" bpf_*
+   names as synonyms of the corresponding GCC builtins.  In some
+   cases, where non-void pointers are passed to the helper, inline
+   functions are used to achieve proper type checking.  */
+
+#ifndef KERNEL_VERSION
+# define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,0,0)
+
+#define bpf_map_lookup_elem	__builtin_bpf_helper_map_lookup_elem
+#define bpf_map_update_elem	__builtin_bpf_helper_map_update_elem
+#define bpf_map_delete_elem	__builtin_bpf_helper_map_delete_elem
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,1,0)
+
+#define bpf_probe_read		__builtin_bpf_helper_probe_read
+#define bpf_ktime_get_ns	__builtin_bpf_helper_ktime_get_ns
+#define bpf_trace_printk	__builtin_bpf_helper_trace_printk
+#define bpf_get_prandom_u32	__builtin_bpf_helper_get_prandom_u32
+#define bpf_get_smp_processor_id __builtin_bpf_helper_get_smp_processor_id
+#define bpf_skb_store_bytes	__builtin_bpf_helper_skb_store_bytes
+#define bpf_l3_csum_replace	__builtin_bpf_helper_l3_csum_replace
+#define bpf_l4_csum_replace	__builtin_bpf_helper_l4_csum_replace
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,2,0)
+
+#define bpf_tail_call		__builtin_bpf_helper_tail_call
+#define bpf_clone_redirect	__builtin_bpf_helper_clone_redirect
+#define bpf_get_current_pid_tgid __builtin_bpf_helper_get_current_pid_tgid
+#define bpf_get_current_uid_gid  __builtin_bpf_helper_get_current_uid_gid
+#define bpf_get_current_comm	__builtin_bpf_helper_get_current_comm
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,3,0)
+
+#define bpf_get_cgroup_classid	__builtin_bpf_helper_get_cgroup_classid
+#define bpf_skb_vlan_push	__builtin_bpf_helper_skb_vlan_push
+#define bpf_skb_vlan_pop	__builtin_bpf_helper_skb_vlan_pop
+#define bpf_skb_get_tunnel_key	__builtin_bpf_helper_skb_get_tunnel_key
+#define bpf_skb_set_tunnel_key	__builtin_bpf_helper_skb_set_tunnel_key
+#define bpf_perf_event_read	__builtin_bpf_helper_perf_event_read
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,4,0)
+
+#define bpf_redirect		__builtin_bpf_helper_redirect
+#define bpf_get_route_realm	__builtin_bpf_helper_get_route_realm
+#define bpf_perf_event_output	__builtin_bpf_helper_perf_event_output
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,5,0)
+
+#define bpf_skb_load_bytes	__builtin_bpf_helper_skb_load_bytes
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,6,0)
+
+#define bpf_get_stackid		__builtin_bpf_helper_get_stackid
+#define bpf_csum_diff		__builtin_bpf_helper_csum_diff
+#define bpf_skb_get_tunnel_opt	__builtin_bpf_helper_skb_get_tunnel_opt
+#define bpf_skb_set_tunnel_opt	__builtin_bpf_helper_skb_set_tunnel_opt
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,8,0)
+
+#define bpf_skb_change_proto	__builtin_bpf_helper_skb_change_proto
+#define bpf_skb_change_type	__builtin_bpf_helper_skb_change_type
+#define bpf_skb_under_cgroup	__builtin_bpf_helper_skb_under_cgroup
+#define bpf_get_hash_recalc	__builtin_bpf_helper_get_hash_recalc
+#define bpf_get_current_task	__builtin_bpf_helper_get_current_task
+#define bpf_probe_write_user	__builtin_bpf_helper_probe_write_user
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,9,0)
+
+#define bpf_current_task_under_cgroup __builtin_bpf_helper_current_task_under_cgroup
+#define bpf_skb_change_tail	__builtin_bpf_helper_skb_change_tail
+#define bpf_skb_pull_data	__builtin_bpf_helper_skb_pull_data
+#define bpf_csum_update		__builtin_bpf_helper_csum_update
+#define bpf_set_hash_invalid	__builtin_bpf_helper_set_hash_invalid
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,10,0)
+
+#define bpf_get_numa_node_id	__builtin_bpf_helper_get_numa_node_id
+#define bpf_skb_change_head	__builtin_bpf_helper_skb_change_head
+#define bpf_xdp_adjust_head	__builtin_bpf_helper_xdp_adjust_head
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,11,0)
+
+#define bpf_probe_read_str	__builtin_bpf_helper_probe_read_str
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,12,0)
+
+#define bpf_get_socket_cookie	__builtin_bpf_helper_get_socket_cookie
+#define bpf_get_socket_uid	__builtin_bpf_helper_get_socket_uid
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,13,0)
+
+#define bpf_set_hash		__builtin_bpf_helper_set_hash
+#define bpf_setsockopt		__builtin_bpf_helper_setsockopt
+#define bpf_skb_adjust_room	__builtin_bpf_helper_skb_adjust_room
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,14,0)
+
+#define bpf_redirect_map	__builtin_bpf_helper_redirect_map
+#define bpf_sk_redirect_map	__builtin_bpf_helper_sk_redirect_map
+#define bpf_sock_map_update	__builtin_bpf_helper_sock_map_update
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,15,0)
+
+#define bpf_perf_event_read_value __builtin_bpf_helper_perf_event_read_value
+#define bpf_perf_prog_read_value  __builtin_bpf_helper_perf_prog_read_value
+#define bpf_getsockopt		  __builtin_bpf_helper_getsockopt
+#define bpf_xdp_adjust_meta	__builtin_bpf_helper_xdp_adjust_meta
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,16,0)
+
+#define bpf_override_return	__builtin_bpf_helper_override_return
+#define bpf_sock_ops_cb_flags_set __builtin_bpf_helper_sock_ops_cb_flags_set
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,17,0)
+
+#define bpf_msg_redirect_map	__builtin_bpf_helper_msg_redirect_map
+#define bpf_msg_apply_bytes	__builtin_bpf_helper_msg_apply_bytes
+#define bpf_msg_cork_bytes	__builtin_bpf_helper_msg_cork_bytes
+/* NOTE(review): the 4.17 kernel helper is bpf_msg_pull_data, so
+   'bpf_pull_data' looks misnamed, and '__builtin_bpf_helper_bpf_bind'
+   carries a doubled 'bpf_' -- confirm both against bpf-helpers.def.  */
+#define bpf_pull_data		__builtin_bpf_helper_pull_data
+#define bpf_bind		__builtin_bpf_helper_bpf_bind
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,18,0)
+
+#define bpf_xdp_adjust_tail	__builtin_bpf_helper_xdp_adjust_tail
+#define bpf_skb_get_xfrm_state	__builtin_bpf_helper_skb_get_xfrm_state
+#define bpf_get_stack		__builtin_bpf_helper_get_stack
+#define bpf_skb_load_bytes_relative __builtin_bpf_helper_skb_load_bytes_relative
+#define bpf_sock_hash_update	__builtin_bpf_helper_sock_hash_update
+#define bpf_msg_redirect_hash	__builtin_bpf_helper_msg_redirect_hash
+#define bpf_sk_redirect_hash	__builtin_bpf_helper_sk_redirect_hash
+#define bpf_lwt_push_encap		__builtin_bpf_helper_lwt_push_encap
+#define bpf_lwt_seg6_store_bytes	__builtin_bpf_helper_lwt_seg6_store_bytes
+#define bpf_lwt_seg6_adjust_srh		__builtin_bpf_helper_lwt_seg6_adjust_srh
+#define bpf_lwt_seg6_action		__builtin_bpf_helper_lwt_seg6_action
+#define bpf_rc_repeat			__builtin_bpf_helper_rc_repeat
+#define bpf_rc_keydown			__builtin_bpf_helper_rc_keydown
+#define bpf_skb_cgroup_id		__builtin_bpf_helper_skb_cgroup_id
+#define bpf_get_current_cgroup_id	__builtin_bpf_helper_get_current_cgroup_id
+
+/* Type-checked wrapper for the fib_lookup kernel helper: accepts a
+   properly typed 'struct bpf_fib_lookup *' and casts it to the void
+   pointer expected by the builtin.  */
+static inline int
+bpf_fib_lookup (void *ctx, struct bpf_fib_lookup *param, int plen,
+		unsigned int flags)
+{
+  return __builtin_bpf_helper_fib_lookup (ctx, (void *) param, plen, flags);
+}
+
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,19,0)
+
+#define bpf_get_local_storage	__builtin_bpf_helper_get_local_storage
+#define bpf_sk_select_reuseport	__builtin_bpf_helper_sk_select_reuseport
+#define bpf_skb_ancestor_cgroup_id	__builtin_bpf_helper_skb_ancestor_cgroup_id
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (4,20,0)
+
+#define bpf_sk_release		__builtin_bpf_helper_sk_release
+#define bpf_map_push_elem	__builtin_bpf_helper_map_push_elem
+#define bpf_map_pop_elem	__builtin_bpf_helper_map_pop_elem
+#define bpf_map_peek_elem	__builtin_bpf_helper_map_peek_elem
+#define bpf_msg_push_data	__builtin_bpf_helper_msg_push_data
+
+/* Type-checked wrapper for the sk_lookup_tcp kernel helper: takes a
+   'struct bpf_sock_tuple *' and returns a 'struct bpf_sock *',
+   casting to/from the void pointers used by the builtin.  */
+static inline struct bpf_sock *
+bpf_sk_lookup_tcp (void *ctx, struct bpf_sock_tuple *tuple,
+		   int size, unsigned long long netns_id,
+		   unsigned long long flags)
+{
+  return
+    (struct bpf_sock *) __builtin_bpf_helper_sk_lookup_tcp (ctx,
+							    (void *) tuple,
+							    size,
+							    netns_id, flags);
+}
+
+/* Type-checked wrapper for the sk_lookup_udp kernel helper: takes a
+   'struct bpf_sock_tuple *' and returns a 'struct bpf_sock *',
+   casting to/from the void pointers used by the builtin.  */
+static inline struct bpf_sock *
+bpf_sk_lookup_udp (void *ctx, struct bpf_sock_tuple *tuple,
+		   int size, unsigned long long netns_id,
+		   unsigned long long flags)
+{
+  return
+    (struct bpf_sock *) __builtin_bpf_helper_sk_lookup_udp (ctx,
+							    (void *) tuple,
+							    size,
+							    netns_id, flags);
+}
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (5,0,0)
+
+/* Fixed: the helper is registered as msg_pop_data in bpf-helpers.def,
+   so the builtin is __builtin_bpf_helper_msg_pop_data; the previous
+   expansion dropped the 'msg_' prefix.  */
+#define bpf_msg_pop_data	__builtin_bpf_helper_msg_pop_data
+#define bpf_rc_pointer_rel	__builtin_bpf_helper_rc_pointer_rel
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (5,1,0)
+
+#define bpf_spin_lock		__builtin_bpf_helper_spin_lock
+#define bpf_spin_unlock		__builtin_bpf_helper_spin_unlock
+#define bpf_skb_ecn_set_ce	__builtin_bpf_helper_skb_ecn_set_ce
+
+/* Type-checked wrapper for the sk_fullsock kernel helper; casts the
+   void pointer returned by the builtin to 'struct bpf_sock *'.  */
+static inline struct bpf_sock *
+bpf_sk_fullsock (struct bpf_sock *sk)
+{
+  return
+    (struct bpf_sock *) __builtin_bpf_helper_sk_fullsock ((void *) sk);
+}
+
+/* Type-checked wrapper for the tcp_sock kernel helper; casts the
+   void pointer returned by the builtin to 'struct bpf_sock *'.  */
+static inline struct bpf_sock *
+bpf_tcp_sock (struct bpf_sock *sk)
+{
+  return
+    (struct bpf_sock *) __builtin_bpf_helper_tcp_sock ((void *) sk);
+}
+
+/* Type-checked wrapper for the get_listener_sock kernel helper; casts
+   the void pointer returned by the builtin to 'struct bpf_sock *'.  */
+static inline struct bpf_sock *
+bpf_get_listener_sock (struct bpf_sock *sk)
+{
+  return
+    (struct bpf_sock *) __builtin_bpf_helper_get_listener_sock ((void *) sk);
+}
+
+#if __BPF_KERNEL_VERSION_CODE__ >= KERNEL_VERSION (5,2,0)
+
+
+#endif /* 5.2 */
+#endif /* 5.1 */
+#endif /* 5.0 */
+#endif /* 4.20 */
+#endif /* 4.19 */
+#endif /* 4.18 */
+#endif /* 4.17 */
+#endif /* 4.16 */
+#endif /* 4.15 */
+#endif /* 4.14 */
+#endif /* 4.13 */
+#endif /* 4.12 */
+#endif /* 4.11 */
+#endif /* 4.10 */
+#endif /* 4.9 */
+#endif /* 4.8 */
+#endif /* 4.6 */
+#endif /* 4.5 */
+#endif /* 4.4 */
+#endif /* 4.3 */
+#endif /* 4.2 */
+#endif /* 4.1 */
+#endif /* 4.0 */
+
+/* Functions to emit BPF_LD_ABS and BPF_LD_IND instructions.  We
+   provide the "standard" names as synonyms of the corresponding GCC
+   builtins.  Note how the SKB argument is ignored.  */
+
+/* Emit a BPF_LD_ABS byte load.  SKB is accepted for source
+   compatibility with the llvm bpf_helpers.h but ignored: the
+   instruction implicitly operates on the program's context.  */
+static inline long long
+load_byte (void *skb, unsigned long long off)
+{
+  return __builtin_bpf_load_byte (off);
+}
+
+/* Emit a BPF_LD_ABS half-word load.  SKB is accepted for source
+   compatibility with the llvm bpf_helpers.h but ignored.  */
+static inline long long
+load_half (void *skb, unsigned long long off)
+{
+  return __builtin_bpf_load_half (off);
+}
+
+/* Emit a BPF_LD_ABS word load.  SKB is accepted for source
+   compatibility with the llvm bpf_helpers.h but ignored.  */
+static inline long long
+load_word (void *skb, unsigned long long off)
+{
+  return __builtin_bpf_load_word (off);
+}
+
+/* Map descriptor placed by BPF programs in a maps ELF section and
+   consumed by program loaders.  NOTE(review): the field order and
+   widths must match what the loader (e.g. the kernel selftests /
+   libbpf bpf_helpers.h this file mirrors) expects -- confirm.  */
+struct bpf_map_def
+{
+  unsigned int type;
+  unsigned int key_size;
+  unsigned int value_size;
+  unsigned int max_entries;
+  unsigned int map_flags;
+  unsigned int inner_map_idx;
+  unsigned int numa_node;
+};
+
+#endif /* ! __BPF_HELPERS_H */
diff --git a/gcc/config/bpf/bpf-opts.h b/gcc/config/bpf/bpf-opts.h
new file mode 100644
index 00000000000..5c76a40f131
--- /dev/null
+++ b/gcc/config/bpf/bpf-opts.h
@@ -0,0 +1,56 @@ 
+/* Definitions for option handling for eBPF.
+   Copyright (C) 2019 Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef BPF_OPTS_H
+#define BPF_OPTS_H
+
+/* Supported versions of the Linux kernel.  */
+enum bpf_kernel_version
+{
+ /* Linux 4.x */
+ LINUX_V4_0,
+ LINUX_V4_1,
+ LINUX_V4_2,
+ LINUX_V4_3,
+ LINUX_V4_4,
+ LINUX_V4_5,
+ LINUX_V4_6,
+ LINUX_V4_7,
+ LINUX_V4_8,
+ LINUX_V4_9,
+ LINUX_V4_10,
+ LINUX_V4_11,
+ LINUX_V4_12,
+ LINUX_V4_13,
+ LINUX_V4_14,
+ LINUX_V4_15,
+ LINUX_V4_16,
+ LINUX_V4_17,
+ LINUX_V4_18,
+ LINUX_V4_19,
+ LINUX_V4_20,
+ /* Linux 5.x  */
+ LINUX_V5_0,
+ LINUX_V5_1,
+ LINUX_V5_2,
+ LINUX_LATEST = LINUX_V5_2,
+ LINUX_NATIVE,
+};
+
+#endif /* ! BPF_OPTS_H */
diff --git a/gcc/config/bpf/bpf-protos.h b/gcc/config/bpf/bpf-protos.h
new file mode 100644
index 00000000000..3a835f4f12e
--- /dev/null
+++ b/gcc/config/bpf/bpf-protos.h
@@ -0,0 +1,33 @@ 
+/* Definition of eBPF target for GNU compiler.
+   Copyright (C) 2019 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#ifndef GCC_BPF_PROTOS_H
+#define GCC_BPF_PROTOS_H
+
+/* Routines implemented in bpf.c.  */
+
+extern HOST_WIDE_INT bpf_initial_elimination_offset (int, int);
+extern const char *bpf_output_call (rtx);
+extern void bpf_target_macros (cpp_reader *);
+extern void bpf_print_operand (FILE *, rtx, int);
+extern void bpf_print_operand_address (FILE *, rtx);
+extern void bpf_expand_prologue (void);
+extern void bpf_expand_epilogue (void);
+
+#endif /* ! GCC_BPF_PROTOS_H */
diff --git a/gcc/config/bpf/bpf.c b/gcc/config/bpf/bpf.c
new file mode 100644
index 00000000000..4a42259a9c3
--- /dev/null
+++ b/gcc/config/bpf/bpf.c
@@ -0,0 +1,1136 @@ 
+/* Subroutines used for code generation for eBPF.
+   Copyright (C) 2019 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#define IN_TARGET_CODE 1
+
+#define INCLUDE_STRING
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "insn-attr.h"
+#include "recog.h"
+#include "output.h"
+#include "alias.h"
+#include "tree.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "function.h"
+#include "explow.h"
+#include "memmodel.h"
+#include "emit-rtl.h"
+#include "reload.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "basic-block.h"
+#include "expr.h"
+#include "optabs.h"
+#include "bitmap.h"
+#include "df.h"
+#include "c-family/c-common.h"
+#include "diagnostic.h"
+#include "builtins.h"
+#include "predict.h"
+#include "langhooks.h"
+
+/* Per-function machine data.  */
+struct GTY(()) machine_function
+{
+  /* Number of bytes saved on the stack for local variables.  */
+  int local_vars_size;
+
+  /* Number of bytes saved on the stack for callee-saved
+     registers.  */
+  int callee_saved_reg_size;
+
+  /* Whether diagnostics for the function have been
+     checked/reported.  */
+  int diagnostics_checked_p;
+};
+
+/* Data structures for the eBPF specific built-ins.  */
+
+/* Maximum number of arguments taken by a builtin function, plus
+   one.  */
+#define BPF_BUILTIN_MAX_ARGS 5
+
+/* Identifiers for all the target-specific builtins.  The helper
+   entries are generated from bpf-helpers.def and must stay in sync
+   with the bpf_helper_names table below, which is indexed by these
+   values.  */
+enum bpf_builtins
+{
+ BPF_BUILTIN_UNUSED = 0,
+ /* Built-ins for kernel helpers.  */
+#define DEF_HELPER(V,D,N,T) BPF_BUILTIN_HELPER_##D,
+#  include "bpf-helpers.def"
+#undef DEF_HELPER
+ BPF_BUILTIN_HELPER_MAX,
+ /* Built-ins for non-generic loads and stores.  */
+ BPF_BUILTIN_LOAD_BYTE = BPF_BUILTIN_HELPER_MAX,
+ BPF_BUILTIN_LOAD_HALF,
+ BPF_BUILTIN_LOAD_WORD,
+ BPF_BUILTIN_MAX,
+};
+
+/* This table is indexed by an enum bpf_builtin.  */
+static const char *bpf_helper_names[] =
+{
+ NULL,
+#define DEF_HELPER(V,D,N,T) #N,
+#  include "bpf-helpers.def"
+#undef DEF_HELPER
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+/* Return the builtin code corresponding to the kernel helper builtin
+   __builtin_NAME, or 0 if the name doesn't correspond to a kernel
+   helper builtin.  */
+
+/* Linear search over bpf_helper_names; entries 1 ..
+   BPF_BUILTIN_HELPER_MAX - 1 are guaranteed non-NULL because they are
+   generated from bpf-helpers.def, so the strcmp below is safe.  */
+static inline int
+bpf_helper_code (const char *name)
+{
+  int i;
+
+  for (i = 1; i < BPF_BUILTIN_HELPER_MAX; ++i)
+    {
+      if (strcmp (name, bpf_helper_names[i]) == 0)
+	return i;
+    }
+
+  return 0;
+}
+
+static GTY (()) tree bpf_builtins[(int) BPF_BUILTIN_MAX];
+
+/* Initialize the per-function machine status.  */
+
+static struct machine_function *
+bpf_init_machine_status (void)
+{
+  /* Note this initializes all fields to 0, which is just OK for
+     us.  */
+  return ggc_cleared_alloc<machine_function> ();
+}
+
+/* Override options and do some other initialization.  */
+
+static void
+bpf_option_override (void)
+{
+  /* Set the default target kernel if no -mkernel was specified.  */
+  if (!global_options_set.x_bpf_kernel)
+    bpf_kernel = LINUX_LATEST;
+
+  /* Set the initializer for the per-function status structure.  */
+  init_machine_status = bpf_init_machine_status;
+}
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE bpf_option_override
+
+/* Determine whether the port is prepared to handle insns involving
+   scalar mode MODE.  For a scalar mode to be considered supported,
+   all the basic arithmetic and comparisons must work.  */
+
+static bool
+bpf_scalar_mode_supported_p (scalar_mode mode)
+{
+  switch (mode)
+    {
+    case E_QImode:
+    case E_HImode:
+    case E_SImode:
+    case E_DImode:
+    case E_TImode:
+      return true;
+
+    default:
+      return false;
+    }
+
+  /* Not reached: both switch arms return.  */
+  return false;
+}
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P bpf_scalar_mode_supported_p
+
+/* Define target-specific CPP macros.  This function in used in the
+   definition of TARGET_CPU_CPP_BUILTINS in bpf.h */
+
+#define builtin_define(TXT) cpp_define (pfile, TXT)
+
+void
+bpf_target_macros (cpp_reader *pfile)
+{
+  builtin_define ("__BPF__");
+
+  if (TARGET_LITTLE_ENDIAN)
+    builtin_define ("__BPF_LITTLE_ENDIAN__");
+  else
+    builtin_define ("__BPF_BIG_ENDIAN__");
+
+  /* Define __BPF_KERNEL_VERSION_CODE__, encoded like the
+     KERNEL_VERSION macro in the kernel sources:
+     (major << 16) + (minor << 8) + patch.  */
+  {
+    const char *version_code;
+    char *kernel_version_code;
+
+    switch (bpf_kernel)
+      {
+      case LINUX_V4_0: version_code = "0x40000"; break;
+      case LINUX_V4_1: version_code = "0x40100"; break;
+      case LINUX_V4_2: version_code = "0x40200"; break;
+      case LINUX_V4_3: version_code = "0x40300"; break;
+      case LINUX_V4_4: version_code = "0x40400"; break;
+      case LINUX_V4_5: version_code = "0x40500"; break;
+      case LINUX_V4_6: version_code = "0x40600"; break;
+      case LINUX_V4_7: version_code = "0x40700"; break;
+      case LINUX_V4_8: version_code = "0x40800"; break;
+      case LINUX_V4_9: version_code = "0x40900"; break;
+      case LINUX_V4_10: version_code = "0x40a00"; break;
+      case LINUX_V4_11: version_code = "0x40b00"; break;
+      case LINUX_V4_12: version_code = "0x40c00"; break;
+      case LINUX_V4_13: version_code = "0x40d00"; break;
+      case LINUX_V4_14: version_code = "0x40e00"; break;
+      case LINUX_V4_15: version_code = "0x40f00"; break;
+      case LINUX_V4_16: version_code = "0x41000"; break;
+      /* Fixed: 4.17 to 4.20 previously encoded the minor version in
+	 the wrong nibble (e.g. 0x42000 for 4.17 instead of the
+	 correct (4 << 16) + (17 << 8) = 0x41100).  */
+      case LINUX_V4_17: version_code = "0x41100"; break;
+      case LINUX_V4_18: version_code = "0x41200"; break;
+      case LINUX_V4_19: version_code = "0x41300"; break;
+      case LINUX_V4_20: version_code = "0x41400"; break;
+      case LINUX_V5_0: version_code = "0x50000"; break;
+      case LINUX_V5_1: version_code = "0x50100"; break;
+      case LINUX_V5_2: version_code = "0x50200"; break;
+      default:
+	gcc_unreachable ();
+      }
+
+#define KERNEL_VERSION_CODE "__BPF_KERNEL_VERSION_CODE__="
+    /* Room for the 7-character hex version code plus the terminating
+       NUL.  */
+    kernel_version_code
+      = (char *) alloca (strlen (KERNEL_VERSION_CODE) + 7 + 1);
+    strcpy (kernel_version_code, KERNEL_VERSION_CODE);
+#undef KERNEL_VERSION_CODE
+    strcat (kernel_version_code, version_code);
+    builtin_define (kernel_version_code);
+  }
+}
+
+/* Output assembly directives to switch to section NAME.  The section
+   should have attributes as specified by FLAGS, which is a bit mask
+   of the 'SECTION_*' flags defined in 'output.h'.  If DECL is
+   non-NULL, it is the 'VAR_DECL' or 'FUNCTION_DECL' with which this
+   section is associated.  */
+
+static void
+bpf_asm_named_section (const char *name,
+		       unsigned int flags ATTRIBUTE_UNUSED,
+		       tree decl ATTRIBUTE_UNUSED)
+{
+  fprintf (asm_out_file, "\t.section\t%s\n", name);
+}
+
+#undef TARGET_ASM_NAMED_SECTION
+#define TARGET_ASM_NAMED_SECTION bpf_asm_named_section
+
+/* Return an RTX representing the place where a function returns or
+   receives a value of data type RET_TYPE, a tree node representing a
+   data type.  */
+
+static rtx
+bpf_function_value (const_tree ret_type,
+		    const_tree fntype_or_decl ATTRIBUTE_UNUSED,
+		    bool outgoing ATTRIBUTE_UNUSED)
+{
+  enum machine_mode mode;
+  /* Fixed: initialize from the type's signedness instead of passing
+     an uninitialized value to promote_function_mode, which takes it
+     as an in/out argument.  */
+  int unsignedp = TYPE_UNSIGNED (ret_type);
+
+  mode = TYPE_MODE (ret_type);
+  if (INTEGRAL_TYPE_P (ret_type))
+    mode = promote_function_mode (ret_type, mode, &unsignedp, fntype_or_decl, 1);
+
+  /* Function values are always returned in register %r0.  */
+  return gen_rtx_REG (mode, 0);
+}
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE bpf_function_value
+
+/* Return true if REGNO is the number of a hard register in which the
+   values of called function may come back.  On eBPF that is only
+   register 0 (%r0).  */
+
+static bool
+bpf_function_value_regno_p (const unsigned int regno)
+{
+  return (regno == 0);
+}
+
+#undef TARGET_FUNCTION_VALUE_REGNO_P
+#define TARGET_FUNCTION_VALUE_REGNO_P bpf_function_value_regno_p
+
+/* Compute the size of the function's stack frame, including the local
+   area and the register-save area.  */
+
+static void
+bpf_compute_frame (void)
+{
+  int stack_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
+  int padding_locals, regno;
+
+  /* Set the space used in the stack by local variables.  This is
+     rounded up to respect the minimum stack alignment.  */
+  cfun->machine->local_vars_size = get_frame_size ();
+
+  padding_locals = cfun->machine->local_vars_size % stack_alignment;
+  if (padding_locals)
+    padding_locals = stack_alignment - padding_locals;
+
+  cfun->machine->local_vars_size += padding_locals;
+
+  /* Set the space used in the stack by callee-saved used registers in
+     the current function.  There is no need to round up, since the
+     registers are all 8 bytes wide.  The stack pointer is also
+     counted when the function calls alloca, because it is then
+     saved/restored like a callee-saved register (see
+     bpf_expand_prologue).  */
+  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+    if ((!fixed_regs[regno]
+	 && df_regs_ever_live_p (regno)
+	 && !call_used_regs[regno])
+	|| (cfun->calls_alloca
+	    && regno == STACK_POINTER_REGNUM))
+      cfun->machine->callee_saved_reg_size += 8;
+
+  /* Check that the total size of the frame doesn't exceed the limit
+     imposed by eBPF: currently 512 bytes.  NOTE(review): the static
+     flag means the error is emitted at most once per compiler run,
+     not once per offending function -- confirm this is intended.  */
+  if ((cfun->machine->local_vars_size
+       + cfun->machine->callee_saved_reg_size) > 512)
+    {
+      static int stack_limit_exceeded = 0;
+
+      if (!stack_limit_exceeded)
+	error ("eBPF stack limit of 512 bytes exceeded");
+      stack_limit_exceeded = 1;
+    }
+}
+
+/* Expand to the instructions in a function prologue.  This function
+   is called when expanding the 'prologue' pattern in bpf.md.  */
+
+void
+bpf_expand_prologue (void)
+{
+  int regno, fp_offset;
+  rtx insn;
+  HOST_WIDE_INT size;
+
+  bpf_compute_frame ();
+  size = (cfun->machine->local_vars_size
+	  + cfun->machine->callee_saved_reg_size);
+  /* The register-save area lives below the local variables, so the
+     first save slot is at %fp + (fp_offset - 8).  */
+  fp_offset = -cfun->machine->local_vars_size;
+  
+  /* Save callee-saved hard registers.  The register-save-area starts
+     right after the local variables.  */
+  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+    {
+      if ((!fixed_regs[regno]
+	   && df_regs_ever_live_p (regno)
+	   && !call_used_regs[regno])
+	  || (cfun->calls_alloca
+	      && regno == STACK_POINTER_REGNUM))
+	{
+	  rtx mem;
+
+	  if (!IN_RANGE (fp_offset, -1 - 0x7fff, 0x7fff))
+	    /* This has been already reported as an error in
+	       bpf_compute_frame. */
+	    break;
+	  else
+	    {
+	      mem = gen_frame_mem (DImode,
+				   plus_constant (DImode,
+						  gen_rtx_REG (DImode, FRAME_POINTER_REGNUM),
+						  fp_offset - 8));
+	      insn = emit_move_insn (mem, gen_rtx_REG (DImode, regno));
+	      RTX_FRAME_RELATED_P (insn) = 1;
+	      fp_offset -= 8;
+	    }
+	}
+    }
+
+  /* Set the stack pointer, if the function allocates space
+     dynamically.  Note that the value of %sp should be directly
+     derived from %fp, for the kernel verifier to track it as a stack
+     accessor.  */
+  if (cfun->calls_alloca)
+    {
+      insn = emit_move_insn (stack_pointer_rtx,
+			     gen_rtx_REG (DImode, FRAME_POINTER_REGNUM));
+      RTX_FRAME_RELATED_P (insn) = 1;
+      
+      if (size > 0)
+	{
+	  insn = emit_insn (gen_rtx_SET (stack_pointer_rtx,
+					 gen_rtx_PLUS (Pmode,
+						       stack_pointer_rtx,
+						       GEN_INT (-size))));
+	  RTX_FRAME_RELATED_P (insn) = 1;
+	}
+    }
+}
+
+/* Expand to the instructions in a function epilogue.  This function
+   is called when expanding the 'epilogue' pattern in bpf.md.  */
+
+void
+bpf_expand_epilogue (void)
+{
+  int regno, fp_offset;
+  rtx insn;
+
+  bpf_compute_frame ();
+  /* Must mirror the offsets used when saving in bpf_expand_prologue:
+     the save area starts right below the local variables.  */
+  fp_offset = -cfun->machine->local_vars_size;
+
+  /* Restore callee-saved hard registers from the stack.  */
+  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+    {
+      if ((!fixed_regs[regno]
+	   && df_regs_ever_live_p (regno)
+	   && !call_used_regs[regno])
+	  || (cfun->calls_alloca
+	      && regno == STACK_POINTER_REGNUM))
+	{
+	  rtx mem;
+
+	  if (!IN_RANGE (fp_offset, -1 - 0x7fff, 0x7fff))
+	    /* This has been already reported as an error in
+	       bpf_compute_frame. */
+	    break;
+	  else
+	    {
+	      mem = gen_frame_mem (DImode,
+				   plus_constant (DImode,
+						  gen_rtx_REG (DImode, FRAME_POINTER_REGNUM),
+						  fp_offset - 8));
+	      insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem);
+	      RTX_FRAME_RELATED_P (insn) = 1;
+	      fp_offset -= 8;
+	    }
+	}
+    }
+
+  emit_jump_insn (gen_exit ());
+}
+
+/* Output the extra assembler code for entry to a function.  Note this
+   doesn't generally include instructions, but extra needed
+   directives.  */
+
+/* Made static for consistency with bpf_asm_function_epilogue: it is
+   only referenced through the target hook macro and is not declared
+   in bpf-protos.h.  */
+static void
+bpf_asm_function_prologue (FILE *file ATTRIBUTE_UNUSED)
+{
+}
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE bpf_asm_function_prologue
+
+/* Output the extra assembler code for exit from a function.  */
+
+static void
+bpf_asm_function_epilogue (FILE *file ATTRIBUTE_UNUSED)
+{
+}
+
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE bpf_asm_function_epilogue
+
+/* Return the initial difference between the specified pair of
+   registers.  The registers that can figure in FROM, and TO, are
+   specified by ELIMINABLE_REGS in bpf.h.
+
+   This function is used in the definition of
+   INITIAL_ELIMINATION_OFFSET in bpf.h  */
+
+HOST_WIDE_INT
+bpf_initial_elimination_offset (int from,
+				int to)
+{
+  HOST_WIDE_INT ret;
+
+  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+    {
+      /* The whole frame (locals plus register-save area) lies between
+	 the incoming args pointer and the stack pointer.  */
+      bpf_compute_frame ();
+      ret = (cfun->machine->local_vars_size
+	     + cfun->machine->callee_saved_reg_size);
+    }
+  else if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
+    ret = 0;
+  else
+    /* No other eliminations are listed in ELIMINABLE_REGS.  Use
+       gcc_unreachable instead of abort, for consistency with the rest
+       of the file.  */
+    gcc_unreachable ();
+
+  return ret;
+}
+
+/* Return the number of consecutive hard registers, starting at
+   register number REGNO, required to hold a value of mode MODE.  */
+
+static unsigned int
+bpf_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED,
+		      enum machine_mode mode)
+{
+  return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
+}
+
+#undef TARGET_HARD_REGNO_NREGS
+#define TARGET_HARD_REGNO_NREGS bpf_hard_regno_nregs
+
+/* Return true if it is permissible to store a value of mode MODE in
+   hard register number REGNO, or in several registers starting with
+   that one.  */
+
+static bool
+bpf_hard_regno_mode_ok (unsigned int regno ATTRIBUTE_UNUSED,
+			enum machine_mode mode)
+{
+  switch (mode)
+    {
+    case E_SImode:
+    case E_DImode:
+    case E_HImode:
+    case E_QImode:
+    case E_TImode:
+    case E_SFmode:
+    case E_DFmode:
+      return true;
+    default:
+      return false;
+    }
+}
+
+#undef TARGET_HARD_REGNO_MODE_OK
+#define TARGET_HARD_REGNO_MODE_OK bpf_hard_regno_mode_ok
+
+/* Return true if a value of mode MODE1 is accessible in mode MODE2
+   without copying.  */
+
+static bool
+bpf_modes_tieable_p (enum machine_mode mode1,
+		     enum machine_mode mode2)
+{
+  return (mode1 == mode2
+	  || GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2));
+}
+
+#undef TARGET_MODES_TIEABLE_P
+#define TARGET_MODES_TIEABLE_P bpf_modes_tieable_p
+
+/* Return true if a function must have and use a frame pointer.  */
+
+static bool
+bpf_frame_pointer_required (void)
+{
+  /* eBPF does not expose a stack pointer to programs: all stack
+     access must be based on the (read-only) frame pointer, so a frame
+     pointer is always required.  */
+  return true;
+}
+
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED bpf_frame_pointer_required
+
+/* Return `true' if the given RTX X is a valid base for an indirect
+   memory access.  STRICT has the same meaning than in
+   bpf_legitimate_address_p.  */
+
+static inline bool
+bpf_address_base_p (rtx x, bool strict)
+{
+  /* Hard registers 0..10 (the eBPF registers %r0-%r9 and the frame
+     pointer) may serve as address bases; when not in strict mode,
+     pseudo registers are allowed too, since reload may still assign
+     them a hard register.  NOTE(review): 11 presumably equals
+     FIRST_PSEUDO_REGISTER here -- consider using the macro.  */
+  return (GET_CODE (x) == REG
+	  && (REGNO (x) < 11
+	      || (!strict && REGNO (x) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return true if X (a RTX) is a legitimate memory address on the
+   target machine for a memory operand of mode MODE.  */
+
+static bool
+bpf_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
+			  rtx x,
+			  bool strict)
+{
+  switch (GET_CODE (x))
+    {
+    case LABEL_REF:
+    case SYMBOL_REF:
+    case CONST:
+      /* These are assumed to fit in 32-bit, because the kernel
+	 imposes a limit to the size of eBPF programs.  */
+      return true;
+
+    case CONST_INT:
+      /* This is only valid if the constant fits in a signed
+	 32-bit.  */
+      return IN_RANGE (INTVAL (x), -1 - 0x7fffffff, 0x7fffffff);
+
+    case REG:
+      return bpf_address_base_p (x, strict);
+
+    case PLUS:
+      {
+	/* The valid patterns here are:
+
+	   (PLUS ADDR_BASE CONST_INT)
+	   (PLUS CONST_INT ADDR_BASE)
+
+	   provided CONST_INT fits in a signed 16-bit.
+
+	   Note that LABEL_REF and SYMBOL_REF are not allowed in
+	   REG+IMM addresses, because it is almost certain they will
+	   overload the offset field.  */
+
+	rtx x0 = XEXP (x, 0);
+	rtx x1 = XEXP (x, 1);
+
+	if (bpf_address_base_p (x0, strict) && GET_CODE (x1) == CONST_INT)
+	  return IN_RANGE (INTVAL (x1), -1 - 0x7fff, 0x7fff);
+
+	if (bpf_address_base_p (x1, strict) && GET_CODE (x0) == CONST_INT)
+	  return IN_RANGE (INTVAL (x0), -1 - 0x7fff, 0x7fff);
+
+	break;
+      }
+    default:
+      break;
+    }
+
+  return false;
+}
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P bpf_legitimate_address_p
+
+/* Split an out-of-range address displacement into hi and lo parts.
+   The hi part will have to be loaded into a register separately, but
+   the low part will be folded into the memory operand.  */
+
+static bool
+bpf_legitimize_address_displacement (rtx *off1, rtx *off2,
+				     poly_int64 poly_offset, machine_mode)
+{
+  HOST_WIDE_INT orig_offset = poly_offset;
+
+  /* Our case is very easy: the REG part of an indirect address is
+     64-bit wide, so it can hold any address.  This always leads to
+     REG+0 */
+
+  /* OFF1 is the part loaded into a register; OFF2 is the residual
+     displacement folded into the memory operand.  */
+  *off1 = GEN_INT (orig_offset);
+  *off2 = GEN_INT (0);
+  return true;
+}
+
+#undef TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT
+#define TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT bpf_legitimize_address_displacement
+
+/* Return true if memory address ADDR in address space AS can have
+   different meanings depending on the machine mode of the memory
+   reference it is used for or if the address is valid for some modes
+   but not others.  */
+
+static bool
+bpf_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED,
+                              addr_space_t as ATTRIBUTE_UNUSED)
+{
+  /* Conservatively treat every address as mode-dependent, which
+     prevents mode-changing transformations on memory references.  */
+  return true;
+}
+
+#undef TARGET_MODE_DEPENDENT_ADDRESS_P
+#define TARGET_MODE_DEPENDENT_ADDRESS_P bpf_mode_dependent_address_p
+
+/* Return true if X is a legitimate constant for a MODE-mode immediate
+   operand on the target machine.  */
+
<no value>+static bool
+bpf_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+			   rtx x ATTRIBUTE_UNUSED)
+{
+  /* Accept any constant as an immediate operand.  */
+  return true;
+}
+
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P bpf_legitimate_constant_p
+
+/* Describe the relative costs of RTL expressions.  Return true when
+   all subexpressions of X have been processed, and false when
+   `rtx_cost' should recurse.  */
+
+static bool
+bpf_rtx_costs (rtx x ATTRIBUTE_UNUSED,
+	       enum machine_mode mode ATTRIBUTE_UNUSED,
+	       int outer_code ATTRIBUTE_UNUSED,
+	       int opno ATTRIBUTE_UNUSED,
+               int *total ATTRIBUTE_UNUSED,
+	       bool speed ATTRIBUTE_UNUSED)
+{
+  /* To be written.  Returning false makes the generic code recurse
+     into subexpressions with default costs.  */
+  return false;
+}
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS bpf_rtx_costs
+
+/* Return a RTX indicating whether a function argument is passed in a
+   register and if so, which register.  */
+
+static rtx
+bpf_function_arg (cumulative_args_t ca, enum machine_mode mode,
+                  const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED)
+{
+  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
+
+  /* The first five arguments go in %r1..%r5; *cum is the number of
+     argument slots consumed so far.  Note MODE is used below, so it
+     must not be marked ATTRIBUTE_UNUSED.  */
+  if (*cum < 5)
+    return gen_rtx_REG (mode, *cum + 1);
+  else
+    /* An error has been emitted for this in
+       bpf_function_arg_advance.  */
+    return NULL_RTX;
+}
+
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG bpf_function_arg
+
+/* Update the summarizer variable pointed by CA to advance past an
+   argument in the argument list.  */
+
+static void
+bpf_function_arg_advance (cumulative_args_t ca, enum machine_mode mode ATTRIBUTE_UNUSED,
+                          const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED)
+{
+  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
+
+  /* eBPF passes at most five arguments, all in registers; diagnose
+     any excess here.  GCC diagnostics conventions: no contractions
+     in diagnostic messages.  */
+  if (*cum > 4)
+    error ("eBPF does not support functions with more than 5 arguments");
+  (*cum)++;
+}
+
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE bpf_function_arg_advance
+
+/* Return true if an argument at the position indicated by CUM should
+   be passed by reference.  If the hook returns true, a copy of that
+   argument is made in memory and a pointer to the argument is passed
+   instead of the argument itself.  */
+
+static bool
+bpf_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
+		       enum machine_mode mode, const_tree type,
+		       bool named ATTRIBUTE_UNUSED)
+{
+  unsigned HOST_WIDE_INT size;
+
+  if (type)
+    {
+      /* Aggregates are always passed by reference.  */
+      if (AGGREGATE_TYPE_P (type))
+	return true;
+      size = int_size_in_bytes (type);
+    }
+  else
+    size = GET_MODE_SIZE (mode);
+
+  /* 8*5: anything larger than the five 64-bit argument registers is
+     passed by reference.  */
+  return (size > 8*5);
+}
+
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE bpf_pass_by_reference
+
+/* Diagnostics on function contents.  */
+
+static void
+bpf_set_current_function (tree decl)
+{
+  /* Bail out when there is no function context, or when the
+     diagnostics for this function have already been emitted.  */
+  if (decl == NULL_TREE
+      || current_function_decl == NULL_TREE
+      || current_function_decl == error_mark_node
+      || !cfun->machine
+      || cfun->machine->diagnostics_checked_p)
+    return;
+
+  /* Currently we don't do anything meaningful here.  To be
+     changed.  */
+
+  /* Don't print the above diagnostics more than once.  */
+  cfun->machine->diagnostics_checked_p = 1;
+}
+
+#undef TARGET_SET_CURRENT_FUNCTION
+#define TARGET_SET_CURRENT_FUNCTION bpf_set_current_function
+
+/* Output the assembly code for a constructor.  Since eBPF doesn't
+   support indirect calls, constructors are not supported.  */
+
+static void
+bpf_output_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
+{
+  /* Running constructors would require indirect calls, which eBPF
+     does not have.  */
+  fatal_insn ("no support for constructors sorry", symbol);
+}
+
+#undef TARGET_ASM_CONSTRUCTOR
+#define TARGET_ASM_CONSTRUCTOR bpf_output_constructor
+
+/* Output the assembly code for a destructor.  Since eBPF doesn't
+   support indirect calls, destructors are not supported.  */
+
+static void
+bpf_output_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
+{
+  /* Running destructors would require indirect calls, which eBPF
+     does not have.  */
+  fatal_insn ("no support for destructors sorry", symbol);
+}
+
+#undef TARGET_ASM_DESTRUCTOR
+#define TARGET_ASM_DESTRUCTOR bpf_output_destructor
+
+/* Return the appropriate instruction to CALL to a function.  TARGET
+   is a `mem' RTX denoting the address of the called function.
+
+   The main purposes of this function are:
+   - To reject indirect CALL instructions, which are not supported by
+     eBPF.
+   - To recognize calls to kernel helper functions and emit the
+     corresponding CALL N instruction.
+
+   This function is called from the expansion of the 'call' pattern in
+   bpf.md.  */
+
+const char *
+bpf_output_call (rtx target)
+{
+  /* The returned string is consumed by the caller before the next
+     invocation, so a single buffer is reused; free the previous
+     allocation to avoid leaking memory on every emitted call.  */
+  static char *insn = NULL;
+  rtx op;
+
+  free (insn);
+  op = XEXP (target, 0);
+  switch (GET_CODE (op))
+    {
+    case CONST_INT:
+      /* 5 for "call\t", up to 21 characters for a 64-bit signed
+	 decimal (INTVAL is a HOST_WIDE_INT, so %ld and a 12-byte
+	 buffer could overflow), 1 for the terminating NUL.  */
+      insn = (char *) xmalloc (5 + 21 + 1);
+      sprintf (insn, "call\t" HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
+      break;
+    case SYMBOL_REF:
+      {
+	const char *function_name = XSTR (op, 0);
+	int code;
+
+	if (strncmp (function_name, "__builtin_bpf_helper_", 21) == 0
+	    && ((code = bpf_helper_code (function_name + 21)) != 0))
+	  {
+	    /* Calls to kernel helpers are emitted as `call N' where
+	       N is the helper number.  */
+	    insn = (char *) xmalloc (5 + 6 + 1);
+	    sprintf (insn, "call\t%d", code);
+	  }
+	else
+	  {
+	    insn = (char *) xmalloc (strlen (function_name) + 5 + 1);
+	    sprintf (insn, "call\t%s", function_name);
+	  }
+	break;
+      }
+    default:
+      error ("indirect calls are not supported by eBPF");
+      insn = xstrdup ("call 0");
+      break;
+    }
+
+  return insn;
+}
+
+/* Print an instruction operand.  This function is called in the macro
+   PRINT_OPERAND defined in bpf.h */
+
+void
+bpf_print_operand (FILE *file, rtx op, int code ATTRIBUTE_UNUSED)
+{
+  switch (GET_CODE (op))
+    {
+    case REG:
+      fprintf (file, "%s", reg_names[REGNO (op)]);
+      break;
+    case MEM:
+      output_address (GET_MODE (op), XEXP (op, 0));
+      break;
+    case CONST_DOUBLE:
+      /* Wide (two-word) values print as a double hex constant;
+	 negative low words also print in hex so the sign is not
+	 misread as a decimal value.  */
+      if (CONST_DOUBLE_HIGH (op))
+	fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
+		 CONST_DOUBLE_HIGH (op), CONST_DOUBLE_LOW (op));
+      else if (CONST_DOUBLE_LOW (op) < 0)
+	fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (op));
+      else
+	fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (op));
+      break;
+    case LABEL_REF:
+      /* This is for label values.  */
+      /* Fallthrough. */
+    default:
+      output_addr_const (file, op);
+    }
+}
+
+/* Print an operand which is an address.  This function should handle
+   any legit address, as accepted by bpf_legitimate_address_p.
+
+   This function is called in the PRINT_OPERAND_ADDRESS macro defined
+   in bpf.h */
+
+void
+bpf_print_operand_address (FILE *file, rtx addr)
+{
+  switch (GET_CODE (addr))
+    {
+    case REG:
+      /* Register-indirect addresses always carry an explicit zero
+	 offset in the assembly syntax.  */
+      fprintf (file, "[%s+0]", reg_names[REGNO (addr)]);
+      break;
+    case PLUS:
+      {
+	rtx op0 = XEXP (addr, 0);
+	rtx op1 = XEXP (addr, 1);
+
+	/* Emit [REG+OFFSET], accepting base register and constant
+	   displacement in either order.  */
+	if (GET_CODE (op0) == REG && CONSTANT_ADDRESS_P (op1))
+	  {
+	    fprintf (file, "[%s+", reg_names[REGNO (op0)]);
+	    output_addr_const (file, op1);
+	    fputs ("]", file);
+	  }
+	else if (GET_CODE (op1) == REG && CONSTANT_ADDRESS_P (op0))
+	  {
+	    fprintf (file, "[%s+", reg_names[REGNO (op1)]);
+	    output_addr_const (file, op0);
+	    fputs ("]", file);
+	  }
+	else
+	  fatal_insn ("invalid address in operand", addr);
+	break;
+      }
+    case MEM:
+      /* Fallthrough.  */
+    case LABEL_REF:
+      /* Neither MEM nor LABEL_REF is a supported bare address.  */
+      fatal_insn ("unsupported operand", addr);
+      break;
+    default:
+      output_addr_const (file, addr);
+      break;
+    }
+}
+
+/* Add a BPF builtin function with NAME, CODE and TYPE.  Return
+   the function decl or NULL_TREE if the builtin was not added.  */
+
+static tree
+def_builtin (const char *name, enum bpf_builtins code, tree type)
+{
+  tree t
+    = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
+
+  /* Record the decl in the bpf_builtins table, indexed by CODE.  */
+  if (t)
+    bpf_builtins[code] = t;
+
+  return t;
+}
+
+/* Define machine-specific built-in functions.  */
+
+static void
+bpf_init_builtins (void)
+{
+  /* Built-ins for calling kernel helpers.  */
+
+  /* NOTE: the short type aliases below (pt, cpt, st, ...) look
+     unused here, but they are referenced by the TYPES/VTYPES
+     expansions inside bpf-helpers.def.  */
+  tree pt = build_pointer_type (void_type_node);
+  tree const_void_type
+    = build_qualified_type (void_type_node, TYPE_QUAL_CONST);
+  tree cpt = build_pointer_type (const_void_type);
+  tree st = short_integer_type_node;
+  tree ust = uint16_type_node;
+  tree it = integer_type_node;
+  tree ut = unsigned_type_node;
+  tree const_char_type
+    = build_qualified_type (char_type_node, TYPE_QUAL_CONST);
+  tree cst = build_pointer_type (const_char_type);
+  tree vt = void_type_node;
+  tree ult = long_unsigned_type_node;
+  tree u32t = uint32_type_node;
+  tree u64t = uint64_type_node;
+  tree llt = long_long_integer_type_node;
+  tree ullt = long_long_unsigned_type_node;
+  
+#define TYPES build_function_type_list
+#define VTYPES build_varargs_function_type_list
+#define DEF_HELPER(V,D,N,T)				\
+  do							\
+    {							\
+      if (bpf_kernel >= (V))				\
+	def_builtin ("__builtin_bpf_helper_" #N,	\
+		     BPF_BUILTIN_HELPER_##D,		\
+		     T);				\
+    } while (0);
+#  include "bpf-helpers.def"
+#undef TYPES
+#undef VTYPES
+#undef DEF_HELPER
+
+  /* Built-ins for BPF_LD_ABS and BPF_LD_IND instructions.  */
+
+  def_builtin ("__builtin_bpf_load_byte", BPF_BUILTIN_LOAD_BYTE,
+	       build_function_type_list (ullt, ullt, 0));
+  def_builtin ("__builtin_bpf_load_half", BPF_BUILTIN_LOAD_HALF,
+	       build_function_type_list (ullt, ullt, 0));
+  def_builtin ("__builtin_bpf_load_word", BPF_BUILTIN_LOAD_WORD,
+	       build_function_type_list (ullt, ullt, 0));
+}
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS bpf_init_builtins
+
+/* Expand a call to a BPF-specific built-in function that was set up
+   with bpf_init_builtins.  */
+
+static rtx
+bpf_expand_builtin (tree exp, rtx target,
+		    rtx subtarget ATTRIBUTE_UNUSED,
+		    machine_mode mode ATTRIBUTE_UNUSED,
+		    int ignore)
+{
+  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+  /* CODE distinguishes kernel-helper builtins
+     (1..BPF_BUILTIN_HELPER_MAX-1) from the load builtins handled
+     below.  */
+  int code = DECL_MD_FUNCTION_CODE (fndecl);
+
+  if (code >= 1 && code < BPF_BUILTIN_HELPER_MAX)
+    {
+      /* This is a builtin to call a kernel helper function.
+
+	 For these builtins, we just expand the function call normally
+	 with expand_call like we would do for a libcall. The function
+	 bpf_output_call below will then do The Right Thing (TM),
+	 recognizing the name of the called __builtin_helper_* symbol
+	 and emitting the corresponding CALL N instruction whenever
+	 necessary.  */
+
+      return expand_call (exp, target, ignore);
+    }
+  else if (code == BPF_BUILTIN_LOAD_BYTE
+	   || code == BPF_BUILTIN_LOAD_HALF
+	   || code == BPF_BUILTIN_LOAD_WORD)
+    {
+      /* Expand an indirect load from the sk_buff in the context.
+	 There is just one argument to the builtin, which is the
+	 offset.
+
+	 We try first to expand a ldabs* instruction.  In case this
+	 fails, we try a ldind* instruction.  */
+
+      enum insn_code abs_icode
+	= (code == BPF_BUILTIN_LOAD_BYTE ? CODE_FOR_ldabsb
+	   : code == BPF_BUILTIN_LOAD_HALF ? CODE_FOR_ldabsh
+	   : CODE_FOR_ldabsw);
+
+      enum insn_code ind_icode
+	= (code == BPF_BUILTIN_LOAD_BYTE ? CODE_FOR_ldindb
+	   : code == BPF_BUILTIN_LOAD_HALF ? CODE_FOR_ldindh
+	   : CODE_FOR_ldindw);
+
+      tree offset_arg = CALL_EXPR_ARG (exp, 0);
+      struct expand_operand ops[2];
+
+      /* ops[0] is the offset; ops[1] is a zero "register" operand
+	 required by the insn patterns.  */
+      create_input_operand (&ops[0], expand_normal (offset_arg),
+			    TYPE_MODE (TREE_TYPE (offset_arg)));
+      create_input_operand (&ops[1], gen_rtx_CONST_INT (SImode, 0),
+			    SImode);
+
+      if (!maybe_expand_insn (abs_icode, 2, ops)
+	  && !maybe_expand_insn (ind_icode, 2, ops))
+	{
+	  error ("invalid argument to built-in function");
+	  return gen_rtx_REG (ops[0].mode, 0);
+	}
+
+      /* The result of the load is in R0.  */
+      return gen_rtx_REG (ops[0].mode, 0);
+    }
+
+  gcc_unreachable ();
+}
+
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN bpf_expand_builtin
+
+/* Initialize target-specific function library calls.  This is mainly
+   used to call library-provided soft-fp operations, since eBPF
+   doesn't support floating-point in "hardware".  */
+
+static void
+bpf_init_libfuncs (void)
+{
+  /* Route the float/int conversion optabs to __bpf_-prefixed soft-fp
+     routines, since eBPF has no hardware floating point.  */
+  set_conv_libfunc (sext_optab, DFmode, SFmode,
+		    "__bpf_extendsfdf2");
+  set_conv_libfunc (trunc_optab, SFmode, DFmode,
+		    "__bpf_truncdfsf2");
+  set_conv_libfunc (sfix_optab, SImode, DFmode,
+		    "__bpf_fix_truncdfsi");
+  set_conv_libfunc (sfloat_optab, DFmode, SImode,
+		    "__bpf_floatsidf");
+  set_conv_libfunc (ufloat_optab, DFmode, SImode,
+		    "__bpf_floatunsidf");
+}
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS bpf_init_libfuncs
+
+/* Define the mechanism that will be used for describing frame unwind
+   information to the debugger.  In eBPF it is not possible to unwind
+   frames.  */
+
+static enum unwind_info_type
+bpf_debug_unwind_info ()
+{
+  /* Frames cannot be unwound in eBPF, so emit no unwind info.  */
+  return UI_NONE;
+}
+
+#undef TARGET_DEBUG_UNWIND_INFO
+#define TARGET_DEBUG_UNWIND_INFO bpf_debug_unwind_info
+
+/* Always promote arguments and return values in function calls.  */
+
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
+
+/* Output assembly directives to assemble data of various sized and
+   alignments.  */
+
+#undef TARGET_ASM_BYTE_OP
+#define TARGET_ASM_BYTE_OP "\t.byte\t"
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
+
+/* Finally, build the GCC target.  */
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-bpf.h"
diff --git a/gcc/config/bpf/bpf.h b/gcc/config/bpf/bpf.h
new file mode 100644
index 00000000000..038542b5e22
--- /dev/null
+++ b/gcc/config/bpf/bpf.h
@@ -0,0 +1,565 @@ 
+/* Definition of the eBPF target for GCC.
+   Copyright (C) 2019 Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef GCC_BPF_H
+#define GCC_BPF_H
+
+/**** Controlling the Compilation Driver.  */
+
+#define ASM_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL}"
+#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL}"
+#define LIB_SPEC ""
+#define STARTFILE_SPEC ""
+
+/**** Run-time Target Specification.  */
+
+#define TARGET_CPU_CPP_BUILTINS() bpf_target_macros (pfile)
+
+/**** Storage Layout.  */
+
+/* Endianness and word size.  */
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN (!TARGET_LITTLE_ENDIAN)
+#define WORDS_BIG_ENDIAN (!TARGET_LITTLE_ENDIAN)
+#define BITS_PER_WORD 64
+#define UNITS_PER_WORD 8
+
+/* This should not be needed, because ptr_mode, Pmode and word_mode
+   are all the same width.  */
+#define POINTERS_EXTEND_UNSIGNED 1
+
+/* When storing an integer whose size is less than 64-bit in a
+   register, promote it to a DImode.  */
+#define PROMOTE_MODE(M, UNSIGNEDP, TYPE)	\
+  do						\
+    {						\
+      if (GET_MODE_CLASS (M) == MODE_INT	\
+	  && GET_MODE_SIZE (M) < 8)		\
+	M = DImode;				\
+    } while (0)
+
+/* Biggest alignment supported by the object file format of this
+   machine.  In this case this is ELF.  Use the same definition as
+   in elfos.h */
+#define MAX_OFILE_ALIGNMENT (((unsigned int) 1 << 28) * 8)
+
+/* Align argument parameters on the stack to 64-bit, at a minimum.  */
+#define PARM_BOUNDARY 64
+
+/* The hardware enforces that the stack pointer should be aligned to
+   64-bit at any time.  */
+#define STACK_BOUNDARY 64
+
+/* Function entry points are aligned to 128 bits.  */
+#define FUNCTION_BOUNDARY 128
+
+/* Maximum alignment required by data of any type.  */
+#define BIGGEST_ALIGNMENT 64
+
+/* The load and store instructions won't work if the data is not in
+   its expected alignment.  */
+#define STRICT_ALIGNMENT 1
+
+/* We use Pmode as the mode of the size increment operand in an
+   `allocate_stack' pattern.  */
+#define STACK_SIZE_MODE Pmode
+
+/**** Layout of Source Language Data Types.  */
+
+#define INT_TYPE_SIZE         32
+#define SHORT_TYPE_SIZE       16
+#define LONG_TYPE_SIZE        64
+#define LONG_LONG_TYPE_SIZE   64
+#define CHAR_TYPE_SIZE         8
+#define FLOAT_TYPE_SIZE       32
+#define DOUBLE_TYPE_SIZE      64
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+#define INTPTR_TYPE	"long int"
+#define UINTPTR_TYPE	"long unsigned int"
+#define SIZE_TYPE	"long unsigned int"
+#define PTRDIFF_TYPE "long int"
+
+#define SIG_ATOMIC_TYPE "char"
+
+#define INT8_TYPE "char"
+#define INT16_TYPE "short int"
+#define INT32_TYPE "int"
+#define INT64_TYPE "long int"
+#define UINT8_TYPE "unsigned char"
+#define UINT16_TYPE "short unsigned int"
+#define UINT32_TYPE "unsigned int"
+#define UINT64_TYPE "long unsigned int"
+
+#define INT_LEAST8_TYPE INT8_TYPE
+#define INT_LEAST16_TYPE INT16_TYPE
+#define INT_LEAST32_TYPE INT32_TYPE
+#define INT_LEAST64_TYPE INT64_TYPE
+#define UINT_LEAST8_TYPE UINT8_TYPE
+#define UINT_LEAST16_TYPE UINT16_TYPE
+#define UINT_LEAST32_TYPE UINT32_TYPE
+#define UINT_LEAST64_TYPE UINT64_TYPE
+
+#define INT_FAST8_TYPE INT8_TYPE
+#define INT_FAST16_TYPE INT16_TYPE
+#define INT_FAST32_TYPE INT32_TYPE
+#define INT_FAST64_TYPE INT64_TYPE
+#define UINT_FAST8_TYPE UINT8_TYPE
+#define UINT_FAST16_TYPE UINT16_TYPE
+#define UINT_FAST32_TYPE UINT32_TYPE
+#define UINT_FAST64_TYPE UINT64_TYPE
+
+/* `char' is signed by default, like in x86.  */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* The widest floating-point format supported by the hardware is
+   64-bit.  */
+#define WIDEST_HARDWARE_FP_SIZE 64
+
+/* `wchar_t' is a signed 32-bit type.  The second constant is used by
+   cpp, which can't use WCHAR_TYPE.  */
+#define WCHAR_TYPE "int"
+#define WCHAR_TYPE_SIZE 32
+
+/* `wint_t' is a signed 32-bit type.  */
+#define WINT_TYPE "int"
+#define WINT_TYPE_SIZE 32
+
+/**** Register Usage.  */
+
+/*** Basic Characteristics of Registers.  */
+
+#define BPF_R0	0
+#define BPF_R1	1
+#define BPF_R2	2
+#define BPF_R3	3
+#define BPF_R4	4
+#define BPF_R5	5
+#define BPF_R6	6
+#define BPF_CTX BPF_R6
+#define BPF_R7	7
+#define BPF_R8	8
+#define BPF_R9	9
+#define BPF_SP BPF_R9
+#define BPF_R10	10
+#define BPF_FP  BPF_R10
+/* 11 is not a real eBPF hard register and is eliminated or not used
+   in the final assembler.  See below.  */
+
+#define FIRST_PSEUDO_REGISTER 12
+
+/* The registers %r0..%r8 are available for general allocation.  */
+#define FIXED_REGISTERS				\
+  {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1}
+
+/* %r0..%r5 are clobbered by function calls.  */
+#define CALL_USED_REGISTERS				\
+  {1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1}
+
+/*** Order of Allocation of Registers.  */
+
+/* We generally want to put call-clobbered registers ahead of
+   call-saved ones.  (IRA expects this.)  */
+#define REG_ALLOC_ORDER					\
+  {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
+
+/**** Register Classes.  */
+
+enum reg_class
+{
+  NO_REGS,		/* no registers in set.  */
+  GR_REGS,		/* general-purpose integer registers.  */
+  ALL_REGS,		/* all registers.  */
+  LIM_REG_CLASSES	/* max value + 1.  */
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+#define GENERAL_REGS GR_REGS
+
+/* An initializer containing the names of the register classes as C
+   string constants.  These names are used in writing some of the
+   debugging dumps.  */
+#define REG_CLASS_NAMES				\
+{						\
+  "NO_REGS",					\
+  "GR_REGS",					\
+  "ALL_REGS"					\
+}
+
+/* An initializer containing the contents of the register classes, as
+   integers which are bit masks.  The Nth integer specifies the
+   contents of class N.  The way the integer MASK is interpreted is
+   that register R is in the class if `MASK & (1 << R)' is 1.  */
+#define REG_CLASS_CONTENTS			\
+{						\
+   0x00000000, /* NO_REGS */			\
+   0x000007ff, /* GR_REGS */			\
+   0x000007ff, /* ALL_REGS */		        \
+}
+
+/* A C expression whose value is a register class containing hard
+   register REGNO.  In general there is more than one such class;
+   choose a class which is "minimal", meaning that no smaller class
+   also contains the register.  */
+#define REGNO_REG_CLASS(REGNO) ((REGNO) < 11 ? GR_REGS : ALL_REGS)
+
+/* A macro whose definition is the name of the class to which a
+   valid base register must belong.  A base register is one used in
+   an address which is the register value plus a displacement.  */
+#define BASE_REG_CLASS GR_REGS
+
+/* A macro whose definition is the name of the class to which a
+   valid index register must belong.  An index register is one used
+   in an address where its value is either multiplied by a scale
+   factor or added to another register (as well as added to a
+   displacement).  */
+#define INDEX_REG_CLASS GR_REGS
+
+/* C expression which is nonzero if register number REGNO is suitable
+   for use as a base register in operand addresses.  In eBPF every
+   hard register can be used for this purpose.  */
+#define REGNO_OK_FOR_BASE_P(REGNO) 			\
+  ((REGNO) < FIRST_PSEUDO_REGISTER			\
+   || (unsigned)reg_renumber[REGNO] < FIRST_PSEUDO_REGISTER)
+
+/* C expression which is nonzero if register number REGNO is suitable
+   for use as an index register in operand addresses.  */
+#define REGNO_OK_FOR_INDEX_P(REGNO)		\
+  REGNO_OK_FOR_BASE_P(REGNO)
+
+/* It is safe to return CLASS here.  No more restrictive class is
+   needed.  */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS
+
+/* Maximum number of consecutive registers of class CLASS needed to
+   hold a value of mode MODE.  */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+  (((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+
+/**** Debugging Info ****/
+
+/* We cannot support DWARF2 because of the limitations of eBPF.  */
+#define DBX_DEBUGGING_INFO
+
+/**** Stack Layout and Calling Conventions.  */
+
+/*** Basic Stack Layout.  */
+
+#define STACK_GROWS_DOWNWARD 1
+#define FRAME_GROWS_DOWNWARD 1
+
+/* The argument pointer always points to the first argument.  */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Unsupported.  */
+#define RETURN_ADDR_RTX(count, frame) const0_rtx
+
+/*** Registers That Address the Stack Frame.  */
+
+#define FRAME_POINTER_REGNUM 10
+#define STACK_POINTER_REGNUM 9
+#define ARG_POINTER_REGNUM 11
+#define STATIC_CHAIN_REGNUM 8
+
+/*** Registers elimination.  */
+
+#define ELIMINABLE_REGS					\
+  {{ ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM },	\
+   { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }}
+
+/* Define the offset between two registers, one to be eliminated, and
+   the other its replacement, at the start of a routine.  */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET)		\
+  do								\
+    {								\
+      (OFFSET) = bpf_initial_elimination_offset ((FROM), (TO));	\
+    } while (0)
+
+/*** Passing Function Arguments on the Stack.  */
+
+/* The eBPF ABI doesn't support passing arguments on the stack.  Only
+   in the first five registers.  Code in bpf.c assures the stack is
+   never used when passing arguments.  However, we still have to
+   define the constants below.  */
+
+/* If nonzero, push insns will be used to pass outgoing arguments.  */
+#define PUSH_ARGS 0
+
+/* If nonzero, function arguments will be evaluated from last to
+   first, rather than from first to last.  */
+#define PUSH_ARGS_REVERSED 1
+
+/* Allocate stack space for arguments at the beginning of each
+   function.  */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/*** Passing Arguments in Registers.  */
+
+/* Use an integer in order to keep track of the number of arguments
+   passed to a function in integer registers, up to
+   MAX_ARGS_IN_REGISTERS.  */
+#define CUMULATIVE_ARGS int
+
+/* INIT_CUMULATIVE_ARGS initializes a variable CUM of type
+   CUMULATIVE_ARGS for a call to a function whose data type is FNTYPE.
+   For a library call, FNTYPE is 0.  */
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,FNDECL,N_NAMED_ARGS) \
+  memset (&(CUM), 0, sizeof (CUM))
+
+/* Nonzero if N is the number of a hard register in which function
+   arguments are sometimes passed.  */
+#define FUNCTION_ARG_REGNO_P(N) ((N) >= 1 && (N) <= 5)
+
+/*** How Scalar Function Values are Returned.  */
+
+/* Define how to find the value returned by a library function
+   assuming the value has mode MODE.  This is always %r0 for eBPF.  */
+#define LIBCALL_VALUE(MODE)  \
+  gen_rtx_REG ((MODE), 0)
+
+/*** Generating Code for Profiling.  */
+
+/* We do not support profiling yet, so do not call `mcount'.  */
+#define FUNCTION_PROFILER(FILE, LABELNO) do { } while (0)
+
+/*** Function Entry and Exit.  */
+
+/* We do not require an accurate stack pointer at function return.
+   This is because the stack pointer's original value is initialized
+   from the frame pointer, rather than decreased, to satisfy the
+   kernel's verifier.  Thus, we have to save the stack pointer in
+   function prologue and restore it in function epilogue.  If
+   EXIT_IGNORE_STACK is not set, then superfluous instructions are
+   generated to save and restore the stack pointer after and before
+   the function epilogue, respectively.  */
+#define EXIT_IGNORE_STACK 1
+
+/**** Support for Nested Functions.  */
+
+/* We have to define TRAMPOLINE_SIZE even if we don't ever generate
+   them.  Set to 64 arbitrarily.  */
+#define TRAMPOLINE_SIZE 64
+
+/**** Addressing Modes.  */
+
+/* Maximum number of registers that can appear in a valid memory
+   address.  */
+#define MAX_REGS_PER_ADDRESS 1
+
+/* 1 if X is an rtx for a constant that is a valid address.  */
+
+#define CONSTANT_ADDRESS_P(X)   \
+  (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF		\
+   || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST)
+
+/**** Describing Relative Costs of Operations.  */
+
+/* Cost of moving data of mode MODE from a register in class FROM to a
+   register in class TO.  Note that 2 is the default.  */
+#define REGISTER_MOVE_COST(MODE,FROM,TO) 2
+
+/* Cost of moving data of mode MODE between a register of class CLASS
+   and memory. IN is zero if the value is to be written to memory,
+   nonzero if it is to be read in.  */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 4
+
+/* Cost of a branch instruction.  A value of 1 is the default.  */
+#define BRANCH_COST(SPEED_P,PREDICTABLE_P) 1
+
+/* The SPARC port says: Nonzero if access to memory by bytes is slow
+   and undesirable.  For RISC chips, it means that access to memory by
+   bytes is no better than access by words when possible, so grab a
+   whole word and maybe make use of that.  */
+#define SLOW_BYTE_ACCESS 1
+
+/* Threshold of number of scalar memory-to-memory move instructions,
+   _below_ which a sequence of insns should be generated instead of a
+   string move insn or a library call.  */
+#define MOVE_RATIO(speed) 128
+
+/* Threshold of number of scalar move instructions, _below_ which a
+   sequence of insns should be generated to clear memory instead of a
+   string clear insn or a library call.  */
+#define CLEAR_RATIO(speed) 128
+
+/* Threshold of number of scalar move instructions, _below_ which a
+   sequence of insns should be generated to set memory to a constant
+   value, instead of a block set insn or a library call.  */
+#define SET_RATIO(speed) 128
+
+/* True if it is as good or better to call a constant function address
+   than to call an address kept in a register.  */
+#define NO_FUNCTION_CSE 1
+
+/**** Dividing the Output into Sections.  */
+
+#define TEXT_SECTION_ASM_OP "\t.text"
+#define DATA_SECTION_ASM_OP "\t.data"
+#define BSS_SECTION_ASM_OP "\t.bss"
+#define COMMON_ASM_OP "\t.common\t"
+
+/**** Defining the Output Assembler Language.  */
+
+/*** The Overall Framework of an Assembler File.  */
+
+#define ASM_COMMENT_START ";"
+
+/* Output to assembler file text saying following lines
+   may contain character constants, extra white space, comments, etc.  */
+
+#ifndef ASM_APP_ON
+#define ASM_APP_ON " #APP\n"
+#endif
+
+/* Output to assembler file text saying following lines
+   no longer contain unusual constructs.  */
+
+#ifndef ASM_APP_OFF
+#define ASM_APP_OFF " #NO_APP\n"
+#endif
+
+/*** Output of Data.  */
+
+/*** Output of Uninitialized Variables.  */
+
+/* How to output an assembler line to define a local common
+   symbol.  */
+
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN)		\
+  do									\
+    {									\
+      fprintf ((FILE), "%s", COMMON_ASM_OP);				\
+      assemble_name ((FILE), (NAME));					\
+      fprintf ((FILE), ",%u,%u\n", (int)(SIZE), (ALIGN) / (BITS_PER_UNIT)); \
+    }									\
+  while (0)
+
+/* A C statement (sans semicolon) to output to the stdio stream
+   FILE the assembler definition of uninitialized global DECL named
+   NAME whose size is SIZE bytes and alignment is ALIGN bytes.
+   Try to use asm_output_aligned_bss to implement this macro.  */
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN)	\
+  do {								\
+    ASM_OUTPUT_ALIGNED_LOCAL (FILE, NAME, SIZE, ALIGN);		\
+  } while (0)
+
+/* This says how to output an assembler line to define a local common
+   symbol.  */
+
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE,NAME,SIZE,ALIGN)			\
+  ( fputs ("\t.lcomm ", (FILE)),					\
+    assemble_name ((FILE), (NAME)),					\
+    fprintf ((FILE), "," HOST_WIDE_INT_PRINT_UNSIGNED "\n",		\
+	     (SIZE), ((ALIGN) / BITS_PER_UNIT)))
+
+/*** Output and Generation of Labels.  */
+
+/* Globalizing directive for a label.  */
+#define GLOBAL_ASM_OP "\t.global\t"
+
+/* This is how to store into the string LABEL
+   the symbol_ref name of an internal numbered label where
+   PREFIX is the class of label and NUM is the number within the class.
+   This is suitable for output with `assemble_name'.  */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM)			\
+  sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
+
+/*** Macros Controlling Initialization Routines.  */
+
+#define INIT_SECTION_ASM_OP "\t.init"
+#define FINI_SECTION_ASM_OP "\t.fini"
+
+/*** Output of Assembler Instructions.  */
+
+#define REGISTER_NAMES						\
+  { "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",	\
+    "%r8", "%r9", "%fp", "__arg__" }
+
+#define ADDITIONAL_REGISTER_NAMES		\
+  { { "%a", 0 }, { "%ctx", 6 }, { "%r10" , 10 } }
+
+#define LOCAL_LABEL_PREFIX "."
+#define USER_LABEL_PREFIX  ""
+
+#define PRINT_OPERAND(STREAM,X,CODE)		\
+  bpf_print_operand ((STREAM),(X),(CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X)		\
+  bpf_print_operand_address ((STREAM), (X))
+
+/*** Assembler Commands for Alignment.  */
+
+/* This is how to output an assembler line that says to advance the
+   location counter to a multiple of 2**LOG bytes.  */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG)		\
+  fprintf (STREAM, "\t.align\t%d\n", (LOG))
+
+/* This is how to output an assembler line
+   that says to advance the location counter by SIZE bytes.  */
+#define ASM_OUTPUT_SKIP(FILE,SIZE)		\
+  fprintf (FILE, "\t.skip\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n", (SIZE))
+
+/**** Miscellaneous Parameters.  */
+
+/* Specify the machine mode that this machine uses for the index in
+   the tablejump instruction.  */
+#define CASE_VECTOR_MODE DImode
+
+/* Define if operations between registers with integral mode smaller
+   than a word are always performed on the entire register.  */
+#define WORD_REGISTER_OPERATIONS 1
+
+/* C expression indicating when insns that read memory in MEM_MODE, an
+   integral mode narrower than a word, set the bits outsize of
+   MEM_MODE to be either the sign-extension or the zero-extension of
+   the data read.  */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* The maximum number of bytes that a single instruction can move
+   quickly between memory and registers or between two memory
+   locations.  */
+#define MOVE_MAX 8
+
+/* An alias for the machine mode for pointers.  */
+#define Pmode DImode
+
+/* An alias for the machine mode used for memory references to
+   functions being called, in 'call' RTL expressions.  */
+#define FUNCTION_MODE Pmode
+
+/* No libm on eBPF (for now.)  */
+#define MATH_LIBRARY ""
+
+/**** libgcc settings.  */
+
+/* Iterating over the global constructors and destructors and
+   executing them requires the ability of doing indirect calls.
+
+   eBPF doesn't support indirect calls, so no chance of supporting
+   constructors and destructors.  */
+#define DO_GLOBAL_CTORS_BODY			\
+  do { } while (0)
+#define DO_GLOBAL_DTORS_BODY			\
+  do { } while (0)
+
+#endif /* ! GCC_BPF_H */
diff --git a/gcc/config/bpf/bpf.md b/gcc/config/bpf/bpf.md
new file mode 100644
index 00000000000..6954a438c20
--- /dev/null
+++ b/gcc/config/bpf/bpf.md
@@ -0,0 +1,528 @@ 
+;; Machine description for eBPF.
+;; Copyright (C) 2019 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+(include "predicates.md")
+(include "constraints.md")
+
+;;;; Unspecs
+
+(define_c_enum "unspec" [
+  UNSPEC_LDINDABS
+  UNSPEC_XADD
+])
+
+;;;; Constants
+
+;; Hard register numbers.  NOTE(review): presumably %r10 is the eBPF
+;; frame pointer and R11 an extra compiler-internal register -- confirm
+;; against REGISTER_NAMES in bpf.h.
+(define_constants
+  [(R0_REGNUM		0)
+   (R1_REGNUM		1)
+   (R2_REGNUM		2)
+   (R3_REGNUM		3)
+   (R4_REGNUM		4)
+   (R5_REGNUM		5)
+   (R6_REGNUM		6)
+   (R7_REGNUM		7)
+   (R8_REGNUM		8)
+   (R9_REGNUM		9)
+   (R10_REGNUM		10)
+   (R11_REGNUM		11)
+])
+
+;;;; Attributes
+
+;; Instruction classes.
+;; alu		64-bit arithmetic.
+;; alu32	32-bit arithmetic.
+;; end		endianness conversion instructions.
+;; ld		load instructions.
+;; lddw		load 64-bit immediate instruction.
+;; ldx		generic load instructions.
+;; st		generic store instructions for immediates.
+;; stx		generic store instructions.
+;; jmp		jump instructions.
+;; xadd		atomic exchange-and-add instructions.
+;; multi	multiword sequence (or user asm statements).
+
+(define_attr "type"
+  "unknown,alu,alu32,end,ld,lddw,ldx,st,stx,jmp,xadd,multi"
+  (const_string "unknown"))
+
+;; Length of instruction in bytes.
+;; lddw is the only 16-byte (two words) instruction; every other
+;; insn defaults to 8 bytes.
+(define_attr "length" ""
+  (cond [
+         (eq_attr "type" "lddw") (const_int 16)
+         ] (const_int 8)))
+
+;; Describe a user's asm statement.
+(define_asm_attributes
+  [(set_attr "type" "multi")])
+
+;;;; Mode attributes and iterators
+
+;; <mop> maps a machine mode to the width suffix used in load/store
+;; mnemonics (b, h, w, dw).
+(define_mode_attr mop [(QI "b") (HI "h") (SI "w") (DI "dw")
+                       (SF "w") (DF "dw")])
+;; <mtype> and <msuffix> select the 32-bit (alu32) variants of the
+;; arithmetic instructions for SImode operations.
+(define_mode_attr mtype [(SI "alu32") (DI "alu")])
+(define_mode_attr msuffix [(SI "32") (DI "")])
+
+;;;; NOPs
+
+;; eBPF has no dedicated nop; a move of %r0 to itself serves as one.
+;; (The doubled %% in the template emits a literal `%' character.)
+(define_insn "nop"
+  [(const_int 0)]
+  ""
+  "mov\t%%r0,%%r0"
+  [(set_attr "type" "alu")])
+
+;;;; Arithmetic/Logical
+
+;; The arithmetic and logic operations below are defined for SI and DI
+;; modes.  The mode iterator AM is used in order to expand to two
+;; insns, with the proper modes.
+;;
+;; 32-bit arithmetic (for SI modes) is implemented using the alu32
+;; instructions.
+
+(define_mode_iterator AM [SI DI])
+
+;;; Addition
+;; Two alternatives: register/register, and register/32-bit signed
+;; immediate.  (The insn condition was the literal "1"; an
+;; always-enabled insn conventionally uses the empty string.)
+(define_insn "add<AM:mode>3"
+  [(set (match_operand:AM          0 "register_operand"   "=r,r")
+        (plus:AM (match_operand:AM 1 "register_operand"   " 0,0")
+                 (match_operand:AM 2 "reg_or_imm_operand" " r,I")))]
+  ""
+  "add<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;;; Subtraction
+;; BUG FIX: the RTL template used plus:AM (copy-pasted from the
+;; addition pattern), so RTL additions could be emitted as "sub"
+;; instructions.  Subtraction must use minus:AM.  Also drop the
+;; literal "1" insn condition in favor of the conventional "".
+(define_insn "sub<AM:mode>3"
+  [(set (match_operand:AM           0 "register_operand"   "=r,r")
+        (minus:AM (match_operand:AM 1 "register_operand"   " 0,0")
+                  (match_operand:AM 2 "reg_or_imm_operand" " r,I")))]
+  ""
+  "sub<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;;; Negation
+(define_insn "neg<AM:mode>2"
+  [(set (match_operand:AM 0 "register_operand" "=r")
+        (neg:AM (match_operand:AM 1 "register_operand" " 0")))]
+  ""
+  "neg<msuffix>\t%0"
+  [(set_attr "type" "<mtype>")])
+
+;;; Multiplication
+(define_insn "mul<AM:mode>3"
+  [(set (match_operand:AM          0 "register_operand"   "=r,r")
+        (mult:AM (match_operand:AM 1 "register_operand"   " 0,0")
+                 (match_operand:AM 2 "reg_or_imm_operand" " r,I")))]
+  ""
+  "mul<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;; Combine pattern: a 32-bit multiply whose result is sign-extended
+;; to 64 bits can be done with a single mul32 instruction.
+(define_insn "*mulsi3_extended"
+  [(set (match_operand:DI	   0 "register_operand" "=r,r")
+        (sign_extend:DI
+         (mult:SI (match_operand:SI 1 "register_operand" "0,0")
+                  (match_operand:SI 2 "reg_or_imm_operand" "r,I"))))]
+  ""
+  "mul32\t%0,%2"
+  [(set_attr "type" "alu32")])
+
+;; Division
+;; NOTE(review): the eBPF div instruction performs unsigned division;
+;; confirm that mapping the signed RTL div code to it is correct for
+;; negative operands.
+(define_insn "div<AM:mode>3"
+  [(set (match_operand:AM 0 "register_operand" "=r,r")
+        (div:AM (match_operand:AM 1 "register_operand" " 0,0")
+                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
+  ""
+  "div<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;; Unsigned division.
+;; BUG FIX: this pattern used the signed div RTL code, making it an
+;; exact duplicate of div<AM:mode>3 and leaving udiv unimplemented.
+;; The RTL code for an unsigned division is udiv.
+(define_insn "udiv<AM:mode>3"
+  [(set (match_operand:AM 0 "register_operand" "=r,r")
+        (udiv:AM (match_operand:AM 1 "register_operand" " 0,0")
+                 (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
+  ""
+  "div<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;;; Modulus
+;; NOTE(review): the eBPF mod instruction is unsigned; confirm that
+;; mapping the signed RTL mod code to it is correct for negative
+;; operands.
+(define_insn "mod<AM:mode>3"
+  [(set (match_operand:AM 0 "register_operand" "=r,r")
+        (mod:AM (match_operand:AM 1 "register_operand" " 0,0")
+                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
+  ""
+  "mod<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;; Unsigned modulus.
+;; BUG FIX: this pattern used the signed mod RTL code, making it an
+;; exact duplicate of mod<AM:mode>3 and leaving umod unimplemented.
+;; The RTL code for an unsigned modulus is umod.
+(define_insn "umod<AM:mode>3"
+  [(set (match_operand:AM 0 "register_operand" "=r,r")
+        (umod:AM (match_operand:AM 1 "register_operand" " 0,0")
+                 (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
+  ""
+  "mod<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;;; Logical AND
+(define_insn "and<AM:mode>3"
+  [(set (match_operand:AM 0 "register_operand" "=r,r")
+        (and:AM (match_operand:AM 1 "register_operand" " 0,0")
+                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
+  ""
+  "and<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;;; Logical inclusive-OR
+;; Note the pattern name uses the RTL code `ior' while the eBPF
+;; mnemonic is `or'.
+(define_insn "ior<AM:mode>3"
+  [(set (match_operand:AM 0 "register_operand" "=r,r")
+        (ior:AM (match_operand:AM 1 "register_operand" " 0,0")
+                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
+  ""
+  "or<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;;; Logical exclusive-OR
+(define_insn "xor<AM:mode>3"
+  [(set (match_operand:AM 0 "register_operand" "=r,r")
+        (xor:AM (match_operand:AM 1 "register_operand" " 0,0")
+                (match_operand:AM 2 "reg_or_imm_operand" "r,I")))]
+  ""
+  "xor<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;;;; Conversions
+
+;;; Zero-extensions
+
+;; For register operands smaller than 32-bit zero-extending is
+;; achieved ANDing the value in the source register to a suitable
+;; mask.
+;;
+;; For register operands bigger or equal than 32-bit, we generate a
+;; sequence of two shift instructions to zero the high 32-bits of the
+;; destination register.
+;;
+;; For memory operands, of any width, zero-extending is achieved using
+;; the ldx{bhwdw} instructions to load the values in registers.
+
+(define_insn "zero_extendhidi2"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+	(zero_extend:DI (match_operand:HI 1 "reg_or_indirect_memory_operand" "r,m")))]
+  ""
+  "@
+   and\t%0,0xffff
+   ldxh\t%0,%1"
+  [(set_attr "type" "alu,ldx")])
+
+(define_insn "zero_extendqidi2"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+	(zero_extend:DI (match_operand:QI 1 "reg_or_indirect_memory_operand" "r,m")))]
+  ""
+  "@
+   and\t%0,0xff
+   ldxb\t%0,%1"
+  [(set_attr "type" "alu,ldx")])
+
+;; For a register source, expand to an explicit shift-left/shift-right
+;; pair.  For a memory source the expander falls through, emitting its
+;; own SET pattern, which is then matched by *zero_extendsidi2 below
+;; (the ldxw alternative).
+(define_expand "zero_extendsidi2"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+	(zero_extend:DI (match_operand:SI 1 "reg_or_indirect_memory_operand" "r,m")))]
+  ""
+{
+  if (register_operand (operands[1], SImode))
+    {
+      operands[1] = gen_lowpart (DImode, operands[1]);
+      emit_insn (gen_ashldi3 (operands[0], operands[1], GEN_INT (32)));
+      emit_insn (gen_lshrdi3 (operands[0], operands[0], GEN_INT (32)));
+      DONE;
+    }
+})
+
+(define_insn "*zero_extendsidi2"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+	(zero_extend:DI (match_operand:SI 1 "reg_or_indirect_memory_operand" "0,m")))]
+  ""
+  "@
+   lsh\t%0,32\n\trsh\t%0,32
+   ldxw\t%0,%1"
+  [(set_attr "type" "alu,ldx")
+   (set_attr "length" "16,8")])
+
+;;; Sign-extension
+
+;; Sign-extending a 32-bit value into a 64-bit value is achieved using
+;; shifting, with instructions generated by the expand below.
+
+(define_expand "extendsidi2"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(sign_extend:DI (match_operand:SI 1 "register_operand" "r")))]
+  ""
+{
+  operands[1] = gen_lowpart (DImode, operands[1]);
+  emit_insn (gen_ashldi3 (operands[0], operands[1], GEN_INT (32)));
+  emit_insn (gen_ashrdi3 (operands[0], operands[0], GEN_INT (32)))
;
+  DONE;
+})
+
+;; Shift left then arithmetic-shift right: two 8-byte insns, hence
+;; length 16.
+(define_insn "*extendsidi2"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(sign_extend:DI (match_operand:SI 1 "register_operand" "0")))]
+  ""
+  "lsh\t%0,32\n\tarsh\t%0,32"
+  [(set_attr "type" "alu")
+   (set_attr "length" "16")])
+
+;;;; Data movement
+
+(define_mode_iterator AMM [QI HI SI DI SF DF])
+
+;; Move expander.  Forces one operand into a register when neither is
+;; a register (eBPF has no memory-to-memory moves), and rewrites
+;; constant-address memory operands into a load-address-then-access
+;; sequence, since eBPF loads/stores only take register+offset
+;; addresses.
+(define_expand "mov<AMM:mode>"
+  [(set (match_operand:AMM 0 "general_operand" "")
+        (match_operand:AMM 1 "general_operand" ""))]
+        ""
+        "
+{
+    if (!register_operand(operands[0], <AMM:MODE>mode)
+        && !register_operand(operands[1], <AMM:MODE>mode))
+         operands[1] = force_reg (<AMM:MODE>mode, operands[1]); 
+
+    /* In cases where the moved entity is a constant address, we
+       need to emit an extra mov and modify the second operand to
+       obtain something like:
+
+         lddw %T, %1
+         ldxw %0, [%T+0]
+
+       Ditto for stores.  */
+
+    if (MEM_P (operands[1])
+        && CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+      {
+         rtx tmp = gen_reg_rtx (DImode);
+
+         emit_move_insn (tmp, XEXP (operands[1], 0));
+         operands[1] = gen_rtx_MEM (<AMM:MODE>mode, tmp);
+      }
+
+    if (MEM_P (operands[0])
+        && CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+      {
+         rtx tmp = gen_reg_rtx (DImode);
+  
+         emit_move_insn (tmp, XEXP (operands[0], 0));
+         operands[0] = gen_rtx_MEM (<AMM:MODE>mode, tmp);
+      }
+
+}")
+
+(define_mode_iterator MM [QI HI SI DI SF DF])
+
+;; Move insn alternatives, in order: load from memory (ldx), move
+;; register or 32-bit immediate (mov), load 64-bit immediate (lddw),
+;; store register (stx), store immediate (st).
+(define_insn "*mov<MM:mode>"
+  [(set (match_operand:MM 0 "mov_dst_operand" "=r, r,r,m,m")
+        (match_operand:MM 1 "mov_src_operand" " m,rI,B,r,I"))]
+  ""
+  "@
+   ldx<mop>\t%0,%1
+   mov\t%0,%1
+   lddw\t%0,%1
+   stx<mop>\t%0,%1
+   st<mop>\t%0,%1"
+[(set_attr "type" "ldx,alu,alu,stx,st")])
+
+;;;; Shifts
+
+;; NOTE(review): SIM is identical to the AM iterator above ([SI DI]);
+;; the two could be unified.
+(define_mode_iterator SIM [SI DI])
+
+;; Arithmetic shift right.
+(define_insn "ashr<SIM:mode>3"
+  [(set (match_operand:SIM 0 "register_operand"                 "=r,r")
+        (ashiftrt:SIM (match_operand:SIM 1 "register_operand"   " 0,0")
+                      (match_operand:SIM 2 "reg_or_imm_operand" " r,I")))]
+  ""
+  "arsh<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;; Shift left.
+(define_insn "ashl<SIM:mode>3"
+  [(set (match_operand:SIM 0 "register_operand"               "=r,r")
+        (ashift:SIM (match_operand:SIM 1 "register_operand"   " 0,0")
+                    (match_operand:SIM 2 "reg_or_imm_operand" " r,I")))]
+  ""
+  "lsh<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;; Logical shift right.
+(define_insn "lshr<SIM:mode>3"
+  [(set (match_operand:SIM 0 "register_operand"                 "=r,r")
+        (lshiftrt:SIM (match_operand:SIM 1 "register_operand"   " 0,0")
+                      (match_operand:SIM 2 "reg_or_imm_operand" " r,I")))]
+  ""
+  "rsh<msuffix>\t%0,%2"
+  [(set_attr "type" "<mtype>")])
+
+;;;; Conditional branches
+
+;; The eBPF jump instructions use 64-bit arithmetic when evaluating
+;; the jump conditions.  Therefore we use DI modes below.
+
+;; FAIL on unordered (floating-point style) comparison codes, which
+;; have no corresponding eBPF jump instruction.
+(define_expand "cbranchdi4"
+  [(set (pc)
+	(if_then_else (match_operator 0 "comparison_operator"
+			[(match_operand:DI 1 "register_operand" "r")
+			 (match_operand:DI 2 "reg_or_imm_operand" "rI")])
+		      (label_ref (match_operand 3 "" ""))
+		      (pc)))]
+  ""
+{
+    if (!ordered_comparison_operator (operands[0], VOIDmode))
+      FAIL;
+})
+
+;; Conditional branch on a 64-bit comparison.  The output routine
+;; selects the eBPF jump mnemonic from the RTL comparison code:
+;; signed comparisons map to the js* forms, unsigned ones to the
+;; plain j* forms.
+;;
+;; Cleanups: removed the unreachable `break' statements that followed
+;; each `return', and replaced the call to error () -- a user-facing
+;; diagnostic -- with gcc_unreachable (), since the
+;; ordered_comparison_operator predicate already guarantees one of the
+;; handled codes; anything else is an internal compiler bug.
+(define_insn "*branch_on_di"
+  [(set (pc)
+	(if_then_else (match_operator 3 "ordered_comparison_operator"
+			 [(match_operand:DI 0 "register_operand" "r")
+			  (match_operand:DI 1 "reg_or_imm_operand" "rI")])
+		      (label_ref (match_operand 2 "" ""))
+		      (pc)))]
+  ""
+{
+  switch (GET_CODE (operands[3]))
+    {
+    case EQ:  return "jeq\t%0,%1,%2";
+    case NE:  return "jne\t%0,%1,%2";
+    case LT:  return "jslt\t%0,%1,%2";
+    case LE:  return "jsle\t%0,%1,%2";
+    case GT:  return "jsgt\t%0,%1,%2";
+    case GE:  return "jsge\t%0,%1,%2";
+    case LTU: return "jlt\t%0,%1,%2";
+    case LEU: return "jle\t%0,%1,%2";
+    case GTU: return "jgt\t%0,%1,%2";
+    case GEU: return "jge\t%0,%1,%2";
+    default:
+      gcc_unreachable ();
+    }
+}
+  [(set_attr "type" "jmp")])
+
+;;;; Unconditional branches
+
+(define_insn "jump"
+  [(set (pc)
+        (label_ref (match_operand 0 "" "")))]
+  ""
+  "ja\t%0"
+[(set_attr "type" "jmp")])
+
+;;;; Function prologue/epilogue
+
+(define_insn "exit"
+  [(simple_return)]
+  ""
+  "exit"
+  [(set_attr "type" "jmp")])
+
+(define_expand "prologue"
+  [(const_int 0)]
+  ""
+{
+  bpf_expand_prologue ();
+  DONE;
+})
+
+(define_expand "epilogue"
+  [(const_int 0)]
+  ""
+{
+  bpf_expand_epilogue ();
+  DONE;
+})
+
+;;;; Function calls
+
+;; The assembly output for both call patterns is produced by
+;; bpf_output_call in bpf.c.
+(define_insn "call"
+  [(call (match_operand:DI 0 "call_operand" "m")
+	 (match_operand:SI 1 "general_operand" ""))]
+  ;; operands[2] is next_arg_register
+  ;; operands[3] is struct_value_size_rtx.
+  ""
+  { return bpf_output_call (operands[0]); }
+  [(set_attr "type" "jmp")])
+
+(define_insn "call_value"
+  [(set (match_operand 0 "register_operand" "")
+	(call (match_operand:DI 1 "call_operand" "m")
+	      (match_operand:SI 2 "general_operand" "")))]
+  ;; operands[3] is next_arg_register
+  ;; operands[4] is struct_value_size_rtx.
+  ""
+  { return bpf_output_call (operands[1]); }
+  [(set_attr "type" "jmp")])
+
+;; NOTE(review): this sibcall pattern only matches a label_ref target
+;; and emits a plain jump -- presumably intended for local sibling
+;; calls only; confirm against TARGET_FUNCTION_OK_FOR_SIBCALL in bpf.c.
+(define_insn "sibcall"
+  [(call (label_ref (match_operand 0 "" ""))
+	 (match_operand:SI 1 "general_operand" ""))]
+  ;; operands[2] is next_arg_register
+  ;; operands[3] is struct_value_size_rtx.
+  ""
+  "ja\t%0"
+  [(set_attr "type" "jmp")])
+
+;;;; Non-generic load instructions
+
+;; ldind/ldabs load packet data into %r0 and clobber %r1-%r4, per the
+;; kernel's calling convention for these legacy socket-filter insns.
+(define_mode_iterator LDM [QI HI SI DI])
+(define_mode_attr ldop [(QI "b") (HI "h") (SI "w") (DI "dw")])
+
+(define_insn "ldind<ldop>"
+  [(set (reg:LDM R0_REGNUM)
+        (unspec:LDM [(match_operand:DI 0 "register_operand" "r")
+                    (match_operand:SI 1 "imm32_operand" "I")]
+                    UNSPEC_LDINDABS))
+   (clobber (reg:DI R1_REGNUM))
+   (clobber (reg:DI R2_REGNUM))
+   (clobber (reg:DI R3_REGNUM))
+   (clobber (reg:DI R4_REGNUM))]
+  ""
+  "ldind<ldop>\t%0,%1"
+  [(set_attr "type" "ld")])
+
+;; NOTE(review): the template only prints operand 0; operand 1 is
+;; matched but never output -- confirm this is intentional.
+(define_insn "ldabs<ldop>"
+  [(set (reg:LDM R0_REGNUM)
+        (unspec:LDM [(match_operand:SI 0 "imm32_operand" "I")
+                    (match_operand:SI 1 "imm32_operand" "I")]
+                    UNSPEC_LDINDABS))
+   (clobber (reg:DI R1_REGNUM))
+   (clobber (reg:DI R2_REGNUM))
+   (clobber (reg:DI R3_REGNUM))
+   (clobber (reg:DI R4_REGNUM))]
+  ""
+  "ldabs<ldop>\t%0"
+  [(set_attr "type" "ld")])
+
+;;;; Atomic increments
+
+(define_mode_iterator AMO [SI DI])
+
+;; Atomic add-to-memory via the eBPF xadd instruction.  Operand 2 (the
+;; memory model) is matched but not used in the output -- presumably
+;; because xadd provides a single, fixed ordering; confirm.
+(define_insn "atomic_add<AMO:mode>"
+  [(set (match_operand:AMO 0 "indirect_memory_operand" "+m")
+        (unspec_volatile:AMO
+         [(plus:AMO (match_dup 0)
+                    (match_operand:AMO 1 "register_operand" "r"))
+          (match_operand:SI 2 "const_int_operand")] ;; Memory model.
+         UNSPEC_XADD))]
+  ""
+  "xadd<mop>\t%0,%1"
+  [(set_attr "type" "xadd")])
diff --git a/gcc/config/bpf/bpf.opt b/gcc/config/bpf/bpf.opt
new file mode 100644
index 00000000000..b04c8c8f504
--- /dev/null
+++ b/gcc/config/bpf/bpf.opt
@@ -0,0 +1,119 @@ 
+; Options for the eBPF compiler port.
+
+; Copyright (C) 2019 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3.  If not see
+; <http://www.gnu.org/licenses/>.
+
+HeaderInclude
+config/bpf/bpf-opts.h
+
+; Selecting the kind of kernel the eBPF will be running on.
+
+mkernel=
+Target RejectNegative Joined Var(bpf_kernel) Enum(bpf_kernel) Init(LINUX_LATEST)
+Generate eBPF for the given Linux kernel version.
+
+Enum
+Name(bpf_kernel) Type(enum bpf_kernel_version)
+
+EnumValue
+Enum(bpf_kernel) String(native) Value(LINUX_NATIVE) DriverOnly
+
+EnumValue
+Enum(bpf_kernel) String(latest) Value(LINUX_LATEST) DriverOnly
+
+EnumValue
+Enum(bpf_kernel) String(4.0) Value(LINUX_V4_0)
+
+EnumValue
+Enum(bpf_kernel) String(4.1) Value(LINUX_V4_1)
+
+EnumValue
+Enum(bpf_kernel) String(4.2) Value(LINUX_V4_2)
+
+EnumValue
+Enum(bpf_kernel) String(4.3) Value(LINUX_V4_3)
+
+EnumValue
+Enum(bpf_kernel) String(4.4) Value(LINUX_V4_4)
+
+EnumValue
+Enum(bpf_kernel) String(4.5) Value(LINUX_V4_5)
+
+EnumValue
+Enum(bpf_kernel) String(4.6) Value(LINUX_V4_6)
+
+EnumValue
+Enum(bpf_kernel) String(4.7) Value(LINUX_V4_7)
+
+EnumValue
+Enum(bpf_kernel) String(4.8) Value(LINUX_V4_8)
+
+EnumValue
+Enum(bpf_kernel) String(4.9) Value(LINUX_V4_9)
+
+EnumValue
+Enum(bpf_kernel) String(4.10) Value(LINUX_V4_10)
+
+EnumValue
+Enum(bpf_kernel) String(4.11) Value(LINUX_V4_11)
+
+EnumValue
+Enum(bpf_kernel) String(4.12) Value(LINUX_V4_12)
+
+EnumValue
+Enum(bpf_kernel) String(4.13) Value(LINUX_V4_13)
+
+EnumValue
+Enum(bpf_kernel) String(4.14) Value(LINUX_V4_14)
+
+EnumValue
+Enum(bpf_kernel) String(4.15) Value(LINUX_V4_15)
+
+EnumValue
+Enum(bpf_kernel) String(4.16) Value(LINUX_V4_16)
+
+EnumValue
+Enum(bpf_kernel) String(4.17) Value(LINUX_V4_17)
+
+EnumValue
+Enum(bpf_kernel) String(4.18) Value(LINUX_V4_18)
+
+EnumValue
+Enum(bpf_kernel) String(4.19) Value(LINUX_V4_19)
+
+EnumValue
+Enum(bpf_kernel) String(4.20) Value(LINUX_V4_20)
+
+EnumValue
+Enum(bpf_kernel) String(5.0) Value(LINUX_V5_0)
+
+EnumValue
+Enum(bpf_kernel) String(5.1) Value(LINUX_V5_1)
+
+EnumValue
+Enum(bpf_kernel) String(5.2) Value(LINUX_V5_2)
+
+; Selecting big endian or little endian targets.
+
+mbig-endian
+Target RejectNegative Report InverseMask(LITTLE_ENDIAN)
+Generate big-endian eBPF.
+
+mlittle-endian
+Target RejectNegative Report Mask(LITTLE_ENDIAN)
+Generate little-endian eBPF.
diff --git a/gcc/config/bpf/constraints.md b/gcc/config/bpf/constraints.md
new file mode 100644
index 00000000000..aa886d0c075
--- /dev/null
+++ b/gcc/config/bpf/constraints.md
@@ -0,0 +1,29 @@ 
+;; Constraint definitions for eBPF.
+;; Copyright (C) 2019 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; "I": the 32-bit signed immediate accepted by most eBPF ALU, jump
+;; and store-immediate instructions.
+(define_constraint "I"
+  "A 32-bit signed immediate."
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (ival, -1 - 0x7fffffff, 0x7fffffff)")))
+
+;; "B": any constant loadable with the 64-bit lddw instruction.
+;; NOTE(review): the IN_RANGE test spans the whole 64-bit
+;; HOST_WIDE_INT range, so for const_int it is presumably always
+;; true -- confirm whether the test is needed at all.
+(define_constraint "B"
+  "A constant argument for LDDW."
+  (ior (match_code "const,symbol_ref,label_ref,const_double")
+       (and (match_code "const_int")
+            (match_test "IN_RANGE (ival, -1 - 0x7fffffffffffffff, 0x7fffffffffffffff)"))))
diff --git a/gcc/config/bpf/predicates.md b/gcc/config/bpf/predicates.md
new file mode 100644
index 00000000000..73cd2fe9b90
--- /dev/null
+++ b/gcc/config/bpf/predicates.md
@@ -0,0 +1,105 @@ 
+;; Predicate definitions for eBPF.
+;; Copyright (C) 2019 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; A register, or a constant that fits in a signed 32-bit immediate.
+(define_predicate "reg_or_imm_operand"
+  (ior (and (match_code "const_int")
+            (match_test "IN_RANGE (INTVAL (op), -1 - 0x7fffffff, 0x7fffffff)"))
+       (match_operand 0 "register_operand")))
+
+;; A constant representable in an (unsigned) 32-bit immediate field.
+(define_predicate "imm32_operand"
+  (ior (and (match_code "const_int")
+            (match_test "IN_RANGE (INTVAL (op), 0, 0xffffffff)"))
+       (match_code "symbol_ref,label_ref,const")))
+
+;; Any constant loadable by the 64-bit lddw instruction.
+;; NOTE(review): the IN_RANGE test covers the entire 64-bit
+;; HOST_WIDE_INT range and is presumably always true for const_int --
+;; confirm whether it can be dropped.
+(define_predicate "lddw_operand"
+  (ior (and (match_code "const_int")
+            (match_test "IN_RANGE (INTVAL (op), 0, 0xffffffffffffffff)"))
+       (match_code "symbol_ref,label_ref,const,const_double")))
+
+;; A MEM whose address is a register, constant, or constant offset
+;; expression, suitable as a call target.
+(define_predicate "call_operand"
+  (match_code "mem")
+{
+  /* NOTE(review): match_code "mem" above already guarantees a MEM,
+     so this check looks redundant -- confirm.  */
+  if (GET_CODE (op) != MEM)
+    return 0;
+
+  op = XEXP (op, 0);
+
+  if (GET_MODE (op) != mode
+      && GET_MODE (op) != VOIDmode
+      && mode != VOIDmode)
+    return 0;
+
+  switch (GET_CODE (op))
+  {
+  case REG:
+  case CONST_INT:
+  case SYMBOL_REF:
+  case LABEL_REF:
+    return 1;
+    break; /* NOTE(review): unreachable after return.  */
+  case CONST:
+    {
+      switch (GET_CODE (XEXP (op, 0)))
+	{
+	case SYMBOL_REF:
+	case LABEL_REF:
+	case CONST_INT:
+	  return 1;
+	default:
+	  break;
+	}
+      break;
+    }
+  default:
+    break;
+  }
+
+  return 0;
+})
+
+;; A memory operand whose address is NOT a bare constant, i.e. one the
+;; register+offset addressing of ldx/stx can handle directly.
+(define_predicate "indirect_memory_operand"
+  (match_operand 0 "memory_operand")
+{
+  if (GET_CODE (op) != MEM)
+    return 0;
+
+  op = XEXP (op, 0);
+
+  if (CONSTANT_ADDRESS_P (op))
+    return 0;
+
+  return 1;
+})
+
+(define_predicate "reg_or_indirect_memory_operand"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "indirect_memory_operand")))
+
+;; Valid destination of a move: register or indirect memory.
+(define_predicate "mov_dst_operand"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "indirect_memory_operand")))
+
+;; Valid source of a move: indirect memory, register, or immediate.
+(define_predicate "mov_src_operand"
+  (ior (match_operand 0 "indirect_memory_operand")
+       (match_operand 0 "reg_or_imm_operand")
+       (match_operand 0 "lddw_operand")))
+
+;; NOTE(review): this operator set has eq/ne and the unsigned and
+;; signed greater-than forms but no lt/le codes -- confirm the
+;; omission is intentional.
+(define_predicate "register_compare_operator"
+  (match_code "eq,ne,geu,gtu,ge,gt"))
+
+
diff --git a/gcc/config/bpf/t-bpf b/gcc/config/bpf/t-bpf
new file mode 100644
index 00000000000..e69de29bb2d