new file mode 100644
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2019 Intel Corporation. */
+#ifndef _LINUX_XDP_CALL_H
+#define _LINUX_XDP_CALL_H
+
+#include <linux/filter.h>
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_RETPOLINE)
+
+/*
+ * Defined elsewhere by the BPF dispatcher core; given the trampoline at
+ * @func, it switches the attached program from @from to @to.
+ * NOTE(review): semantics inferred from the name and from xdp_call_update()
+ * below — confirm against the definition site.
+ */
+void bpf_dispatcher_change_prog(void *func, struct bpf_prog *from,
+				struct bpf_prog *to);
+
+/* Token-pasted identifier of the trampoline generated for dispatcher @name. */
+#define XDP_CALL_TRAMP(name) ____xdp_call_##name##_tramp
+
+/*
+ * DEFINE_XDP_CALL(name) - emit the direct-call trampoline for @name.
+ *
+ * The generated function just forwards to @bpf_func(xdp_ctx, insnsi); the
+ * point is that the driver's call site is a plain direct call instead of a
+ * retpolined indirect call.  NOTE(review): presumably
+ * bpf_dispatcher_change_prog() later patches this trampoline for a specific
+ * program — confirm against the dispatcher implementation.
+ */
+#define DEFINE_XDP_CALL(name)					\
+	unsigned int XDP_CALL_TRAMP(name)(			\
+		const void *xdp_ctx,				\
+		const struct bpf_insn *insnsi,			\
+		unsigned int (*bpf_func)(const void *,		\
+					 const struct bpf_insn *)) \
+	{							\
+		return bpf_func(xdp_ctx, insnsi);		\
+	}
+
+/*
+ * DECLARE_XDP_CALL(name) - header-side prototype for the trampoline emitted
+ * by DEFINE_XDP_CALL(name); the two signatures must stay in sync.
+ */
+#define DECLARE_XDP_CALL(name)					\
+	unsigned int XDP_CALL_TRAMP(name)(			\
+		const void *xdp_ctx,				\
+		const struct bpf_insn *insnsi,			\
+		unsigned int (*bpf_func)(const void *,		\
+					 const struct bpf_insn *))
+
+/*
+ * xdp_call_run - run @prog on @ctx through the @name trampoline.
+ * @name: dispatcher identifier used with DEFINE_XDP_CALL().
+ * @prog: struct bpf_prog * to execute (evaluated more than once).
+ * @ctx:  XDP context handed to the program.
+ *
+ * Must run in non-sleepable context (cant_sleep()).  When the bpf_stats
+ * static key is enabled, the invocation count and sched_clock() runtime are
+ * folded into the program's per-CPU stats inside a
+ * u64_stats_update_begin()/end() section.
+ *
+ * Locals carry a "__" prefix and every argument expansion is parenthesized
+ * so the macro can neither shadow nor misparse identifiers appearing in the
+ * caller's @prog/@ctx expressions.  Evaluates to the program's u32 return
+ * code.
+ */
+#define xdp_call_run(name, prog, ctx) ({				\
+	u32 __ret;							\
+	cant_sleep();							\
+	if (static_branch_unlikely(&bpf_stats_enabled_key)) {		\
+		struct bpf_prog_stats *__stats;				\
+		u64 __start = sched_clock();				\
+		__ret = XDP_CALL_TRAMP(name)((ctx),			\
+					     (prog)->insnsi,		\
+					     (prog)->bpf_func);		\
+		__stats = this_cpu_ptr((prog)->aux->stats);		\
+		u64_stats_update_begin(&__stats->syncp);		\
+		__stats->cnt++;						\
+		__stats->nsecs += sched_clock() - __start;		\
+		u64_stats_update_end(&__stats->syncp);			\
+	} else {							\
+		__ret = XDP_CALL_TRAMP(name)((ctx),			\
+					     (prog)->insnsi,		\
+					     (prog)->bpf_func);		\
+	}								\
+	__ret; })
+
+/* Swap the program behind the @name trampoline: delegates to
+ * bpf_dispatcher_change_prog() with the trampoline's address, detaching
+ * @from_xdp_prog and attaching @to_xdp_prog.
+ */
+#define xdp_call_update(name, from_xdp_prog, to_xdp_prog)	\
+	bpf_dispatcher_change_prog(&XDP_CALL_TRAMP(name),	\
+				   from_xdp_prog, to_xdp_prog)
+
+#else /* !defined(CONFIG_BPF_JIT) || !defined(CONFIG_RETPOLINE) */
+
+/*
+ * Fallback when the BPF JIT or retpolines are disabled: indirect calls are
+ * cheap (or there is no JITed trampoline to patch), so define/declare/update
+ * are no-ops and running a program goes through the generic
+ * bpf_prog_run_xdp() path.  Parameter names mirror the JIT variants above.
+ */
+#define DEFINE_XDP_CALL(name)
+#define DECLARE_XDP_CALL(name)
+#define xdp_call_run(name, prog, ctx) bpf_prog_run_xdp(prog, ctx)
+#define xdp_call_update(name, from_xdp_prog, to_xdp_prog)
+
+#endif /* defined(CONFIG_BPF_JIT) && defined(CONFIG_RETPOLINE) */
+#endif /* _LINUX_XDP_CALL_H */