[focal:linux-azure,1/1] UBUNTU: SAUCE: linux-azure: Update SGX to version LD_1.33

Message ID 20200609165144.1433875-2-marcelo.cerri@canonical.com
State New
Series LP:#1881338 - linux-azure: Update SGX version to version LD_1.33

Commit Message

Marcelo Henrique Cerri June 9, 2020, 4:51 p.m. UTC
BugLink: https://bugs.launchpad.net/bugs/1881338

Update the SGX driver to version LD_1.33 from Intel's DCAP
repository[1] (commit id 8e812569b9bc).

[1] https://github.com/intel/SGXDataCenterAttestationPrimitives.git

Also fix the kernel version conditional around
sgx_encl_mm_release_deferred(), following the same criteria used for
struct sgx_encl_mm in encl.h.
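
The conditional in question, as it lands in the new encl.c (full context
in the hunk below), gates the deferred-release helper to kernels older
than 5.4: the pre-5.4 path frees encl_mm through mmu_notifier_call_srcu()
with this callback, while 5.4 and later use mmu_notifier_put() together
with a free_notifier hook. A condensed excerpt for illustration:

    #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0))
    /* Only needed before 5.4: free encl_mm after the notifier's SRCU grace period. */
    static void sgx_encl_mm_release_deferred(struct rcu_head *rcu)
    {
            struct sgx_encl_mm *encl_mm =
                    container_of(rcu, struct sgx_encl_mm, rcu);

            kfree(encl_mm);
    }
    #endif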

Signed-off-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
---
 arch/x86/include/asm/sgx.h          | 233 -------
 arch/x86/include/asm/sgx_arch.h     | 278 --------
 arch/x86/include/asm/sgx_pr.h       |  80 ---
 arch/x86/include/dcap.h             |  39 ++
 arch/x86/include/uapi/asm/sgx.h     | 154 -----
 arch/x86/include/uapi/asm/sgx_oot.h | 114 ++++
 ubuntu/sgx/Makefile                 |  68 +-
 ubuntu/sgx/arch.h                   | 342 ++++++++++
 ubuntu/sgx/dkms.conf                |   6 +
 ubuntu/sgx/driver.c                 | 223 +++++++
 ubuntu/sgx/driver.h                 |  33 +
 ubuntu/sgx/encl.c                   | 821 +++++++++++++++++++++++
 ubuntu/sgx/encl.h                   | 132 ++++
 ubuntu/sgx/encls.h                  | 239 +++++++
 ubuntu/sgx/ioctl.c                  | 829 ++++++++++++++++++++++++
 ubuntu/sgx/main.c                   | 329 ++++++++++
 ubuntu/sgx/reclaim.c                | 505 +++++++++++++++
 ubuntu/sgx/sgx.h                    | 342 +++-------
 ubuntu/sgx/sgx_driver_info.h        |  62 --
 ubuntu/sgx/sgx_encl.c               | 973 ----------------------------
 ubuntu/sgx/sgx_ioctl.c              | 332 ----------
 ubuntu/sgx/sgx_main.c               | 498 --------------
 ubuntu/sgx/sgx_page_cache.c         | 596 -----------------
 ubuntu/sgx/sgx_util.c               | 382 -----------
 ubuntu/sgx/sgx_version.h            |  60 --
 ubuntu/sgx/sgx_vma.c                | 242 -------
 ubuntu/sgx/version.h                |  10 +
 27 files changed, 3708 insertions(+), 4214 deletions(-)
 delete mode 100644 arch/x86/include/asm/sgx.h
 delete mode 100755 arch/x86/include/asm/sgx_arch.h
 delete mode 100644 arch/x86/include/asm/sgx_pr.h
 create mode 100644 arch/x86/include/dcap.h
 delete mode 100644 arch/x86/include/uapi/asm/sgx.h
 create mode 100644 arch/x86/include/uapi/asm/sgx_oot.h
 create mode 100644 ubuntu/sgx/arch.h
 create mode 100644 ubuntu/sgx/dkms.conf
 create mode 100644 ubuntu/sgx/driver.c
 create mode 100644 ubuntu/sgx/driver.h
 create mode 100644 ubuntu/sgx/encl.c
 create mode 100644 ubuntu/sgx/encl.h
 create mode 100644 ubuntu/sgx/encls.h
 create mode 100644 ubuntu/sgx/ioctl.c
 create mode 100644 ubuntu/sgx/main.c
 create mode 100644 ubuntu/sgx/reclaim.c
 delete mode 100644 ubuntu/sgx/sgx_driver_info.h
 delete mode 100644 ubuntu/sgx/sgx_encl.c
 delete mode 100644 ubuntu/sgx/sgx_ioctl.c
 delete mode 100644 ubuntu/sgx/sgx_main.c
 delete mode 100644 ubuntu/sgx/sgx_page_cache.c
 delete mode 100644 ubuntu/sgx/sgx_util.c
 delete mode 100644 ubuntu/sgx/sgx_version.h
 delete mode 100644 ubuntu/sgx/sgx_vma.c
 create mode 100644 ubuntu/sgx/version.h

Patch

diff --git a/arch/x86/include/asm/sgx.h b/arch/x86/include/asm/sgx.h
deleted file mode 100644
index 1daf4e4c0790..000000000000
--- a/arch/x86/include/asm/sgx.h
+++ /dev/null
@@ -1,233 +0,0 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Suresh Siddha <suresh.b.siddha@intel.com>
-
-#ifndef _ASM_X86_SGX_H
-#define _ASM_X86_SGX_H
-
-#include <asm/sgx_arch.h>
-#include <asm/asm.h>
-#include <linux/bitops.h>
-#include <linux/err.h>
-#include <linux/types.h>
-
-#define SGX_CPUID 0x12
-
-enum sgx_cpuid {
-	SGX_CPUID_CAPABILITIES	= 0,
-	SGX_CPUID_ATTRIBUTES	= 1,
-	SGX_CPUID_EPC_BANKS	= 2,
-};
-
-enum sgx_commands {
-	ECREATE	= 0x0,
-	EADD	= 0x1,
-	EINIT	= 0x2,
-	EREMOVE	= 0x3,
-	EDGBRD	= 0x4,
-	EDGBWR	= 0x5,
-	EEXTEND	= 0x6,
-	ELDU	= 0x8,
-	EBLOCK	= 0x9,
-	EPA	= 0xA,
-	EWB	= 0xB,
-	ETRACK	= 0xC,
-	EAUG	= 0xD,
-	EMODPR	= 0xE,
-	EMODT	= 0xF,
-};
-
-#ifdef CONFIG_X86_64
-#define XAX "%%rax"
-#else
-#define XAX "%%eax"
-#endif
-
-#define __encls_ret(rax, rbx, rcx, rdx)			\
-	({						\
-	int ret;					\
-	asm volatile(					\
-	"1: .byte 0x0f, 0x01, 0xcf;\n\t"		\
-	"2:\n"						\
-	".section .fixup,\"ax\"\n"			\
-	"3: mov $-14,"XAX"\n"				\
-	"   jmp 2b\n"					\
-	".previous\n"					\
-	_ASM_EXTABLE(1b, 3b)				\
-	: "=a"(ret)					\
-	: "a"(rax), "b"(rbx), "c"(rcx), "d"(rdx)	\
-	: "memory");					\
-	ret;						\
-	})
-
-#define __encls(rax, rbx, rcx, rdx...)			\
-	({						\
-	int ret;					\
-	asm volatile(					\
-	"1: .byte 0x0f, 0x01, 0xcf;\n\t"		\
-	"   xor "XAX","XAX"\n"				\
-	"2:\n"						\
-	".section .fixup,\"ax\"\n"			\
-	"3: mov $-14,"XAX"\n"				\
-	"   jmp 2b\n"					\
-	".previous\n"					\
-	_ASM_EXTABLE(1b, 3b)				\
-	: "=a"(ret), "=b"(rbx), "=c"(rcx)		\
-	: "a"(rax), "b"(rbx), "c"(rcx), rdx		\
-	: "memory");					\
-	ret;						\
-	})
-
-static inline unsigned long __ecreate(struct sgx_pageinfo *pginfo, void *secs)
-{
-	return __encls(ECREATE, pginfo, secs, "d"(0));
-}
-
-static inline int __eextend(void *secs, void *epc)
-{
-	return __encls(EEXTEND, secs, epc, "d"(0));
-}
-
-static inline int __eadd(struct sgx_pageinfo *pginfo, void *epc)
-{
-	return __encls(EADD, pginfo, epc, "d"(0));
-}
-
-static inline int __einit(void *sigstruct, struct sgx_einittoken *einittoken,
-			  void *secs)
-{
-	return __encls_ret(EINIT, sigstruct, secs, einittoken);
-}
-
-static inline int __eremove(void *epc)
-{
-	unsigned long rbx = 0;
-	unsigned long rdx = 0;
-
-	return __encls_ret(EREMOVE, rbx, epc, rdx);
-}
-
-static inline int __edbgwr(unsigned long addr, unsigned long *data)
-{
-	return __encls(EDGBWR, *data, addr, "d"(0));
-}
-
-static inline int __edbgrd(unsigned long addr, unsigned long *data)
-{
-	unsigned long rbx = 0;
-	int ret;
-
-	ret = __encls(EDGBRD, rbx, addr, "d"(0));
-	if (!ret)
-		*(unsigned long *) data = rbx;
-
-	return ret;
-}
-
-static inline int __etrack(void *epc)
-{
-	unsigned long rbx = 0;
-	unsigned long rdx = 0;
-
-	return __encls_ret(ETRACK, rbx, epc, rdx);
-}
-
-static inline int __eldu(unsigned long rbx, unsigned long rcx,
-			 unsigned long rdx)
-{
-	return __encls_ret(ELDU, rbx, rcx, rdx);
-}
-
-static inline int __eblock(void *epc)
-{
-	unsigned long rbx = 0;
-	unsigned long rdx = 0;
-
-	return __encls_ret(EBLOCK, rbx, epc, rdx);
-}
-
-static inline int __epa(void *epc)
-{
-	unsigned long rbx = SGX_PAGE_TYPE_VA;
-
-	return __encls(EPA, rbx, epc, "d"(0));
-}
-
-static inline int __ewb(struct sgx_pageinfo *pginfo, void *epc, void *va)
-{
-	return __encls_ret(EWB, pginfo, epc, va);
-}
-
-static inline int __eaug(struct sgx_pageinfo *pginfo, void *epc)
-{
-	return __encls(EAUG, pginfo, epc, "d"(0));
-}
-
-static inline int __emodpr(struct sgx_secinfo *secinfo, void *epc)
-{
-	unsigned long rdx = 0;
-
-	return __encls_ret(EMODPR, secinfo, epc, rdx);
-}
-
-static inline int __emodt(struct sgx_secinfo *secinfo, void *epc)
-{
-	unsigned long rdx = 0;
-
-	return __encls_ret(EMODT, secinfo, epc, rdx);
-}
-
-extern bool sgx_enabled;
-
-#endif /* _ASM_X86_SGX_H */
diff --git a/arch/x86/include/asm/sgx_arch.h b/arch/x86/include/asm/sgx_arch.h
deleted file mode 100755
index 9cddf83967d1..000000000000
--- a/arch/x86/include/asm/sgx_arch.h
+++ /dev/null
@@ -1,278 +0,0 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Suresh Siddha <suresh.b.siddha@intel.com>
-
-#ifndef _ASM_X86_SGX_ARCH_H
-#define _ASM_X86_SGX_ARCH_H
-
-#include <linux/types.h>
-
-#define SGX_SSA_GPRS_SIZE		182
-#define SGX_SSA_MISC_EXINFO_SIZE	16
-
-enum sgx_misc {
-	SGX_MISC_EXINFO		= 0x01,
-};
-
-#define SGX_MISC_RESERVED_MASK 0xFFFFFFFFFFFFFFFEL
-
-enum sgx_attribute {
-	SGX_ATTR_INIT			= BIT(0),
-	SGX_ATTR_DEBUG			= BIT(1),
-	SGX_ATTR_MODE64BIT		= BIT(2),
-	SGX_ATTR_PROVISIONKEY	= BIT(4),
-	SGX_ATTR_EINITTOKENKEY	= BIT(5),
-	SGX_ATTR_KSS			= BIT(7)
-};
-
-#define SGX_ATTR_RESERVED_MASK	(BIT_ULL(0) | BIT_ULL(3) | BIT_ULL(6) | GENMASK_ULL(63, 8))
-#define SGX_ATTR_ALLOWED_MASK	(SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT | SGX_ATTR_KSS)
-
-
-#define SGX_SECS_RESERVED1_SIZE 24
-#define SGX_SECS_RESERVED2_SIZE 32
-#define SGX_SECS_RESERVED3_SIZE 32
-#define SGX_SECS_RESERVED4_SIZE 3834
-
-struct sgx_secs {
-	uint64_t size;
-	uint64_t base;
-	uint32_t ssaframesize;
-	uint32_t miscselect;
-	uint8_t reserved1[SGX_SECS_RESERVED1_SIZE];
-	uint64_t attributes;
-	uint64_t xfrm;
-	uint32_t mrenclave[8];
-	uint8_t reserved2[SGX_SECS_RESERVED2_SIZE];
-	uint32_t mrsigner[8];
-	uint8_t	reserved3[SGX_SECS_RESERVED3_SIZE];
-	uint32_t configid[16];
-	uint16_t isvvprodid;
-	uint16_t isvsvn;
-	uint16_t configsvn;
-	uint8_t reserved4[SGX_SECS_RESERVED4_SIZE];
-} __attribute__((__packed__));
-
-enum sgx_tcs_flags {
-	SGX_TCS_DBGOPTIN	= 0x01, /* cleared on EADD */
-};
-
-#define SGX_TCS_RESERVED_MASK 0xFFFFFFFFFFFFFFFEL
-
-struct sgx_tcs {
-	uint64_t state;
-	uint64_t flags;
-	uint64_t ossa;
-	uint32_t cssa;
-	uint32_t nssa;
-	uint64_t oentry;
-	uint64_t aep;
-	uint64_t ofsbase;
-	uint64_t ogsbase;
-	uint32_t fslimit;
-	uint32_t gslimit;
-	uint64_t reserved[503];
-} __attribute__((__packed__));
-
-struct sgx_pageinfo {
-	uint64_t linaddr;
-	uint64_t srcpge;
-	union {
-		uint64_t secinfo;
-		uint64_t pcmd;
-	};
-	uint64_t secs;
-} __attribute__((packed, aligned(32)));
-
-
-#define SGX_SECINFO_PERMISSION_MASK	0x0000000000000007L
-#define SGX_SECINFO_PAGE_TYPE_MASK	0x000000000000FF00L
-#define SGX_SECINFO_RESERVED_MASK	0xFFFFFFFFFFFF00F8L
-
-enum sgx_page_type {
-	SGX_PAGE_TYPE_SECS	= 0x00,
-	SGX_PAGE_TYPE_TCS	= 0x01,
-	SGX_PAGE_TYPE_REG	= 0x02,
-	SGX_PAGE_TYPE_VA	= 0x03,
-};
-
-enum sgx_secinfo_flags {
-	SGX_SECINFO_R		= 0x01,
-	SGX_SECINFO_W		= 0x02,
-	SGX_SECINFO_X		= 0x04,
-	SGX_SECINFO_SECS	= (SGX_PAGE_TYPE_SECS << 8),
-	SGX_SECINFO_TCS		= (SGX_PAGE_TYPE_TCS << 8),
-	SGX_SECINFO_REG		= (SGX_PAGE_TYPE_REG << 8),
-};
-
-struct sgx_secinfo {
-	uint64_t flags;
-	uint64_t reserved[7];
-} __attribute__((__packed__, aligned(64)));
-
-struct sgx_pcmd {
-	struct sgx_secinfo secinfo;
-	uint64_t enclave_id;
-	uint8_t reserved[40];
-	uint8_t mac[16];
-} __attribute__((__packed__, aligned(64)));
-
-#define SGX_MODULUS_SIZE 384
-
-struct sgx_sigstruct_header {
-	uint64_t header1[2];
-	uint32_t vendor;
-	uint32_t date;
-	uint64_t header2[2];
-	uint32_t swdefined;
-	uint8_t reserved1[84];
-} __attribute__((__packed__));
-
-struct sgx_sigstruct_body {
-	uint32_t miscselect;
-	uint32_t miscmask;
-	uint8_t reserved2[4];
-	uint8_t isvfamilyid[16];
-	uint64_t attributes;
-	uint64_t xfrm;
-	uint64_t attributesmask;
-	uint64_t xfrmmask;
-	uint8_t mrenclave[32];
-	uint8_t reserved3[16];
-	uint8_t isvextprodid[16];
-	uint16_t isvprodid;
-	uint16_t isvsvn;
-} __attribute__((__packed__));
-
-struct sgx_sigstruct {
-	struct sgx_sigstruct_header header;
-	uint8_t modulus[SGX_MODULUS_SIZE];
-	uint32_t exponent;
-	uint8_t signature[SGX_MODULUS_SIZE];
-	struct sgx_sigstruct_body body;
-	uint8_t reserved4[12];
-	uint8_t q1[SGX_MODULUS_SIZE];
-	uint8_t q2[SGX_MODULUS_SIZE];
-} __attribute__((__packed__));
-
-struct sgx_sigstruct_payload {
-	struct sgx_sigstruct_header header;
-	struct sgx_sigstruct_body body;
-} __attribute__((__packed__));
-
-struct sgx_einittoken_payload {
-	uint32_t valid;
-	uint32_t reserved1[11];
-	uint64_t attributes;
-	uint64_t xfrm;
-	uint8_t mrenclave[32];
-	uint8_t reserved2[32];
-	uint8_t mrsigner[32];
-	uint8_t reserved3[32];
-} __attribute__((__packed__));
-
-struct sgx_einittoken {
-	struct sgx_einittoken_payload payload;
-	uint8_t cpusvnle[16];
-	uint16_t isvprodidle;
-	uint16_t isvsvnle;
-	uint8_t reserved2[24];
-	uint32_t maskedmiscselectle;
-	uint64_t maskedattributesle;
-	uint64_t maskedxfrmle;
-	uint8_t keyid[32];
-	uint8_t mac[16];
-} __attribute__((__packed__));
-
-struct sgx_report {
-	uint8_t cpusvn[16];
-	uint32_t miscselect;
-	uint8_t reserved1[28];
-	uint64_t attributes;
-	uint64_t xfrm;
-	uint8_t mrenclave[32];
-	uint8_t reserved2[32];
-	uint8_t mrsigner[32];
-	uint8_t reserved3[96];
-	uint16_t isvprodid;
-	uint16_t isvsvn;
-	uint8_t reserved4[60];
-	uint8_t reportdata[64];
-	uint8_t keyid[32];
-	uint8_t mac[16];
-} __attribute__((__packed__));
-
-struct sgx_targetinfo {
-	uint8_t mrenclave[32];
-	uint64_t attributes;
-	uint64_t xfrm;
-	uint8_t reserved1[4];
-	uint32_t miscselect;
-	uint8_t reserved2[456];
-} __attribute__((__packed__));
-
-struct sgx_keyrequest {
-	uint16_t keyname;
-	uint16_t keypolicy;
-	uint16_t isvsvn;
-	uint16_t reserved1;
-	uint8_t cpusvn[16];
-	uint64_t attributemask;
-	uint64_t xfrmmask;
-	uint8_t keyid[32];
-	uint32_t miscmask;
-	uint8_t reserved2[436];
-} __attribute__((__packed__));
-
-#endif /* _ASM_X86_SGX_ARCH_H */
diff --git a/arch/x86/include/asm/sgx_pr.h b/arch/x86/include/asm/sgx_pr.h
deleted file mode 100644
index 9479a6c6b3ca..000000000000
--- a/arch/x86/include/asm/sgx_pr.h
+++ /dev/null
@@ -1,80 +0,0 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Suresh Siddha <suresh.b.siddha@intel.com>
-// Serge Ayoun <serge.ayoun@intel.com>
-// Shay Katz-zamir <shay.katz-zamir@intel.com>
-
-#ifndef _ASM_X86_SGX_PR_H
-#define _ASM_X86_SGX_PR_H
-
-#undef pr_fmt
-#define pr_fmt(fmt) "intel_sgx: " fmt
-
-#define sgx_pr_ratelimited(level, encl, fmt, ...)			\
-	pr_ ## level ## _ratelimited("[%d:0x%p] " fmt,			\
-				     pid_nr((encl)->tgid),		\
-				     (void *)(encl)->base, ##__VA_ARGS__)
-
-#define sgx_dbg(encl, fmt, ...) \
-	sgx_pr_ratelimited(debug, encl, fmt, ##__VA_ARGS__)
-#define sgx_info(encl, fmt, ...) \
-	sgx_pr_ratelimited(info, encl, fmt, ##__VA_ARGS__)
-#define sgx_warn(encl, fmt, ...) \
-	sgx_pr_ratelimited(warn, encl, fmt, ##__VA_ARGS__)
-#define sgx_err(encl, fmt, ...) \
-	sgx_pr_ratelimited(err, encl, fmt, ##__VA_ARGS__)
-#define sgx_crit(encl, fmt, ...) \
-	sgx_pr_ratelimited(crit, encl, fmt, ##__VA_ARGS__)
-
-#endif /* _ASM_X86_SGX_PR_H */
diff --git a/arch/x86/include/dcap.h b/arch/x86/include/dcap.h
new file mode 100644
index 000000000000..a29b7edff502
--- /dev/null
+++ b/arch/x86/include/dcap.h
@@ -0,0 +1,39 @@ 
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2016-17 Intel Corporation.
+
+#ifndef __DCAP_H__
+#define __DCAP_H__
+
+#ifndef X86_FEATURE_SGX
+	#define X86_FEATURE_SGX				(9 * 32 + 2)
+#endif
+
+#ifndef X86_FEATURE_SGX1
+	#define X86_FEATURE_SGX1         ( 8*32+ 0) /* SGX1 leaf functions */
+#endif
+
+#ifndef X86_FEATURE_SGX2
+	#define X86_FEATURE_SGX2         ( 8*32+ 1) /* SGX2 leaf functions */
+#endif
+
+#ifndef X86_FEATURE_SGX_LC
+	#define X86_FEATURE_SGX_LC			(16*32+30) /* supports SGX launch configuration */
+#endif
+
+#ifndef FEAT_CTL_SGX_ENABLED
+	#define FEAT_CTL_SGX_ENABLED                             (1<<18)
+#endif
+
+#ifndef FEAT_CTL_SGX_LC_ENABLED
+	#define FEAT_CTL_SGX_LC_ENABLED                         (1<<17)
+#endif
+
+#ifndef MSR_IA32_SGXLEPUBKEYHASH0
+    #define MSR_IA32_SGXLEPUBKEYHASH0	0x0000008C
+    #define MSR_IA32_SGXLEPUBKEYHASH1	0x0000008D
+    #define MSR_IA32_SGXLEPUBKEYHASH2	0x0000008E
+    #define MSR_IA32_SGXLEPUBKEYHASH3	0x0000008F
+#endif
+
+
+#endif
diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
deleted file mode 100644
index 9a50f0bdd0be..000000000000
--- a/arch/x86/include/uapi/asm/sgx.h
+++ /dev/null
@@ -1,154 +0,0 @@ 
-/*
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016-2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
- * Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016-2018 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors:
- *
- * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- */
-
-#ifndef _UAPI_ASM_X86_SGX_H
-#define _UAPI_ASM_X86_SGX_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define SGX_MAGIC 0xA4
-
-#define SGX_IOC_ENCLAVE_CREATE \
-	_IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
-#define SGX_IOC_ENCLAVE_ADD_PAGE \
-	_IOW(SGX_MAGIC, 0x01, struct sgx_enclave_add_page)
-#define SGX_IOC_ENCLAVE_INIT \
-	_IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
-#define SGX_IOC_ENCLAVE_SET_ATTRIBUTE \
-	_IOW(SGX_MAGIC, 0x03, struct sgx_enclave_set_attribute)
-
-/* SGX leaf instruction return values */
-#define SGX_SUCCESS			0
-#define SGX_INVALID_SIG_STRUCT		1
-#define SGX_INVALID_ATTRIBUTE		2
-#define SGX_BLKSTATE			3
-#define SGX_INVALID_MEASUREMENT		4
-#define SGX_NOTBLOCKABLE		5
-#define SGX_PG_INVLD			6
-#define SGX_LOCKFAIL			7
-#define SGX_INVALID_SIGNATURE		8
-#define SGX_MAC_COMPARE_FAIL		9
-#define SGX_PAGE_NOT_BLOCKED		10
-#define SGX_NOT_TRACKED			11
-#define SGX_VA_SLOT_OCCUPIED		12
-#define SGX_CHILD_PRESENT		13
-#define SGX_ENCLAVE_ACT			14
-#define SGX_ENTRYEPOCH_LOCKED		15
-#define SGX_INVALID_EINITTOKEN		16
-#define SGX_PREV_TRK_INCMPL		17
-#define SGX_PG_IS_SECS			18
-#define SGX_INVALID_CPUSVN		32
-#define SGX_INVALID_ISVSVN		64
-#define SGX_UNMASKED_EVENT		128
-#define SGX_INVALID_KEYNAME		256
-
-/* IOCTL return values */
-#define SGX_POWER_LOST_ENCLAVE	0x40000000
-#define SGX_LE_ROLLBACK	0x40000001
-#define SGX_INVALID_PRIVILEGE	0x40000002
-#define SGX_UNEXPECTED_ERROR	0x40000003
-
-/**
- * struct sgx_enclave_create - parameter structure for the
- *                             %SGX_IOC_ENCLAVE_CREATE ioctl
- * @src:	address for the SECS page data
- */
-struct sgx_enclave_create  {
-	__u64	src;
-} __attribute__((__packed__));
-
-/**
- * struct sgx_enclave_add_page - parameter structure for the
- *                               %SGX_IOC_ENCLAVE_ADD_PAGE ioctl
- * @addr:	address within the ELRANGE
- * @src:	address for the page data
- * @secinfo:	address for the SECINFO data
- * @mrmask:	bitmask for the measured 256 byte chunks
- */
-struct sgx_enclave_add_page {
-	__u64	addr;
-	__u64	src;
-	__u64	secinfo;
-	__u16	mrmask;
-} __attribute__((__packed__));
-
-
-/**
- * struct sgx_enclave_init - parameter structure for the
- *                           %SGX_IOC_ENCLAVE_INIT ioctl
- * @addr:	address within the ELRANGE
- * @sigstruct:	address for the SIGSTRUCT data
- */
-struct sgx_enclave_init {
-	__u64	addr;
-	__u64	sigstruct;
-} __attribute__((__packed__));
-
-/**
- * struct sgx_enclave_set_attribute - parameter structure for the
- *				      %SGX_IOC_ENCLAVE_SET_ATTRIBUTE ioctl
- * @addr:	address within the ELRANGE
- * @attribute_fd:	file handle of the attribute file in the securityfs
- */
-struct sgx_enclave_set_attribute {
-	__u64	addr;
-	__u64	attribute_fd;
-} __attribute__((__packed__));;
-
-
-#endif /* _UAPI_ASM_X86_SGX_H */
diff --git a/arch/x86/include/uapi/asm/sgx_oot.h b/arch/x86/include/uapi/asm/sgx_oot.h
new file mode 100644
index 000000000000..e196cfd44b70
--- /dev/null
+++ b/arch/x86/include/uapi/asm/sgx_oot.h
@@ -0,0 +1,114 @@ 
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) WITH Linux-syscall-note */
+/*
+ * Copyright(c) 2016-19 Intel Corporation.
+ */
+#ifndef _UAPI_ASM_X86_SGX_H
+#define _UAPI_ASM_X86_SGX_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+ * enum sgx_epage_flags - page control flags
+ * %SGX_PAGE_MEASURE:	Measure the page contents with a sequence of
+ *			ENCLS[EEXTEND] operations.
+ */
+enum sgx_page_flags {
+	SGX_PAGE_MEASURE	= 0x01,
+};
+
+#define SGX_MAGIC 0xA4
+
+#define SGX_IOC_ENCLAVE_CREATE \
+	_IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
+#define SGX_IOC_ENCLAVE_ADD_PAGES \
+	_IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_pages)
+#define SGX_IOC_ENCLAVE_INIT \
+	_IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
+#define SGX_IOC_ENCLAVE_SET_ATTRIBUTE \
+	_IOW(SGX_MAGIC, 0x03, struct sgx_enclave_set_attribute)
+
+/**
+ * struct sgx_enclave_create - parameter structure for the
+ *                             %SGX_IOC_ENCLAVE_CREATE ioctl
+ * @src:	address for the SECS page data
+ */
+struct sgx_enclave_create  {
+	__u64	src;
+};
+
+/**
+ * struct sgx_enclave_add_pages - parameter structure for the
+ *                                %SGX_IOC_ENCLAVE_ADD_PAGE ioctl
+ * @src:	start address for the page data
+ * @offset:	starting page offset
+ * @length:	length of the data (multiple of the page size)
+ * @secinfo:	address for the SECINFO data
+ * @flags:	page control flags
+ * @count:	number of bytes added (multiple of the page size)
+ */
+struct sgx_enclave_add_pages {
+	__u64	src;
+	__u64	offset;
+	__u64	length;
+	__u64	secinfo;
+	__u64	flags;
+	__u64	count;
+};
+
+/**
+ * struct sgx_enclave_init - parameter structure for the
+ *                           %SGX_IOC_ENCLAVE_INIT ioctl
+ * @sigstruct:	address for the SIGSTRUCT data
+ */
+struct sgx_enclave_init {
+	__u64 sigstruct;
+};
+
+/**
+ * struct sgx_enclave_set_attribute - parameter structure for the
+ *				      %SGX_IOC_ENCLAVE_SET_ATTRIBUTE ioctl
+ * @attribute_fd:	file handle of the attribute file in the securityfs
+ */
+struct sgx_enclave_set_attribute {
+	__u64 attribute_fd;
+};
+
+/**
+ * struct sgx_enclave_exception - structure to report exceptions encountered in
+ *				  __vdso_sgx_enter_enclave()
+ *
+ * @leaf:	ENCLU leaf from \%eax at time of exception
+ * @trapnr:	exception trap number, a.k.a. fault vector
+ * @error_code:	exception error code
+ * @address:	exception address, e.g. CR2 on a #PF
+ * @reserved:	reserved for future use
+ */
+struct sgx_enclave_exception {
+	__u32 leaf;
+	__u16 trapnr;
+	__u16 error_code;
+	__u64 address;
+	__u64 reserved[2];
+};
+
+/**
+ * typedef sgx_enclave_exit_handler_t - Exit handler function accepted by
+ *					__vdso_sgx_enter_enclave()
+ *
+ * @rdi:	RDI at the time of enclave exit
+ * @rsi:	RSI at the time of enclave exit
+ * @rdx:	RDX at the time of enclave exit
+ * @ursp:	RSP at the time of enclave exit (untrusted stack)
+ * @r8:		R8 at the time of enclave exit
+ * @r9:		R9 at the time of enclave exit
+ * @tcs:	Thread Control Structure used to enter enclave
+ * @ret:	0 on success (EEXIT), -EFAULT on an exception
+ * @e:		Pointer to struct sgx_enclave_exception (as provided by caller)
+ */
+typedef int (*sgx_enclave_exit_handler_t)(long rdi, long rsi, long rdx,
+					  long ursp, long r8, long r9,
+					  void *tcs, int ret,
+					  struct sgx_enclave_exception *e);
+
+#endif /* _UAPI_ASM_X86_SGX_H */
diff --git a/ubuntu/sgx/Makefile b/ubuntu/sgx/Makefile
index 4401e9d8e71d..394c176a1d53 100644
--- a/ubuntu/sgx/Makefile
+++ b/ubuntu/sgx/Makefile
@@ -1,67 +1,10 @@ 
-# This file is provided under a dual BSD/GPLv2 license.  When using or
-# redistributing this file, you may do so under either license.
-#
-# GPL LICENSE SUMMARY
-#
-# Copyright(c) 2016-2018 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of version 2 of the GNU General Public License as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# Contact Information:
-# Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-# Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-#
-# BSD LICENSE
-#
-# Copyright(c) 2016-2018 Intel Corporation.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-#   * Redistributions of source code must retain the above copyright
-#     notice, this list of conditions and the following disclaimer.
-#   * Redistributions in binary form must reproduce the above copyright
-#     notice, this list of conditions and the following disclaimer in
-#     the documentation and/or other materials provided with the
-#     distribution.
-#   * Neither the name of Intel Corporation nor the names of its
-#     contributors may be used to endorse or promote products derived
-#     from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-#
-# Intel SGX
-#
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+# Copyright(c) 2016-19 Intel Corporation.
 
 ifneq ($(KERNELRELEASE),)
 
 obj-m += intel_sgx.o
-intel_sgx-y := \
-	sgx_ioctl.o \
-	sgx_encl.o \
-	sgx_main.o \
-	sgx_page_cache.o \
-	sgx_util.o \
-	sgx_vma.o
+intel_sgx-y := encl.o main.o reclaim.o driver.o ioctl.o
 
 else
 
@@ -75,7 +18,6 @@  default:
 else
 
 PWD  := $(shell pwd)
-
 default:
 	$(MAKE) -C $(KDIR) M=$(PWD) CFLAGS_MODULE="-I$(PWD) -I$(PWD)/include" modules
 
@@ -83,6 +25,4 @@  endif
 endif
 
 clean:
-	rm -vrf *.o *.ko *.order *.symvers *.mod.c .tmp_versions .*.cmd *.o.ur-safe
-
-
+	rm -vrf *.o *.ko *.order *.symvers *.mod.c .tmp_versions .*.cmd *.o.ur-safe *.mod
diff --git a/ubuntu/sgx/arch.h b/ubuntu/sgx/arch.h
new file mode 100644
index 000000000000..0a2cf7a59a13
--- /dev/null
+++ b/ubuntu/sgx/arch.h
@@ -0,0 +1,342 @@ 
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/**
+ * Copyright(c) 2016-18 Intel Corporation.
+ *
+ * Contains data structures defined by the SGX architecture.  Data structures
+ * defined by the Linux software stack should not be placed here.
+ */
+#ifndef _ASM_X86_SGX_ARCH_H
+#define _ASM_X86_SGX_ARCH_H
+
+#include <linux/types.h>
+
+#define SGX_CPUID				0x12
+#define SGX_CPUID_FIRST_VARIABLE_SUB_LEAF	2
+
+/**
+ * enum sgx_return_code - The return code type for ENCLS, ENCLU and ENCLV
+ * %SGX_NOT_TRACKED:		Previous ETRACK's shootdown sequence has not
+ *				been completed yet.
+ * %SGX_INVALID_EINITTOKEN:	EINITTOKEN is invalid and enclave signer's
+ *				public key does not match IA32_SGXLEPUBKEYHASH.
+ * %SGX_UNMASKED_EVENT:		An unmasked event, e.g. INTR, was received
+ */
+enum sgx_return_code {
+	SGX_NOT_TRACKED			= 11,
+	SGX_INVALID_EINITTOKEN		= 16,
+	SGX_UNMASKED_EVENT		= 128,
+};
+
+/**
+ * enum sgx_sub_leaf_types - SGX CPUID variable sub-leaf types
+ * %SGX_CPUID_SUB_LEAF_INVALID:		Indicates this sub-leaf is invalid.
+ * %SGX_CPUID_SUB_LEAF_EPC_SECTION:	Sub-leaf enumerates an EPC section.
+ */
+enum sgx_sub_leaf_types {
+	SGX_CPUID_SUB_LEAF_INVALID	= 0x0,
+	SGX_CPUID_SUB_LEAF_EPC_SECTION	= 0x1,
+};
+
+#define SGX_CPUID_SUB_LEAF_TYPE_MASK	GENMASK(3, 0)
+
+#define SGX_MODULUS_SIZE 384
+
+/**
+ * enum sgx_miscselect - additional information to an SSA frame
+ * %SGX_MISC_EXINFO:	Report #PF or #GP to the SSA frame.
+ *
+ * Save State Area (SSA) is a stack inside the enclave used to store processor
+ * state when an exception or interrupt occurs. This enum defines additional
+ * information stored to an SSA frame.
+ */
+enum sgx_miscselect {
+	SGX_MISC_EXINFO		= BIT(0),
+};
+
+#define SGX_MISC_RESERVED_MASK	GENMASK_ULL(63, 1)
+
+#define SGX_SSA_GPRS_SIZE		184
+#define SGX_SSA_MISC_EXINFO_SIZE	16
+
+/**
+ * enum sgx_attributes - the attributes field in &struct sgx_secs
+ * %SGX_ATTR_INIT:		Enclave can be entered (is initialized).
+ * %SGX_ATTR_DEBUG:		Allow ENCLS(EDBGRD) and ENCLS(EDBGWR).
+ * %SGX_ATTR_MODE64BIT:		Tell that this a 64-bit enclave.
+ * %SGX_ATTR_PROVISIONKEY:      Allow to use provisioning keys for remote
+ *				attestation.
+ * %SGX_ATTR_KSS:		Allow to use key separation and sharing (KSS).
+ * %SGX_ATTR_EINITTOKENKEY:	Allow to use token signing key that is used to
+ *				sign cryptographic tokens that can be passed to
+ *				EINIT as an authorization to run an enclave.
+ */
+enum sgx_attribute {
+	SGX_ATTR_INIT		= BIT(0),
+	SGX_ATTR_DEBUG		= BIT(1),
+	SGX_ATTR_MODE64BIT	= BIT(2),
+	SGX_ATTR_PROVISIONKEY	= BIT(4),
+	SGX_ATTR_EINITTOKENKEY	= BIT(5),
+	SGX_ATTR_KSS		= BIT(7),
+};
+
+#define SGX_ATTR_RESERVED_MASK	(BIT_ULL(3) | BIT_ULL(6) | GENMASK_ULL(63, 8))
+#define SGX_ATTR_ALLOWED_MASK	(SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT | \
+				 SGX_ATTR_KSS)
+
+/**
+ * struct sgx_secs - SGX Enclave Control Structure (SECS)
+ * @size:		size of the address space
+ * @base:		base address of the  address space
+ * @ssa_frame_size:	size of an SSA frame
+ * @miscselect:		additional information stored to an SSA frame
+ * @attributes:		attributes for enclave
+ * @xfrm:		XSave-Feature Request Mask (subset of XCR0)
+ * @mrenclave:		SHA256-hash of the enclave contents
+ * @mrsigner:		SHA256-hash of the public key used to sign the SIGSTRUCT
+ * @config_id:		a user-defined value that is used in key derivation
+ * @isv_prod_id:	a user-defined value that is used in key derivation
+ * @isv_svn:		a user-defined value that is used in key derivation
+ * @config_svn:		a user-defined value that is used in key derivation
+ *
+ * SGX Enclave Control Structure (SECS) is a special enclave page that is not
+ * visible in the address space. In fact, this structure defines the address
+ * range and other global attributes for the enclave and it is the first EPC
+ * page created for any enclave. It is moved from a temporary buffer to an EPC
+ * by the means of ENCLS(ECREATE) leaf.
+ */
+struct sgx_secs {
+	u64 size;
+	u64 base;
+	u32 ssa_frame_size;
+	u32 miscselect;
+	u8  reserved1[24];
+	u64 attributes;
+	u64 xfrm;
+	u32 mrenclave[8];
+	u8  reserved2[32];
+	u32 mrsigner[8];
+	u8  reserved3[32];
+	u32 config_id[16];
+	u16 isv_prod_id;
+	u16 isv_svn;
+	u16 config_svn;
+	u8  reserved4[3834];
+} __packed;
+
+/**
+ * enum sgx_tcs_flags - execution flags for TCS
+ * %SGX_TCS_DBGOPTIN:	If enabled allows single-stepping and breakpoints
+ *			inside an enclave. It is cleared by EADD but can
+ *			be set later with EDBGWR.
+ */
+enum sgx_tcs_flags {
+	SGX_TCS_DBGOPTIN	= 0x01,
+};
+
+#define SGX_TCS_RESERVED_MASK	GENMASK_ULL(63, 1)
+#define SGX_TCS_RESERVED_SIZE	4024
+
+/**
+ * struct sgx_tcs - Thread Control Structure (TCS)
+ * @state:		used to mark an entered TCS
+ * @flags:		execution flags (cleared by EADD)
+ * @ssa_offset:		SSA stack offset relative to the enclave base
+ * @ssa_index:		the current SSA frame index (cleard by EADD)
+ * @nr_ssa_frames:	the number of frame in the SSA stack
+ * @entry_offset:	entry point offset relative to the enclave base
+ * @exit_addr:		address outside the enclave to exit on an exception or
+ *			interrupt
+ * @fs_offset:		offset relative to the enclave base to become FS
+ *			segment inside the enclave
+ * @gs_offset:		offset relative to the enclave base to become GS
+ *			segment inside the enclave
+ * @fs_limit:		size to become a new FS-limit (only 32-bit enclaves)
+ * @gs_limit:		size to become a new GS-limit (only 32-bit enclaves)
+ *
+ * Thread Control Structure (TCS) is an enclave page visible in its address
+ * space that defines an entry point inside the enclave. A thread enters inside
+ * an enclave by supplying address of TCS to ENCLU(EENTER). A TCS can be entered
+ * by only one thread at a time.
+ */
+struct sgx_tcs {
+	u64 state;
+	u64 flags;
+	u64 ssa_offset;
+	u32 ssa_index;
+	u32 nr_ssa_frames;
+	u64 entry_offset;
+	u64 exit_addr;
+	u64 fs_offset;
+	u64 gs_offset;
+	u32 fs_limit;
+	u32 gs_limit;
+	u8  reserved[SGX_TCS_RESERVED_SIZE];
+} __packed;
+
+/**
+ * struct sgx_pageinfo - an enclave page descriptor
+ * @addr:	address of the enclave page
+ * @contents:	pointer to the page contents
+ * @metadata:	pointer either to a SECINFO or PCMD instance
+ * @secs:	address of the SECS page
+ */
+struct sgx_pageinfo {
+	u64 addr;
+	u64 contents;
+	u64 metadata;
+	u64 secs;
+} __packed __aligned(32);
+
+
+/**
+ * enum sgx_page_type - bits in the SECINFO flags defining the page type
+ * %SGX_PAGE_TYPE_SECS:	a SECS page
+ * %SGX_PAGE_TYPE_TCS:	a TCS page
+ * %SGX_PAGE_TYPE_REG:	a regular page
+ * %SGX_PAGE_TYPE_VA:	a VA page
+ * %SGX_PAGE_TYPE_TRIM:	a page in trimmed state
+ */
+enum sgx_page_type {
+	SGX_PAGE_TYPE_SECS,
+	SGX_PAGE_TYPE_TCS,
+	SGX_PAGE_TYPE_REG,
+	SGX_PAGE_TYPE_VA,
+	SGX_PAGE_TYPE_TRIM,
+};
+
+#define SGX_NR_PAGE_TYPES	5
+#define SGX_PAGE_TYPE_MASK	GENMASK(7, 0)
+
+/**
+ * enum sgx_secinfo_flags - the flags field in &struct sgx_secinfo
+ * %SGX_SECINFO_R:	allow read
+ * %SGX_SECINFO_W:	allow write
+ * %SGX_SECINFO_X:	allow execution
+ * %SGX_SECINFO_SECS:	a SECS page
+ * %SGX_SECINFO_TCS:	a TCS page
+ * %SGX_SECINFO_REG:	a regular page
+ * %SGX_SECINFO_VA:	a VA page
+ * %SGX_SECINFO_TRIM:	a page in trimmed state
+ */
+enum sgx_secinfo_flags {
+	SGX_SECINFO_R			= BIT(0),
+	SGX_SECINFO_W			= BIT(1),
+	SGX_SECINFO_X			= BIT(2),
+	SGX_SECINFO_SECS		= (SGX_PAGE_TYPE_SECS << 8),
+	SGX_SECINFO_TCS			= (SGX_PAGE_TYPE_TCS << 8),
+	SGX_SECINFO_REG			= (SGX_PAGE_TYPE_REG << 8),
+	SGX_SECINFO_VA			= (SGX_PAGE_TYPE_VA << 8),
+	SGX_SECINFO_TRIM		= (SGX_PAGE_TYPE_TRIM << 8),
+};
+
+#define SGX_SECINFO_PERMISSION_MASK	GENMASK_ULL(2, 0)
+#define SGX_SECINFO_PAGE_TYPE_MASK	(SGX_PAGE_TYPE_MASK << 8)
+#define SGX_SECINFO_RESERVED_MASK	~(SGX_SECINFO_PERMISSION_MASK | \
+					  SGX_SECINFO_PAGE_TYPE_MASK)
+
+/**
+ * struct sgx_secinfo - describes attributes of an EPC page
+ * @flags:	permissions and type
+ *
+ * Used together with ENCLS leaves that add or modify an EPC page to an
+ * enclave to define page permissions and type.
+ */
+struct sgx_secinfo {
+	u64 flags;
+	u8  reserved[56];
+} __packed __aligned(64);
+
+#define SGX_PCMD_RESERVED_SIZE 40
+
+/**
+ * struct sgx_pcmd - Paging Crypto Metadata (PCMD)
+ * @enclave_id:	enclave identifier
+ * @mac:	MAC over PCMD, page contents and isvsvn
+ *
+ * PCMD is stored for every swapped page to the regular memory. When ELDU loads
+ * the page back it recalculates the MAC by using a isvsvn number stored in a
+ * VA page. Together these two structures bring integrity and rollback
+ * protection.
+ */
+struct sgx_pcmd {
+	struct sgx_secinfo secinfo;
+	u64 enclave_id;
+	u8  reserved[SGX_PCMD_RESERVED_SIZE];
+	u8  mac[16];
+} __packed __aligned(128);
+
+#define SGX_SIGSTRUCT_RESERVED1_SIZE 84
+#define SGX_SIGSTRUCT_RESERVED2_SIZE 20
+#define SGX_SIGSTRUCT_RESERVED3_SIZE 32
+#define SGX_SIGSTRUCT_RESERVED4_SIZE 12
+
+/**
+ * struct sgx_sigstruct_header -  defines author of the enclave
+ * @header1:		constant byte string
+ * @vendor:		must be either 0x0000 or 0x8086
+ * @date:		YYYYMMDD in BCD
+ * @header2:		costant byte string
+ * @swdefined:		software defined value
+ */
+struct sgx_sigstruct_header {
+	u64 header1[2];
+	u32 vendor;
+	u32 date;
+	u64 header2[2];
+	u32 swdefined;
+	u8  reserved1[84];
+} __packed;
+
+/**
+ * struct sgx_sigstruct_body - defines contents of the enclave
+ * @miscselect:		additional information stored to an SSA frame
+ * @misc_mask:		required miscselect in SECS
+ * @attributes:		attributes for enclave
+ * @xfrm:		XSave-Feature Request Mask (subset of XCR0)
+ * @attributes_mask:	required attributes in SECS
+ * @xfrm_mask:		required XFRM in SECS
+ * @mrenclave:		SHA256-hash of the enclave contents
+ * @isvprodid:		a user-defined value that is used in key derivation
+ * @isvsvn:		a user-defined value that is used in key derivation
+ */
+struct sgx_sigstruct_body {
+	u32 miscselect;
+	u32 misc_mask;
+	u8  reserved2[20];
+	u64 attributes;
+	u64 xfrm;
+	u64 attributes_mask;
+	u64 xfrm_mask;
+	u8  mrenclave[32];
+	u8  reserved3[32];
+	u16 isvprodid;
+	u16 isvsvn;
+} __packed;
+
+/**
+ * struct sgx_sigstruct - an enclave signature
+ * @header:		defines author of the enclave
+ * @modulus:		the modulus of the public key
+ * @exponent:		the exponent of the public key
+ * @signature:		the signature calculated over the fields except modulus,
+ * @body:		defines contents of the enclave
+ * @q1:			a value used in RSA signature verification
+ * @q2:			a value used in RSA signature verification
+ *
+ * Header and body are the parts that are actual signed. The remaining fields
+ * define the signature of the enclave.
+ */
+struct sgx_sigstruct {
+	struct sgx_sigstruct_header header;
+	u8  modulus[SGX_MODULUS_SIZE];
+	u32 exponent;
+	u8  signature[SGX_MODULUS_SIZE];
+	struct sgx_sigstruct_body body;
+	u8  reserved4[12];
+	u8  q1[SGX_MODULUS_SIZE];
+	u8  q2[SGX_MODULUS_SIZE];
+} __packed;
+
+#define SGX_LAUNCH_TOKEN_SIZE 304
+
+#endif /* _ASM_X86_SGX_ARCH_H */
diff --git a/ubuntu/sgx/dkms.conf b/ubuntu/sgx/dkms.conf
new file mode 100644
index 000000000000..bef858e01c4e
--- /dev/null
+++ b/ubuntu/sgx/dkms.conf
@@ -0,0 +1,6 @@ 
+PACKAGE_NAME="sgx"
+PACKAGE_VERSION="1.33"
+BUILT_MODULE_NAME[0]="intel_sgx"
+DEST_MODULE_LOCATION[0]="/kernel/drivers/intel/sgx"
+AUTOINSTALL="yes"
+MAKE[0]="'make'  KDIR=/lib/modules/${kernelver}/build"
diff --git a/ubuntu/sgx/driver.c b/ubuntu/sgx/driver.c
new file mode 100644
index 000000000000..379eb636fd8b
--- /dev/null
+++ b/ubuntu/sgx/driver.c
@@ -0,0 +1,223 @@ 
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2016-18 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/miscdevice.h>
+#include <linux/mman.h>
+#include <linux/security.h>
+#include <linux/suspend.h>
+#include <asm/traps.h>
+#include "driver.h"
+#include "encl.h"
+#include "dcap.h"
+
+#include "version.h"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+u64 sgx_encl_size_max_32;
+u64 sgx_encl_size_max_64;
+u32 sgx_misc_reserved_mask;
+u64 sgx_attributes_reserved_mask;
+u64 sgx_xfrm_reserved_mask = ~0x3;
+u32 sgx_xsave_size_tbl[64];
+
+static int sgx_open(struct inode *inode, struct file *file)
+{
+	struct sgx_encl *encl;
+	int ret;
+
+	encl = kzalloc(sizeof(*encl), GFP_KERNEL);
+	if (!encl)
+		return -ENOMEM;
+
+	atomic_set(&encl->flags, 0);
+	kref_init(&encl->refcount);
+	INIT_LIST_HEAD(&encl->va_pages);
+	INIT_RADIX_TREE(&encl->page_tree, GFP_KERNEL);
+	mutex_init(&encl->lock);
+	INIT_LIST_HEAD(&encl->mm_list);
+	spin_lock_init(&encl->mm_lock);
+
+	ret = init_srcu_struct(&encl->srcu);
+	if (ret) {
+		kfree(encl);
+		return ret;
+	}
+
+	file->private_data = encl;
+
+	return 0;
+}
+
+static int sgx_release(struct inode *inode, struct file *file)
+{
+	struct sgx_encl *encl = file->private_data;
+	struct sgx_encl_mm *encl_mm;
+
+	for ( ; ; )  {
+		spin_lock(&encl->mm_lock);
+
+		if (list_empty(&encl->mm_list)) {
+			encl_mm = NULL;
+		} else {
+			encl_mm = list_first_entry(&encl->mm_list,
+						   struct sgx_encl_mm, list);
+			list_del_rcu(&encl_mm->list);
+		}
+
+		spin_unlock(&encl->mm_lock);
+
+		/* The list is empty, ready to go. */
+		if (!encl_mm)
+			break;
+
+		synchronize_srcu(&encl->srcu);
+		mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm);
+		kfree(encl_mm);
+	};
+
+	mutex_lock(&encl->lock);
+	atomic_or(SGX_ENCL_DEAD, &encl->flags);
+	mutex_unlock(&encl->lock);
+
+	kref_put(&encl->refcount, sgx_encl_release);
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long sgx_compat_ioctl(struct file *filep, unsigned int cmd,
+			      unsigned long arg)
+{
+	return sgx_ioctl(filep, cmd, arg);
+}
+#endif
+
+static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct sgx_encl *encl = file->private_data;
+	int ret;
+
+	ret = sgx_encl_may_map(encl, vma->vm_start, vma->vm_end,
+			       vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC));
+	if (ret)
+		return ret;
+
+	ret = sgx_encl_mm_add(encl, vma->vm_mm);
+	if (ret)
+		return ret;
+
+	vma->vm_ops = &sgx_vm_ops;
+	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+	vma->vm_private_data = encl;
+
+	return 0;
+}
+
+static unsigned long sgx_get_unmapped_area(struct file *file,
+					   unsigned long addr,
+					   unsigned long len,
+					   unsigned long pgoff,
+					   unsigned long flags)
+{
+	if (flags & MAP_PRIVATE)
+		return -EINVAL;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
+static const struct file_operations sgx_encl_fops = {
+	.owner			= THIS_MODULE,
+	.open			= sgx_open,
+	.release		= sgx_release,
+	.unlocked_ioctl		= sgx_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= sgx_compat_ioctl,
+#endif
+	.mmap			= sgx_mmap,
+	.get_unmapped_area	= sgx_get_unmapped_area,
+};
+
+const struct file_operations sgx_provision_fops = {
+	.owner			= THIS_MODULE,
+};
+
+static struct miscdevice sgx_dev_enclave = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "enclave",
+	.nodename = "sgx/enclave",
+	.fops = &sgx_encl_fops,
+};
+
+static struct miscdevice sgx_dev_provision = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "provision",
+	.nodename = "sgx/provision",
+	.fops = &sgx_provision_fops,
+};
+
+
+int __init sgx_drv_init(void)
+{
+	unsigned int eax, ebx, ecx, edx;
+	u64 attr_mask, xfrm_mask;
+	int ret;
+	int i;
+
+	if (!boot_cpu_has(X86_FEATURE_SGX_LC)) {
+		pr_info("The public key MSRs are not writable.\n");
+		return -ENODEV;
+	}
+
+	cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);
+	sgx_misc_reserved_mask = ~ebx | SGX_MISC_RESERVED_MASK;
+	sgx_encl_size_max_64 = 1ULL << ((edx >> 8) & 0xFF);
+	sgx_encl_size_max_32 = 1ULL << (edx & 0xFF);
+
+	cpuid_count(SGX_CPUID, 1, &eax, &ebx, &ecx, &edx);
+
+	attr_mask = (((u64)ebx) << 32) + (u64)eax;
+	sgx_attributes_reserved_mask = ~attr_mask | SGX_ATTR_RESERVED_MASK;
+
+	if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+		xfrm_mask = (((u64)edx) << 32) + (u64)ecx;
+
+		for (i = 2; i < 64; i++) {
+			cpuid_count(0x0D, i, &eax, &ebx, &ecx, &edx);
+			if ((1 << i) & xfrm_mask)
+				sgx_xsave_size_tbl[i] = eax + ebx;
+		}
+
+		sgx_xfrm_reserved_mask = ~xfrm_mask;
+	}
+
+	ret = misc_register(&sgx_dev_enclave);
+	if (ret) {
+		pr_err("Creating /dev/sgx/enclave failed with %d.\n", ret);
+		return ret;
+	}
+
+	ret = misc_register(&sgx_dev_provision);
+	if (ret) {
+		pr_err("Creating /dev/sgx/provision failed with %d.\n", ret);
+		misc_deregister(&sgx_dev_enclave);
+		return ret;
+	}
+
+
+	return 0;
+}
+
+int __exit sgx_drv_exit(void)
+{
+	misc_deregister(&sgx_dev_enclave);
+	misc_deregister(&sgx_dev_provision);
+
+	return 0;
+}
diff --git a/ubuntu/sgx/driver.h b/ubuntu/sgx/driver.h
new file mode 100644
index 000000000000..c90e13275060
--- /dev/null
+++ b/ubuntu/sgx/driver.h
@@ -0,0 +1,33 @@ 
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+#ifndef __ARCH_SGX_DRIVER_H__
+#define __ARCH_SGX_DRIVER_H__
+
+#include <crypto/hash.h>
+#include <linux/kref.h>
+#include <linux/mmu_notifier.h>
+#include <linux/radix-tree.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include "uapi/asm/sgx_oot.h"
+#include "sgx.h"
+
+#define SGX_EINIT_SPIN_COUNT	20
+#define SGX_EINIT_SLEEP_COUNT	50
+#define SGX_EINIT_SLEEP_TIME	20
+
+extern u64 sgx_encl_size_max_32;
+extern u64 sgx_encl_size_max_64;
+extern u32 sgx_misc_reserved_mask;
+extern u64 sgx_attributes_reserved_mask;
+extern u64 sgx_xfrm_reserved_mask;
+extern u32 sgx_xsave_size_tbl[64];
+
+extern const struct file_operations sgx_provision_fops;
+
+long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+
+int sgx_drv_init(void);
+int sgx_drv_exit(void);
+
+#endif /* __ARCH_X86_SGX_DRIVER_H__ */
diff --git a/ubuntu/sgx/encl.c b/ubuntu/sgx/encl.c
new file mode 100644
index 000000000000..8e8e861d5c7e
--- /dev/null
+++ b/ubuntu/sgx/encl.c
@@ -0,0 +1,821 @@ 
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2016-18 Intel Corporation.
+
+#include <linux/lockdep.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/shmem_fs.h>
+#include <linux/suspend.h>
+#include <linux/sched/mm.h>
+#include "arch.h"
+#include "encl.h"
+#include "encls.h"
+#include "sgx.h"
+#include "dcap.h"
+
+#include <linux/version.h>
+
+static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
+			   struct sgx_epc_page *epc_page,
+			   struct sgx_epc_page *secs_page)
+{
+	unsigned long va_offset = SGX_ENCL_PAGE_VA_OFFSET(encl_page);
+	struct sgx_encl *encl = encl_page->encl;
+	struct sgx_pageinfo pginfo;
+	struct sgx_backing b;
+	pgoff_t page_index;
+	int ret;
+
+	if (secs_page)
+		page_index = SGX_ENCL_PAGE_INDEX(encl_page);
+	else
+		page_index = PFN_DOWN(encl->size);
+
+	ret = sgx_encl_get_backing(encl, page_index, &b);
+	if (ret)
+		return ret;
+
+	pginfo.addr = SGX_ENCL_PAGE_ADDR(encl_page);
+	pginfo.contents = (unsigned long)kmap_atomic(b.contents);
+	pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) +
+			  b.pcmd_offset;
+
+	if (secs_page)
+		pginfo.secs = (u64)sgx_epc_addr(secs_page);
+	else
+		pginfo.secs = 0;
+
+	ret = __eldu(&pginfo, sgx_epc_addr(epc_page),
+		     sgx_epc_addr(encl_page->va_page->epc_page) + va_offset);
+	if (ret) {
+		if (encls_failed(ret))
+			ENCLS_WARN(ret, "ELDU");
+
+		ret = -EFAULT;
+	}
+
+	kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset));
+	kunmap_atomic((void *)(unsigned long)pginfo.contents);
+
+	sgx_encl_put_backing(&b, false);
+
+	return ret;
+}
+
+static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
+					  struct sgx_epc_page *secs_page)
+{
+	unsigned long va_offset = SGX_ENCL_PAGE_VA_OFFSET(encl_page);
+	struct sgx_encl *encl = encl_page->encl;
+	struct sgx_epc_page *epc_page;
+	int ret;
+
+	epc_page = sgx_alloc_page(encl_page, false);
+	if (IS_ERR(epc_page))
+		return epc_page;
+
+	ret = __sgx_encl_eldu(encl_page, epc_page, secs_page);
+	if (ret) {
+		sgx_free_page(epc_page);
+		return ERR_PTR(ret);
+	}
+
+	sgx_free_va_slot(encl_page->va_page, va_offset);
+	list_move(&encl_page->va_page->list, &encl->va_pages);
+	encl_page->desc &= ~SGX_ENCL_PAGE_VA_OFFSET_MASK;
+	encl_page->epc_page = epc_page;
+
+	return epc_page;
+}
+
+static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
+						unsigned long addr)
+{
+	struct sgx_epc_page *epc_page;
+	struct sgx_encl_page *entry;
+	unsigned int flags;
+
+	/* If process was forked, VMA is still there but vm_private_data is set
+	 * to NULL.
+	 */
+	if (!encl)
+		return ERR_PTR(-EFAULT);
+
+	flags = atomic_read(&encl->flags);
+
+	if ((flags & SGX_ENCL_DEAD) || !(flags & SGX_ENCL_INITIALIZED))
+		return ERR_PTR(-EFAULT);
+
+	entry = radix_tree_lookup(&encl->page_tree, addr >> PAGE_SHIFT);
+	if (!entry)
+		return ERR_PTR(-EFAULT);
+
+	/* Page is already resident in the EPC. */
+	if (entry->epc_page) {
+		if (entry->desc & SGX_ENCL_PAGE_RECLAIMED)
+			return ERR_PTR(-EBUSY);
+
+		return entry;
+	}
+
+	if (!(encl->secs.epc_page)) {
+		epc_page = sgx_encl_eldu(&encl->secs, NULL);
+		if (IS_ERR(epc_page))
+			return ERR_CAST(epc_page);
+	}
+
+	epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
+	if (IS_ERR(epc_page))
+		return ERR_CAST(epc_page);
+
+	encl->secs_child_cnt++;
+	sgx_mark_page_reclaimable(entry->epc_page);
+
+	return entry;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0))
+static void sgx_encl_mm_release_deferred(struct rcu_head *rcu)
+{
+	struct sgx_encl_mm *encl_mm =
+		container_of(rcu, struct sgx_encl_mm, rcu);
+
+	kfree(encl_mm);
+}
+#endif
+
+static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
+				     struct mm_struct *mm)
+{
+	struct sgx_encl_mm *encl_mm =
+		container_of(mn, struct sgx_encl_mm, mmu_notifier);
+	struct sgx_encl_mm *tmp = NULL;
+
+	/*
+	 * The enclave itself can remove encl_mm.  Note, objects can't be moved
+	 * off an RCU protected list, but deletion is ok.
+	 */
+	spin_lock(&encl_mm->encl->mm_lock);
+	list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) {
+		if (tmp == encl_mm) {
+			list_del_rcu(&encl_mm->list);
+			break;
+		}
+	}
+	spin_unlock(&encl_mm->encl->mm_lock);
+
+	if (tmp == encl_mm) {
+		synchronize_srcu(&encl_mm->encl->srcu);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0))
+		mmu_notifier_put(mn);
+#else
+		/*
+		 * Delay freeing encl_mm until after mmu_notifier synchronizes
+		 * its SRCU to ensure encl_mm cannot be dereferenced.
+		 */
+		mmu_notifier_unregister_no_release(mn, mm);
+		mmu_notifier_call_srcu(&encl_mm->rcu,
+				       &sgx_encl_mm_release_deferred);
+#endif
+	}
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0))
+static void sgx_mmu_notifier_free(struct mmu_notifier *mn)
+{
+	struct sgx_encl_mm *encl_mm =
+		container_of(mn, struct sgx_encl_mm, mmu_notifier);
+
+	kfree(encl_mm);
+}
+#endif
+
+static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
+	.release		= sgx_mmu_notifier_release,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0))
+	.free_notifier		= sgx_mmu_notifier_free,
+#endif
+};
+
+static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl,
+					    struct mm_struct *mm)
+{
+	struct sgx_encl_mm *encl_mm = NULL;
+	struct sgx_encl_mm *tmp;
+	int idx;
+
+	idx = srcu_read_lock(&encl->srcu);
+
+	list_for_each_entry_rcu(tmp, &encl->mm_list, list) {
+		if (tmp->mm == mm) {
+			encl_mm = tmp;
+			break;
+		}
+	}
+
+	srcu_read_unlock(&encl->srcu, idx);
+
+	return encl_mm;
+}
+
+int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
+{
+	struct sgx_encl_mm *encl_mm;
+	int ret;
+
+	/*
+	 * This flow relies on mmap_sem being held for write to prevent adding
+	 * multiple encl_mm instances for a single mm_struct, i.e. it prevents
+	 * races between checking sgx_encl_find_mm() and adding to mm_list.
+	 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,0))
+	lockdep_assert_held_write(&mm->mmap_sem);
+#else
+	lockdep_assert_held_exclusive(&mm->mmap_sem);
+#endif
+
+	if (atomic_read(&encl->flags) & SGX_ENCL_DEAD)
+		return -EINVAL;
+
+	/*
+	 * mm_structs are kept on mm_list until the mm or the enclave dies,
+	 * i.e. once an mm is off the list, it's gone for good, therefore it's
+	 * impossible to get a false positive on @mm due to a stale mm_list.
+	 */
+	if (sgx_encl_find_mm(encl, mm))
+		return 0;
+
+	encl_mm = kzalloc(sizeof(*encl_mm), GFP_KERNEL);
+	if (!encl_mm)
+		return -ENOMEM;
+
+	encl_mm->encl = encl;
+	encl_mm->mm = mm;
+	encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops;
+
+	ret = __mmu_notifier_register(&encl_mm->mmu_notifier, mm);
+	if (ret) {
+		kfree(encl_mm);
+		return ret;
+	}
+
+	spin_lock(&encl->mm_lock);
+	list_add_rcu(&encl_mm->list, &encl->mm_list);
+	/*
+	 * Ensure the mm is added to mm_list before updating the generation.
+	 * Pairs with the smp_rmb() in sgx_reclaimer_block().
+	 */
+	smp_wmb();
+	encl->mm_list_gen++;
+	spin_unlock(&encl->mm_lock);
+
+	/*
+	 * DO NOT call synchronize_srcu()!  When this is called via dup_mmap(),
+	 * mmap_sem is held for write in both the old mm and new mm, and the
+	 * reclaimer may be holding srcu for read while waiting on down_read()
+	 * for the old mm's mmap_sem, i.e. synchronize_srcu() will deadlock.
+	 * Incrementing mm_list_gen ensures readers that must not race with a
+	 * mm being added will see the updated list.
+	 */
+	return 0;
+}
+
+static void sgx_vma_open(struct vm_area_struct *vma)
+{
+	struct sgx_encl *encl = vma->vm_private_data;
+
+	if (!encl)
+		return;
+
+	if (sgx_encl_mm_add(encl, vma->vm_mm))
+		vma->vm_private_data = NULL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
+static unsigned int sgx_vma_fault(struct vm_fault *vmf)
+#else
+static int sgx_vma_fault(struct vm_fault *vmf)
+#endif
+{
+	unsigned long addr = (unsigned long)vmf->address;
+	struct vm_area_struct *vma = vmf->vma;
+	struct sgx_encl *encl = vma->vm_private_data;
+	struct sgx_encl_page *entry;
+	int ret = VM_FAULT_NOPAGE;
+	unsigned long pfn;
+
+	if (!encl)
+		return VM_FAULT_SIGBUS;
+
+	mutex_lock(&encl->lock);
+
+	entry = sgx_encl_load_page(encl, addr);
+	if (IS_ERR(entry)) {
+		if (unlikely(PTR_ERR(entry) != -EBUSY))
+			ret = VM_FAULT_SIGBUS;
+
+		goto out;
+	}
+
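+	/* Nothing to do if a PTE already maps the address. */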
+	if (!follow_pfn(vma, addr, &pfn))
+		goto out;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
+	ret = vmf_insert_pfn(vma, addr, PFN_DOWN(entry->epc_page->desc));
+	if (ret != VM_FAULT_NOPAGE) {
+#else
+	ret = vm_insert_pfn(vma, addr, PFN_DOWN(entry->epc_page->desc));
+	if (!ret) {
+		ret = VM_FAULT_NOPAGE;
+	} else {
+#endif
+		ret = VM_FAULT_SIGBUS;
+		goto out;
+	}
+	sgx_encl_test_and_clear_young(vma->vm_mm, entry);
+
+out:
+	mutex_unlock(&encl->lock);
+	return ret;
+}
+
+/**
+ * sgx_encl_may_map() - Check if a requested VMA mapping is allowed
+ * @encl:		an enclave
+ * @start:		lower bound of the address range, inclusive
+ * @end:		upper bound of the address range, exclusive
+ * @vm_prot_bits:	requested protections of the address range
+ *
+ * Iterate through the enclave pages contained within [@start, @end) to verify
+ * the permissions requested by @vm_prot_bits do not exceed that of any enclave
+ * page to be mapped.  Page addresses that do not have an associated enclave
+ * page are interpreted as having zero permissions.
+ *
+ * Return:
+ *   0 on success,
+ *   -EACCES if VMA permissions exceed enclave page permissions
+ */
+int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
+		     unsigned long end, unsigned long vm_prot_bits)
+{
+	unsigned long idx, idx_start, idx_end;
+	struct sgx_encl_page *page;
+
+	/*
+	 * Disallow RIE tasks as their VMA permissions might conflict with the
+	 * enclave page permissions.
+	 */
+	if (!!(current->personality & READ_IMPLIES_EXEC))
+		return -EACCES;
+
+	/* PROT_NONE always succeeds. */
+	if (!vm_prot_bits)
+		return 0;
+
+	idx_start = PFN_DOWN(start);
+	idx_end = PFN_DOWN(end - 1);
+
+	for (idx = idx_start; idx <= idx_end; ++idx) {
+		mutex_lock(&encl->lock);
+		page = radix_tree_lookup(&encl->page_tree, idx);
+		mutex_unlock(&encl->lock);
+
+		if (!page || (~page->vm_max_prot_bits & vm_prot_bits))
+			return -EACCES;
+	}
+
+	return 0;
+}
+
+/*
+static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end, unsigned long prot)
+{
+	return sgx_encl_may_map(vma->vm_private_data, start, end,
+				calc_vm_prot_bits(prot, 0));
+}
+*/
+
+static int sgx_edbgrd(struct sgx_encl *encl, struct sgx_encl_page *page,
+		      unsigned long addr, void *data)
+{
+	unsigned long offset = addr & ~PAGE_MASK;
+	int ret;
+
+	ret = __edbgrd(sgx_epc_addr(page->epc_page) + offset, data);
+	if (ret)
+		return -EIO;
+
+	return 0;
+}
+
+static int sgx_edbgwr(struct sgx_encl *encl, struct sgx_encl_page *page,
+		      unsigned long addr, void *data)
+{
+	unsigned long offset = addr & ~PAGE_MASK;
+	int ret;
+
+	ret = __edbgwr(sgx_epc_addr(page->epc_page) + offset, data);
+	if (ret)
+		return -EIO;
+
+	return 0;
+}
+
+static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
+			  void *buf, int len, int write)
+{
+	struct sgx_encl *encl = vma->vm_private_data;
+	struct sgx_encl_page *entry = NULL;
+	char data[sizeof(unsigned long)];
+	unsigned long align;
+	unsigned int flags;
+	int offset;
+	int cnt;
+	int ret = 0;
+	int i;
+
+	/* If process was forked, VMA is still there but vm_private_data is set
+	 * to NULL.
+	 */
+	if (!encl)
+		return -EFAULT;
+
+	flags = atomic_read(&encl->flags);
+
+	if (!(flags & SGX_ENCL_DEBUG) || !(flags & SGX_ENCL_INITIALIZED) ||
+	    (flags & SGX_ENCL_DEAD))
+		return -EFAULT;
+
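+	/* Access the enclave one aligned word at a time via EDBGRD/EDBGWR. */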
+	for (i = 0; i < len; i += cnt) {
+		entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK);
+		if (IS_ERR(entry)) {
+			ret = PTR_ERR(entry);
+			break;
+		}
+
+		align = ALIGN_DOWN(addr + i, sizeof(unsigned long));
+		offset = (addr + i) & (sizeof(unsigned long) - 1);
+		cnt = sizeof(unsigned long) - offset;
+		cnt = min(cnt, len - i);
+
+		ret = sgx_edbgrd(encl, entry, align, data);
+		if (ret)
+			goto out;
+
+		if (write) {
+			memcpy(data + offset, buf + i, cnt);
+			ret = sgx_edbgwr(encl, entry, align, data);
+			if (ret)
+				goto out;
+		} else
+			memcpy(buf + i, data + offset, cnt);
+
+out:
+		mutex_unlock(&encl->lock);
+
+		if (ret)
+			break;
+	}
+
+	return ret < 0 ? ret : i;
+}
+
+const struct vm_operations_struct sgx_vm_ops = {
+	.open = sgx_vma_open,
+	.fault = sgx_vma_fault,
+//	.may_mprotect = sgx_vma_mprotect,
+	.access = sgx_vma_access,
+};
+
+/**
+ * sgx_encl_find - find an enclave
+ * @mm:		mm struct of the current process
+ * @addr:	address in the ELRANGE
+ * @vma:	the resulting VMA
+ *
+ * Find an enclave identified by the given address. Give back a VMA that is
+ * part of the enclave and contains the address. The VMA is given back as long
+ * as it is a proper enclave VMA, even if an &sgx_encl instance does not exist
+ * yet (enclave creation has not been performed).
+ *
+ * Return:
+ *   0 on success,
+ *   -EINVAL if an enclave was not found,
+ *   -ENOENT if the enclave has not been created yet
+ */
+int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
+		  struct vm_area_struct **vma)
+{
+	struct vm_area_struct *result;
+	struct sgx_encl *encl;
+
+	result = find_vma(mm, addr);
+	if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
+		return -EINVAL;
+
+	encl = result->vm_private_data;
+	*vma = result;
+
+	return encl ? 0 : -ENOENT;
+}
+
+/**
+ * sgx_encl_destroy() - destroy enclave resources
+ * @encl:	an &sgx_encl instance
+ */
+void sgx_encl_destroy(struct sgx_encl *encl)
+{
+	struct sgx_va_page *va_page;
+	struct sgx_encl_page *entry;
+	struct radix_tree_iter iter;
+	void **slot;
+
+	atomic_or(SGX_ENCL_DEAD, &encl->flags);
+
+	radix_tree_for_each_slot(slot, &encl->page_tree, &iter, 0) {
+		entry = *slot;
+
+		if (entry->epc_page) {
+			/*
+			 * The page and its radix tree entry cannot be freed
+			 * if the page is being held by the reclaimer.
+			 */
+			if (sgx_unmark_page_reclaimable(entry->epc_page))
+				continue;
+
+			sgx_free_page(entry->epc_page);
+			encl->secs_child_cnt--;
+			entry->epc_page = NULL;
+		}
+
+		radix_tree_delete(&entry->encl->page_tree,
+				  PFN_DOWN(entry->desc));
+		kfree(entry);
+	}
+
+	if (!encl->secs_child_cnt && encl->secs.epc_page) {
+		sgx_free_page(encl->secs.epc_page);
+		encl->secs.epc_page = NULL;
+	}
+
+	/*
+	 * The reclaimer is responsible for checking SGX_ENCL_DEAD before doing
+	 * EWB, thus it's safe to free VA pages even if the reclaimer holds a
+	 * reference to the enclave.
+	 */
+	while (!list_empty(&encl->va_pages)) {
+		va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
+					   list);
+		list_del(&va_page->list);
+		sgx_free_page(va_page->epc_page);
+		kfree(va_page);
+	}
+}
+
+/**
+ * sgx_encl_release - Destroy an enclave instance
+ * @ref:	address of a kref inside &sgx_encl
+ *
+ * Used together with kref_put(). Frees all the resources associated with the
+ * enclave and the instance itself.
+ */
+void sgx_encl_release(struct kref *ref)
+{
+	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
+
+	sgx_encl_destroy(encl);
+
+	if (encl->backing)
+		fput(encl->backing);
+
+	WARN_ON_ONCE(!list_empty(&encl->mm_list));
+
+	/* Detect EPC page leaks. */
+	WARN_ON_ONCE(encl->secs_child_cnt);
+	WARN_ON_ONCE(encl->secs.epc_page);
+
+	kfree(encl);
+}
+
+static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
+					      pgoff_t index)
+{
+	struct inode *inode = encl->backing->f_path.dentry->d_inode;
+	struct address_space *mapping = inode->i_mapping;
+	gfp_t gfpmask = mapping_gfp_mask(mapping);
+
+	return shmem_read_mapping_page_gfp(mapping, index, gfpmask);
+}
+
+/**
+ * sgx_encl_get_backing() - Pin the backing storage
+ * @encl:	an enclave
+ * @page_index:	enclave page index
+ * @backing:	data for accessing backing storage for the page
+ *
+ * Pin the backing storage pages for storing the encrypted contents and Paging
+ * Crypto MetaData (PCMD) of an enclave page.
+ *
+ * Return:
+ *   0 on success,
+ *   -errno otherwise.
+ */
+int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+			 struct sgx_backing *backing)
+{
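+	/*
+	 * PCMD entries are packed 32 per backing page and stored right after
+	 * the SECS slot in the backing file.
+	 */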
+	pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
+	struct page *contents;
+	struct page *pcmd;
+
+	contents = sgx_encl_get_backing_page(encl, page_index);
+	if (IS_ERR(contents))
+		return PTR_ERR(contents);
+
+	pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
+	if (IS_ERR(pcmd)) {
+		put_page(contents);
+		return PTR_ERR(pcmd);
+	}
+
+	backing->page_index = page_index;
+	backing->contents = contents;
+	backing->pcmd = pcmd;
+	backing->pcmd_offset =
+		(page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) *
+		sizeof(struct sgx_pcmd);
+
+	return 0;
+}
+
+/**
+ * sgx_encl_put_backing() - Unpin the backing storage
+ * @backing:	data for accessing backing storage for the page
+ * @do_write:	mark pages dirty
+ */
+void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write)
+{
+	if (do_write) {
+		set_page_dirty(backing->pcmd);
+		set_page_dirty(backing->contents);
+	}
+
+	put_page(backing->pcmd);
+	put_page(backing->contents);
+}
+
+static int sgx_encl_test_and_clear_young_cb(pte_t *ptep,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0))
+				       pgtable_t token,
+#endif
+				       unsigned long addr, void *data)
+{
+	pte_t pte;
+	int ret;
+
+	ret = pte_young(*ptep);
+	if (ret) {
+		pte = pte_mkold(*ptep);
+		set_pte_at((struct mm_struct *)data, addr, ptep, pte);
+	}
+
+	return ret;
+}
+
+/**
+ * sgx_encl_test_and_clear_young() - Test and reset the accessed bit
+ * @mm:		mm_struct that is checked
+ * @page:	enclave page to be tested for recent access
+ *
+ * Checks the Access (A) bit from the PTE corresponding to the enclave page and
+ * clears it.
+ *
+ * Return: 1 if the page has been recently accessed and 0 if not.
+ */
+int sgx_encl_test_and_clear_young(struct mm_struct *mm,
+				  struct sgx_encl_page *page)
+{
+	unsigned long addr = SGX_ENCL_PAGE_ADDR(page);
+	struct sgx_encl *encl = page->encl;
+	struct vm_area_struct *vma;
+	int ret;
+
+	ret = sgx_encl_find(mm, addr, &vma);
+	if (ret)
+		return 0;
+
+	if (encl != vma->vm_private_data)
+		return 0;
+
+	ret = apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE,
+				  sgx_encl_test_and_clear_young_cb, vma->vm_mm);
+	if (ret < 0)
+		return 0;
+
+	return ret;
+}
+
+/**
+ * sgx_encl_reserve_page() - Reserve an enclave page
+ * @encl:	an enclave
+ * @addr:	a page address
+ *
+ * Load an enclave page and lock the enclave so that the page can be used by
+ * EDBG* and EMOD*.
+ *
+ * Return:
+ *   an enclave page on success
+ *   -EFAULT	if the load fails
+ */
+struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
+					    unsigned long addr)
+{
+	struct sgx_encl_page *entry;
+
+	for ( ; ; ) {
+		mutex_lock(&encl->lock);
+
+		entry = sgx_encl_load_page(encl, addr);
+		if (PTR_ERR(entry) != -EBUSY)
+			break;
+
+		mutex_unlock(&encl->lock);
+	}
+
+	if (IS_ERR(entry))
+		mutex_unlock(&encl->lock);
+
+	return entry;
+}
+
+/**
+ * sgx_alloc_va_page - allocate a VA page
+ *
+ * Allocates an &sgx_epc_page instance and converts it to a VA page.
+ *
+ * Return:
+ *   a &struct sgx_epc_page instance,
+ *   -errno otherwise
+ */
+struct sgx_epc_page *sgx_alloc_va_page(void)
+{
+	struct sgx_epc_page *epc_page;
+	int ret;
+
+	epc_page = sgx_alloc_page(NULL, true);
+	if (IS_ERR(epc_page))
+		return ERR_CAST(epc_page);
+
+	ret = __epa(sgx_epc_addr(epc_page));
+	if (ret) {
+		WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret);
+		sgx_free_page(epc_page);
+		return ERR_PTR(-EFAULT);
+	}
+
+	return epc_page;
+}
+
+/**
+ * sgx_alloc_va_slot - allocate a VA slot
+ * @va_page:	a &struct sgx_va_page instance
+ *
+ * Allocates a slot from a &struct sgx_va_page instance.
+ *
+ * Return: offset of the slot inside the VA page
+ */
+unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page)
+{
+	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);
+
+	if (slot < SGX_VA_SLOT_COUNT)
+		set_bit(slot, va_page->slots);
+
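+	/* Each VA slot is 8 bytes, so the byte offset is slot * 8. */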
+	return slot << 3;
+}
+
+/**
+ * sgx_free_va_slot - free a VA slot
+ * @va_page:	a &struct sgx_va_page instance
+ * @offset:	offset of the slot inside the VA page
+ *
+ * Frees a slot from a &struct sgx_va_page instance.
+ */
+void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset)
+{
+	clear_bit(offset >> 3, va_page->slots);
+}
+
+/**
+ * sgx_va_page_full - is the VA page full?
+ * @va_page:	a &struct sgx_va_page instance
+ *
+ * Return: true if all slots have been taken
+ */
+bool sgx_va_page_full(struct sgx_va_page *va_page)
+{
+	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);
+
+	return slot == SGX_VA_SLOT_COUNT;
+}
diff --git a/ubuntu/sgx/encl.h b/ubuntu/sgx/encl.h
new file mode 100644
index 000000000000..8683da87b49a
--- /dev/null
+++ b/ubuntu/sgx/encl.h
@@ -0,0 +1,132 @@ 
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/**
+ * Copyright(c) 2016-19 Intel Corporation.
+ */
+#ifndef _X86_ENCL_H
+#define _X86_ENCL_H
+
+#include <linux/version.h>
+#include <linux/cpumask.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/mmu_notifier.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/radix-tree.h>
+#include <linux/srcu.h>
+#include <linux/workqueue.h>
+#include "sgx.h"
+
+/**
+ * enum sgx_encl_page_desc - defines bits for an enclave page's descriptor
+ * %SGX_ENCL_PAGE_RECLAIMED:		The page is in the process of being
+ *					reclaimed.
+ * %SGX_ENCL_PAGE_VA_OFFSET_MASK:	Holds the offset in the Version Array
+ *					(VA) page for a swapped page.
+ * %SGX_ENCL_PAGE_ADDR_MASK:		Holds the virtual address of the page.
+ *
+ * The page address for SECS is zero and is used by the subsystem to recognize
+ * the SECS page.
+ */
+enum sgx_encl_page_desc {
+	/* Bits 11:3 are available when the page is not swapped. */
+	SGX_ENCL_PAGE_RECLAIMED		= BIT(3),
+	SGX_ENCL_PAGE_VA_OFFSET_MASK	= GENMASK_ULL(11, 3),
+	SGX_ENCL_PAGE_ADDR_MASK		= PAGE_MASK,
+};
+
+#define SGX_ENCL_PAGE_ADDR(page) \
+	((page)->desc & SGX_ENCL_PAGE_ADDR_MASK)
+#define SGX_ENCL_PAGE_VA_OFFSET(page) \
+	((page)->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK)
+#define SGX_ENCL_PAGE_INDEX(page) \
+	PFN_DOWN((page)->desc - (page)->encl->base)
+
+struct sgx_encl_page {
+	unsigned long desc;
+	unsigned long vm_max_prot_bits;
+	struct sgx_epc_page *epc_page;
+	struct sgx_va_page *va_page;
+	struct sgx_encl *encl;
+};
+
+enum sgx_encl_flags {
+	SGX_ENCL_CREATED	= BIT(0),
+	SGX_ENCL_INITIALIZED	= BIT(1),
+	SGX_ENCL_DEBUG		= BIT(2),
+	SGX_ENCL_DEAD		= BIT(3),
+	SGX_ENCL_IOCTL		= BIT(4),
+};
+
+struct sgx_encl_mm {
+	struct sgx_encl *encl;
+	struct mm_struct *mm;
+	struct list_head list;
+	struct mmu_notifier mmu_notifier;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0))
+	struct rcu_head rcu;
+#endif
+};
+
+struct sgx_encl {
+	atomic_t flags;
+	u64 secs_attributes;
+	u64 allowed_attributes;
+	unsigned int page_cnt;
+	unsigned int secs_child_cnt;
+	struct mutex lock;
+	struct list_head mm_list;
+	spinlock_t mm_lock;
+	unsigned long mm_list_gen;
+	struct file *backing;
+	struct kref refcount;
+	struct srcu_struct srcu;
+	unsigned long base;
+	unsigned long size;
+	unsigned long ssaframesize;
+	struct list_head va_pages;
+	struct radix_tree_root page_tree;
+	struct sgx_encl_page secs;
+	cpumask_t cpumask;
+};
+
+#define SGX_VA_SLOT_COUNT 512
+
+struct sgx_va_page {
+	struct sgx_epc_page *epc_page;
+	DECLARE_BITMAP(slots, SGX_VA_SLOT_COUNT);
+	struct list_head list;
+};
+
+extern const struct vm_operations_struct sgx_vm_ops;
+
+int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
+		  struct vm_area_struct **vma);
+void sgx_encl_destroy(struct sgx_encl *encl);
+void sgx_encl_release(struct kref *ref);
+int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
+int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
+		     unsigned long end, unsigned long vm_prot_bits);
+
+struct sgx_backing {
+	pgoff_t page_index;
+	struct page *contents;
+	struct page *pcmd;
+	unsigned long pcmd_offset;
+};
+
+int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+			 struct sgx_backing *backing);
+void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
+int sgx_encl_test_and_clear_young(struct mm_struct *mm,
+				  struct sgx_encl_page *page);
+struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
+					    unsigned long addr);
+
+struct sgx_epc_page *sgx_alloc_va_page(void);
+unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
+void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
+bool sgx_va_page_full(struct sgx_va_page *va_page);
+
+#endif /* _X86_ENCL_H */
diff --git a/ubuntu/sgx/encls.h b/ubuntu/sgx/encls.h
new file mode 100644
index 000000000000..4bc9b553d653
--- /dev/null
+++ b/ubuntu/sgx/encls.h
@@ -0,0 +1,239 @@ 
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+#ifndef _X86_ENCLS_H
+#define _X86_ENCLS_H
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/rwsem.h>
+#include <linux/types.h>
+#include <asm/asm.h>
+#include <asm/traps.h>
+#include "sgx.h"
+#include "dcap.h"
+
+enum sgx_encls_leaf {
+	ECREATE	= 0x00,
+	EADD	= 0x01,
+	EINIT	= 0x02,
+	EREMOVE	= 0x03,
+	EDGBRD	= 0x04,
+	EDGBWR	= 0x05,
+	EEXTEND	= 0x06,
+	ELDU	= 0x08,
+	EBLOCK	= 0x09,
+	EPA	= 0x0A,
+	EWB	= 0x0B,
+	ETRACK	= 0x0C,
+};
+
+/**
+ * ENCLS_FAULT_FLAG - flag signifying an ENCLS return code is a trapnr
+ *
+ * ENCLS has its own (positive value) error codes and also generates
+ * ENCLS specific #GP and #PF faults.  And the ENCLS values get munged
+ * with system error codes as everything percolates back up the stack.
+ * Unfortunately (for us), we need to precisely identify each unique
+ * error code, e.g. the action taken if EWB fails varies based on the
+ * type of fault and on the exact SGX error code, i.e. we can't simply
+ * convert all faults to -EFAULT.
+ *
+ * To make all three error types coexist, we set bit 30 to identify an
+ * ENCLS fault.  Bit 31 (technically bits N:31) is used to differentiate
+ * between positive (faults and SGX error codes) and negative (system
+ * error codes) values.
+ */
+#define ENCLS_FAULT_FLAG 0x40000000
+
+/* Retrieve the encoded trapnr from the specified return code. */
+#define ENCLS_TRAPNR(r) ((r) & ~ENCLS_FAULT_FLAG)
+
+/* Issue a WARN() about an ENCLS leaf. */
+#define ENCLS_WARN(r, name) {						  \
+	do {								  \
+		int _r = (r);						  \
+		WARN(_r, "%s returned %d (0x%x)\n", (name), _r, _r); \
+	} while (0);							  \
+}
+
+/**
+ * encls_failed() - Check if an ENCLS leaf function failed
+ * @ret:	the return value of an ENCLS leaf function call
+ *
+ * Check if an ENCLS leaf function failed. This happens when the leaf function
+ * causes a fault that is not caused by an EPCM conflict or when the leaf
+ * function returns a non-zero value.
+ */
+static inline bool encls_failed(int ret)
+{
+	int epcm_trapnr;
+
+	if (boot_cpu_has(X86_FEATURE_SGX2))
+		epcm_trapnr = X86_TRAP_PF;
+	else
+		epcm_trapnr = X86_TRAP_GP;
+
+	if (ret & ENCLS_FAULT_FLAG)
+		return ENCLS_TRAPNR(ret) != epcm_trapnr;
+
+	return !!ret;
+}
+
+/**
+ * __encls_ret_N - encode an ENCLS leaf that returns an error code in EAX
+ * @rax:	leaf number
+ * @inputs:	asm inputs for the leaf
+ *
+ * Emit assembly for an ENCLS leaf that returns an error code, e.g. EREMOVE.
+ * And because SGX isn't complex enough as it is, leafs that return an error
+ * code also modify flags.
+ *
+ * Return:
+ *	0 on success,
+ *	SGX error code on failure
+ */
+#define __encls_ret_N(rax, inputs...)				\
+	({							\
+	int ret;						\
+	asm volatile(						\
+	"1: .byte 0x0f, 0x01, 0xcf;\n\t"			\
+	"2:\n"							\
+	".section .fixup,\"ax\"\n"				\
+	"3: orl $"__stringify(ENCLS_FAULT_FLAG)",%%eax\n"	\
+	"   jmp 2b\n"						\
+	".previous\n"						\
+	_ASM_EXTABLE_FAULT(1b, 3b)				\
+	: "=a"(ret)						\
+	: "a"(rax), inputs					\
+	: "memory", "cc");					\
+	ret;							\
+	})
+
+#define __encls_ret_1(rax, rcx)		\
+	({				\
+	__encls_ret_N(rax, "c"(rcx));	\
+	})
+
+#define __encls_ret_2(rax, rbx, rcx)		\
+	({					\
+	__encls_ret_N(rax, "b"(rbx), "c"(rcx));	\
+	})
+
+#define __encls_ret_3(rax, rbx, rcx, rdx)			\
+	({							\
+	__encls_ret_N(rax, "b"(rbx), "c"(rcx), "d"(rdx));	\
+	})
+
+/**
+ * __encls_N - encode an ENCLS leaf that doesn't return an error code
+ * @rax:	leaf number
+ * @rbx_out:	optional output variable
+ * @inputs:	asm inputs for the leaf
+ *
+ * Emit assembly for an ENCLS leaf that does not return an error code,
+ * e.g. ECREATE.  Leaves without error codes either succeed or fault.
+ * @rbx_out is an optional parameter for use by EDGBRD, which returns
+ * the requested value in RBX.
+ *
+ * Return:
+ *   0 on success,
+ *   trapnr with ENCLS_FAULT_FLAG set on fault
+ */
+#define __encls_N(rax, rbx_out, inputs...)			\
+	({							\
+	int ret;						\
+	asm volatile(						\
+	"1: .byte 0x0f, 0x01, 0xcf;\n\t"			\
+	"   xor %%eax,%%eax;\n"					\
+	"2:\n"							\
+	".section .fixup,\"ax\"\n"				\
+	"3: orl $"__stringify(ENCLS_FAULT_FLAG)",%%eax\n"	\
+	"   jmp 2b\n"						\
+	".previous\n"						\
+	_ASM_EXTABLE_FAULT(1b, 3b)				\
+	: "=a"(ret), "=b"(rbx_out)				\
+	: "a"(rax), inputs					\
+	: "memory");						\
+	ret;							\
+	})
+
+#define __encls_2(rax, rbx, rcx)				\
+	({							\
+	unsigned long ign_rbx_out;				\
+	__encls_N(rax, ign_rbx_out, "b"(rbx), "c"(rcx));	\
+	})
+
+#define __encls_1_1(rax, data, rcx)			\
+	({						\
+	unsigned long rbx_out;				\
+	int ret = __encls_N(rax, rbx_out, "c"(rcx));	\
+	if (!ret)					\
+		data = rbx_out;				\
+	ret;						\
+	})
+
+static inline int __ecreate(struct sgx_pageinfo *pginfo, void *secs)
+{
+	return __encls_2(ECREATE, pginfo, secs);
+}
+
+static inline int __eextend(void *secs, void *addr)
+{
+	return __encls_2(EEXTEND, secs, addr);
+}
+
+static inline int __eadd(struct sgx_pageinfo *pginfo, void *addr)
+{
+	return __encls_2(EADD, pginfo, addr);
+}
+
+static inline int __einit(void *sigstruct, void* token, void *secs)
+{
+	return __encls_ret_3(EINIT, sigstruct, secs, token);
+}
+
+static inline int __eremove(void *addr)
+{
+	return __encls_ret_1(EREMOVE, addr);
+}
+
+static inline int __edbgwr(void *addr, unsigned long *data)
+{
+	return __encls_2(EDGBWR, *data, addr);
+}
+
+static inline int __edbgrd(void *addr, unsigned long *data)
+{
+	return __encls_1_1(EDGBRD, *data, addr);
+}
+
+static inline int __etrack(void *addr)
+{
+	return __encls_ret_1(ETRACK, addr);
+}
+
+static inline int __eldu(struct sgx_pageinfo *pginfo, void *addr,
+			 void *va)
+{
+	return __encls_ret_3(ELDU, pginfo, addr, va);
+}
+
+static inline int __eblock(void *addr)
+{
+	return __encls_ret_1(EBLOCK, addr);
+}
+
+static inline int __epa(void *addr)
+{
+	unsigned long rbx = SGX_PAGE_TYPE_VA;
+
+	return __encls_2(EPA, rbx, addr);
+}
+
+static inline int __ewb(struct sgx_pageinfo *pginfo, void *addr,
+			void *va)
+{
+	return __encls_ret_3(EWB, pginfo, addr, va);
+}
+
+#endif /* _X86_ENCLS_H */
diff --git a/ubuntu/sgx/ioctl.c b/ubuntu/sgx/ioctl.c
new file mode 100644
index 000000000000..79b8c8019765
--- /dev/null
+++ b/ubuntu/sgx/ioctl.c
@@ -0,0 +1,829 @@ 
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2016-19 Intel Corporation.
+
+#include <asm/mman.h>
+#include <linux/mman.h>
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <linux/hashtable.h>
+#include <linux/highmem.h>
+#include <linux/ratelimit.h>
+#include <linux/sched/signal.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include "driver.h"
+#include "encl.h"
+#include "encls.h"
+
+#include <linux/version.h>
+#include "sgx_wl.h"
+
+/* A per-cpu cache for the last known values of IA32_SGXLEPUBKEYHASHx MSRs. */
+static DEFINE_PER_CPU(u64 [4], sgx_lepubkeyhash_cache);
+
+static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
+{
+	struct sgx_va_page *va_page = NULL;
+	void *err;
+
+	BUILD_BUG_ON(SGX_VA_SLOT_COUNT !=
+		(SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1);
+
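+	/* A new VA page is needed once every SGX_VA_SLOT_COUNT pages. */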
+	if (!(encl->page_cnt % SGX_VA_SLOT_COUNT)) {
+		va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
+		if (!va_page)
+			return ERR_PTR(-ENOMEM);
+
+		va_page->epc_page = sgx_alloc_va_page();
+		if (IS_ERR(va_page->epc_page)) {
+			err = ERR_CAST(va_page->epc_page);
+			kfree(va_page);
+			return err;
+		}
+
+		WARN_ON_ONCE(encl->page_cnt % SGX_VA_SLOT_COUNT);
+	}
+	encl->page_cnt++;
+	return va_page;
+}
+
+static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
+{
+	encl->page_cnt--;
+
+	if (va_page) {
+		sgx_free_page(va_page->epc_page);
+		list_del(&va_page->list);
+		kfree(va_page);
+	}
+}
+
+static u32 sgx_calc_ssaframesize(u32 miscselect, u64 xfrm)
+{
+	u32 size_max = PAGE_SIZE;
+	u32 size;
+	int i;
+
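+	/* Size the SSA frame for the largest XSAVE feature enabled in xfrm. */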
+	for (i = 2; i < 64; i++) {
+		if (!((1 << i) & xfrm))
+			continue;
+
+		size = SGX_SSA_GPRS_SIZE + sgx_xsave_size_tbl[i];
+		if (miscselect & SGX_MISC_EXINFO)
+			size += SGX_SSA_MISC_EXINFO_SIZE;
+
+		if (size > size_max)
+			size_max = size;
+	}
+
+	return PFN_UP(size_max);
+}
+
+static int sgx_validate_secs(const struct sgx_secs *secs,
+			     unsigned long ssaframesize)
+{
+	if (secs->size < (2 * PAGE_SIZE) || !is_power_of_2(secs->size))
+		return -EINVAL;
+
+	if (secs->base & (secs->size - 1))
+		return -EINVAL;
+
+	if (secs->miscselect & sgx_misc_reserved_mask ||
+	    secs->attributes & sgx_attributes_reserved_mask ||
+	    secs->xfrm & sgx_xfrm_reserved_mask)
+		return -EINVAL;
+
+	if (secs->attributes & SGX_ATTR_MODE64BIT) {
+		if (secs->size > sgx_encl_size_max_64)
+			return -EINVAL;
+	} else if (secs->size > sgx_encl_size_max_32)
+		return -EINVAL;
+
+	if (!(secs->xfrm & XFEATURE_MASK_FP) ||
+	    !(secs->xfrm & XFEATURE_MASK_SSE) ||
+	    (((secs->xfrm >> XFEATURE_BNDREGS) & 1) !=
+	     ((secs->xfrm >> XFEATURE_BNDCSR) & 1)))
+		return -EINVAL;
+
+	if (!secs->ssa_frame_size || ssaframesize > secs->ssa_frame_size)
+		return -EINVAL;
+
+	if (memchr_inv(secs->reserved1, 0, sizeof(secs->reserved1)) ||
+	    memchr_inv(secs->reserved2, 0, sizeof(secs->reserved2)) ||
+	    memchr_inv(secs->reserved3, 0, sizeof(secs->reserved3)) ||
+	    memchr_inv(secs->reserved4, 0, sizeof(secs->reserved4)))
+		return -EINVAL;
+
+	return 0;
+}
+
+static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
+						 unsigned long offset,
+						 u64 secinfo_flags)
+{
+	struct sgx_encl_page *encl_page;
+	unsigned long prot;
+
+	encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
+	if (!encl_page)
+		return ERR_PTR(-ENOMEM);
+
+	encl_page->desc = encl->base + offset;
+	encl_page->encl = encl;
+
+	prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ)  |
+	       _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
+	       _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);
+
+	/*
+	 * TCS pages must always have RW set for CPU access, while the SECINFO
+	 * permissions are *always* zero - the CPU ignores the user-provided
+	 * values and silently overwrites them with zero permissions.
+	 */
+	if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
+		prot |= PROT_READ | PROT_WRITE;
+
+	/* Calculate maximum of the VM flags for the page. */
+	encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
+
+	return encl_page;
+}
+
+static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
+{
+	unsigned long encl_size = secs->size + PAGE_SIZE;
+	struct sgx_epc_page *secs_epc;
+	struct sgx_va_page *va_page;
+	unsigned long ssaframesize;
+	struct sgx_pageinfo pginfo;
+	struct sgx_secinfo secinfo;
+	struct file *backing;
+	long ret;
+
+	if (atomic_read(&encl->flags) & SGX_ENCL_CREATED)
+		return -EINVAL;
+
+	va_page = sgx_encl_grow(encl);
+	if (IS_ERR(va_page))
+		return PTR_ERR(va_page);
+	else if (va_page)
+		list_add(&va_page->list, &encl->va_pages);
+
+	ssaframesize = sgx_calc_ssaframesize(secs->miscselect, secs->xfrm);
+	if (sgx_validate_secs(secs, ssaframesize)) {
+		pr_debug("invalid SECS\n");
+		ret = -EINVAL;
+		goto err_out_shrink;
+	}
+
+	backing = shmem_file_setup("SGX backing", encl_size + (encl_size >> 5),
+				   VM_NORESERVE);
+	if (IS_ERR(backing)) {
+		ret = PTR_ERR(backing);
+		goto err_out_shrink;
+	}
+
+	encl->backing = backing;
+
+	secs_epc = sgx_alloc_page(&encl->secs, true);
+	if (IS_ERR(secs_epc)) {
+		ret = PTR_ERR(secs_epc);
+		goto err_out_backing;
+	}
+
+	encl->secs.epc_page = secs_epc;
+
+	pginfo.addr = 0;
+	pginfo.contents = (unsigned long)secs;
+	pginfo.metadata = (unsigned long)&secinfo;
+	pginfo.secs = 0;
+	memset(&secinfo, 0, sizeof(secinfo));
+
+	ret = __ecreate((void *)&pginfo, sgx_epc_addr(secs_epc));
+	if (ret) {
+		pr_debug("ECREATE returned %ld\n", ret);
+		goto err_out;
+	}
+
+	if (secs->attributes & SGX_ATTR_DEBUG)
+		atomic_or(SGX_ENCL_DEBUG, &encl->flags);
+
+	encl->secs.encl = encl;
+	encl->secs_attributes = secs->attributes;
+	encl->allowed_attributes |= SGX_ATTR_ALLOWED_MASK;
+	encl->base = secs->base;
+	encl->size = secs->size;
+	encl->ssaframesize = secs->ssa_frame_size;
+
+	/*
+	 * Set SGX_ENCL_CREATED only after the enclave is fully prepped.  This
+	 * allows setting and checking enclave creation without having to take
+	 * encl->lock.
+	 */
+	atomic_or(SGX_ENCL_CREATED, &encl->flags);
+
+	return 0;
+
+err_out:
+	sgx_free_page(encl->secs.epc_page);
+	encl->secs.epc_page = NULL;
+
+err_out_backing:
+	fput(encl->backing);
+	encl->backing = NULL;
+
+err_out_shrink:
+	sgx_encl_shrink(encl, va_page);
+
+	return ret;
+}
+
+/**
+ * sgx_ioc_enclave_create - handler for %SGX_IOC_ENCLAVE_CREATE
+ * @encl:	pointer to an enclave instance (via ioctl() file pointer)
+ * @arg:	userspace pointer to a struct sgx_enclave_create instance
+ *
+ * Allocate kernel data structures for a new enclave and execute ECREATE after
+ * verifying the correctness of the provided SECS.
+ *
+ * Note, enforcement of restricted and disallowed attributes is deferred until
+ * sgx_ioc_enclave_init(), only the architectural correctness of the SECS is
+ * checked by sgx_ioc_enclave_create().
+ *
+ * Return:
+ *   0 on success,
+ *   -errno otherwise
+ */
+static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg)
+{
+	struct sgx_enclave_create ecreate;
+	struct page *secs_page;
+	struct sgx_secs *secs;
+	int ret;
+
+	if (copy_from_user(&ecreate, arg, sizeof(ecreate)))
+		return -EFAULT;
+
+	secs_page = alloc_page(GFP_KERNEL);
+	if (!secs_page)
+		return -ENOMEM;
+
+	secs = kmap(secs_page);
+	if (copy_from_user(secs, (void __user *)ecreate.src, sizeof(*secs))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = sgx_encl_create(encl, secs);
+
+out:
+	kunmap(secs_page);
+	__free_page(secs_page);
+	return ret;
+}
+
+static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
+{
+	u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
+	u64 pt = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
+
+	if (pt != SGX_SECINFO_REG && pt != SGX_SECINFO_TCS)
+		return -EINVAL;
+
+	if ((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R))
+		return -EINVAL;
+
+	/*
+	 * The CPU silently overwrites the TCS permissions with zero, which
+	 * means that we need to validate them ourselves.
+	 */
+	if (pt == SGX_SECINFO_TCS && perm)
+		return -EINVAL;
+
+	if (secinfo->flags & SGX_SECINFO_RESERVED_MASK)
+		return -EINVAL;
+
+	if (memchr_inv(secinfo->reserved, 0, sizeof(secinfo->reserved)))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int __sgx_encl_add_page(struct sgx_encl *encl,
+			       struct sgx_encl_page *encl_page,
+			       struct sgx_epc_page *epc_page,
+			       struct sgx_secinfo *secinfo, unsigned long src)
+{
+	struct sgx_pageinfo pginfo;
+	struct vm_area_struct *vma;
+	struct page *src_page;
+	int ret;
+
+	/* Query vma's VM_MAYEXEC as an indirect path_noexec() check. */
+	if (encl_page->vm_max_prot_bits & VM_EXEC) {
+		vma = find_vma(current->mm, src);
+		if (!vma)
+			return -EFAULT;
+
+		if (!(vma->vm_flags & VM_MAYEXEC))
+			return -EACCES;
+	}
+
+	ret = get_user_pages(src, 1, 0, &src_page, NULL);
+	if (ret < 1)
+		return ret;
+
+	pginfo.secs = (unsigned long)sgx_epc_addr(encl->secs.epc_page);
+	pginfo.addr = SGX_ENCL_PAGE_ADDR(encl_page);
+	pginfo.metadata = (unsigned long)secinfo;
+	pginfo.contents = (unsigned long)kmap_atomic(src_page);
+
+	ret = __eadd(&pginfo, sgx_epc_addr(epc_page));
+
+	kunmap_atomic((void *)pginfo.contents);
+	put_page(src_page);
+
+	return ret ? -EIO : 0;
+}
+
+static int __sgx_encl_extend(struct sgx_encl *encl,
+			     struct sgx_epc_page *epc_page)
+{
+	int ret;
+	int i;
+
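+	/* EEXTEND measures 256 bytes at a time; 16 calls cover a 4K page. */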
+	for (i = 0; i < 16; i++) {
+		ret = __eextend(sgx_epc_addr(encl->secs.epc_page),
+				sgx_epc_addr(epc_page) + (i * 0x100));
+		if (ret) {
+			if (encls_failed(ret))
+				ENCLS_WARN(ret, "EEXTEND");
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
+			     unsigned long offset, unsigned long length,
+			     struct sgx_secinfo *secinfo, unsigned long flags)
+{
+	struct sgx_encl_page *encl_page;
+	struct sgx_epc_page *epc_page;
+	struct sgx_va_page *va_page;
+	int ret;
+
+	encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags);
+	if (IS_ERR(encl_page))
+		return PTR_ERR(encl_page);
+
+	epc_page = sgx_alloc_page(encl_page, true);
+	if (IS_ERR(epc_page)) {
+		kfree(encl_page);
+		return PTR_ERR(epc_page);
+	}
+
+	if (atomic_read(&encl->flags) &
+	    (SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD)) {
+		ret = -EFAULT;
+		goto err_out_free;
+	}
+
+	va_page = sgx_encl_grow(encl);
+	if (IS_ERR(va_page)) {
+		ret = PTR_ERR(va_page);
+		goto err_out_free;
+	}
+
+	down_read(&current->mm->mmap_sem);
+	mutex_lock(&encl->lock);
+
+	/*
+	 * Adding to encl->va_pages must be done under encl->lock.  Ditto for
+	 * deleting (via sgx_encl_shrink()) in the error path.
+	 */
+	if (va_page)
+		list_add(&va_page->list, &encl->va_pages);
+
+	/*
+	 * Insert prior to EADD in case of OOM.  EADD modifies MRENCLAVE, i.e.
+	 * can't be gracefully unwound, while failure on EADD/EXTEND is limited
+	 * to userspace errors (or kernel/hardware bugs).
+	 */
+	ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
+				encl_page);
+	if (ret)
+		goto err_out_unlock;
+
+	ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
+				  src);
+	if (ret)
+		goto err_out;
+
+	/*
+	 * Complete the "add" before doing the "extend" so that the "add"
+	 * isn't in a half-baked state in the extremely unlikely scenario that
+	 * the enclave is destroyed in response to EEXTEND failure.
+	 */
+	encl_page->encl = encl;
+	encl_page->epc_page = epc_page;
+	encl->secs_child_cnt++;
+
+	if (flags & SGX_PAGE_MEASURE) {
+		ret = __sgx_encl_extend(encl, epc_page);
+		if (ret)
+			goto err_out;
+	}
+
+	sgx_mark_page_reclaimable(encl_page->epc_page);
+	mutex_unlock(&encl->lock);
+	up_read(&current->mm->mmap_sem);
+	return ret;
+
+err_out:
+	radix_tree_delete(&encl_page->encl->page_tree,
+			  PFN_DOWN(encl_page->desc));
+
+err_out_unlock:
+	sgx_encl_shrink(encl, va_page);
+	mutex_unlock(&encl->lock);
+	up_read(&current->mm->mmap_sem);
+
+err_out_free:
+	sgx_free_page(epc_page);
+	kfree(encl_page);
+
+	/*
+	 * Destroy enclave on ENCLS failure as this means that EPC has been
+	 * invalidated.
+	 */
+	if (ret == -EIO)
+		sgx_encl_destroy(encl);
+
+	return ret;
+}
+
+/**
+ * sgx_ioc_enclave_add_pages() - The handler for %SGX_IOC_ENCLAVE_ADD_PAGES
+ * @encl:       pointer to an enclave instance (via ioctl() file pointer)
+ * @arg:	a user pointer to a struct sgx_enclave_add_pages instance
+ *
+ * Add one or more pages to an uninitialized enclave, and optionally extend the
+ * measurement with the contents of the page. The address range of pages must
+ * be contiguous. The SECINFO and measurement mask are applied to all pages.
+ *
+ * A SECINFO for a TCS is required to always contain zero permissions because
+ * the CPU silently zeros them. Allowing anything else would cause a mismatch in
+ * the measurement.
+ *
+ * mmap()'s protection bits are capped by the page permissions. For each page
+ * address, the maximum protection bits are computed with the following
+ * heuristics:
+ *
+ * 1. A regular page: PROT_R, PROT_W and PROT_X match the SECINFO permissions.
+ * 2. A TCS page: PROT_R | PROT_W.
+ * 3. No page: PROT_NONE.
+ *
+ * mmap() is not allowed to surpass the minimum of the maximum protection bits
+ * within the given address range.
+ *
+ * As stated above, a non-existent page is interpreted as a page with no
+ * permissions. In effect, this allows mmap() with PROT_NONE to be used to seek
+ * an address range for the enclave that can then be populated into the SECS.
+ *
+ * If an ENCLS opcode fails, that effectively means that the EPC has been
+ * invalidated.
+ * When this happens the enclave is destroyed and -EIO is returned to the
+ * caller.
+ *
+ * Return:
+ *   0 on success,
+ *   -EACCES if an executable source page is located in a noexec partition,
+ *   -EIO if either ENCLS[EADD] or ENCLS[EEXTEND] fails,
+ *   -errno otherwise
+ */
+static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
+{
+	struct sgx_enclave_add_pages addp;
+	struct sgx_secinfo secinfo;
+	unsigned long c;
+	int ret;
+
+	if (!(atomic_read(&encl->flags) & SGX_ENCL_CREATED))
+		return -EINVAL;
+
+	if (copy_from_user(&addp, arg, sizeof(addp)))
+		return -EFAULT;
+
+	if (!IS_ALIGNED(addp.offset, PAGE_SIZE) ||
+	    !IS_ALIGNED(addp.src, PAGE_SIZE))
+		return -EINVAL;
+
+	if (!(access_ok(
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) && (!defined(RHEL_RELEASE_CODE)))
+		VERIFY_READ,
+#else
+    #if( defined(RHEL_RELEASE_VERSION) && defined(RHEL_RELEASE_CODE))
+        #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0))
+            #error "RHEL version not supported"
+        #elif (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 1))
+		VERIFY_READ,
+        #endif
+    #endif
+#endif
+	        addp.src, PAGE_SIZE)))
+		return -EFAULT;
+
+	if (addp.length & (PAGE_SIZE - 1))
+		return -EINVAL;
+
+	if (addp.offset + addp.length - PAGE_SIZE >= encl->size)
+		return -EINVAL;
+
+	if (copy_from_user(&secinfo, (void __user *)addp.secinfo,
+			   sizeof(secinfo)))
+		return -EFAULT;
+
+	if (sgx_validate_secinfo(&secinfo))
+		return -EINVAL;
+
+	for (c = 0 ; c < addp.length; c += PAGE_SIZE) {
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		if (need_resched())
+			cond_resched();
+
+		ret = sgx_encl_add_page(encl, addp.src + c, addp.offset + c,
+					addp.length - c, &secinfo, addp.flags);
+		if (ret)
+			break;
+	}
+
+	addp.count = c;
+
+	if (copy_to_user(arg, &addp, sizeof(addp)))
+		return -EFAULT;
+
+	return ret;
+}
+
+static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
+			      void *hash)
+{
+	SHASH_DESC_ON_STACK(shash, tfm);
+
+	shash->tfm = tfm;
+
+	return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
+}
+
+static int sgx_get_key_hash(const void *modulus, void *hash)
+{
+	struct crypto_shash *tfm;
+	int ret;
+
+	tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	ret = __sgx_get_key_hash(tfm, modulus, hash);
+
+	crypto_free_shash(tfm);
+	return ret;
+}
+
+static void sgx_update_lepubkeyhash_msrs(u64 *lepubkeyhash, bool enforce)
+{
+	u64 *cache;
+	int i;
+
+	cache = per_cpu(sgx_lepubkeyhash_cache, smp_processor_id());
+	for (i = 0; i < 4; i++) {
+		if (enforce || (lepubkeyhash[i] != cache[i])) {
+			wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
+			cache[i] = lepubkeyhash[i];
+		}
+	}
+}
+
+static int sgx_einit(struct sgx_sigstruct *sigstruct, void *token,
+		     struct sgx_epc_page *secs, u64 *lepubkeyhash)
+{
+	int ret;
+
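+	/*
+	 * Write the LE pubkey hash MSRs lazily; if EINIT reports an invalid
+	 * token, force the MSR update and retry once.
+	 */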
+	preempt_disable();
+	sgx_update_lepubkeyhash_msrs(lepubkeyhash, false);
+	ret = __einit(sigstruct, token, sgx_epc_addr(secs));
+	if (ret == SGX_INVALID_EINITTOKEN) {
+		sgx_update_lepubkeyhash_msrs(lepubkeyhash, true);
+		ret = __einit(sigstruct, token, sgx_epc_addr(secs));
+	}
+	preempt_enable();
+	return ret;
+}
+
+static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
+			 void *token)
+{
+	u64 mrsigner[4];
+	int ret;
+	int i;
+	int j;
+
+	ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
+	if (ret)
+		return ret;
+
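+	/*
+	 * Enclaves signed with a whitelisted service-enclave MRSIGNER (see
+	 * sgx_wl.h) are implicitly allowed to use the PROVISIONKEY attribute.
+	 */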
+	if ((encl->secs_attributes & ~encl->allowed_attributes) &&
+	    (encl->secs_attributes & SGX_ATTR_PROVISIONKEY)) {
+		for (i = 0; i < ARRAY_SIZE(G_SERVICE_ENCLAVE_MRSIGNER); i++) {
+			if (!memcmp(&G_SERVICE_ENCLAVE_MRSIGNER[i], mrsigner,
+				    sizeof(G_SERVICE_ENCLAVE_MRSIGNER[0]))) {
+				encl->allowed_attributes |= SGX_ATTR_PROVISIONKEY;
+				break;
+			}
+		}
+	}
+
+	/* Check that the required attributes have been authorized. */
+	if (encl->secs_attributes & ~encl->allowed_attributes)
+		return -EACCES;
+
+	mutex_lock(&encl->lock);
+
+	if (atomic_read(&encl->flags) & SGX_ENCL_INITIALIZED) {
+		ret = -EFAULT;
+		goto err_out;
+	}
+
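+	/*
+	 * EINIT can fail with SGX_UNMASKED_EVENT when an event is pending;
+	 * spin a bounded number of times, then sleep and retry.
+	 */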
+	for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
+		for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
+			ret = sgx_einit(sigstruct, token, encl->secs.epc_page,
+					mrsigner);
+			if (ret == SGX_UNMASKED_EVENT)
+				continue;
+			else
+				break;
+		}
+
+		if (ret != SGX_UNMASKED_EVENT)
+			break;
+
+		msleep_interruptible(SGX_EINIT_SLEEP_TIME);
+
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			goto err_out;
+		}
+	}
+
+	if (ret & ENCLS_FAULT_FLAG) {
+		if (encls_failed(ret))
+			ENCLS_WARN(ret, "EINIT");
+
+		sgx_encl_destroy(encl);
+		ret = -EFAULT;
+	} else if (ret) {
+		pr_debug("EINIT returned %d\n", ret);
+		ret = -EPERM;
+	} else {
+		atomic_or(SGX_ENCL_INITIALIZED, &encl->flags);
+	}
+
+err_out:
+	mutex_unlock(&encl->lock);
+	return ret;
+}
+
+/**
+ * sgx_ioc_enclave_init - handler for %SGX_IOC_ENCLAVE_INIT
+ *
+ * @encl:	pointer to an enclave instance (via ioctl() file pointer)
+ * @arg:	userspace pointer to a struct sgx_enclave_init instance
+ *
+ * Flush any outstanding enqueued EADD operations and perform EINIT.  The
+ * Launch Enclave Public Key Hash MSRs are rewritten as necessary to match
+ * the enclave's MRSIGNER, which is calculated from the provided sigstruct.
+ *
+ * Return:
+ *   0 on success,
+ *   SGX error code on EINIT failure,
+ *   -errno otherwise
+ */
+static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
+{
+	struct sgx_sigstruct *sigstruct;
+	struct sgx_enclave_init einit;
+	struct page *initp_page;
+	void* token;
+	int ret;
+
+	if (!(atomic_read(&encl->flags) & SGX_ENCL_CREATED))
+		return -EINVAL;
+
+	if (copy_from_user(&einit, arg, sizeof(einit)))
+		return -EFAULT;
+
+	initp_page = alloc_page(GFP_KERNEL);
+	if (!initp_page)
+		return -ENOMEM;
+
+	sigstruct = kmap(initp_page);
+	token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
+	memset(token, 0, SGX_LAUNCH_TOKEN_SIZE);
+
+	if (copy_from_user(sigstruct, (void __user *)einit.sigstruct,
+			   sizeof(*sigstruct))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = sgx_encl_init(encl, sigstruct, token);
+
+out:
+	kunmap(initp_page);
+	__free_page(initp_page);
+	return ret;
+}
+
+/**
+ * sgx_ioc_enclave_set_attribute - handler for %SGX_IOC_ENCLAVE_SET_ATTRIBUTE
+ * @encl:	pointer to an enclave instance (via ioctl() file pointer)
+ * @arg:	userspace pointer to a struct sgx_enclave_set_attribute instance
+ *
+ * Mark the enclave as being allowed to access a restricted attribute bit.
+ * The requested attribute is specified via the attribute_fd field in the
+ * provided struct sgx_enclave_set_attribute.  The attribute_fd must be a
+ * handle to an SGX attribute file, e.g. "/dev/sgx/provision".
+ *
+ * Failure to explicitly request access to a restricted attribute will cause
+ * sgx_ioc_enclave_init() to fail.  Currently, the only restricted attribute
+ * is access to the PROVISION_KEY.
+ *
+ * Note, access to the EINITTOKEN_KEY is disallowed entirely.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+static long sgx_ioc_enclave_set_attribute(struct sgx_encl *encl,
+					  void __user *arg)
+{
+	struct sgx_enclave_set_attribute params;
+	struct file *attribute_file;
+	int ret;
+
+	if (copy_from_user(&params, arg, sizeof(params)))
+		return -EFAULT;
+
+	attribute_file = fget(params.attribute_fd);
+	if (!attribute_file)
+		return -EINVAL;
+
+	if (attribute_file->f_op != &sgx_provision_fops) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	encl->allowed_attributes |= SGX_ATTR_PROVISIONKEY;
+	ret = 0;
+
+out:
+	fput(attribute_file);
+	return ret;
+}
+
+long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+	struct sgx_encl *encl = filep->private_data;
+	int ret, encl_flags;
+
+	encl_flags = atomic_fetch_or(SGX_ENCL_IOCTL, &encl->flags);
+	if (encl_flags & SGX_ENCL_IOCTL)
+		return -EBUSY;
+
+	if (encl_flags & SGX_ENCL_DEAD)
+		return -EFAULT;
+
+	switch (cmd) {
+	case SGX_IOC_ENCLAVE_CREATE:
+		ret = sgx_ioc_enclave_create(encl, (void __user *)arg);
+		break;
+	case SGX_IOC_ENCLAVE_ADD_PAGES:
+		ret = sgx_ioc_enclave_add_pages(encl, (void __user *)arg);
+		break;
+	case SGX_IOC_ENCLAVE_INIT:
+		ret = sgx_ioc_enclave_init(encl, (void __user *)arg);
+		break;
+	case SGX_IOC_ENCLAVE_SET_ATTRIBUTE:
+		ret = sgx_ioc_enclave_set_attribute(encl, (void __user *)arg);
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+
+	atomic_andnot(SGX_ENCL_IOCTL, &encl->flags);
+
+	return ret;
+}
diff --git a/ubuntu/sgx/main.c b/ubuntu/sgx/main.c
new file mode 100644
index 000000000000..82c7b77e9180
--- /dev/null
+++ b/ubuntu/sgx/main.c
@@ -0,0 +1,329 @@ 
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2016-17 Intel Corporation.
+
+#include <linux/freezer.h>
+#include <linux/highmem.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/ratelimit.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include "driver.h"
+#include "encls.h"
+
+#include <linux/module.h>
+#include "version.h"
+#include "dcap.h"
+#ifndef MSR_IA32_FEAT_CTL
+#define MSR_IA32_FEAT_CTL MSR_IA32_FEATURE_CONTROL
+#endif
+
+#ifndef FEAT_CTL_LOCKED
+#define FEAT_CTL_LOCKED FEATURE_CONTROL_LOCKED
+#endif
+
+struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
+int sgx_nr_epc_sections;
+
+// Based on arch/x86/kernel/cpu/intel.c
+static bool detect_sgx(struct cpuinfo_x86 *c)
+{
+	unsigned long long fc;
+
+	rdmsrl(MSR_IA32_FEAT_CTL, fc);
+	if (!(fc & FEAT_CTL_LOCKED)) {
+		pr_err_once("sgx: The feature control MSR is not locked\n");
+		return false;
+	}
+
+	if (!(fc & FEAT_CTL_SGX_ENABLED)) {
+		pr_err_once("sgx: SGX is not enabled in IA32_FEATURE_CONTROL MSR\n");
+		return false;
+	}
+
+	if (!cpu_has(c, X86_FEATURE_SGX)) {
+		pr_err_once("sgx: SGX1 instruction set is not supported\n");
+		return false;
+	}
+
+	if (!(fc & FEAT_CTL_SGX_LC_ENABLED)) {
+		pr_info_once("sgx: The launch control MSRs are not writable\n");
+		return false;
+	}
+
+	return true;
+}
+
+static struct sgx_epc_page *__sgx_try_alloc_page(struct sgx_epc_section *section)
+{
+	struct sgx_epc_page *page;
+
+	if (list_empty(&section->page_list))
+		return NULL;
+
+	page = list_first_entry(&section->page_list, struct sgx_epc_page, list);
+	list_del_init(&page->list);
+	section->free_cnt--;
+
+	return page;
+}
+
+/**
+ * sgx_try_alloc_page() - Allocate an EPC page
+ *
+ * Try to grab a page from the free EPC page list.
+ *
+ * Return:
+ *   a pointer to a &struct sgx_epc_page instance,
+ *   -errno on error
+ */
+struct sgx_epc_page *sgx_try_alloc_page(void)
+{
+	struct sgx_epc_section *section;
+	struct sgx_epc_page *page;
+	int i;
+
+	for (i = 0; i < sgx_nr_epc_sections; i++) {
+		section = &sgx_epc_sections[i];
+		spin_lock(&section->lock);
+		page = __sgx_try_alloc_page(section);
+		spin_unlock(&section->lock);
+
+		if (page)
+			return page;
+	}
+
+	return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * sgx_alloc_page() - Allocate an EPC page
+ * @owner:	the owner of the EPC page
+ * @reclaim:	reclaim pages if necessary
+ *
+ * Try to grab a page from the free EPC page list. If there is a free page
+ * available, it is returned to the caller. If @reclaim is set, the EPC memory
+ * manager may reclaim (swap out) pages to satisfy the allocation.
+ *
+ * Return:
+ *   a pointer to a &struct sgx_epc_page instance,
+ *   -errno on error
+ */
+struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim)
+{
+	struct sgx_epc_page *entry;
+
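+	/*
+	 * Retry the allocation, reclaiming EPC pages when @reclaim allows it,
+	 * until a free page is found or the attempt must be aborted.
+	 */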
+	for ( ; ; ) {
+		entry = sgx_try_alloc_page();
+		if (!IS_ERR(entry)) {
+			entry->owner = owner;
+			break;
+		}
+
+		if (list_empty(&sgx_active_page_list))
+			return ERR_PTR(-ENOMEM);
+
+		if (!reclaim) {
+			entry = ERR_PTR(-EBUSY);
+			break;
+		}
+
+		if (signal_pending(current)) {
+			entry = ERR_PTR(-ERESTARTSYS);
+			break;
+		}
+
+		sgx_reclaim_pages();
+		schedule();
+	}
+
+	if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
+		wake_up(&ksgxswapd_waitq);
+
+	return entry;
+}
+
+/**
+ * sgx_free_page() - Free an EPC page
+ * @page:	pointer to a previously allocated EPC page
+ *
+ * EREMOVE an EPC page and insert it back to the list of free pages. The page
+ * must not be reclaimable.
+ */
+void sgx_free_page(struct sgx_epc_page *page)
+{
+	struct sgx_epc_section *section = sgx_epc_section(page);
+	int ret;
+
+	/*
+	 * Don't take sgx_active_page_list_lock when asserting the page isn't
+	 * reclaimable, missing a WARN in the very rare case is preferable to
+	 * unnecessarily taking a global lock in the common case.
+	 */
+	WARN_ON_ONCE(page->desc & SGX_EPC_PAGE_RECLAIMABLE);
+
+	ret = __eremove(sgx_epc_addr(page));
+	if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret))
+		return;
+
+	spin_lock(&section->lock);
+	list_add_tail(&page->list, &section->page_list);
+	section->free_cnt++;
+	spin_unlock(&section->lock);
+}
+
+static void sgx_free_epc_section(struct sgx_epc_section *section)
+{
+	struct sgx_epc_page *page;
+
+	while (!list_empty(&section->page_list)) {
+		page = list_first_entry(&section->page_list,
+					struct sgx_epc_page, list);
+		list_del(&page->list);
+		kfree(page);
+	}
+
+	while (!list_empty(&section->unsanitized_page_list)) {
+		page = list_first_entry(&section->unsanitized_page_list,
+					struct sgx_epc_page, list);
+		list_del(&page->list);
+		kfree(page);
+	}
+
+	memunmap(section->va);
+}
+
+static bool __init sgx_alloc_epc_section(u64 addr, u64 size,
+					 unsigned long index,
+					 struct sgx_epc_section *section)
+{
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	struct sgx_epc_page *page;
+	unsigned long i;
+
+	section->va = memremap(addr, size, MEMREMAP_WB);
+	if (!section->va)
+		return false;
+
+	section->pa = addr;
+	spin_lock_init(&section->lock);
+	INIT_LIST_HEAD(&section->page_list);
+	INIT_LIST_HEAD(&section->unsanitized_page_list);
+
+	for (i = 0; i < nr_pages; i++) {
+		page = kzalloc(sizeof(*page), GFP_KERNEL);
+		if (!page)
+			goto err_out;
+
+		page->desc = (addr + (i << PAGE_SHIFT)) | index;
+		list_add_tail(&page->list, &section->unsanitized_page_list);
+	}
+
+	section->free_cnt = nr_pages;
+	return true;
+
+err_out:
+	sgx_free_epc_section(section);
+	return false;
+}
+
+static void sgx_page_cache_teardown(void)
+{
+	int i;
+
+	for (i = 0; i < sgx_nr_epc_sections; i++)
+		sgx_free_epc_section(&sgx_epc_sections[i]);
+}
+
+/**
+ * A section metric is concatenated in a way that @low bits 12-31 define the
+ * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
+ * metric.
+ */
+static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
+{
+	return (low & GENMASK_ULL(31, 12)) +
+	       ((high & GENMASK_ULL(19, 0)) << 32);
+}
+
+static bool __init sgx_page_cache_init(void)
+{
+	u32 eax, ebx, ecx, edx, type;
+	u64 pa, size;
+	int i;
+
+	for (i = 0; i <= ARRAY_SIZE(sgx_epc_sections); i++) {
+		cpuid_count(SGX_CPUID, i + SGX_CPUID_FIRST_VARIABLE_SUB_LEAF,
+			    &eax, &ebx, &ecx, &edx);
+
+		type = eax & SGX_CPUID_SUB_LEAF_TYPE_MASK;
+		if (type == SGX_CPUID_SUB_LEAF_INVALID)
+			break;
+
+		if (type != SGX_CPUID_SUB_LEAF_EPC_SECTION) {
+			pr_err_once("Unknown EPC section type: %u\n", type);
+			break;
+		}
+
+		if (i == ARRAY_SIZE(sgx_epc_sections)) {
+			pr_warn("No free slot for an EPC section\n");
+			break;
+		}
+
+		pa = sgx_calc_section_metric(eax, ebx);
+		size = sgx_calc_section_metric(ecx, edx);
+
+		pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1);
+
+		if (!sgx_alloc_epc_section(pa, size, i, &sgx_epc_sections[i])) {
+			pr_err("No free memory for an EPC section\n");
+			break;
+		}
+
+		sgx_nr_epc_sections++;
+	}
+
+	if (!sgx_nr_epc_sections) {
+		pr_err("There are zero EPC sections.\n");
+		return false;
+	}
+
+	return true;
+}
+
+static int __init sgx_init(void)
+{
+	int ret;
+
+	if (!detect_sgx(&boot_cpu_data))
+		return -ENODEV;
+
+	if (!sgx_page_cache_init())
+		return -EFAULT;
+
+	if (!sgx_page_reclaimer_init())
+		goto err_page_cache;
+
+	ret = sgx_drv_init();
+	if (ret)
+		goto err_kthread;
+
+	pr_info("intel_sgx: " DRV_DESCRIPTION " v" DRV_VERSION "\n");
+
+	return 0;
+
+err_kthread:
+	kthread_stop(ksgxswapd_tsk);
+
+err_page_cache:
+	sgx_page_cache_teardown();
+	return -EFAULT;
+}
+module_init(sgx_init);
+
+static void __exit sgx_exit(void)
+{
+	sgx_drv_exit();
+	kthread_stop(ksgxswapd_tsk);
+	sgx_page_cache_teardown();
+}
+module_exit(sgx_exit);
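
A minimal userspace sketch (illustration only, not part of the patch) of the
CPUID section-metric decoding performed by sgx_page_cache_init() above. The
register values below are hypothetical, not real hardware output.

#include <stdint.h>
#include <stdio.h>

/* Mirrors sgx_calc_section_metric(): low[31:12] plus high[19:0] << 32. */
static uint64_t calc_section_metric(uint64_t low, uint64_t high)
{
	return (low & 0xfffff000ULL) + ((high & 0xfffffULL) << 32);
}

int main(void)
{
	/* Hypothetical CPUID.(EAX=0x12, ECX=2) output for one EPC section. */
	uint64_t eax = 0x70200001, ebx = 0x00000001;	/* base address metric */
	uint64_t ecx = 0x05d80001, edx = 0x00000000;	/* size metric */

	uint64_t pa   = calc_section_metric(eax, ebx);
	uint64_t size = calc_section_metric(ecx, edx);

	/* Prints "EPC section 0x170200000-0x175f7ffff" for these values. */
	printf("EPC section 0x%llx-0x%llx\n",
	       (unsigned long long)pa,
	       (unsigned long long)(pa + size - 1));
	return 0;
}
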
diff --git a/ubuntu/sgx/reclaim.c b/ubuntu/sgx/reclaim.c
new file mode 100644
index 000000000000..99ada8857aee
--- /dev/null
+++ b/ubuntu/sgx/reclaim.c
@@ -0,0 +1,505 @@ 
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2016-19 Intel Corporation.
+
+#include <linux/freezer.h>
+#include <linux/highmem.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
+#include "encl.h"
+#include "encls.h"
+#include "driver.h"
+
+#include <linux/version.h>
+
+struct task_struct *ksgxswapd_tsk;
+DECLARE_WAIT_QUEUE_HEAD(ksgxswapd_waitq);
+LIST_HEAD(sgx_active_page_list);
+DEFINE_SPINLOCK(sgx_active_page_list_lock);
+
+static void sgx_sanitize_section(struct sgx_epc_section *section)
+{
+	struct sgx_epc_page *page;
+	LIST_HEAD(secs_list);
+	int ret;
+
+	while (!list_empty(&section->unsanitized_page_list)) {
+		if (kthread_should_stop())
+			return;
+
+		spin_lock(&section->lock);
+
+		page = list_first_entry(&section->unsanitized_page_list,
+					struct sgx_epc_page, list);
+
+		ret = __eremove(sgx_epc_addr(page));
+		if (!ret)
+			list_move(&page->list, &section->page_list);
+		else
+			list_move_tail(&page->list, &secs_list);
+
+		spin_unlock(&section->lock);
+
+		cond_resched();
+	}
+}
+
+static int ksgxswapd(void *p)
+{
+	int i;
+
+	set_freezable();
+
+	/*
+	 * Reset all pages to the uninitialized state. Pages could still be
+	 * initialized, e.g. when inherited from a previous kernel via kexec().
+	 */
+	for (i = 0; i < sgx_nr_epc_sections; i++)
+		sgx_sanitize_section(&sgx_epc_sections[i]);
+
+	/*
+	 * 2nd round for the SECS pages as they cannot be removed when they
+	 * still hold child pages.
+	 */
+	for (i = 0; i < sgx_nr_epc_sections; i++) {
+		sgx_sanitize_section(&sgx_epc_sections[i]);
+
+		/* Should never happen. */
+		if (!list_empty(&sgx_epc_sections[i].unsanitized_page_list))
+			WARN(1, "EPC section %d has unsanitized pages.\n", i);
+	}
+
+	while (!kthread_should_stop()) {
+		if (try_to_freeze())
+			continue;
+
+		wait_event_freezable(ksgxswapd_waitq,
+				     kthread_should_stop() ||
+				     sgx_should_reclaim(SGX_NR_HIGH_PAGES));
+
+		if (sgx_should_reclaim(SGX_NR_HIGH_PAGES))
+			sgx_reclaim_pages();
+
+		cond_resched();
+	}
+
+	return 0;
+}
+
+bool __init sgx_page_reclaimer_init(void)
+{
+	struct task_struct *tsk;
+
+	tsk = kthread_run(ksgxswapd, NULL, "ksgxswapd");
+	if (IS_ERR(tsk))
+		return false;
+
+	ksgxswapd_tsk = tsk;
+
+	return true;
+}
+
+/**
+ * sgx_mark_page_reclaimable() - Mark a page as reclaimable
+ * @page:	EPC page
+ *
+ * Mark a page as reclaimable and add it to the active page list. Pages
+ * are automatically removed from the active list when freed.
+ */
+void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
+{
+	spin_lock(&sgx_active_page_list_lock);
+	page->desc |= SGX_EPC_PAGE_RECLAIMABLE;
+	list_add_tail(&page->list, &sgx_active_page_list);
+	spin_unlock(&sgx_active_page_list_lock);
+}
+
+/**
+ * sgx_unmark_page_reclaimable() - Remove a page from the reclaim list
+ * @page:	EPC page
+ *
+ * Clear the reclaimable flag and remove the page from the active page list.
+ *
+ * Return:
+ *   0 on success,
+ *   -EBUSY if the page is in the process of being reclaimed
+ */
+int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
+{
+	/*
+	 * Remove the page from the active list if necessary.  If the page
+	 * is actively being reclaimed, i.e. RECLAIMABLE is set but the
+	 * page isn't on the active list, return -EBUSY as we can't free
+	 * the page at this time since it is "owned" by the reclaimer.
+	 */
+	spin_lock(&sgx_active_page_list_lock);
+	if (page->desc & SGX_EPC_PAGE_RECLAIMABLE) {
+		if (list_empty(&page->list)) {
+			spin_unlock(&sgx_active_page_list_lock);
+			return -EBUSY;
+		}
+		list_del(&page->list);
+		page->desc &= ~SGX_EPC_PAGE_RECLAIMABLE;
+	}
+	spin_unlock(&sgx_active_page_list_lock);
+
+	return 0;
+}
+
+static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
+{
+	struct sgx_encl_page *page = epc_page->owner;
+	struct sgx_encl *encl = page->encl;
+	struct sgx_encl_mm *encl_mm;
+	bool ret = true;
+	int idx;
+
+	/*
+	 * Note, this can race with sgx_encl_mm_add(), but worst case scenario
+	 * a page will be reclaimed immediately after it's accessed in the new
+	 * process/mm.
+	 */
+	idx = srcu_read_lock(&encl->srcu);
+
+	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
+		if (!mmget_not_zero(encl_mm->mm))
+			continue;
+
+		down_read(&encl_mm->mm->mmap_sem);
+		ret = !sgx_encl_test_and_clear_young(encl_mm->mm, page);
+		up_read(&encl_mm->mm->mmap_sem);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) || LINUX_VERSION_CODE > KERNEL_VERSION(5, 4, 0))
+		mmput(encl_mm->mm);
+#else
+		mmput_async(encl_mm->mm);
+#endif
+
+		if (!ret || (atomic_read(&encl->flags) & SGX_ENCL_DEAD))
+			break;
+	}
+
+	srcu_read_unlock(&encl->srcu, idx);
+
+	if (!ret && !(atomic_read(&encl->flags) & SGX_ENCL_DEAD))
+		return false;
+
+	return true;
+}
+
+static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
+{
+	struct sgx_encl_page *page = epc_page->owner;
+	unsigned long addr = SGX_ENCL_PAGE_ADDR(page);
+	struct sgx_encl *encl = page->encl;
+	struct sgx_encl_mm *encl_mm;
+	struct vm_area_struct *vma;
+	unsigned long mm_list_gen;
+	int idx, ret;
+
+retry:
+	mm_list_gen = encl->mm_list_gen;
+	/*
+	 * Ensure mm_list_gen is snapshotted before walking mm_list to prevent
+	 * beginning the walk with the old list in the new generation.  Pairs
+	 * with the smp_wmb() in sgx_encl_mm_add().
+	 */
+	smp_rmb();
+
+	idx = srcu_read_lock(&encl->srcu);
+
+	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
+		if (!mmget_not_zero(encl_mm->mm))
+			continue;
+
+		down_read(&encl_mm->mm->mmap_sem);
+
+		ret = sgx_encl_find(encl_mm->mm, addr, &vma);
+		if (!ret && encl == vma->vm_private_data)
+			zap_vma_ptes(vma, addr, PAGE_SIZE);
+
+		up_read(&encl_mm->mm->mmap_sem);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) || LINUX_VERSION_CODE > KERNEL_VERSION(5, 4, 0))
+		mmput(encl_mm->mm);
+#else
+		mmput_async(encl_mm->mm);
+#endif
+	}
+
+	srcu_read_unlock(&encl->srcu, idx);
+
+	/*
+	 * Redo the zapping if a mm was added to mm_list while zapping was in
+	 * progress.  dup_mmap() copies the PTEs for VM_PFNMAP VMAs, i.e. the
+	 * new mm won't take a page fault and so won't see that the page is
+	 * tagged RECLAIMED.  Note, vm_ops->open()/sgx_encl_mm_add() is called
+	 * _after_ PTEs are copied, and dup_mmap() holds the old mm's mmap_sem
+	 * for write, so the generation check is only needed to protect against
+	 * dup_mmap() running after the mm_list walk started but before the old
+	 * mm's PTEs were zapped.
+	 */
+	if (unlikely(encl->mm_list_gen != mm_list_gen))
+		goto retry;
+
+	mutex_lock(&encl->lock);
+
+	if (!(atomic_read(&encl->flags) & SGX_ENCL_DEAD)) {
+		ret = __eblock(sgx_epc_addr(epc_page));
+		if (encls_failed(ret))
+			ENCLS_WARN(ret, "EBLOCK");
+	}
+
+	mutex_unlock(&encl->lock);
+}
+
+static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
+			  struct sgx_backing *backing)
+{
+	struct sgx_pageinfo pginfo;
+	int ret;
+
+	pginfo.addr = 0;
+	pginfo.secs = 0;
+
+	pginfo.contents = (unsigned long)kmap_atomic(backing->contents);
+	pginfo.metadata = (unsigned long)kmap_atomic(backing->pcmd) +
+			  backing->pcmd_offset;
+
+	ret = __ewb(&pginfo, sgx_epc_addr(epc_page), va_slot);
+
+	kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
+					      backing->pcmd_offset));
+	kunmap_atomic((void *)(unsigned long)pginfo.contents);
+
+	return ret;
+}
+
+static void sgx_ipi_cb(void *info)
+{
+}
+
+static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
+{
+	cpumask_t *cpumask = &encl->cpumask;
+	struct sgx_encl_mm *encl_mm;
+	int idx;
+
+	/*
+	 * Note, this can race with sgx_encl_mm_add(), but ETRACK has already
+	 * been executed, so CPUs running in the new mm will enter the enclave
+	 * in a different epoch.
+	 */
+	cpumask_clear(cpumask);
+
+	idx = srcu_read_lock(&encl->srcu);
+
+	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
+		if (!mmget_not_zero(encl_mm->mm))
+			continue;
+
+		cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) || LINUX_VERSION_CODE > KERNEL_VERSION(5, 4, 0))
+		mmput(encl_mm->mm);
+#else
+		mmput_async(encl_mm->mm);
+#endif
+	}
+
+	srcu_read_unlock(&encl->srcu, idx);
+
+	return cpumask;
+}
+
+static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
+			 struct sgx_backing *backing)
+{
+	struct sgx_encl_page *encl_page = epc_page->owner;
+	struct sgx_encl *encl = encl_page->encl;
+	struct sgx_va_page *va_page;
+	unsigned int va_offset;
+	void *va_slot;
+	int ret;
+
+	encl_page->desc &= ~SGX_ENCL_PAGE_RECLAIMED;
+
+	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
+				   list);
+	va_offset = sgx_alloc_va_slot(va_page);
+	va_slot = sgx_epc_addr(va_page->epc_page) + va_offset;
+	if (sgx_va_page_full(va_page))
+		list_move_tail(&va_page->list, &encl->va_pages);
+
+	ret = __sgx_encl_ewb(epc_page, va_slot, backing);
+	if (ret == SGX_NOT_TRACKED) {
+		ret = __etrack(sgx_epc_addr(encl->secs.epc_page));
+		if (ret) {
+			if (encls_failed(ret))
+				ENCLS_WARN(ret, "ETRACK");
+		}
+
+		ret = __sgx_encl_ewb(epc_page, va_slot, backing);
+		if (ret == SGX_NOT_TRACKED) {
+			/*
+			 * Slow path, send IPIs to kick cpus out of the
+			 * enclave.  Note, it's imperative that the cpu
+			 * mask is generated *after* ETRACK, else we'll
+			 * miss cpus that entered the enclave between
+			 * generating the mask and incrementing epoch.
+			 */
+			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
+					 sgx_ipi_cb, NULL, 1);
+			ret = __sgx_encl_ewb(epc_page, va_slot, backing);
+		}
+	}
+
+	if (ret) {
+		if (encls_failed(ret))
+			ENCLS_WARN(ret, "EWB");
+
+		sgx_free_va_slot(va_page, va_offset);
+	} else {
+		encl_page->desc |= va_offset;
+		encl_page->va_page = va_page;
+	}
+}
+
+static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
+				struct sgx_backing *backing)
+{
+	struct sgx_encl_page *encl_page = epc_page->owner;
+	struct sgx_encl *encl = encl_page->encl;
+	struct sgx_backing secs_backing;
+	int ret;
+
+	mutex_lock(&encl->lock);
+
+	if (atomic_read(&encl->flags) & SGX_ENCL_DEAD) {
+		ret = __eremove(sgx_epc_addr(epc_page));
+		WARN(ret, "EREMOVE returned %d\n", ret);
+	} else {
+		sgx_encl_ewb(epc_page, backing);
+	}
+
+	encl_page->epc_page = NULL;
+	encl->secs_child_cnt--;
+
+	if (!encl->secs_child_cnt) {
+		if (atomic_read(&encl->flags) & SGX_ENCL_DEAD) {
+			sgx_free_page(encl->secs.epc_page);
+			encl->secs.epc_page = NULL;
+		} else if (atomic_read(&encl->flags) & SGX_ENCL_INITIALIZED) {
+			ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
+						   &secs_backing);
+			if (ret)
+				goto out;
+
+			sgx_encl_ewb(encl->secs.epc_page, &secs_backing);
+
+			sgx_free_page(encl->secs.epc_page);
+			encl->secs.epc_page = NULL;
+
+			sgx_encl_put_backing(&secs_backing, true);
+		}
+	}
+
+out:
+	mutex_unlock(&encl->lock);
+}
+
+/**
+ * sgx_reclaim_pages() - Reclaim EPC pages from the consumers
+ *
+ * Take a fixed number of pages from the head of the active page pool and
+ * reclaim them to the enclave's private shmem files. Pages that have been
+ * accessed since the last scan are skipped and moved to the tail of the
+ * active page pool so that pages are scanned in an LRU-like fashion.
+ */
+void sgx_reclaim_pages(void)
+{
+	struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
+	struct sgx_backing backing[SGX_NR_TO_SCAN];
+	struct sgx_epc_section *section;
+	struct sgx_encl_page *encl_page;
+	struct sgx_epc_page *epc_page;
+	int cnt = 0;
+	int ret;
+	int i;
+
+	spin_lock(&sgx_active_page_list_lock);
+	for (i = 0; i < SGX_NR_TO_SCAN; i++) {
+		if (list_empty(&sgx_active_page_list))
+			break;
+
+		epc_page = list_first_entry(&sgx_active_page_list,
+					    struct sgx_epc_page, list);
+		list_del_init(&epc_page->list);
+		encl_page = epc_page->owner;
+
+		if (kref_get_unless_zero(&encl_page->encl->refcount) != 0)
+			chunk[cnt++] = epc_page;
+		else
+			/* The owner is freeing the page. No need to add the
+			 * page back to the list of reclaimable pages.
+			 */
+			epc_page->desc &= ~SGX_EPC_PAGE_RECLAIMABLE;
+	}
+	spin_unlock(&sgx_active_page_list_lock);
+
+	for (i = 0; i < cnt; i++) {
+		epc_page = chunk[i];
+		encl_page = epc_page->owner;
+
+		if (!sgx_reclaimer_age(epc_page))
+			goto skip;
+
+		ret = sgx_encl_get_backing(encl_page->encl,
+					   SGX_ENCL_PAGE_INDEX(encl_page),
+					   &backing[i]);
+		if (ret)
+			goto skip;
+
+		mutex_lock(&encl_page->encl->lock);
+		encl_page->desc |= SGX_ENCL_PAGE_RECLAIMED;
+		mutex_unlock(&encl_page->encl->lock);
+		continue;
+
+skip:
+		kref_put(&encl_page->encl->refcount, sgx_encl_release);
+
+		spin_lock(&sgx_active_page_list_lock);
+		list_add_tail(&epc_page->list, &sgx_active_page_list);
+		spin_unlock(&sgx_active_page_list_lock);
+
+		chunk[i] = NULL;
+	}
+
+	for (i = 0; i < cnt; i++) {
+		epc_page = chunk[i];
+		if (epc_page)
+			sgx_reclaimer_block(epc_page);
+	}
+
+	for (i = 0; i < cnt; i++) {
+		epc_page = chunk[i];
+		if (!epc_page)
+			continue;
+
+		encl_page = epc_page->owner;
+		sgx_reclaimer_write(epc_page, &backing[i]);
+		sgx_encl_put_backing(&backing[i], true);
+
+		kref_put(&encl_page->encl->refcount, sgx_encl_release);
+		epc_page->desc &= ~SGX_EPC_PAGE_RECLAIMABLE;
+
+		section = sgx_epc_section(epc_page);
+		spin_lock(&section->lock);
+		list_add_tail(&epc_page->list, &section->page_list);
+		section->free_cnt++;
+		spin_unlock(&section->lock);
+	}
+}
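
A hedged sketch of how a consumer is expected to pair the reclaimer hooks
implemented above. The helper names sgx_encl_load_page() and
sgx_encl_drop_page() are hypothetical; the real callers live in encl.c and
ioctl.c elsewhere in this series.

static struct sgx_epc_page *sgx_encl_load_page(struct sgx_encl_page *encl_page)
{
	struct sgx_epc_page *epc_page;

	/* May perform direct reclaim when the 'reclaim' argument is true. */
	epc_page = sgx_alloc_page(encl_page, true);
	if (IS_ERR(epc_page))
		return epc_page;

	/* Expose the page to ksgxswapd only once it is fully populated. */
	sgx_mark_page_reclaimable(epc_page);
	return epc_page;
}

static int sgx_encl_drop_page(struct sgx_encl_page *encl_page)
{
	/*
	 * -EBUSY means the reclaimer currently owns the page (reclaim is in
	 * flight), so the caller must retry or defer the free.
	 */
	if (sgx_unmark_page_reclaimable(encl_page->epc_page))
		return -EBUSY;

	sgx_free_page(encl_page->epc_page);
	encl_page->epc_page = NULL;
	return 0;
}
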
diff --git a/ubuntu/sgx/sgx.h b/ubuntu/sgx/sgx.h
index 69503db5399f..0c481e6f2c95 100644
--- a/ubuntu/sgx/sgx.h
+++ b/ubuntu/sgx/sgx.h
@@ -1,286 +1,108 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Suresh Siddha <suresh.b.siddha@intel.com>
-// Serge Ayoun <serge.ayoun@intel.com>
-// Shay Katz-zamir <shay.katz-zamir@intel.com>
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+#ifndef _X86_SGX_H
+#define _X86_SGX_H
 
-#ifndef __ARCH_INTEL_SGX_H__
-#define __ARCH_INTEL_SGX_H__
-
-#include <crypto/hash.h>
-#include <linux/kref.h>
-#include <linux/mmu_notifier.h>
-#include <linux/mmu_notifier.h>
-#include <linux/radix-tree.h>
-#include <linux/radix-tree.h>
-#include <linux/rbtree.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/io.h>
 #include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-#include <asm/sgx.h>
-#include <asm/sgx_pr.h>
-#include <uapi/asm/sgx.h>
-
-#define SGX_MAX_EPC_BANKS 8
-
-#ifndef X86_FEATURE_SGX
-        #define X86_FEATURE_SGX 			(9 * 32 + 2)
-#endif
-
-#define FEATURE_CONTROL_SGX_ENABLE			(1<<18)
-
-#ifndef MSR_IA32_FEATURE_CONTROL
-    #define MSR_IA32_FEATURE_CONTROL 		0x0000003a
-#endif
-
-#ifndef FEATURE_CONTROL_SGX_LE_WR
-    #define FEATURE_CONTROL_SGX_LE_WR		(1<<17)
-#endif
-
-#ifndef X86_FEATURE_SGX_LC
-    #define X86_FEATURE_SGX_LC				(16*32+30) /* supports SGX launch configuration */
-#endif
-
-#ifndef MSR_IA32_FEATURE_CONFIG
-	#define MSR_IA32_FEATURE_CONFIG			0x0000013C
-#endif
-
-#ifndef FEATURE_CONFIG_LOCKED
-	#define FEATURE_CONFIG_LOCKED			(1<<0)
-#endif
-
-#ifndef FEATURE_CONFIG_AES_DISABLE
-	#define FEATURE_CONFIG_AES_DISABLE		(1<<1)
-#endif
+#include <linux/types.h>
+#include <asm/asm.h>
+#include "arch.h"
 
-#define FEATURE_CONFIG_AES_DISABLE_LOCKED (FEATURE_CONFIG_AES_DISABLE | FEATURE_CONFIG_LOCKED)
+#undef pr_fmt
+#define pr_fmt(fmt) "sgx: " fmt
 
-
-/* Intel SGX MSRs */
-#ifndef MSR_IA32_SGXLEPUBKEYHASH0
-    #define MSR_IA32_SGXLEPUBKEYHASH0	0x0000008C
-    #define MSR_IA32_SGXLEPUBKEYHASH1	0x0000008D
-    #define MSR_IA32_SGXLEPUBKEYHASH2	0x0000008E
-    #define MSR_IA32_SGXLEPUBKEYHASH3	0x0000008F
-#endif
-
-#define SGX_EINIT_SPIN_COUNT	20
-#define SGX_EINIT_SLEEP_COUNT	50
-#define SGX_EINIT_SLEEP_TIME	20
-
-#define SGX_VA_SLOT_COUNT 512
-#define SGX_VA_OFFSET_MASK ((SGX_VA_SLOT_COUNT - 1) << 3)
-
-#define SGX_EPC_BANK(epc_page) \
-	(&sgx_epc_banks[(unsigned long)(epc_page) & ~PAGE_MASK])
-#define SGX_EPC_PFN(epc_page) PFN_DOWN((unsigned long)(epc_page))
-#define SGX_EPC_ADDR(epc_page) ((unsigned long)(epc_page) & PAGE_MASK)
-
-enum sgx_alloc_flags {
-	SGX_ALLOC_ATOMIC	= BIT(0),
+struct sgx_epc_page {
+	unsigned long desc;
+	struct sgx_encl_page *owner;
+	struct list_head list;
 };
 
-struct sgx_va_page {
-	void *epc_page;
-	DECLARE_BITMAP(slots, SGX_VA_SLOT_COUNT);
-	struct list_head list;
+/**
+ * struct sgx_epc_section
+ *
+ * The firmware can define multiple chunks of EPC in different areas of
+ * physical memory, e.g. one per NUMA node. This structure stores the EPC
+ * pages of one EPC section and the virtual memory area where the pages
+ * have been mapped.
+ */
+struct sgx_epc_section {
+	unsigned long pa;
+	void *va;
+	unsigned long free_cnt;
+	struct list_head page_list;
+	struct list_head unsanitized_page_list;
+	spinlock_t lock;
 };
 
-static inline unsigned int sgx_alloc_va_slot(struct sgx_va_page *page)
-{
-	int slot = find_first_zero_bit(page->slots, SGX_VA_SLOT_COUNT);
+/**
+ * enum sgx_epc_page_desc - bits and masks for an EPC page's descriptor
+ * %SGX_EPC_SECTION_MASK:	SGX allows multiple EPC sections in
+ *				physical memory. The existing and near-future
+ *				hardware defines at most eight sections, hence
+ *				three bits to hold a section.
+ * %SGX_EPC_PAGE_RECLAIMABLE:	The page has been marked as reclaimable.
+ *				Pages need to be colored this way because a page
+ *				can be out of the active page list in the
+ *				process of being swapped out.
+ */
+enum sgx_epc_page_desc {
+	SGX_EPC_SECTION_MASK			= GENMASK_ULL(3, 0),
+	SGX_EPC_PAGE_RECLAIMABLE		= BIT(4),
+	/* bits 12-63 are reserved for the physical page address of the page */
+};
 
-	if (slot < SGX_VA_SLOT_COUNT)
-		set_bit(slot, page->slots);
+#define SGX_MAX_EPC_SECTIONS (SGX_EPC_SECTION_MASK + 1)
 
-	return slot << 3;
-}
+extern struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
 
-static inline void sgx_free_va_slot(struct sgx_va_page *page,
-				    unsigned int offset)
+static inline struct sgx_epc_section *sgx_epc_section(struct sgx_epc_page *page)
 {
-	clear_bit(offset >> 3, page->slots);
+	return &sgx_epc_sections[page->desc & SGX_EPC_SECTION_MASK];
 }
 
-static inline bool sgx_va_page_full(struct sgx_va_page *page)
+static inline void *sgx_epc_addr(struct sgx_epc_page *page)
 {
-	int slot = find_first_zero_bit(page->slots, SGX_VA_SLOT_COUNT);
+	struct sgx_epc_section *section = sgx_epc_section(page);
 
-	return slot == SGX_VA_SLOT_COUNT;
+	return section->va + (page->desc & PAGE_MASK) - section->pa;
 }
 
-enum sgx_encl_page_flags {
-	SGX_ENCL_PAGE_TCS	= BIT(0),
-	SGX_ENCL_PAGE_RESERVED	= BIT(1),
-	SGX_ENCL_PAGE_LOADED	= BIT(2),
-};
-
-#define SGX_ENCL_PAGE_ADDR(encl_page) ((encl_page)->desc & PAGE_MASK)
-#define SGX_ENCL_PAGE_VA_OFFSET(encl_page) \
-	((encl_page)->desc & SGX_VA_OFFSET_MASK)
-#define SGX_ENCL_PAGE_PCMD_OFFSET(encl_page) \
-	((PFN_DOWN((encl_page)->desc) & 31) * 128)
-
-struct sgx_encl_page {
-	unsigned long desc;
-	union {
-		void *epc_page;
-		struct sgx_va_page *va_page;
-	};
-	struct sgx_encl *encl;
-	struct list_head list;
-};
-
-enum sgx_encl_flags {
-	SGX_ENCL_INITIALIZED	= BIT(0),
-	SGX_ENCL_DEBUG		= BIT(1),
-	SGX_ENCL_SECS_EVICTED	= BIT(2),
-	SGX_ENCL_SUSPEND	= BIT(3),
-	SGX_ENCL_DEAD		= BIT(4),
-};
-
-struct sgx_encl {
-	unsigned int flags;
-	uint64_t attributes;
-	uint64_t allowed_attributes;
-	uint64_t xfrm;
-	unsigned int page_cnt;
-	unsigned int secs_child_cnt;
-	struct mutex lock;
-	struct mm_struct *mm;
-	struct file *backing;
-	struct file *pcmd;
-	struct list_head load_list;
-	struct kref refcount;
-	unsigned long base;
-	unsigned long size;
-	unsigned long ssaframesize;
-	struct list_head va_pages;
-	struct radix_tree_root page_tree;
-	struct list_head add_page_reqs;
-	struct work_struct add_page_work;
-	struct sgx_encl_page secs;
-	struct pid *tgid;
-	struct list_head encl_list;
-	struct mmu_notifier mmu_notifier;
-};
-
-extern struct workqueue_struct *sgx_add_page_wq;
-extern u64 sgx_encl_size_max_32;
-extern u64 sgx_encl_size_max_64;
-extern u64 sgx_xfrm_mask;
-extern u32 sgx_misc_reserved;
-extern u32 sgx_xsave_size_tbl[64];
-extern bool sgx_unlocked_msrs;
-
-extern const struct file_operations sgx_fops;
-extern const struct vm_operations_struct sgx_vm_ops;
-extern const struct file_operations sgx_provision_fops;
-
-int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
-		  struct vm_area_struct **vma);
-struct sgx_encl *sgx_encl_alloc(struct sgx_secs *secs);
-int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs);
-int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr, void *data,
-		      struct sgx_secinfo *secinfo, unsigned int mrmask);
-int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
-		  struct sgx_einittoken *einittoken);
-void sgx_encl_release(struct kref *ref);
-
-long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
-#ifdef CONFIG_COMPAT
-long sgx_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
-#endif
+#define SGX_NR_TO_SCAN		16
+#define SGX_NR_LOW_PAGES	32
+#define SGX_NR_HIGH_PAGES	64
 
-/* Utility functions */
-int sgx_test_and_clear_young(struct sgx_encl_page *page);
-struct page *sgx_get_backing(struct sgx_encl *encl,
-			     struct sgx_encl_page *entry,
-			     bool pcmd);
-void sgx_put_backing(struct page *backing, bool write);
-void sgx_insert_pte(struct sgx_encl *encl,
-		    struct sgx_encl_page *encl_page,
-		    void *epc_page,
-		    struct vm_area_struct *vma);
-int sgx_eremove(void *epc_page);
-void sgx_zap_tcs_ptes(struct sgx_encl *encl,
-		      struct vm_area_struct *vma);
-void sgx_invalidate(struct sgx_encl *encl, bool flush_cpus);
-void sgx_flush_cpus(struct sgx_encl *encl);
+extern int sgx_nr_epc_sections;
+extern struct task_struct *ksgxswapd_tsk;
+extern struct wait_queue_head(ksgxswapd_waitq);
+extern struct list_head sgx_active_page_list;
+extern spinlock_t sgx_active_page_list_lock;
 
-enum sgx_fault_flags {
-	SGX_FAULT_RESERVE	= BIT(0),
-};
+static inline unsigned long sgx_nr_free_pages(void)
+{
+	unsigned long cnt = 0;
+	int i;
 
-struct sgx_encl_page *sgx_fault_page(struct vm_area_struct *vma,
-				     unsigned long addr,
-				     unsigned int flags);
+	for (i = 0; i < sgx_nr_epc_sections; i++)
+		cnt += sgx_epc_sections[i].free_cnt;
 
-int sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus, void *hash);
-int sgx_get_key_hash_simple(const void *modulus, void *hash);
+	return cnt;
+}
 
-extern struct mutex sgx_encl_list_lock;
-extern struct list_head sgx_encl_list;
-extern atomic_t sgx_va_pages_cnt;
+static inline bool sgx_should_reclaim(unsigned long watermark)
+{
+	return sgx_nr_free_pages() < watermark &&
+	       !list_empty(&sgx_active_page_list);
+}
 
-int sgx_add_epc_bank(resource_size_t start, unsigned long size, int bank);
-int sgx_page_cache_init(struct device *parent);
-void sgx_page_cache_teardown(void);
-void *sgx_alloc_page(unsigned int flags);
-void sgx_free_page(void *page, struct sgx_encl *encl);
-void *sgx_get_page(void *page);
-void sgx_put_page(void *ptr);
+bool __init sgx_page_reclaimer_init(void);
+void sgx_mark_page_reclaimable(struct sgx_epc_page *page);
+int sgx_unmark_page_reclaimable(struct sgx_epc_page *page);
+void sgx_reclaim_pages(void);
 
+struct sgx_epc_page *sgx_try_alloc_page(void);
+struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim);
+void sgx_free_page(struct sgx_epc_page *page);
 
-#endif /* __ARCH_X86_INTEL_SGX_H__ */
+#endif /* _X86_SGX_H */
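
A minimal userspace sketch (not from the patch) of the EPC page descriptor
layout defined above: bits 12 and up carry the page's physical address, while
the low bits carry the section index and the RECLAIMABLE flag. The macro names
mirror the kernel ones and the address/index values are made up.

#include <stdint.h>
#include <stdio.h>

#define EPC_PAGE_SHIFT		12
#define EPC_PAGE_MASK		(~((1ULL << EPC_PAGE_SHIFT) - 1))
#define EPC_SECTION_MASK	0xfULL		/* GENMASK_ULL(3, 0) */
#define EPC_PAGE_RECLAIMABLE	(1ULL << 4)	/* BIT(4) */

int main(void)
{
	uint64_t section_pa = 0x170200000ULL;	/* hypothetical section base */
	uint64_t index = 2, page_nr = 5;

	/* Encode, as sgx_alloc_epc_section() does for every page. */
	uint64_t desc = (section_pa + (page_nr << EPC_PAGE_SHIFT)) | index;

	/* Decode, as sgx_epc_section() and sgx_epc_addr() do. */
	printf("section index: %llu\n",
	       (unsigned long long)(desc & EPC_SECTION_MASK));
	printf("physical addr: 0x%llx\n",
	       (unsigned long long)(desc & EPC_PAGE_MASK));
	printf("reclaimable:   %s\n",
	       (desc & EPC_PAGE_RECLAIMABLE) ? "yes" : "no");
	return 0;
}
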
diff --git a/ubuntu/sgx/sgx_driver_info.h b/ubuntu/sgx/sgx_driver_info.h
deleted file mode 100644
index 72719d6357d0..000000000000
--- a/ubuntu/sgx/sgx_driver_info.h
+++ /dev/null
@@ -1,62 +0,0 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-
-#ifndef _SGX_DRIVER_INFO_H
-#define _SGX_DRIVER_INFO_H
-
-#define SGX_DRIVER_INFO_OOT					0x80000000
-#define SGX_DRIVER_INFO_DCAP				0x40000000
-
-#define SGX_DRIVER_INFO_FEATURE_SGX2		0x00000001
-
-#endif /* _SGX_DRIVER_INFO_H */
diff --git a/ubuntu/sgx/sgx_encl.c b/ubuntu/sgx/sgx_encl.c
deleted file mode 100644
index af232b0a80f9..000000000000
--- a/ubuntu/sgx/sgx_encl.c
+++ /dev/null
@@ -1,973 +0,0 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Suresh Siddha <suresh.b.siddha@intel.com>
-// Serge Ayoun <serge.ayoun@intel.com>
-// Shay Katz-zamir <shay.katz-zamir@intel.com>
-// Sean Christopherson <sean.j.christopherson@intel.com>
-
-#include <asm/mman.h>
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <linux/hashtable.h>
-#include <linux/highmem.h>
-#include <linux/ratelimit.h>
-
-#include <linux/version.h>
-#include <linux/sched/signal.h>
-#include <linux/shmem_fs.h>
-#include <linux/slab.h>
-#include "sgx.h"
-#include "sgx_wl.h"
-
-
-struct sgx_add_page_req {
-	struct sgx_encl *encl;
-	struct sgx_encl_page *encl_page;
-	struct sgx_secinfo secinfo;
-	u16 mrmask;
-	struct list_head list;
-};
-
-/* A per-cpu cache for the last known values of IA32_SGXLEPUBKEYHASHx MSRs. */
-static DEFINE_PER_CPU(u64 [4], sgx_lepubkeyhash_cache);
-
-/**
- * sgx_encl_find - find an enclave
- * @mm:		mm struct of the current process
- * @addr:	address in the ELRANGE
- * @vma:	the resulting VMA
- *
- * Finds an enclave identified by the given address. Gives back the VMA, that
- * is part of the enclave, located in that address. The VMA is given back if it
- * is a proper enclave VMA even if a &struct sgx_encl instance does not exist
- * yet (enclave creation has not been performed).
- *
- * Return:
- * 0 on success,
- * -EINVAL if an enclave was not found,
- * -ENOENT if the enclave has not been created yet
- */
-int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
-		  struct vm_area_struct **vma)
-{
-	struct vm_area_struct *result;
-	struct sgx_encl *encl;
-
-	result = find_vma(mm, addr);
-	if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
-		return -EINVAL;
-
-	encl = result->vm_private_data;
-	*vma = result;
-
-	return encl ? 0 : -ENOENT;
-}
-
-static int sgx_measure(void *secs_page,
-		       void *epc_page,
-		       u16 mrmask)
-{
-	int ret = 0;
-	void *secs;
-	void *epc;
-	int i;
-	int j;
-
-	for (i = 0, j = 1; i < 0x1000 && !ret; i += 0x100, j <<= 1) {
-		if (!(j & mrmask))
-			continue;
-
-		secs = sgx_get_page(secs_page);
-		epc = sgx_get_page(epc_page);
-
-		ret = __eextend(secs, (void *)((unsigned long)epc + i));
-
-		sgx_put_page(epc);
-		sgx_put_page(secs);
-	}
-
-	return ret;
-}
-
-static int sgx_eadd(void *secs_page,
-		    void *epc_page,
-		    unsigned long linaddr,
-		    struct sgx_secinfo *secinfo,
-		    struct page *backing)
-{
-	struct sgx_pageinfo pginfo;
-	void *epc_page_vaddr;
-	int ret;
-
-	pginfo.srcpge = (unsigned long)kmap_atomic(backing);
-	pginfo.secs = (unsigned long)sgx_get_page(secs_page);
-	epc_page_vaddr = sgx_get_page(epc_page);
-
-	pginfo.linaddr = linaddr;
-	pginfo.secinfo = (unsigned long)secinfo;
-	ret = __eadd(&pginfo, epc_page_vaddr);
-
-	sgx_put_page(epc_page_vaddr);
-	sgx_put_page((void *)(unsigned long)pginfo.secs);
-	kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
-
-	return ret;
-}
-
-static bool sgx_process_add_page_req(struct sgx_add_page_req *req,
-				     void *epc_page)
-{
-	struct sgx_encl_page *encl_page = req->encl_page;
-	struct sgx_encl *encl = req->encl;
-	struct vm_area_struct *vma;
-	struct page *backing;
-	unsigned long addr;
-	int ret;
-
-	if (encl->flags & (SGX_ENCL_SUSPEND | SGX_ENCL_DEAD))
-		return false;
-
-	addr = SGX_ENCL_PAGE_ADDR(encl_page);
-	ret = sgx_encl_find(encl->mm, addr, &vma);
-	if (ret)
-		return false;
-
-	backing = sgx_get_backing(encl, encl_page, false);
-	if (IS_ERR(backing))
-		return false;
-
-	/* Do not race with do_exit() */
-	if (!atomic_read(&encl->mm->mm_users)) {
-		sgx_put_backing(backing, 0);
-		return false;
-	}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
-	ret = vmf_insert_pfn(vma, addr, SGX_EPC_PFN(epc_page));
-	if (ret != VM_FAULT_NOPAGE) {
-#else
-	ret = vm_insert_pfn(vma, addr, SGX_EPC_PFN(epc_page));
-	if (ret) {
-#endif
-		sgx_put_backing(backing, 0);
-		return false;
-	}
-
-	ret = sgx_eadd(encl->secs.epc_page, epc_page, addr, &req->secinfo,
-		       backing);
-
-	sgx_put_backing(backing, 0);
-	if (ret) {
-		sgx_warn(encl, "EADD returned %d\n", ret);
-		zap_vma_ptes(vma, addr, PAGE_SIZE);
-		return false;
-	}
-
-	encl->secs_child_cnt++;
-
-	ret = sgx_measure(encl->secs.epc_page, epc_page, req->mrmask);
-	if (ret) {
-		sgx_warn(encl, "EEXTEND returned %d\n", ret);
-		zap_vma_ptes(vma, addr, PAGE_SIZE);
-		return false;
-	}
-
-	encl_page->encl = encl;
-	encl_page->epc_page = epc_page;
-	encl_page->desc |= SGX_ENCL_PAGE_LOADED;
-	sgx_test_and_clear_young(encl_page);
-	list_add_tail(&encl_page->list, &encl->load_list);
-
-	return true;
-}
-
-static void sgx_add_page_worker(struct work_struct *work)
-{
-	struct sgx_add_page_req *req;
-	bool skip_rest = false;
-	bool is_empty = false;
-	struct sgx_encl *encl;
-	void *epc_page;
-
-	encl = container_of(work, struct sgx_encl, add_page_work);
-
-	do {
-		schedule();
-
-		if (encl->flags & SGX_ENCL_DEAD)
-			skip_rest = true;
-
-		mutex_lock(&encl->lock);
-		req = list_first_entry(&encl->add_page_reqs,
-				       struct sgx_add_page_req, list);
-		list_del(&req->list);
-		is_empty = list_empty(&encl->add_page_reqs);
-		mutex_unlock(&encl->lock);
-
-		if (skip_rest)
-			goto next;
-
-		epc_page = sgx_alloc_page(0);
-		if (IS_ERR(epc_page)) {
-			skip_rest = true;
-			goto next;
-		}
-
-		down_read(&encl->mm->mmap_sem);
-		mutex_lock(&encl->lock);
-
-		if (!sgx_process_add_page_req(req, epc_page)) {
-			sgx_free_page(epc_page, encl);
-			skip_rest = true;
-		}
-
-		mutex_unlock(&encl->lock);
-		up_read(&encl->mm->mmap_sem);
-
-next:
-		kfree(req);
-	} while (!kref_put(&encl->refcount, sgx_encl_release) && !is_empty);
-}
-
-static u32 sgx_calc_ssaframesize(u32 miscselect, u64 xfrm)
-{
-	u32 size_max = PAGE_SIZE;
-	u32 size;
-	int i;
-
-	for (i = 2; i < 64; i++) {
-		if (!((1ULL << i) & xfrm))
-			continue;
-
-		size = SGX_SSA_GPRS_SIZE + sgx_xsave_size_tbl[i];
-		if (miscselect & SGX_MISC_EXINFO)
-			size += SGX_SSA_MISC_EXINFO_SIZE;
-
-		if (size > size_max)
-			size_max = size;
-	}
-
-	return (size_max + PAGE_SIZE - 1) >> PAGE_SHIFT;
-}
-
-static int sgx_validate_secs(const struct sgx_secs *secs,
-			     unsigned long ssaframesize)
-{
-	int i;
-
-	if (secs->size < (2 * PAGE_SIZE) ||
-	    (secs->size & (secs->size - 1)) != 0)
-		return -EINVAL;
-
-	if (secs->base & (secs->size - 1))
-		return -EINVAL;
-
-	if (secs->attributes & SGX_ATTR_RESERVED_MASK ||
-	    secs->miscselect & sgx_misc_reserved)
-		return -EINVAL;
-
-	if (secs->attributes & SGX_ATTR_MODE64BIT) {
-#ifdef CONFIG_X86_64
-		if (secs->size > sgx_encl_size_max_64)
-			return -EINVAL;
-#else
-		return -EINVAL;
-#endif
-	} else {
-		/* On 64-bit architecture allow 32-bit encls only in
-		 * the compatibility mode.
-		 */
-#ifdef CONFIG_X86_64
-		if (!test_thread_flag(TIF_ADDR32))
-			return -EINVAL;
-#endif
-		if (secs->size > sgx_encl_size_max_32)
-			return -EINVAL;
-	}
-
-	if ((secs->xfrm & 0x3) != 0x3 || (secs->xfrm & ~sgx_xfrm_mask))
-		return -EINVAL;
-
-	/* Check that BNDREGS and BNDCSR are equal. */
-	if (((secs->xfrm >> 3) & 1) != ((secs->xfrm >> 4) & 1))
-		return -EINVAL;
-
-	if (!secs->ssaframesize || ssaframesize > secs->ssaframesize)
-		return -EINVAL;
-
-	for (i = 0; i < SGX_SECS_RESERVED1_SIZE; i++)
-		if (secs->reserved1[i])
-			return -EINVAL;
-
-	for (i = 0; i < SGX_SECS_RESERVED2_SIZE; i++)
-		if (secs->reserved2[i])
-			return -EINVAL;
-
-	for (i = 0; i < SGX_SECS_RESERVED3_SIZE; i++)
-		if (secs->reserved3[i])
-			return -EINVAL;
-
-	for (i = 0; i < SGX_SECS_RESERVED4_SIZE; i++)
-		if (secs->reserved4[i])
-			return -EINVAL;
-
-	return 0;
-}
-
-static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
-				     struct mm_struct *mm)
-{
-	struct sgx_encl *encl =
-		container_of(mn, struct sgx_encl, mmu_notifier);
-
-	mutex_lock(&encl->lock);
-	encl->flags |= SGX_ENCL_DEAD;
-	mutex_unlock(&encl->lock);
-}
-
-static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
-	.release	= sgx_mmu_notifier_release,
-};
-
-static int sgx_init_page(struct sgx_encl *encl, struct sgx_encl_page *entry,
-			 unsigned long addr)
-{
-	struct sgx_va_page *va_page;
-	void *epc_page = NULL;
-	void *ptr;
-	int ret = 0;
-
-	/* fast path */
-	mutex_lock(&encl->lock);
-	if (encl->page_cnt % SGX_VA_SLOT_COUNT)
-		goto out;
-	mutex_unlock(&encl->lock);
-
-	/* slow path */
-	epc_page = sgx_alloc_page(0);
-	if (IS_ERR(epc_page))
-		return PTR_ERR(epc_page);
-
-	mutex_lock(&encl->lock);
-	if (encl->page_cnt % SGX_VA_SLOT_COUNT) {
-		sgx_free_page(epc_page, encl);
-		goto out;
-	}
-
-	ptr = sgx_get_page(epc_page);
-	ret = __epa(ptr);
-	sgx_put_page(ptr);
-	if (ret) {
-		sgx_crit(encl, "EPA returned %d\n", ret);
-		sgx_free_page(epc_page, encl);
-		ret = -EFAULT;
-		goto out;
-	}
-
-	va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
-	if (!va_page) {
-		sgx_free_page(epc_page, encl);
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	atomic_inc(&sgx_va_pages_cnt);
-	va_page->epc_page = epc_page;
-	list_add(&va_page->list, &encl->va_pages);
-
-out:
-	if (!ret) {
-		entry->desc = addr;
-		encl->page_cnt++;
-	}
-	mutex_unlock(&encl->lock);
-	return ret;
-}
-
-/**
- * sgx_encl_alloc - allocate memory for an enclave and set attributes
- *
- * @secs:	SECS data (must be page aligned)
- *
- * Allocates a new &struct sgx_encl instance. Validates SECS attributes, creates
- * backing storage for the enclave and sets enclave attributes to sane initial
- * values.
- *
- * Return:
- * &struct sgx_encl instance on success,
- * system error on failure
- */
-struct sgx_encl *sgx_encl_alloc(struct sgx_secs *secs)
-{
-	unsigned long ssaframesize;
-	struct sgx_encl *encl;
-	struct file *backing;
-	struct file *pcmd;
-
-	ssaframesize = sgx_calc_ssaframesize(secs->miscselect, secs->xfrm);
-	if (sgx_validate_secs(secs, ssaframesize))
-		return ERR_PTR(-EINVAL);
-
-	backing = shmem_file_setup("[dev/sgx]", secs->size + PAGE_SIZE,
-				   VM_NORESERVE);
-	if (IS_ERR(backing))
-		return (void *)backing;
-
-	pcmd = shmem_file_setup("[dev/sgx]", (secs->size + PAGE_SIZE) >> 5,
-				VM_NORESERVE);
-	if (IS_ERR(pcmd)) {
-		fput(backing);
-		return (void *)pcmd;
-	}
-
-	encl = kzalloc(sizeof(*encl), GFP_KERNEL);
-	if (!encl) {
-		fput(backing);
-		fput(pcmd);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	encl->attributes = secs->attributes;
-	encl->allowed_attributes = SGX_ATTR_ALLOWED_MASK;
-	encl->xfrm = secs->xfrm;
-
-	kref_init(&encl->refcount);
-	INIT_LIST_HEAD(&encl->add_page_reqs);
-	INIT_LIST_HEAD(&encl->va_pages);
-	INIT_RADIX_TREE(&encl->page_tree, GFP_KERNEL);
-	INIT_LIST_HEAD(&encl->load_list);
-	INIT_LIST_HEAD(&encl->encl_list);
-	mutex_init(&encl->lock);
-	INIT_WORK(&encl->add_page_work, sgx_add_page_worker);
-
-	encl->mm = current->mm;
-	encl->base = secs->base;
-	encl->size = secs->size;
-	encl->ssaframesize = secs->ssaframesize;
-	encl->backing = backing;
-	encl->pcmd = pcmd;
-
-	return encl;
-}
-
-/**
- * sgx_encl_create - create an enclave
- *
- * @encl:	an enclave
- * @secs:	page aligned SECS data
- *
- * Validates SECS attributes, allocates an EPC page for the SECS and creates
- * the enclave by performing ECREATE.
- *
- * Return:
- * 0 on success,
- * system error on failure
- */
-int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
-{
-	struct vm_area_struct *vma;
-	struct sgx_pageinfo pginfo;
-	struct sgx_secinfo secinfo;
-	void *secs_epc;
-	void *secs_vaddr;
-	long ret;
-
-	secs_epc = sgx_alloc_page(0);
-	if (IS_ERR(secs_epc)) {
-		ret = PTR_ERR(secs_epc);
-		return ret;
-	}
-
-	encl->secs.epc_page = secs_epc;
-	encl->tgid = get_pid(task_tgid(current));
-
-	ret = sgx_init_page(encl, &encl->secs, encl->base + encl->size);
-	if (ret)
-		return ret;
-
-	secs_vaddr = sgx_get_page(secs_epc);
-
-	pginfo.srcpge = (unsigned long)secs;
-	pginfo.linaddr = 0;
-	pginfo.secinfo = (unsigned long)&secinfo;
-	pginfo.secs = 0;
-	memset(&secinfo, 0, sizeof(secinfo));
-	ret = __ecreate((void *)&pginfo, secs_vaddr);
-
-	sgx_put_page(secs_vaddr);
-
-	if (ret) {
-		sgx_dbg(encl, "ECREATE returned %ld\n", ret);
-		ret = -EFAULT;
-		return ret;
-	}
-
-	if (secs->attributes & SGX_ATTR_DEBUG)
-		encl->flags |= SGX_ENCL_DEBUG;
-
-	encl->mmu_notifier.ops = &sgx_mmu_notifier_ops;
-	ret = mmu_notifier_register(&encl->mmu_notifier, encl->mm);
-	if (ret) {
-		if (ret == -EINTR)
-			ret = -ERESTARTSYS;
-		encl->mmu_notifier.ops = NULL;
-		return ret;
-	}
-
-	down_read(&current->mm->mmap_sem);
-	ret = sgx_encl_find(current->mm, secs->base, &vma);
-	if (ret != -ENOENT) {
-		if (!ret)
-			ret = -EINVAL;
-		up_read(&current->mm->mmap_sem);
-		return ret;
-	}
-
-	if (vma->vm_start != secs->base ||
-	    vma->vm_end != (secs->base + secs->size) ||
-	    vma->vm_pgoff != 0) {
-		ret = -EINVAL;
-		up_read(&current->mm->mmap_sem);
-		return ret;
-	}
-
-	vma->vm_private_data = encl;
-	up_read(&current->mm->mmap_sem);
-
-	mutex_lock(&sgx_encl_list_lock);
-	list_add_tail(&encl->encl_list, &sgx_encl_list);
-	mutex_unlock(&sgx_encl_list_lock);
-
-	return 0;
-}
-
-static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
-{
-	u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
-	u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
-	int i;
-
-	if ((secinfo->flags & SGX_SECINFO_RESERVED_MASK) ||
-	    ((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R)) ||
-	    (page_type != SGX_SECINFO_TCS &&
-	     page_type != SGX_SECINFO_REG))
-		return -EINVAL;
-
-	for (i = 0; i < sizeof(secinfo->reserved) / sizeof(u64); i++)
-		if (secinfo->reserved[i])
-			return -EINVAL;
-
-	return 0;
-}
-
-static bool sgx_validate_offset(struct sgx_encl *encl, unsigned long offset)
-{
-	if (offset & (PAGE_SIZE - 1))
-		return false;
-
-	if (offset >= encl->size)
-		return false;
-
-	return true;
-}
-
-static int sgx_validate_tcs(struct sgx_encl *encl, struct sgx_tcs *tcs)
-{
-	int i;
-
-	if (tcs->flags & SGX_TCS_RESERVED_MASK) {
-		sgx_dbg(encl, "%s: invalid TCS flags = 0x%lx\n",
-			__func__, (unsigned long)tcs->flags);
-		return -EINVAL;
-	}
-
-	if (tcs->flags & SGX_TCS_DBGOPTIN) {
-		sgx_dbg(encl, "%s: DBGOPTIN TCS flag is set, EADD will clear it\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	if (!sgx_validate_offset(encl, tcs->ossa)) {
-		sgx_dbg(encl, "%s: invalid OSSA: 0x%lx\n", __func__,
-			(unsigned long)tcs->ossa);
-		return -EINVAL;
-	}
-
-	if (!sgx_validate_offset(encl, tcs->ofsbase)) {
-		sgx_dbg(encl, "%s: invalid OFSBASE: 0x%lx\n", __func__,
-			(unsigned long)tcs->ofsbase);
-		return -EINVAL;
-	}
-
-	if (!sgx_validate_offset(encl, tcs->ogsbase)) {
-		sgx_dbg(encl, "%s: invalid OGSBASE: 0x%lx\n", __func__,
-			(unsigned long)tcs->ogsbase);
-		return -EINVAL;
-	}
-
-	if ((tcs->fslimit & 0xFFF) != 0xFFF) {
-		sgx_dbg(encl, "%s: invalid FSLIMIT: 0x%x\n", __func__,
-			tcs->fslimit);
-		return -EINVAL;
-	}
-
-	if ((tcs->gslimit & 0xFFF) != 0xFFF) {
-		sgx_dbg(encl, "%s: invalid GSLIMIT: 0x%x\n", __func__,
-			tcs->gslimit);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < sizeof(tcs->reserved) / sizeof(u64); i++)
-		if (tcs->reserved[i])
-			return -EINVAL;
-
-	return 0;
-}
-
-static int __sgx_encl_add_page(struct sgx_encl *encl,
-			       struct sgx_encl_page *encl_page,
-			       unsigned long addr,
-			       void *data,
-			       struct sgx_secinfo *secinfo,
-			       unsigned int mrmask)
-{
-	u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
-	struct sgx_add_page_req *req = NULL;
-	struct page *backing;
-	void *backing_ptr;
-	int ret;
-	int empty;
-
-	if (sgx_validate_secinfo(secinfo))
-		return -EINVAL;
-
-	if (page_type == SGX_SECINFO_TCS) {
-		ret = sgx_validate_tcs(encl, data);
-		if (ret)
-			return ret;
-	}
-
-	ret = sgx_init_page(encl, encl_page, addr);
-	if (ret)
-		return ret;
-
-	mutex_lock(&encl->lock);
-
-	if (encl->flags & (SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (radix_tree_lookup(&encl->page_tree, addr >> PAGE_SHIFT)) {
-		ret = -EEXIST;
-		goto out;
-	}
-
-	req = kzalloc(sizeof(*req), GFP_KERNEL);
-	if (!req) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	backing = sgx_get_backing(encl, encl_page, false);
-	if (IS_ERR((void *)backing)) {
-		ret = PTR_ERR((void *)backing);
-		goto out;
-	}
-
-	ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
-				encl_page);
-	if (ret) {
-		sgx_put_backing(backing, false /* write */);
-		goto out;
-	}
-
-	backing_ptr = kmap(backing);
-	memcpy(backing_ptr, data, PAGE_SIZE);
-	kunmap(backing);
-
-	if (page_type == SGX_SECINFO_TCS)
-		encl_page->desc |= SGX_ENCL_PAGE_TCS;
-
-	memcpy(&req->secinfo, secinfo, sizeof(*secinfo));
-
-	req->encl = encl;
-	req->encl_page = encl_page;
-	req->mrmask = mrmask;
-	empty = list_empty(&encl->add_page_reqs);
-	kref_get(&encl->refcount);
-	list_add_tail(&req->list, &encl->add_page_reqs);
-	if (empty)
-		queue_work(sgx_add_page_wq, &encl->add_page_work);
-
-	sgx_put_backing(backing, true /* write */);
-
-	mutex_unlock(&encl->lock);
-	return 0;
-out:
-	kfree(req);
-	mutex_unlock(&encl->lock);
-	return ret;
-}
-
-
-/**
- * sgx_encl_add_page - add a page to the enclave
- *
- * @encl:	an enclave
- * @addr:	page address in the ELRANGE
- * @data:	page data
- * @secinfo:	page permissions
- * @mrmask:	bitmask to select the 256 byte chunks to be measured
- *
- * Creates a new enclave page and enqueues an EADD operation that will be
- * processed by a worker thread later on.
- *
- * Return:
- * 0 on success,
- * system error on failure
- */
-int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr, void *data,
-		      struct sgx_secinfo *secinfo, unsigned int mrmask)
-{
-	struct sgx_encl_page *page;
-	int ret;
-
-	page = kzalloc(sizeof(*page), GFP_KERNEL);
-	if (!page)
-		return -ENOMEM;
-
-	ret = __sgx_encl_add_page(encl, page, addr, data, secinfo, mrmask);
-
-	if (ret)
-		kfree(page);
-
-	return ret;
-}
-
-
-static void sgx_update_lepubkeyhash_msrs(u64 *lepubkeyhash, bool enforce)
-{
-	u64 *cache;
-	int i;
-
-	cache = per_cpu(sgx_lepubkeyhash_cache, smp_processor_id());
-	for (i = 0; i < 4; i++) {
-		if (enforce || (lepubkeyhash[i] != cache[i])) {
-			wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
-			cache[i] = lepubkeyhash[i];
-		}
-	}
-}
-
-
-static int sgx_einit(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
-		     struct sgx_einittoken *token, u64 *lepubkeyhash)
-{
-	void *secs_epc = encl->secs.epc_page;
-	void *secs_va;
-	int ret;
-
-	secs_va = sgx_get_page(secs_epc);
-
-	if (!boot_cpu_has(X86_FEATURE_SGX_LC)) {
-		ret = __einit(sigstruct, token, secs_va);
-		goto out;
-	}
-
-	token->payload.valid = 0;
-
-	preempt_disable();
-	sgx_update_lepubkeyhash_msrs(lepubkeyhash, false);
-	ret = __einit(sigstruct, token, secs_va);
-	if (ret == SGX_INVALID_EINITTOKEN) {
-		sgx_update_lepubkeyhash_msrs(lepubkeyhash, true);
-		ret = __einit(sigstruct, token, secs_va);
-	}
-	preempt_enable();
-
-out:
-	sgx_put_page(secs_va);
-
-	return ret;
-}
-
-/**
- * sgx_encl_init - perform EINIT for the given enclave
- *
- * @encl:	an enclave
- * @sigstruct:	SIGSTRUCT for the enclave
- * @token:	EINITTOKEN for the enclave
- *
- * Retries a few times in order to perform EINIT operation on an enclave
- * because there could be potentially an interrupt storm.
- *
- * Return:
- * 0 on success,
- * -FAULT on a CPU exception during EINIT,
- * SGX error code
- */
-int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
-		  struct sgx_einittoken *token)
-{
-	u64 mrsigner[4];
-	int ret;
-	int i;
-	int j;
-
-	ret = sgx_get_key_hash_simple(sigstruct->modulus, mrsigner);
-	if (ret)
-		return ret;
-
-	if((encl->attributes & ~encl->allowed_attributes) && (encl->attributes & SGX_ATTR_PROVISIONKEY)) {
-		for(i = 0; i < (sizeof(G_SERVICE_ENCLAVE_MRSIGNER) / sizeof(G_SERVICE_ENCLAVE_MRSIGNER[0])); i++) {
-			if(0 == memcmp(&G_SERVICE_ENCLAVE_MRSIGNER[i], mrsigner, sizeof(G_SERVICE_ENCLAVE_MRSIGNER[0]))) {
-				encl->allowed_attributes |= SGX_ATTR_PROVISIONKEY;
-				break;
-			}
-		}
-	}
-
-	/* Check that the required attributes have been authorized. */
-	if (encl->attributes & ~encl->allowed_attributes)
-		return -EACCES;
-
-	flush_work(&encl->add_page_work);
-
-	mutex_lock(&encl->lock);
-
-	if (encl->flags & SGX_ENCL_INITIALIZED) {
-		mutex_unlock(&encl->lock);
-		return 0;
-	}
-
-	for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
-		for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
-			ret = sgx_einit(encl, sigstruct, token, mrsigner);
-			if (ret == SGX_UNMASKED_EVENT)
-				continue;
-			else
-				break;
-		}
-
-		if (ret != SGX_UNMASKED_EVENT)
-			break;
-
-		msleep_interruptible(SGX_EINIT_SLEEP_TIME);
-
-		if (signal_pending(current)) {
-			mutex_unlock(&encl->lock);
-			return -ERESTARTSYS;
-		}
-	}
-
-	mutex_unlock(&encl->lock);
-
-	if (ret) {
-		if (ret > 0)
-			sgx_dbg(encl, "EINIT returned %d\n", ret);
-		return ret;
-	}
-
-	encl->flags |= SGX_ENCL_INITIALIZED;
-	return 0;
-}
-
-void sgx_encl_release(struct kref *ref)
-{
-	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
-	struct sgx_encl_page *entry;
-	struct sgx_va_page *va_page;
-	struct radix_tree_iter iter;
-	void **slot;
-
-	mutex_lock(&sgx_encl_list_lock);
-	if (!list_empty(&encl->encl_list))
-		list_del(&encl->encl_list);
-	mutex_unlock(&sgx_encl_list_lock);
-
-	put_pid(encl->tgid);
-
-	if (encl->mmu_notifier.ops)
-		mmu_notifier_unregister(&encl->mmu_notifier, encl->mm);
-
-	list_for_each_entry(entry, &encl->load_list, list)
-		sgx_free_page(entry->epc_page, encl);
-
-	radix_tree_for_each_slot(slot, &encl->page_tree, &iter, 0) {
-		entry = *slot;
-		radix_tree_delete(&encl->page_tree, PFN_DOWN(entry->desc));
-		kfree(entry);
-	}
-
-	while (!list_empty(&encl->va_pages)) {
-		va_page = list_first_entry(&encl->va_pages,
-					   struct sgx_va_page, list);
-		list_del(&va_page->list);
-		sgx_free_page(va_page->epc_page, encl);
-		kfree(va_page);
-		atomic_dec(&sgx_va_pages_cnt);
-	}
-
-	if (!(encl->flags & SGX_ENCL_SECS_EVICTED))
-		sgx_free_page(encl->secs.epc_page, encl);
-
-	if (encl->backing)
-		fput(encl->backing);
-
-	if (encl->pcmd)
-		fput(encl->pcmd);
-
-	kfree(encl);
-}
diff --git a/ubuntu/sgx/sgx_ioctl.c b/ubuntu/sgx/sgx_ioctl.c
deleted file mode 100644
index e8267f9da317..000000000000
--- a/ubuntu/sgx/sgx_ioctl.c
+++ /dev/null
@@ -1,332 +0,0 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Suresh Siddha <suresh.b.siddha@intel.com>
-// Serge Ayoun <serge.ayoun@intel.com>
-// Shay Katz-zamir <shay.katz-zamir@intel.com>
-// Sean Christopherson <sean.j.christopherson@intel.com>
-
-#include <asm/mman.h>
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <linux/hashtable.h>
-#include <linux/highmem.h>
-#include <linux/ratelimit.h>
-#include <linux/version.h>
-#include <linux/sched/signal.h>
-#include <linux/shmem_fs.h>
-#include <linux/slab.h>
-#include "sgx.h"
-
-static int sgx_encl_get(unsigned long addr, struct sgx_encl **encl)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	int ret;
-
-	if (addr & (PAGE_SIZE - 1))
-		return -EINVAL;
-
-	down_read(&mm->mmap_sem);
-
-	ret = sgx_encl_find(mm, addr, &vma);
-	if (!ret) {
-		*encl = vma->vm_private_data;
-
-		if ((*encl)->flags & SGX_ENCL_SUSPEND)
-			ret = SGX_POWER_LOST_ENCLAVE;
-		else
-			kref_get(&(*encl)->refcount);
-	}
-
-	up_read(&mm->mmap_sem);
-	return ret;
-}
-
-/**
- * sgx_ioc_enclave_create - handler for %SGX_IOC_ENCLAVE_CREATE
- * @filep:	open file to /dev/sgx
- * @cmd:	the command value
- * @arg:	pointer to the &struct sgx_enclave_create
- *
- * Validates SECS attributes, allocates an EPC page for the SECS and performs
- * ECREATE.
- *
- * Return:
- * 0 on success,
- * system error on failure
- */
-static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd,
-				   unsigned long arg)
-{
-	struct sgx_enclave_create *createp = (struct sgx_enclave_create *)arg;
-	struct sgx_secs *secs;
-	struct sgx_encl *encl;
-	int ret;
-
-	secs = kzalloc(sizeof(*secs),  GFP_KERNEL);
-	if (!secs)
-		return -ENOMEM;
-
-	ret = copy_from_user(secs, (void __user *)createp->src, sizeof(*secs));
-	if (ret)
-		goto out;
-
-	encl = sgx_encl_alloc(secs);
-	if (IS_ERR(encl)) {
-		ret = PTR_ERR(encl);
-		goto out;
-	}
-
-	ret = sgx_encl_create(encl, secs);
-	if (ret)
-		kref_put(&encl->refcount, sgx_encl_release);
-
-out:
-	kfree(secs);
-	return ret;
-}
-
-/**
- * sgx_ioc_enclave_add_page - handler for %SGX_IOC_ENCLAVE_ADD_PAGE
- *
- * @filep:	open file to /dev/sgx
- * @cmd:	the command value
- * @arg:	pointer to the &struct sgx_enclave_add_page
- *
- * Creates a new enclave page and enqueues an EADD operation that will be
- * processed by a worker thread later on.
- *
- * Return:
- * 0 on success,
- * system error on failure
- */
-static long sgx_ioc_enclave_add_page(struct file *filep, unsigned int cmd,
-				     unsigned long arg)
-{
-	struct sgx_enclave_add_page *addp = (void *)arg;
-	struct sgx_secinfo secinfo;
-	struct sgx_encl *encl;
-	struct page *data_page;
-	void *data;
-	int ret;
-
-	ret = sgx_encl_get(addp->addr, &encl);
-	if (ret)
-		return ret;
-
-	if (copy_from_user(&secinfo, (void __user *)addp->secinfo,
-			   sizeof(secinfo))) {
-		kref_put(&encl->refcount, sgx_encl_release);
-		return -EFAULT;
-	}
-
-	data_page = alloc_page(GFP_HIGHUSER);
-	if (!data_page) {
-		kref_put(&encl->refcount, sgx_encl_release);
-		return -ENOMEM;
-	}
-
-	data = kmap(data_page);
-
-	ret = copy_from_user((void *)data, (void __user *)addp->src, PAGE_SIZE);
-	if (ret)
-		goto out;
-
-	ret = sgx_encl_add_page(encl, addp->addr, data, &secinfo, addp->mrmask);
-	if (ret)
-		goto out;
-
-out:
-	kref_put(&encl->refcount, sgx_encl_release);
-	kunmap(data_page);
-	__free_page(data_page);
-	return ret;
-}
-
-/**
- * sgx_ioc_enclave_init - handler for %SGX_IOC_ENCLAVE_INIT
- *
- * @filep:	open file to /dev/sgx
- * @cmd:	the command value
- * @arg:	pointer to the &struct sgx_enclave_init
- *
- * Flushes the remaining enqueued EADD operations and performs EINIT.
- *
- * Return:
- * 0 on success,
- * system error on failure
- */
-static long sgx_ioc_enclave_init(struct file *filep, unsigned int cmd,
-				 unsigned long arg)
-{
-	struct sgx_enclave_init *initp = (struct sgx_enclave_init *)arg;
-	struct sgx_sigstruct *sigstruct;
-	struct sgx_einittoken *einittoken;
-	struct sgx_encl *encl;
-	struct page *initp_page;
-	int ret;
-
-	initp_page = alloc_page(GFP_HIGHUSER);
-	if (!initp_page)
-		return -ENOMEM;
-
-	sigstruct = kmap(initp_page);
-	einittoken = (struct sgx_einittoken *)
-		((unsigned long)sigstruct + PAGE_SIZE / 2);
-
-	ret = copy_from_user(sigstruct, (void __user *)initp->sigstruct,
-			     sizeof(*sigstruct));
-	if (ret)
-		goto out;
-
-	ret = sgx_encl_get(initp->addr, &encl);
-	if (ret)
-		goto out;
-
-	ret = sgx_encl_init(encl, sigstruct, einittoken);
-
-	kref_put(&encl->refcount, sgx_encl_release);
-
-out:
-	kunmap(initp_page);
-	__free_page(initp_page);
-	return ret;
-}
-
-/**
- * sgx_ioc_enclave_set_attribute - handler for %SGX_IOC_ENCLAVE_SET_ATTRIBUTE
- * @filep:	open file to /dev/sgx
- * @cmd:	the command value
- * @arg:	pointer to a struct sgx_enclave_set_attribute instance
- *
- * Mark the enclave as being allowed to access a restricted attribute bit.
- * The requested attribute is specified via the attribute_fd field in the
- * provided struct sgx_enclave_set_attribute.  The attribute_fd must be a
- * handle to an SGX attribute file, e.g. "/dev/sgx/provision".
- *
- * Failure to explicitly request access to a restricted attribute will cause
- * sgx_ioc_enclave_init() to fail.  Currently, the only restricted attribute
- * is access to the PROVISION_KEY.
- *
- * Note, access to the EINITTOKEN_KEY is disallowed entirely.
- *
- * Return: 0 on success, -errno otherwise
- */
-static long sgx_ioc_enclave_set_attribute(struct file *filep, unsigned int cmd,
-					  unsigned long arg)
-{
-	struct sgx_enclave_set_attribute *params = (void *)arg;
-	struct sgx_encl *encl;
-	struct file *attribute_file;
-	int ret;
-
-	attribute_file = fget(params->attribute_fd);
-	if (!attribute_file || !attribute_file->f_op)
-		return -EINVAL;
-
-	if (attribute_file->f_op != &sgx_provision_fops) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ret = sgx_encl_get(params->addr, &encl);
-	if (ret)
-		goto out;
-
-	encl->allowed_attributes |= SGX_ATTR_PROVISIONKEY;
-
-	kref_put(&encl->refcount, sgx_encl_release);
-
-out:
-	fput(attribute_file);
-	return ret;
-}
-
-typedef long (*sgx_ioc_t)(struct file *filep, unsigned int cmd,
-			  unsigned long arg);
-
-long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
-	char data[256];
-	sgx_ioc_t handler = NULL;
-	long ret;
-
-	switch (cmd) {
-	case SGX_IOC_ENCLAVE_CREATE:
-		handler = sgx_ioc_enclave_create;
-		break;
-	case SGX_IOC_ENCLAVE_ADD_PAGE:
-		handler = sgx_ioc_enclave_add_page;
-		break;
-	case SGX_IOC_ENCLAVE_INIT:
-		handler = sgx_ioc_enclave_init;
-		break;
-	case SGX_IOC_ENCLAVE_SET_ATTRIBUTE:
-		handler = sgx_ioc_enclave_set_attribute;
-		break;
-	default:
-		return -ENOIOCTLCMD;
-	}
-
-	if (copy_from_user(data, (void __user *)arg, _IOC_SIZE(cmd)))
-		return -EFAULT;
-
-	ret = handler(filep, cmd, (unsigned long)((void *)data));
-	if (!ret && (cmd & IOC_OUT)) {
-		if (copy_to_user((void __user *)arg, data, _IOC_SIZE(cmd)))
-			return -EFAULT;
-	}
-
-	return ret;
-}
diff --git a/ubuntu/sgx/sgx_main.c b/ubuntu/sgx/sgx_main.c
deleted file mode 100644
index 9f3a1eac66ce..000000000000
--- a/ubuntu/sgx/sgx_main.c
+++ /dev/null
@@ -1,498 +0,0 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Suresh Siddha <suresh.b.siddha@intel.com>
-// Serge Ayoun <serge.ayoun@intel.com>
-// Shay Katz-zamir <shay.katz-zamir@intel.com>
-// Sean Christopherson <sean.j.christopherson@intel.com>
-
-#include <linux/acpi.h>
-#include <linux/platform_device.h>
-#include <linux/suspend.h>
-#include <linux/version.h>
-#include <linux/mman.h>
-#include <linux/cdev.h>
-
-#include "sgx.h"
-#include "sgx_version.h"
-#include "sgx_driver_info.h"
-
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR("Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
-
-
-/*
- * Global data.
- */
-
-struct workqueue_struct *sgx_add_page_wq;
-u64 sgx_encl_size_max_32;
-u64 sgx_encl_size_max_64;
-u64 sgx_xfrm_mask = 0x3;
-u32 sgx_misc_reserved;
-u32 sgx_xsave_size_tbl[64];
-
-
-// From intel_sgx.c
-bool sgx_enabled = false;
-
-static bool sgx_is_enabled(void)
-{
-	unsigned int eax;
-	unsigned int ebx;
-	unsigned int ecx;
-	unsigned int edx;
-	unsigned long fc;
-
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
-		pr_err("intel_sgx: Not an Intel CPU vendor!\n");
-		return false;
-	}
-
-	if (!boot_cpu_has(X86_FEATURE_SGX)) {
-		pr_err("intel_sgx: SGX is not supported on the platform!\n");
-		return false;
-	}
-
-	if (!boot_cpu_has(X86_FEATURE_SGX_LC)) {
-		pr_err("intel_sgx: FLC feature is not supported on the platform!\n");
-		return false;
-	}
-
-	if (!boot_cpu_has(X86_FEATURE_AES)) {
-		pr_err("intel_sgx: AES-NI instructions are not supported on the platform!\n");
-		return false;
-	}
-
-	rdmsrl(MSR_IA32_FEATURE_CONTROL, fc);
-	if (!(fc & FEATURE_CONTROL_LOCKED)) {
-		pr_err("intel_sgx: FEATURE_CONTROL MSR is not locked!\n");
-		return false;
-	}
-
-	if (!(fc & FEATURE_CONTROL_SGX_ENABLE)) {
-		pr_err("intel_sgx: SGX is not enabled in FEATURE_CONTROL MSR!\n");
-		return false;
-	}
-
-	if (!(fc & FEATURE_CONTROL_SGX_LE_WR)) {
-		pr_err("intel_sgx: FLC feature is not enabled in FEATURE_CONTROL MSR!\n");
-		return false;
-	}
-
-	cpuid(0, &eax, &ebx, &ecx, &edx);
-	if (eax < SGX_CPUID) {
-		pr_err("intel_sgx: SGX CPUID leaf is not supported!\n");
-		return false;
-	}
-
-	cpuid_count(SGX_CPUID, SGX_CPUID_CAPABILITIES, &eax, &ebx, &ecx, &edx);
-
-	/* The first bit indicates support for SGX1 instruction set. */
-	if (!(eax & 1)) {
-		pr_err("intel_sgx: Platform does not support SGX!\n");
-		return false;
-	}
-
-	return true;
-}
-
-static int sgx_init(void)
-{
-	sgx_enabled = sgx_is_enabled();
-	return 0;
-}
-
-
-static DECLARE_RWSEM(sgx_file_sem);
-
-
-#ifdef CONFIG_COMPAT
-long sgx_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
-	return sgx_ioctl(filep, cmd, arg);
-}
-#endif
-
-static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	vma->vm_ops = &sgx_vm_ops;
-	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO |
-			 VM_DONTCOPY;
-
-	return 0;
-}
-
-static unsigned long sgx_get_unmapped_area(struct file *file,
-					   unsigned long addr,
-					   unsigned long len,
-					   unsigned long pgoff,
-					   unsigned long flags)
-{
-	if (len < 2 * PAGE_SIZE || (len & (len - 1)) || flags & MAP_PRIVATE)
-		return -EINVAL;
-
-	/* On 64-bit architecture, allow mmap() to exceed 32-bit encl
-	 * limit only if the task is not running in 32-bit compatibility
-	 * mode.
-	 */
-	if (len > sgx_encl_size_max_32)
-#ifdef CONFIG_X86_64
-		if (test_thread_flag(TIF_ADDR32))
-			return -EINVAL;
-#else
-		return -EINVAL;
-#endif
-
-#ifdef CONFIG_X86_64
-	if (len > sgx_encl_size_max_64)
-		return -EINVAL;
-#endif
-
-	addr = current->mm->get_unmapped_area(file, addr, 2 * len, pgoff,
-					      flags);
-	if (IS_ERR_VALUE(addr))
-		return addr;
-
-	addr = (addr + (len - 1)) & ~(len - 1);
-
-	return addr;
-}
-
-const struct file_operations sgx_fops = {
-	.owner			= THIS_MODULE,
-	.unlocked_ioctl		= sgx_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl		= sgx_compat_ioctl,
-#endif
-	.mmap			= sgx_mmap,
-	.get_unmapped_area	= sgx_get_unmapped_area,
-};
-
-const struct file_operations sgx_provision_fops = {
-	.owner			= THIS_MODULE,
-};
-
-static int sgx_pm_suspend(struct device *dev)
-{
-	struct sgx_encl *encl;
-
-	list_for_each_entry(encl, &sgx_encl_list, encl_list) {
-		sgx_invalidate(encl, false);
-		encl->flags |= SGX_ENCL_SUSPEND;
-		flush_work(&encl->add_page_work);
-	}
-
-	return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(sgx_drv_pm, sgx_pm_suspend, NULL);
-
-
-static struct bus_type sgx_bus_type = {
-	.name	= "sgx",
-};
-
-struct sgx_context {
-	struct device dev;
-	struct cdev cdev;
-	struct device provision_dev;
-	struct cdev provision_cdev;
-	struct kobject *kobj_dir;
-};
-
-static dev_t sgx_devt;
-
-static void sgx_dev_release(struct device *dev)
-{
-	struct sgx_context *ctx = container_of(dev, struct sgx_context, dev);
-
-	kfree(ctx);
-}
-
-static struct sgx_context *sgx_ctx_alloc(struct device *parent)
-{
-	struct sgx_context *ctx;
-
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
-
-	// /dev/sgx
-	device_initialize(&ctx->dev);
-
-	ctx->dev.bus = &sgx_bus_type;
-	ctx->dev.parent = parent;
-	ctx->dev.devt = MKDEV(MAJOR(sgx_devt), 0);
-	ctx->dev.release = sgx_dev_release;
-
-	dev_set_name(&ctx->dev, "sgx");
-
-	cdev_init(&ctx->cdev, &sgx_fops);
-	ctx->cdev.owner = THIS_MODULE;
-
-	// /dev/sgx_prv
-	device_initialize(&ctx->provision_dev);
-
-	ctx->provision_dev.bus = &sgx_bus_type;
-	ctx->provision_dev.parent = parent;
-	ctx->provision_dev.devt = MKDEV(MAJOR(sgx_devt), 1);
-	ctx->provision_dev.release = sgx_dev_release;
-
-	dev_set_name(&ctx->provision_dev, "sgx_prv");
-
-	cdev_init(&ctx->provision_cdev, &sgx_provision_fops);
-	ctx->provision_cdev.owner = THIS_MODULE;
-
-	// device
-	dev_set_drvdata(parent, ctx);
-
-	return ctx;
-}
-
-static struct sgx_context *sgxm_ctx_alloc(struct device *parent)
-{
-	struct sgx_context *ctx;
-	int rc;
-
-	ctx = sgx_ctx_alloc(parent);
-	if (IS_ERR(ctx))
-		return ctx;
-
-	rc = devm_add_action_or_reset(parent, (void (*)(void *))put_device,
-				      &ctx->dev);
-	if (rc) {
-		kfree(ctx);
-		return ERR_PTR(rc);
-	}
-
-	return ctx;
-}
-
-static ssize_t info_show(struct kobject *kobj,
-					struct kobj_attribute *attr, char *buf)
-{
-    return sprintf(buf, "0x%08X\n", SGX_DRIVER_INFO_DCAP);
-}
-
-static ssize_t version_show(struct kobject *kobj,
-					struct kobj_attribute *attr, char *buf)
-{
-    return sprintf(buf, "v"  DRV_VERSION "\n");
-}
-
-struct kobj_attribute info_attr = __ATTR_RO(info);
-struct kobj_attribute version_attr = __ATTR_RO(version);
-
-static int sgx_dev_init(struct device *parent)
-{
-	struct sgx_context *sgx_dev;
-	unsigned int eax;
-	unsigned int ebx;
-	unsigned int ecx;
-	unsigned int edx;
-	int ret;
-	int i;
-
-	pr_info("intel_sgx: " DRV_DESCRIPTION " v" DRV_VERSION "\n");
-
-	sgx_dev = sgxm_ctx_alloc(parent);
-
-	cpuid_count(SGX_CPUID, SGX_CPUID_CAPABILITIES, &eax, &ebx, &ecx, &edx);
-	/* Only allow misc bits supported by the driver. */
-	sgx_misc_reserved = ~ebx | SGX_MISC_RESERVED_MASK;
-#ifdef CONFIG_X86_64
-	sgx_encl_size_max_64 = 1ULL << ((edx >> 8) & 0xFF);
-#endif
-	sgx_encl_size_max_32 = 1ULL << (edx & 0xFF);
-
-	if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
-		cpuid_count(SGX_CPUID, SGX_CPUID_ATTRIBUTES, &eax, &ebx, &ecx,
-			    &edx);
-		sgx_xfrm_mask = (((u64)edx) << 32) + (u64)ecx;
-
-		for (i = 2; i < 64; i++) {
-			cpuid_count(0x0D, i, &eax, &ebx, &ecx, &edx);
-			if ((1ULL << i) & sgx_xfrm_mask)
-				sgx_xsave_size_tbl[i] = eax + ebx;
-		}
-	}
-
-	ret = sgx_page_cache_init(parent);
-	if (ret)
-		return ret;
-
-	sgx_add_page_wq = alloc_workqueue("intel_sgx-add-page-wq",
-					  WQ_UNBOUND | WQ_FREEZABLE, 1);
-	if (!sgx_add_page_wq) {
-		pr_err("intel_sgx: alloc_workqueue() failed\n");
-		ret = -ENOMEM;
-		goto out_page_cache;
-	}
-
-	ret = cdev_device_add(&sgx_dev->cdev, &sgx_dev->dev);
-	if (ret)
-		goto out_workqueue;
-
-	ret = cdev_device_add(&sgx_dev->provision_cdev, &sgx_dev->provision_dev);
-	if (ret)
-		goto out_workqueue;
-
-	sgx_dev->kobj_dir = kobject_create_and_add("sgx", kernel_kobj);
-	sysfs_create_file(sgx_dev->kobj_dir, &info_attr.attr);
-	sysfs_create_file(sgx_dev->kobj_dir, &version_attr.attr);
-
-	return 0;
-
-out_workqueue:
-	destroy_workqueue(sgx_add_page_wq);
-out_page_cache:
-	sgx_page_cache_teardown();
-	return ret;
-}
-
-
-static int sgx_drv_probe(struct platform_device *pdev)
-{
-	sgx_init();
-
-	if (!sgx_enabled)
-		return -ENODEV;
-
-	return sgx_dev_init(&pdev->dev);
-}
-
-static int sgx_drv_remove(struct platform_device *pdev)
-{
-	struct sgx_context *ctx = dev_get_drvdata(&pdev->dev);
-
-	sysfs_remove_file(ctx->kobj_dir, &info_attr.attr);
-	sysfs_remove_file(ctx->kobj_dir, &version_attr.attr);
-	kobject_put(ctx->kobj_dir);
-
-	cdev_device_del(&ctx->cdev, &ctx->dev);
-	cdev_device_del(&ctx->provision_cdev, &ctx->provision_dev);
-
-	destroy_workqueue(sgx_add_page_wq);
-	sgx_page_cache_teardown();
-
-	return 0;
-}
-
-#ifdef CONFIG_ACPI
-static struct acpi_device_id sgx_device_ids[] = {
-	{"INT0E0C", 0},
-	{"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, sgx_device_ids);
-#endif
-
-static struct platform_driver sgx_drv = {
-	.probe = sgx_drv_probe,
-	.remove = sgx_drv_remove,
-	.driver = {
-		.name			= "intel_sgx",
-		.pm			= &sgx_drv_pm,
-		.acpi_match_table	= ACPI_PTR(sgx_device_ids),
-	},
-};
-
-static int __init sgx_drv_subsys_init(void)
-{
-	int ret;
-
-	ret = bus_register(&sgx_bus_type);
-	if (ret)
-		return ret;
-
-	ret = alloc_chrdev_region(&sgx_devt, 0, 1, "sgx");
-	if (ret < 0) {
-		bus_unregister(&sgx_bus_type);
-		return ret;
-	}
-
-	return 0;
-}
-
-static void sgx_drv_subsys_exit(void)
-{
-	bus_unregister(&sgx_bus_type);
-	unregister_chrdev_region(sgx_devt, 1);
-}
-
-static int __init sgx_drv_init(void)
-{
-	int ret;
-
-	ret = sgx_drv_subsys_init();
-	if (ret)
-		return ret;
-
-	ret = platform_driver_register(&sgx_drv);
-	if (ret)
-		sgx_drv_subsys_exit();
-
-	return ret;
-}
-module_init(sgx_drv_init);
-
-static void __exit sgx_drv_exit(void)
-{
-	platform_driver_unregister(&sgx_drv);
-	sgx_drv_subsys_exit();
-}
-module_exit(sgx_drv_exit);
-
diff --git a/ubuntu/sgx/sgx_page_cache.c b/ubuntu/sgx/sgx_page_cache.c
deleted file mode 100644
index 751893d2f688..000000000000
--- a/ubuntu/sgx/sgx_page_cache.c
+++ /dev/null
@@ -1,596 +0,0 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Suresh Siddha <suresh.b.siddha@intel.com>
-// Serge Ayoun <serge.ayoun@intel.com>
-// Shay Katz-zamir <shay.katz-zamir@intel.com>
-// Sean Christopherson <sean.j.christopherson@intel.com>
-
-#include <linux/device.h>
-#include <linux/freezer.h>
-#include <linux/highmem.h>
-#include <linux/kthread.h>
-#include <linux/ratelimit.h>
-#include <linux/version.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include "sgx.h"
-
-#define SGX_NR_LOW_PAGES 32
-#define SGX_NR_HIGH_PAGES 64
-#define SGX_NR_TO_SCAN	16
-
-LIST_HEAD(sgx_encl_list);
-DEFINE_MUTEX(sgx_encl_list_lock);
-atomic_t sgx_va_pages_cnt = ATOMIC_INIT(0);
-
-struct sgx_epc_bank {
-	unsigned long pa;
-	unsigned long va;
-	unsigned long size;
-	void **pages;
-	atomic_t free_cnt;
-	struct rw_semaphore lock;
-};
-
-static struct sgx_epc_bank sgx_epc_banks[SGX_MAX_EPC_BANKS];
-static int sgx_nr_epc_banks;
-static unsigned int sgx_nr_total_pages;
-static atomic_t sgx_nr_free_pages = ATOMIC_INIT(0);
-static struct task_struct *ksgxswapd_tsk;
-static DECLARE_WAIT_QUEUE_HEAD(ksgxswapd_waitq);
-
-static int sgx_test_and_clear_young_cb(pte_t *ptep,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0))
-				pgtable_t token,
-#endif
-				unsigned long addr, void *data)
-{
-	pte_t pte;
-	int ret;
-
-	ret = pte_young(*ptep);
-	if (ret) {
-		pte = pte_mkold(*ptep);
-		set_pte_at((struct mm_struct *)data, addr, ptep, pte);
-	}
-
-	return ret;
-}
-
-/**
- * sgx_test_and_clear_young() - Test and reset the accessed bit
- * @page:	enclave page to be tested for recent access
- *
- * Checks the Access (A) bit from the PTE corresponding to the
- * enclave page and clears it.  Returns 1 if the page has been
- * recently accessed and 0 if not.
- */
-int sgx_test_and_clear_young(struct sgx_encl_page *page)
-{
-	unsigned long addr = SGX_ENCL_PAGE_ADDR(page);
-	struct sgx_encl *encl = page->encl;
-	struct vm_area_struct *vma;
-	int ret;
-
-	ret = sgx_encl_find(encl->mm, addr, &vma);
-	if (ret)
-		return 0;
-
-	if (encl != vma->vm_private_data)
-		return 0;
-
-	return apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE,
-				   sgx_test_and_clear_young_cb, vma->vm_mm);
-}
-
-static struct sgx_encl *sgx_isolate_encl(void)
-{
-	struct sgx_encl *encl = NULL;
-	int i;
-
-	mutex_lock(&sgx_encl_list_lock);
-
-	if (list_empty(&sgx_encl_list)) {
-		mutex_unlock(&sgx_encl_list_lock);
-		return NULL;
-	}
-
-	for (i = 0; i < SGX_NR_TO_SCAN; i++) {
-		encl = list_first_entry(&sgx_encl_list, struct sgx_encl,
-					encl_list);
-
-		list_move_tail(&encl->encl_list, &sgx_encl_list);
-
-		/* Select a victim with faulted pages and a valid refcount. */
-		if (!list_empty(&encl->load_list) &&
-		    kref_get_unless_zero(&encl->refcount))
-			break;
-
-		encl = NULL;
-	}
-
-	mutex_unlock(&sgx_encl_list_lock);
-
-	return encl;
-}
-
-static void sgx_isolate_pages(struct sgx_encl *encl,
-			      struct sgx_encl_page **cluster)
-{
-	struct sgx_encl_page *entry;
-	int i;
-
-	mutex_lock(&encl->lock);
-
-	if (encl->flags & SGX_ENCL_DEAD)
-		goto out;
-
-	for (i = 0; i < SGX_NR_TO_SCAN; i++) {
-		if (list_empty(&encl->load_list))
-			break;
-
-		entry = list_first_entry(&encl->load_list, struct sgx_encl_page,
-					 list);
-
-		if (!sgx_test_and_clear_young(entry) &&
-		    !(entry->desc & SGX_ENCL_PAGE_RESERVED)) {
-			entry->desc |= SGX_ENCL_PAGE_RESERVED;
-			list_del(&entry->list);
-			entry->desc &= ~SGX_ENCL_PAGE_LOADED;
-			*cluster++ = entry;
-		} else {
-			list_move_tail(&entry->list, &encl->load_list);
-		}
-	}
-out:
-	*cluster = NULL;
-	mutex_unlock(&encl->lock);
-}
-
-static int __sgx_ewb(struct sgx_encl *encl,
-		     struct sgx_encl_page *encl_page,
-		     struct sgx_va_page *va_page,
-		     unsigned int va_offset)
-{
-	unsigned long pcmd_offset = SGX_ENCL_PAGE_PCMD_OFFSET(encl_page);
-	struct sgx_pageinfo pginfo;
-	struct page *backing;
-	struct page *pcmd;
-	void *epc;
-	void *va;
-	int ret;
-
-	backing = sgx_get_backing(encl, encl_page, false);
-	if (IS_ERR(backing)) {
-		ret = PTR_ERR(backing);
-		sgx_warn(encl, "pinning the backing page for EWB failed with %d\n",
-			 ret);
-		return ret;
-	}
-
-	pcmd = sgx_get_backing(encl, encl_page, true);
-	if (IS_ERR(pcmd)) {
-		ret = PTR_ERR(pcmd);
-		sgx_warn(encl, "pinning the pcmd page for EWB failed with %d\n",
-			 ret);
-		goto out;
-	}
-
-	epc = sgx_get_page(encl_page->epc_page);
-	va = sgx_get_page(va_page->epc_page);
-
-	pginfo.srcpge = (unsigned long)kmap_atomic(backing);
-	pginfo.pcmd = (unsigned long)kmap_atomic(pcmd) + pcmd_offset;
-	pginfo.linaddr = 0;
-	pginfo.secs = 0;
-	ret = __ewb(&pginfo, epc, (void *)((unsigned long)va + va_offset));
-	kunmap_atomic((void *)(unsigned long)(pginfo.pcmd - pcmd_offset));
-	kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
-
-	sgx_put_page(va);
-	sgx_put_page(epc);
-	sgx_put_backing(pcmd, true);
-
-out:
-	sgx_put_backing(backing, true);
-	return ret;
-}
-
-static void sgx_eblock(struct sgx_encl *encl, struct sgx_encl_page **cluster)
-{
-	struct vm_area_struct *vma;
-	unsigned long addr;
-	void *ptr;
-	int ret;
-
-	for ( ; *cluster; cluster++) {
-		addr = SGX_ENCL_PAGE_ADDR(*cluster);
-
-		ret = sgx_encl_find(encl->mm, addr, &vma);
-		if (!ret && encl == vma->vm_private_data)
-			zap_vma_ptes(vma, addr, PAGE_SIZE);
-
-		ptr = sgx_get_page((*cluster)->epc_page);
-		ret = __eblock(ptr);
-		sgx_put_page(ptr);
-		if (ret) {
-			sgx_crit(encl, "EBLOCK returned %d\n", ret);
-			sgx_invalidate(encl, true);
-		}
-	}
-}
-
-static void sgx_etrack(struct sgx_encl *encl)
-{
-	void *ptr;
-	int ret;
-
-	ptr = sgx_get_page(encl->secs.epc_page);
-	ret = __etrack(ptr);
-	sgx_put_page(ptr);
-	if (ret) {
-		sgx_crit(encl, "ETRACK returned %d\n", ret);
-		sgx_invalidate(encl, true);
-	}
-}
-
-static void sgx_ewb(struct sgx_encl *encl, struct sgx_encl_page *entry)
-{
-	struct sgx_va_page *va_page;
-	unsigned int va_offset;
-	int ret;
-
-	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page, list);
-	va_offset = sgx_alloc_va_slot(va_page);
-	if (sgx_va_page_full(va_page))
-		list_move_tail(&va_page->list, &encl->va_pages);
-
-	ret = __sgx_ewb(encl, entry, va_page, va_offset);
-	if (ret == SGX_NOT_TRACKED) {
-		/* slow path, IPI needed */
-		sgx_flush_cpus(encl);
-		ret = __sgx_ewb(encl, entry, va_page, va_offset);
-	}
-
-	if (ret) {
-		sgx_invalidate(encl, true);
-		if (ret > 0)
-			sgx_err(encl, "EWB returned %d, enclave invalidated\n",
-				ret);
-	}
-
-	sgx_free_page(entry->epc_page, encl);
-	entry->desc |= va_offset;
-	entry->va_page = va_page;
-	entry->desc &= ~SGX_ENCL_PAGE_RESERVED;
-}
-
-static void sgx_write_pages(struct sgx_encl *encl,
-			    struct sgx_encl_page **cluster)
-{
-	if (!*cluster)
-		return;
-
-	mutex_lock(&encl->lock);
-
-	sgx_eblock(encl, cluster);
-	sgx_etrack(encl);
-
-	for ( ; *cluster; cluster++) {
-		sgx_ewb(encl, *cluster);
-		encl->secs_child_cnt--;
-	}
-
-	if (!encl->secs_child_cnt && (encl->flags & SGX_ENCL_INITIALIZED)) {
-		sgx_ewb(encl, &encl->secs);
-		encl->flags |= SGX_ENCL_SECS_EVICTED;
-	}
-
-	mutex_unlock(&encl->lock);
-}
-
-static void sgx_swap_pages(void)
-{
-	struct sgx_encl *encl;
-	struct sgx_encl_page *cluster[SGX_NR_TO_SCAN + 1];
-
-	encl = sgx_isolate_encl();
-	if (!encl)
-		return;
-
-	down_read(&encl->mm->mmap_sem);
-	sgx_isolate_pages(encl, cluster);
-	sgx_write_pages(encl, cluster);
-	up_read(&encl->mm->mmap_sem);
-
-	kref_put(&encl->refcount, sgx_encl_release);
-}
-
-static int ksgxswapd(void *p)
-{
-	set_freezable();
-
-	while (!kthread_should_stop()) {
-		if (try_to_freeze())
-			continue;
-
-		wait_event_freezable(ksgxswapd_waitq, kthread_should_stop() ||
-				     atomic_read(&sgx_nr_free_pages) <
-				     SGX_NR_HIGH_PAGES);
-
-		if (atomic_read(&sgx_nr_free_pages) < SGX_NR_HIGH_PAGES)
-			sgx_swap_pages();
-	}
-
-	pr_info("%s: done\n", __func__);
-	return 0;
-}
-
-static int sgx_init_epc_bank(unsigned long addr, unsigned long size,
-			     unsigned long index, struct sgx_epc_bank *bank)
-{
-	unsigned long nr_pages = size >> PAGE_SHIFT;
-	unsigned long i;
-	void *va;
-
-	if (IS_ENABLED(CONFIG_X86_64)) {
-		va = ioremap_cache(addr, size);
-		if (!va)
-			return -ENOMEM;
-	}
-
-	bank->pages = kzalloc(nr_pages * sizeof(void *), GFP_KERNEL);
-	if (!bank->pages) {
-		if (IS_ENABLED(CONFIG_X86_64))
-			iounmap(va);
-
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < nr_pages; i++)
-		bank->pages[i] = (void *)((addr + (i << PAGE_SHIFT)) | index);
-
-	bank->pa = addr;
-	bank->size = size;
-
-	if (IS_ENABLED(CONFIG_X86_64))
-		bank->va = (unsigned long)va;
-
-	atomic_set(&bank->free_cnt, nr_pages);
-
-	init_rwsem(&bank->lock);
-
-	sgx_nr_total_pages += nr_pages;
-	atomic_add(nr_pages, &sgx_nr_free_pages);
-	return 0;
-}
-
-int sgx_page_cache_init(struct device *parent)
-{
-	struct task_struct *tsk;
-	unsigned long size;
-	unsigned int eax = 0;
-	unsigned int ebx = 0;
-	unsigned int ecx = 0;
-	unsigned int edx = 0;
-	unsigned long pa;
-	int i;
-	int ret;
-
-	for (i = 0; i < SGX_MAX_EPC_BANKS; i++) {
-		cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC_BANKS, &eax, &ebx,
-			    &ecx, &edx);
-		if (!(eax & 0xf))
-			break;
-
-		pa = ((u64)(ebx & 0xfffff) << 32) + (u64)(eax & 0xfffff000);
-		size = ((u64)(edx & 0xfffff) << 32) + (u64)(ecx & 0xfffff000);
-
-		dev_info(parent, "EPC bank 0x%lx-0x%lx\n", pa, pa + size);
-
-		ret = sgx_init_epc_bank(pa, size, i, &sgx_epc_banks[i]);
-		if (ret)
-			return ret;
-
-		sgx_nr_epc_banks++;
-	}
-
-	tsk = kthread_run(ksgxswapd, NULL, "ksgxswapd");
-	if (IS_ERR(tsk)) {
-		sgx_page_cache_teardown();
-		return PTR_ERR(tsk);
-	}
-
-	ksgxswapd_tsk = tsk;
-	return 0;
-}
-
-void sgx_page_cache_teardown(void)
-{
-	struct sgx_epc_bank *bank;
-	int i;
-
-	if (ksgxswapd_tsk) {
-		kthread_stop(ksgxswapd_tsk);
-		ksgxswapd_tsk = NULL;
-	}
-
-	for (i = 0; i < sgx_nr_epc_banks; i++) {
-		bank = &sgx_epc_banks[i];
-
-		if (IS_ENABLED(CONFIG_X86_64))
-			iounmap((void *)bank->va);
-
-		kfree(bank->pages);
-	}
-}
-
-static void *sgx_try_alloc_page(void)
-{
-	struct sgx_epc_bank *bank;
-	void *page = NULL;
-	int i;
-
-	for (i = 0; i < sgx_nr_epc_banks; i++) {
-		bank = &sgx_epc_banks[i];
-
-		down_write(&bank->lock);
-
-		if (atomic_read(&bank->free_cnt))
-			page = bank->pages[atomic_dec_return(&bank->free_cnt)];
-
-		up_write(&bank->lock);
-
-		if (page)
-			break;
-	}
-
-	if (page)
-		atomic_dec(&sgx_nr_free_pages);
-
-	return page;
-}
-
-/**
- * sgx_alloc_page - allocate an EPC page
- * @flags:	allocation flags
- *
- * Try to grab a page from the free EPC page list. If there is a free page
- * available, it is returned to the caller. If called with SGX_ALLOC_ATOMIC,
- * the function will return immediately if the list is empty. Otherwise, it
- * will swap out pages until a free page becomes available. Before returning,
- * the low watermark is checked and ksgxswapd is woken up if we are below it.
- *
- * Return: an EPC page or a system error code
- */
-void *sgx_alloc_page(unsigned int flags)
-{
-	void *entry;
-
-	for ( ; ; ) {
-		entry = sgx_try_alloc_page();
-		if (entry)
-			break;
-
-		/* We need at least two pages for the #PF handler. */
-		if (atomic_read(&sgx_va_pages_cnt) > (sgx_nr_total_pages - 2))
-			return ERR_PTR(-ENOMEM);
-
-		if (flags & SGX_ALLOC_ATOMIC) {
-			entry = ERR_PTR(-EBUSY);
-			break;
-		}
-
-		if (signal_pending(current)) {
-			entry = ERR_PTR(-ERESTARTSYS);
-			break;
-		}
-
-		sgx_swap_pages();
-		schedule();
-	}
-
-	if (atomic_read(&sgx_nr_free_pages) < SGX_NR_LOW_PAGES)
-		wake_up(&ksgxswapd_waitq);
-
-	return entry;
-}
-
-/**
- * sgx_free_page - free an EPC page
- *
- * EREMOVE an EPC page and insert it back into the list of free pages.
- * If EREMOVE fails, the error is logged as a critical error; such a
- * failure indicates a driver bug.
- *
- * @page:	any EPC page
- * @encl:	enclave that owns the given EPC page
- */
-void sgx_free_page(void *page, struct sgx_encl *encl)
-{
-	struct sgx_epc_bank *bank = SGX_EPC_BANK(page);
-	void *va;
-	int ret;
-
-	va = sgx_get_page(page);
-	ret = __eremove(va);
-	sgx_put_page(va);
-
-	if (ret)
-		sgx_crit(encl, "EREMOVE returned %d\n", ret);
-
-	down_read(&bank->lock);
-	bank->pages[atomic_inc_return(&bank->free_cnt) - 1] = page;
-	up_read(&bank->lock);
-
-	atomic_inc(&sgx_nr_free_pages);
-}
-
-void *sgx_get_page(void *page)
-{
-	struct sgx_epc_bank *bank = SGX_EPC_BANK(page);
-
-	if (IS_ENABLED(CONFIG_X86_64))
-		return (void *)(bank->va + SGX_EPC_ADDR(page) - bank->pa);
-
-	return kmap_atomic_pfn(SGX_EPC_PFN(page));
-}
-
-void sgx_put_page(void *ptr)
-{
-	if (IS_ENABLED(CONFIG_X86_64))
-		return;
-
-	kunmap_atomic(ptr);
-}
diff --git a/ubuntu/sgx/sgx_util.c b/ubuntu/sgx/sgx_util.c
deleted file mode 100644
index 4ff8b13ebbf1..000000000000
--- a/ubuntu/sgx/sgx_util.c
+++ /dev/null
@@ -1,382 +0,0 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Suresh Siddha <suresh.b.siddha@intel.com>
-// Serge Ayoun <serge.ayoun@intel.com>
-// Shay Katz-zamir <shay.katz-zamir@intel.com>
-// Sean Christopherson <sean.j.christopherson@intel.com>
-
-#include <linux/highmem.h>
-#include <linux/version.h>
-#include <linux/sched/mm.h>
-#include <linux/shmem_fs.h>
-#include "sgx.h"
-
-struct page *sgx_get_backing(struct sgx_encl *encl,
-			     struct sgx_encl_page *entry,
-			     bool pcmd)
-{
-	struct address_space *mapping;
-	struct inode *inode;
-	gfp_t gfpmask;
-	pgoff_t index;
-
-	if (pcmd)
-		inode = encl->pcmd->f_path.dentry->d_inode;
-	else
-		inode = encl->backing->f_path.dentry->d_inode;
-
-	mapping = inode->i_mapping;
-	gfpmask = mapping_gfp_mask(mapping);
-
-	if (pcmd)
-		index = PFN_DOWN(entry->desc - encl->base) >> 5;
-	else
-		index = PFN_DOWN(entry->desc - encl->base);
-
-	return shmem_read_mapping_page_gfp(mapping, index, gfpmask);
-}
-
-void sgx_put_backing(struct page *backing_page, bool write)
-{
-	if (write)
-		set_page_dirty(backing_page);
-
-	put_page(backing_page);
-}
-
-void sgx_zap_tcs_ptes(struct sgx_encl *encl, struct vm_area_struct *vma)
-{
-	struct sgx_encl_page *entry;
-	unsigned long addr;
-
-	list_for_each_entry(entry, &encl->load_list, list) {
-		addr = SGX_ENCL_PAGE_ADDR(entry);
-		if ((entry->desc & SGX_ENCL_PAGE_TCS) &&
-		    addr >= vma->vm_start && addr < vma->vm_end)
-			zap_vma_ptes(vma, addr, PAGE_SIZE);
-	}
-}
-
-void sgx_invalidate(struct sgx_encl *encl, bool flush_cpus)
-{
-	struct vm_area_struct *vma;
-	unsigned long addr;
-	int ret;
-
-	for (addr = encl->base; addr < (encl->base + encl->size);
-	     addr = vma->vm_end) {
-		ret = sgx_encl_find(encl->mm, addr, &vma);
-		if (!ret && encl == vma->vm_private_data)
-			sgx_zap_tcs_ptes(encl, vma);
-		else
-			break;
-	}
-
-	encl->flags |= SGX_ENCL_DEAD;
-
-	if (flush_cpus)
-		sgx_flush_cpus(encl);
-}
-
-static void sgx_ipi_cb(void *info)
-{
-}
-
-void sgx_flush_cpus(struct sgx_encl *encl)
-{
-	on_each_cpu_mask(mm_cpumask(encl->mm), sgx_ipi_cb, NULL, 1);
-}
-
-static int sgx_eldu(struct sgx_encl *encl,
-		    struct sgx_encl_page *encl_page,
-		    void *epc_page,
-		    bool is_secs)
-{
-	struct sgx_pageinfo pginfo;
-	unsigned long pcmd_offset;
-	unsigned long va_offset;
-	void *secs_ptr = NULL;
-	struct page *backing;
-	struct page *pcmd;
-	void *epc_ptr;
-	void *va_ptr;
-	int ret;
-
-	pcmd_offset = SGX_ENCL_PAGE_PCMD_OFFSET(encl_page);
-	va_offset = SGX_ENCL_PAGE_VA_OFFSET(encl_page);
-
-	backing = sgx_get_backing(encl, encl_page, false);
-	if (IS_ERR(backing)) {
-		ret = PTR_ERR(backing);
-		sgx_warn(encl, "pinning the backing page for ELDU failed with %d\n",
-			 ret);
-		return ret;
-	}
-
-	pcmd = sgx_get_backing(encl, encl_page, true);
-	if (IS_ERR(pcmd)) {
-		ret = PTR_ERR(pcmd);
-		sgx_warn(encl, "pinning the pcmd page for ELDU failed with %d\n",
-			 ret);
-		goto out;
-	}
-
-	if (!is_secs)
-		secs_ptr = sgx_get_page(encl->secs.epc_page);
-
-	epc_ptr = sgx_get_page(epc_page);
-	va_ptr = sgx_get_page(encl_page->va_page->epc_page);
-	pginfo.srcpge = (unsigned long)kmap_atomic(backing);
-	pginfo.pcmd = (unsigned long)kmap_atomic(pcmd) + pcmd_offset;
-	pginfo.linaddr = is_secs ? 0 : SGX_ENCL_PAGE_ADDR(encl_page);
-	pginfo.secs = (unsigned long)secs_ptr;
-
-	ret = __eldu((unsigned long)&pginfo, (unsigned long)epc_ptr,
-		     (unsigned long)va_ptr + va_offset);
-	if (ret) {
-		sgx_err(encl, "ELDU returned %d\n", ret);
-		ret = -EFAULT;
-	}
-
-	kunmap_atomic((void *)(unsigned long)(pginfo.pcmd - pcmd_offset));
-	kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
-	sgx_put_page(va_ptr);
-	sgx_put_page(epc_ptr);
-
-	if (!is_secs)
-		sgx_put_page(secs_ptr);
-
-	sgx_put_backing(pcmd, false);
-
-out:
-	sgx_put_backing(backing, false);
-
-	if (!ret) {
-		sgx_free_va_slot(encl_page->va_page, va_offset);
-		list_move(&encl_page->va_page->list, &encl->va_pages);
-		encl_page->desc &= ~SGX_VA_OFFSET_MASK;
-	}
-
-	return ret;
-}
-
-static struct sgx_encl_page *sgx_do_fault(struct vm_area_struct *vma,
-					  unsigned long addr,
-					  unsigned int flags)
-{
-	bool reserve = (flags & SGX_FAULT_RESERVE) != 0;
-	struct sgx_encl *encl = vma->vm_private_data;
-	struct sgx_encl_page *entry;
-	void *secs_epc_page = NULL;
-	void *epc_page = NULL;
-	int rc = 0;
-
-	/* If the process was forked, the VMA is still there but vm_private_data
-	 * is set to NULL.
-	 */
-	if (!encl)
-		return ERR_PTR(-EFAULT);
-
-	mutex_lock(&encl->lock);
-
-	entry = radix_tree_lookup(&encl->page_tree, addr >> PAGE_SHIFT);
-	if (!entry) {
-		rc = -EFAULT;
-		goto out;
-	}
-
-	if (encl->flags & SGX_ENCL_DEAD) {
-		rc = -EFAULT;
-		goto out;
-	}
-
-	if (!(encl->flags & SGX_ENCL_INITIALIZED)) {
-		sgx_dbg(encl, "cannot fault, uninitialized\n");
-		rc = -EFAULT;
-		goto out;
-	}
-
-	if (reserve && (entry->desc & SGX_ENCL_PAGE_RESERVED)) {
-		sgx_dbg(encl, "cannot fault, 0x%p is reserved\n",
-			(void *)SGX_ENCL_PAGE_ADDR(entry));
-		rc = -EBUSY;
-		goto out;
-	}
-
-	/* Legal race condition, page is already faulted. */
-	if (entry->desc & SGX_ENCL_PAGE_LOADED) {
-		if (reserve)
-			entry->desc |= SGX_ENCL_PAGE_RESERVED;
-		goto out;
-	}
-
-	epc_page = sgx_alloc_page(SGX_ALLOC_ATOMIC);
-	if (IS_ERR(epc_page)) {
-		rc = PTR_ERR(epc_page);
-		epc_page = NULL;
-		goto out;
-	}
-
-	/* If SECS is evicted then reload it first */
-	if (encl->flags & SGX_ENCL_SECS_EVICTED) {
-		secs_epc_page = sgx_alloc_page(SGX_ALLOC_ATOMIC);
-		if (IS_ERR(secs_epc_page)) {
-			rc = PTR_ERR(secs_epc_page);
-			secs_epc_page = NULL;
-			goto out;
-		}
-
-		rc = sgx_eldu(encl, &encl->secs, secs_epc_page, true);
-		if (rc)
-			goto out;
-
-		encl->secs.epc_page = secs_epc_page;
-		encl->flags &= ~SGX_ENCL_SECS_EVICTED;
-
-		/* Do not free */
-		secs_epc_page = NULL;
-	}
-
-	rc = sgx_eldu(encl, entry, epc_page, false /* is_secs */);
-	if (rc)
-		goto out;
-
-	/* Track the EPC page even if vm_insert_pfn fails; we need to ensure
-	 * the EPC page is properly freed and we can't do EREMOVE right away
-	 * because EREMOVE may fail due to an active cpu in the enclave.  We
-	 * can't call vm_insert_pfn before sgx_eldu because SKL signals #GP
-	 * instead of #PF if the EPC page is invalid.
-	 */
-	encl->secs_child_cnt++;
-
-	entry->epc_page = epc_page;
-	entry->desc |= SGX_ENCL_PAGE_LOADED;
-
-	if (reserve)
-		entry->desc |= SGX_ENCL_PAGE_RESERVED;
-
-	/* Do not free */
-	epc_page = NULL;
-	list_add_tail(&entry->list, &encl->load_list);
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
-	rc = vmf_insert_pfn(vma, addr, SGX_EPC_PFN(entry->epc_page));
-	if (rc != VM_FAULT_NOPAGE) {
-#else
-	rc = vm_insert_pfn(vma, addr, SGX_EPC_PFN(entry->epc_page));
-	if (rc) {
-#endif
-		/* Kill the enclave if vm_insert_pfn fails; failure only occurs
-		 * if there is a driver bug or an unrecoverable issue, e.g. OOM.
-		 */
-		sgx_crit(encl, "vm_insert_pfn returned %d\n", rc);
-		sgx_invalidate(encl, true);
-		goto out;
-	}
-	rc = 0;
-
-	sgx_test_and_clear_young(entry);
-out:
-	mutex_unlock(&encl->lock);
-	if (epc_page)
-		sgx_free_page(epc_page, encl);
-	if (secs_epc_page)
-		sgx_free_page(secs_epc_page, encl);
-	return rc ? ERR_PTR(rc) : entry;
-}
-
-struct sgx_encl_page *sgx_fault_page(struct vm_area_struct *vma,
-				     unsigned long addr,
-				     unsigned int flags)
-{
-	struct sgx_encl_page *entry;
-
-	do {
-		entry = sgx_do_fault(vma, addr, flags);
-		if (!(flags & SGX_FAULT_RESERVE))
-			break;
-	} while (PTR_ERR(entry) == -EBUSY);
-
-	return entry;
-}
-
-int sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus, void *hash)
-{
-	SHASH_DESC_ON_STACK(shash, tfm);
-
-	shash->tfm = tfm;
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0))
-        shash->tfm->base.crt_flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-#else
-        shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-#endif
-
-	return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
-}
-
-int sgx_get_key_hash_simple(const void *modulus, void *hash)
-{
-	struct crypto_shash *tfm;
-	int ret;
-
-	tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	ret = sgx_get_key_hash(tfm, modulus, hash);
-
-	crypto_free_shash(tfm);
-	return ret;
-}
diff --git a/ubuntu/sgx/sgx_version.h b/ubuntu/sgx/sgx_version.h
deleted file mode 100644
index 07e3f0e8c048..000000000000
--- a/ubuntu/sgx/sgx_version.h
+++ /dev/null
@@ -1,60 +0,0 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-
-#ifndef _SGX_VERSION_H
-#define _SGX_VERSION_H
-
-#define DRV_DESCRIPTION "Intel SGX Driver"
-#define DRV_VERSION "1.22"
-
-#endif /* _SGX_VERSION_H */
diff --git a/ubuntu/sgx/sgx_vma.c b/ubuntu/sgx/sgx_vma.c
deleted file mode 100644
index cff5082e0add..000000000000
--- a/ubuntu/sgx/sgx_vma.c
+++ /dev/null
@@ -1,242 +0,0 @@ 
-// This file is provided under a dual BSD/GPLv2 license.  When using or
-// redistributing this file, you may do so under either license.
-//
-// GPL LICENSE SUMMARY
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-// General Public License for more details.
-//
-// Contact Information:
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
-//
-// BSD LICENSE
-//
-// Copyright(c) 2016-2018 Intel Corporation.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-//   * Redistributions of source code must retain the above copyright
-//     notice, this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright
-//     notice, this list of conditions and the following disclaimer in
-//     the documentation and/or other materials provided with the
-//     distribution.
-//   * Neither the name of Intel Corporation nor the names of its
-//     contributors may be used to endorse or promote products derived
-//     from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors:
-//
-// Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-// Suresh Siddha <suresh.b.siddha@intel.com>
-// Serge Ayoun <serge.ayoun@intel.com>
-// Shay Katz-zamir <shay.katz-zamir@intel.com>
-// Sean Christopherson <sean.j.christopherson@intel.com>
-
-#include <asm/mman.h>
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <linux/hashtable.h>
-#include <linux/highmem.h>
-#include <linux/mm.h>
-#include <linux/ratelimit.h>
-#include <linux/shmem_fs.h>
-#include <linux/slab.h>
-#include <linux/version.h>
-#include "sgx.h"
-
-#ifndef ALIGN_DOWN
-#define ALIGN_DOWN(x, align_to) ((x) & ~((align_to)-1))
-#endif
-
-static void sgx_vma_open(struct vm_area_struct *vma)
-{
-	struct sgx_encl *encl = vma->vm_private_data;
-
-	if (!encl)
-		return;
-
-	/* kref cannot underflow because the ECREATE ioctl checks that there is
-	 * only a single VMA for the enclave before proceeding.
-	 */
-	kref_get(&encl->refcount);
-}
-
-static void sgx_vma_close(struct vm_area_struct *vma)
-{
-	struct sgx_encl *encl = vma->vm_private_data;
-
-	if (!encl)
-		return;
-
-	mutex_lock(&encl->lock);
-	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-	encl->flags |= SGX_ENCL_DEAD;
-	mutex_unlock(&encl->lock);
-	kref_put(&encl->refcount, sgx_encl_release);
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
-static unsigned int sgx_vma_fault(struct vm_fault *vmf)
-#else
-static int sgx_vma_fault(struct vm_fault *vmf)
-#endif
-{
-	struct vm_area_struct *vma = vmf->vma;
-	unsigned long addr = (unsigned long)vmf->address;
-	struct sgx_encl_page *entry;
-
-	entry = sgx_fault_page(vma, addr, 0);
-
-	if (!IS_ERR(entry) || PTR_ERR(entry) == -EBUSY)
-		return VM_FAULT_NOPAGE;
-	else
-		return VM_FAULT_SIGBUS;
-}
-
-static int sgx_edbgrd(struct sgx_encl *encl, struct sgx_encl_page *page,
-		      unsigned long addr, void *data)
-{
-	unsigned long offset;
-	void *ptr;
-	int ret;
-
-	offset = addr & ~PAGE_MASK;
-
-	if ((page->desc & SGX_ENCL_PAGE_TCS) &&
-	    (offset + sizeof(unsigned long)) >
-	    offsetof(struct sgx_tcs, reserved))
-		return -ECANCELED;
-
-	ptr = sgx_get_page(page->epc_page);
-	ret = __edbgrd((unsigned long)ptr + offset, data);
-	sgx_put_page(ptr);
-	if (ret) {
-		sgx_dbg(encl, "EDBGRD returned %d\n", ret);
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-static int sgx_edbgwr(struct sgx_encl *encl, struct sgx_encl_page *page,
-		      unsigned long addr, void *data)
-{
-	unsigned long offset;
-	void *ptr;
-	int ret;
-
-	offset = addr & ~PAGE_MASK;
-
-	/* Writing anything other than flags will cause #GP */
-	if ((page->desc & SGX_ENCL_PAGE_TCS) &&
-		(offset < offsetof(struct sgx_tcs, flags)))
-		return -ECANCELED;
-
-	ptr = sgx_get_page(page->epc_page);
-	ret = __edbgwr((unsigned long)ptr + offset, data);
-	sgx_put_page(ptr);
-	if (ret) {
-		sgx_dbg(encl, "EDBGWR returned %d\n", ret);
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
-			  void *buf, int len, int write)
-{
-	struct sgx_encl *encl = vma->vm_private_data;
-	struct sgx_encl_page *entry = NULL;
-	unsigned long align;
-	char data[sizeof(unsigned long)];
-	int offset;
-	int cnt;
-	int ret = 0;
-	int i;
-
-	/* If the process was forked, the VMA is still there but vm_private_data
-	 * is set to NULL.
-	 */
-	if (!encl)
-		return -EFAULT;
-
-	if (!(encl->flags & SGX_ENCL_DEBUG) ||
-	    !(encl->flags & SGX_ENCL_INITIALIZED) ||
-	    (encl->flags & SGX_ENCL_DEAD))
-		return -EFAULT;
-
-	for (i = 0; i < len; i += cnt) {
-		if (!entry || !((addr + i) & (PAGE_SIZE - 1))) {
-			if (entry)
-				entry->desc &= ~SGX_ENCL_PAGE_RESERVED;
-
-			entry = sgx_fault_page(vma, (addr + i) & PAGE_MASK,
-					       SGX_FAULT_RESERVE);
-			if (IS_ERR(entry)) {
-				ret = PTR_ERR(entry);
-				entry = NULL;
-				break;
-			}
-		}
-
-		/* Locking is not needed because only immutable fields of the
-		 * page are accessed and page itself is reserved so that it
-		 * cannot be swapped out in the middle.
-		 */
-
-		align = ALIGN_DOWN(addr + i, sizeof(unsigned long));
-		offset = (addr + i) & (sizeof(unsigned long) - 1);
-		cnt = sizeof(unsigned long) - offset;
-		cnt = min(cnt, len - i);
-
-		ret = sgx_edbgrd(encl, entry, align, data);
-		if (ret)
-			break;
-
-		if (write) {
-			memcpy(data + offset, buf + i, cnt);
-			ret = sgx_edbgwr(encl, entry, align, data);
-			if (ret)
-				break;
-		} else {
-			memcpy(buf + i, data + offset, cnt);
-		}
-	}
-
-	if (entry)
-		entry->desc &= ~SGX_ENCL_PAGE_RESERVED;
-
-	return (ret < 0 && ret != -ECANCELED) ? ret : i;
-}
-
-const struct vm_operations_struct sgx_vm_ops = {
-	.close = sgx_vma_close,
-	.open = sgx_vma_open,
-	.fault = sgx_vma_fault,
-	.access = sgx_vma_access,
-};
diff --git a/ubuntu/sgx/version.h b/ubuntu/sgx/version.h
new file mode 100644
index 000000000000..f3f6698695b0
--- /dev/null
+++ b/ubuntu/sgx/version.h
@@ -0,0 +1,10 @@ 
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2016-18 Intel Corporation.
+
+#ifndef _SGX_VERSION_H
+#define _SGX_VERSION_H
+
+#define DRV_DESCRIPTION "Intel SGX DCAP Driver"
+#define DRV_VERSION "1.33"
+
+#endif /* _SGX_VERSION_H */