
fixup! [Pal/Linux-SGX] DONTMERGE: Initial EDMM dynamic heap implementation
vijaydhanraj committed Mar 12, 2021
1 parent 5dbe5b4 commit 43fd00f
Showing 10 changed files with 77 additions and 74 deletions.
2 changes: 1 addition & 1 deletion Documentation/manifest-syntax.rst
@@ -381,7 +381,7 @@ creating the enclave but allocated dynamically using EACCEPT when Graphene
requests more heap memory. This triggers a page fault (#PF) which is handled by
the Intel SGX driver (legacy driver) by EAUGing the page and returning the
control back to the enclave. The enclave now continues from the same EACCEPT
instruction (but this time this instruction succeeds).
instruction (but this time the instruction succeeds).
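A hedged sketch of this allocate-on-demand flow from the enclave's side, using the `sgx_accept()` wrapper from `Pal/src/host/Linux-SGX/sgx_api.h`; the exact SECINFO flag set (in particular `SGX_SECINFO_FLAGS_PENDING`) is an assumption, not code from this commit:

```c
/* Sketch: accept a range of dynamically added heap pages. Each EACCEPT on a
 * missing page triggers a #PF; the driver EAUGs the page and the faulting
 * instruction is restarted and succeeds. SGX_SECINFO_FLAGS_PENDING is an
 * assumed flag for freshly EAUGed pages. */
static int accept_new_heap_pages(void* start, size_t size) {
    __sgx_mem_aligned sgx_arch_sec_info_t secinfo = {
        .flags = SGX_SECINFO_FLAGS_R | SGX_SECINFO_FLAGS_W | SGX_SECINFO_FLAGS_REG |
                 SGX_SECINFO_FLAGS_PENDING,
    };
    for (char* page = start; page < (char*)start + size; page += g_page_size) {
        if (sgx_accept(&secinfo, page))
            return -1;
    }
    return 0;
}
```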

One of the key advantages of EDMM is that the enclave ends up using only the
EPC pages that it requires and the user does not need to tailor the enclave
2 changes: 1 addition & 1 deletion LibOS/shim/test/regression/test_libos.py
@@ -399,7 +399,7 @@ def test_043_futex_wake_op(self):
@unittest.skipIf(HAS_SGX,
'On SGX, SIGBUS isn\'t always implemented correctly, for lack '
'of memory protection. For now, some of these cases won\'t work.')
def test_051_mmap_sgx(self):
def test_051_mmap(self):
stdout, _ = self.run_binary(['mmap_file'], timeout=60)

# Private mmap beyond file range
3 changes: 3 additions & 0 deletions Pal/src/host/Linux-SGX/db_main.c
@@ -718,6 +718,9 @@ noreturn void pal_linux_main(char* uptr_libpal_uri, size_t libpal_uri_len, char*
ocall_exit(1, true);
}
if (!g_pal_sec.edmm_enable_heap && preheat_enclave == 1) {
log_warning("EDMM ('sgx.edmm_enable_heap') and preheat-enclave ('sgx.preheat_enclave') are"
" both enabled. Graphene will use EDMM only for the region excluded by"
" preheat-enclave size.");
for (uint8_t* i = g_pal_sec.heap_min; i < (uint8_t*)g_pal_sec.heap_max; i += g_page_size)
READ_ONCE(*(size_t*)i);
}
103 changes: 52 additions & 51 deletions Pal/src/host/Linux-SGX/enclave_pages.c
@@ -88,19 +88,19 @@ int init_enclave_pages(void) {
return 0;
}

/* This function trims an EPC page on enclave's request. The sequence is as below,
/* This function trims EPC pages at the enclave's request. The sequence is as follows:
* 1. Enclave calls SGX driver IOCTL to change the page's type to PT_TRIM.
* 2. In turn driver invokes ETRACK to track page's address on all processors and issues IPI to flush
* stale TLB entries.
* 3. Enclave issues an EACCEPT to accept changes to the EPC page.
* 4. Notifies the driver to remove EPC page which issues EREMOVE inst to complete the request. */
* 2. Driver invokes ETRACK to track the page's address on all CPUs and issues IPIs to flush
* stale TLB entries.
* 3. Enclave issues an EACCEPT to accept changes to each EPC page.
* 4. Enclave notifies the driver to remove EPC pages (using an IOCTL).
* 5. Driver issues EREMOVE to complete the request. */
static int free_edmm_page_range(void* start, size_t size) {
void* addr = ALLOC_ALIGN_DOWN_PTR(start);
void* end = addr + size;
void* end = (void*)((char*)addr + size);
int ret = 0;
log_debug("%s: start = %p, size = %lx\n", __func__, start, size);
__sgx_mem_aligned sgx_arch_sec_info_t secinfo;

__sgx_mem_aligned sgx_arch_sec_info_t secinfo;
secinfo.flags = SGX_SECINFO_FLAGS_TRIM | SGX_SECINFO_FLAGS_MODIFIED;
memset(&secinfo.reserved, 0, sizeof(secinfo.reserved));

@@ -111,7 +111,8 @@ static int free_edmm_page_range(void* start, size_t size) {
return ret;
}

for (void* page_addr = addr; page_addr < end; page_addr += g_pal_state.alloc_align) {
for (void* page_addr = addr; page_addr < end;
page_addr = (void*)((char*)page_addr + g_pal_state.alloc_align)) {
ret = sgx_accept(&secinfo, page_addr);
if (ret) {
log_debug("EDMM accept page failed while trimming: %p %d\n", page_addr, ret);
@@ -128,18 +129,17 @@ static int free_edmm_page_range(void* start, size_t size) {
return 0;
}

/* This function allocates a new page at an address in ELRANGE of an enclave. If the page contains
* executable code, the page permissions are extended once the page is in a valid state. The
* allocation sequence is described below
* 1. Enclave invokes EACCEPT on the new page request which triggers a page fault(#PF) as the page
/* This function allocates EPC pages within ELRANGE of an enclave. If EPC pages contain
* executable code, page permissions are extended once the page is in a valid state. The
* allocation sequence is described below:
* 1. Enclave invokes EACCEPT on a new page request which triggers a page fault (#PF) as the page
* is not available yet.
* 2. Driver catches this #PF and issues EAUG for the page. The control returns back to enclave.
* 2. Driver catches this #PF and issues EAUG for the page (at this point the page becomes VALID and
* may be used by the enclave). Control then returns to the enclave.
* 3. Enclave continues the same EACCEPT and the instruction succeeds this time. */
static int get_edmm_page_range(void* start, size_t size, bool executable) {
uintptr_t lo = (uintptr_t)start;
uintptr_t addr = lo + size;
log_debug("%s: start = %p, size = %lx, is_executable = %s\n", __func__, start, size,
(executable) ? "TRUE" : "FALSE");
void* lo = start;
void* addr = (void*)((char*)lo + size);

__sgx_mem_aligned sgx_arch_sec_info_t secinfo;
secinfo.flags = SGX_SECINFO_FLAGS_R | SGX_SECINFO_FLAGS_W | SGX_SECINFO_FLAGS_REG |
@@ -148,24 +148,21 @@ static int get_edmm_page_range(void* start, size_t size, bool executable) {

while (lo < addr) {
int ret;
addr -= g_pal_state.alloc_align;
addr = (void*)((char*)addr - g_pal_state.alloc_align);

ret = sgx_accept(&secinfo, (const void*)addr);
ret = sgx_accept(&secinfo, addr);
if (ret) {
log_debug("EDMM accept page failed: %p %d\n", (void*)addr, ret);
log_debug("EDMM accept page failed: %p %d\n", addr, ret);
return -1;
}

/* All new pages will have RW permissions initially, so after EAUG/EACCEPT, extend
* permission of VALID enclave page. Supplying a value that does not extend the page
* permissions will have no effect.
* Note: Page becomes valid only after EUG which will be done as part of previous sgx_accept
* call. */
* permission of a VALID enclave page (if needed). */
if (executable) {
__sgx_mem_aligned sgx_arch_sec_info_t secinfo_extend = secinfo;

secinfo_extend.flags |= SGX_SECINFO_FLAGS_X;
sgx_modpe(&secinfo_extend, (const void*)addr);
sgx_modpe(&secinfo_extend, addr);
}
}

@@ -174,8 +171,7 @@ static int get_edmm_page_range(void* start, size_t size, bool executable) {

static void* __create_vma_and_merge(void* addr, size_t size, bool is_pal_internal,
struct heap_vma* vma_above,
struct edmm_heap_range* unallocated_heap) {
struct heap_vma* vma_current = NULL;
struct edmm_heap_range* heap_ranges_to_alloc) {
assert(_DkInternalIsLocked(&g_heap_vma_lock));
assert(addr && size);

@@ -186,7 +182,6 @@ static void* __create_vma_and_merge(void* addr, size_t size, bool is_pal_interna
struct heap_vma* vma_below;
if (vma_above) {
vma_below = LISTP_NEXT_ENTRY(vma_above, &g_heap_vma_list, list);
vma_current = vma_below;
} else {
/* no VMA above `addr`; VMA right below `addr` must be the first (highest-address) in list */
vma_below = LISTP_FIRST_ENTRY(&g_heap_vma_list, struct heap_vma, list);
@@ -227,35 +222,37 @@ static void* __create_vma_and_merge(void* addr, size_t size, bool is_pal_interna
* (2) start from `vma_below` and iterate through VMAs with lower-addresses for merges.
* Note that we never merge normal VMAs with pal-internal VMAs. */
int free_cnt = 0;
void* current_top = (vma_below) ? MAX(vma_below->top, vma->bottom) : vma->bottom;
while (vma_above && vma_above->bottom <= vma->top &&
vma_above->is_pal_internal == vma->is_pal_internal) {
/* newly created VMA grows into above VMA; expand newly created VMA and free above-VMA */
freed += vma_above->top - vma_above->bottom;
struct heap_vma* vma_above_above = LISTP_PREV_ENTRY(vma_above, &g_heap_vma_list, list);

/* Track free space between VMAs while merging `vma_above`.
* Note: We don't track free VMAs while merging `vma_below` as it will never happen given
* the condition `vma_below->top >= vma->bottom` */
if (g_pal_sec.edmm_enable_heap && vma_current && addr < vma_current->top) {
int64_t free_size = vma_above->bottom - vma_current->top;
assert(free_size > 0);
/* Track free space between VMAs while merging `vma_above`. */
if (g_pal_sec.edmm_enable_heap && vma_above->bottom > current_top) {
size_t free_size = vma_above->bottom - current_top;
if (free_size) {
unallocated_heap[free_cnt].size = free_size;
unallocated_heap[free_cnt].addr = vma_current->top;
assert(free_cnt < EDMM_HEAP_RANGE_CNT);
heap_ranges_to_alloc[free_cnt].size = free_size;
heap_ranges_to_alloc[free_cnt].addr = current_top;
free_cnt++;
log_debug("%s: free region while merging vma_above, addr=%p size=0x%lx\n",
__func__, vma_current->top, free_size);
__func__, current_top, free_size);
}
}

vma->bottom = MIN(vma_above->bottom, vma->bottom);
vma->top = MAX(vma_above->top, vma->top);
LISTP_DEL(vma_above, &g_heap_vma_list, list);

/* Store vma_above->top to check for any free region between vma_above->top and
* vma_above_above->bottom. */
if (g_pal_sec.edmm_enable_heap)
current_top = vma_above->top;

__free_vma(vma_above);
vma_above = vma_above_above;
if (g_pal_sec.edmm_enable_heap)
vma_current = vma;
}

while (vma_below && vma_below->top >= vma->bottom &&
@@ -282,10 +279,11 @@ static void* __create_vma_and_merge(void* addr, size_t size, bool is_pal_interna

assert(vma->top - vma->bottom >= (ptrdiff_t)freed);
size_t allocated = vma->top - vma->bottom - freed;

/* No free space between VMAs found */
if (g_pal_sec.edmm_enable_heap && free_cnt == 0 && allocated > 0) {
unallocated_heap[0].size = size;
unallocated_heap[0].addr = addr;
heap_ranges_to_alloc[0].size = allocated;
heap_ranges_to_alloc[0].addr = addr;
}

__atomic_add_fetch(&g_allocated_pages.counter, allocated / g_page_size, __ATOMIC_SEQ_CST);
@@ -301,8 +299,9 @@ static void* __create_vma_and_merge(void* addr, size_t size, bool is_pal_interna
void* get_enclave_pages(void* addr, size_t size, bool is_pal_internal) {
void* ret = NULL;
/* TODO: Should we introduce a compiler switch for EDMM? */
struct edmm_heap_range unallocated_heap[EDMM_HEAP_RANGE_CNT] = {0};
struct edmm_heap_range heap_ranges_to_alloc[EDMM_HEAP_RANGE_CNT] = {0};

log_debug("%s: edmm alloc start_addr = %p, size = %lx\n", __func__, addr, size);
if (!size)
return NULL;

@@ -333,15 +332,15 @@ void* get_enclave_pages(void* addr, size_t size, bool is_pal_internal) {
}
vma_above = vma;
}
ret = __create_vma_and_merge(addr, size, is_pal_internal, vma_above, unallocated_heap);
ret = __create_vma_and_merge(addr, size, is_pal_internal, vma_above, heap_ranges_to_alloc);
} else {
/* caller did not specify address; find first (highest-address) empty slot that fits */
void* vma_above_bottom = g_heap_top;

LISTP_FOR_EACH_ENTRY(vma, &g_heap_vma_list, list) {
if (vma->top < vma_above_bottom - size) {
ret = __create_vma_and_merge(vma_above_bottom - size, size, is_pal_internal,
vma_above, unallocated_heap);
vma_above, heap_ranges_to_alloc);
goto out;
}
vma_above = vma;
@@ -351,18 +350,18 @@ void* get_enclave_pages(void* addr, size_t size, bool is_pal_internal) {
/* corner case: there may be enough space between heap bottom and the lowest-address VMA */
if (g_heap_bottom < vma_above_bottom - size)
ret = __create_vma_and_merge(vma_above_bottom - size, size, is_pal_internal, vma_above,
unallocated_heap);
heap_ranges_to_alloc);
}

out:
/* In order to prevent already accepted pages from being accepted again, we track EPC pages that
* aren't accepted yet (unallocated heap) and call EACCEPT only on those EPC pages. */
if (g_pal_sec.edmm_enable_heap && ret != NULL) {
for (int cnt = 0; cnt < EDMM_HEAP_RANGE_CNT; cnt++) {
log_debug("%s: edmm alloc start_addr = %p, size = %lx\n", __func__,
unallocated_heap[cnt].addr, unallocated_heap[cnt].size);
if (unallocated_heap[cnt].size > 0 &&
get_edmm_page_range(unallocated_heap[cnt].addr, unallocated_heap[cnt].size, 1) < 0) {
if (!heap_ranges_to_alloc[cnt].size)
break;
if (get_edmm_page_range(heap_ranges_to_alloc[cnt].addr,
heap_ranges_to_alloc[cnt].size, 1) < 0) {
ret = NULL;
break;
}
Expand All @@ -378,6 +377,7 @@ int free_enclave_pages(void* addr, size_t size) {
struct edmm_heap_range edmm_free_heap[EDMM_HEAP_RANGE_CNT] = {0};
int edmm_free_cnt = 0;

log_debug("%s: edmm free start_addr = %p, size = %lx\n", __func__, addr, size);
if (!size)
return -PAL_ERROR_NOMEM;

@@ -430,6 +430,7 @@ int free_enclave_pages(void* addr, size_t size) {
edmm_free_heap[edmm_free_cnt-1].addr = start_addr;
edmm_free_heap[edmm_free_cnt-1].size += range;
} else {
assert(edmm_free_cnt < EDMM_HEAP_RANGE_CNT);
/* found a new non-contiguous range */
edmm_free_heap[edmm_free_cnt].addr = start_addr;
edmm_free_heap[edmm_free_cnt].size = range;
@@ -471,7 +472,7 @@ int free_enclave_pages(void* addr, size_t size) {
out:
if (ret >=0 && g_pal_sec.edmm_enable_heap) {
for (int free_cnt = 0; free_cnt < edmm_free_cnt; free_cnt++) {
log_debug("%s: edmm free start_addr = %p, size = %lx\n", __func__,
log_debug("%s: edmm actual free addr = %p, size = %lx\n", __func__,
edmm_free_heap[free_cnt].addr, edmm_free_heap[free_cnt].size);

if (free_edmm_page_range(edmm_free_heap[free_cnt].addr,
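The subtle part of the `enclave_pages.c` change is the bookkeeping in `__create_vma_and_merge()`: while merging VMAs it records which sub-ranges of the request are not yet backed by accepted EPC pages, so that `get_enclave_pages()` calls EACCEPT only on those. A stripped-down, self-contained illustration of that gap computation (hypothetical code; ranges here are sorted by ascending address, unlike the descending order of the real VMA list):

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_GAPS 4 /* stands in for EDMM_HEAP_RANGE_CNT */

struct range { size_t addr, size; };

/* Given occupied ranges sorted by ascending address, record the holes inside
 * [lo, hi) that would still need EACCEPT. Returns the number of holes. */
static int find_gaps(size_t lo, size_t hi, const struct range* occ, int n,
                     struct range* gaps) {
    int cnt = 0;
    size_t cur = lo;
    for (int i = 0; i < n && cur < hi; i++) {
        size_t bottom = occ[i].addr;
        size_t top    = occ[i].addr + occ[i].size;
        if (bottom > cur) {
            assert(cnt < MAX_GAPS);
            gaps[cnt].addr = cur;
            gaps[cnt].size = (bottom < hi ? bottom : hi) - cur;
            cnt++;
        }
        if (top > cur)
            cur = top;
    }
    if (cur < hi) {
        assert(cnt < MAX_GAPS);
        gaps[cnt].addr = cur;
        gaps[cnt].size = hi - cur;
        cnt++;
    }
    return cnt;
}

int main(void) {
    /* Request [0x1000, 0x9000) while [0x2000, 0x3000) and [0x5000, 0x7000)
     * are already accepted: expect three gaps. */
    struct range occ[] = { { 0x2000, 0x1000 }, { 0x5000, 0x2000 } };
    struct range gaps[MAX_GAPS];
    int n = find_gaps(0x1000, 0x9000, occ, 2, gaps);
    for (int i = 0; i < n; i++)
        printf("gap: 0x%zx + 0x%zx\n", gaps[i].addr, gaps[i].size);
    return 0;
}
```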
4 changes: 2 additions & 2 deletions Pal/src/host/Linux-SGX/ocall_types.h
@@ -313,8 +313,8 @@ typedef struct {
} ms_ocall_get_quote_t;

typedef struct {
unsigned long start_addr;
unsigned int nr_pages;
unsigned long start_addr;
unsigned int nr_pages;
} ms_ocall_sgx_range_t ;

#pragma pack(pop)
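The `ms_ocall_sgx_range_t` change above is re-indentation only. For context, a hypothetical helper showing how such an OCALL argument could be populated for a trim request; the corresponding OCALL wrapper is not part of this diff, only the field names come from the struct above:

```c
/* Hypothetical: package a page range for the untrusted runtime, e.g. so it
 * can ask the driver to EREMOVE trimmed pages. */
static void fill_sgx_range(ms_ocall_sgx_range_t* ms, void* addr, size_t size) {
    ms->start_addr = (unsigned long)addr; /* page-aligned start address */
    ms->nr_pages   = size / g_page_size;  /* whole pages in [addr, addr + size) */
}
```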
4 changes: 2 additions & 2 deletions Pal/src/host/Linux-SGX/sgx_api.h
@@ -54,7 +54,7 @@ static inline int64_t sgx_getkey(sgx_key_request_t* keyrequest, sgx_key_128bit_t
/*!
* \brief Low-level wrapper around EACCEPT instruction leaf.
*
* Caller is responsible for parameter alignment: 64B for `si` and 4KB(page size) for `addr`.
* Caller is responsible for parameter alignment: 64B for `si` and 4KB (page size) for `addr`.
*/
static inline int64_t sgx_accept(sgx_arch_sec_info_t* si, const void* addr) {
int64_t rax = EACCEPT;
@@ -69,7 +69,7 @@ static inline int64_t sgx_accept(sgx_arch_sec_info_t* si, const void* addr) {
/*!
* \brief Low-level wrapper around EMODPE instruction leaf.
*
* Caller is responsible for parameter alignment: 64B for `si` and 4KB(page size) for `addr`.
* Caller is responsible for parameter alignment: 64B for `si` and 4KB (page size) for `addr`.
*/
static inline int64_t sgx_modpe(sgx_arch_sec_info_t* si, const void* addr) {
int64_t rax = EMODPE;
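Both wrappers share the same alignment contract; a minimal sketch of a conforming call site, mirroring how `enclave_pages.c` above sets things up (`ALLOC_ALIGN_DOWN_PTR` and the TRIM flag combination are taken from that file, the helper itself is illustrative):

```c
/* Sketch: __sgx_mem_aligned provides the 64B alignment EACCEPT requires for
 * the SECINFO operand; the target address is rounded down to a page boundary. */
static int accept_trimmed_page(void* ptr) {
    __sgx_mem_aligned sgx_arch_sec_info_t si;
    si.flags = SGX_SECINFO_FLAGS_TRIM | SGX_SECINFO_FLAGS_MODIFIED;
    memset(&si.reserved, 0, sizeof(si.reserved));

    void* page = ALLOC_ALIGN_DOWN_PTR(ptr); /* 4KB-align, as free_edmm_page_range() does */
    return sgx_accept(&si, page) ? -1 : 0;
}
```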
19 changes: 12 additions & 7 deletions Pal/src/host/Linux-SGX/sgx_framework.c
@@ -199,6 +199,15 @@ int create_enclave(sgx_arch_secs_t* secs, sgx_arch_token_t* token) {
return 0;
}

void prot_flags_to_permissions_str(char* p, int prot) {
if (prot & PROT_READ)
p[0] = 'R';
if (prot & PROT_WRITE)
p[1] = 'W';
if (prot & PROT_EXEC)
p[2] = 'X';
}

int add_pages_to_enclave(sgx_arch_secs_t* secs, void* addr, void* user_addr, unsigned long size,
enum sgx_page_type type, int prot, bool skip_eextend,
const char* comment) {
@@ -236,17 +245,13 @@
break;
}

char p[4] = "---";

const char* t = (type == SGX_PAGE_TCS) ? "TCS" : "REG";
const char* m = skip_eextend ? "" : " measured";

char p[4] = "---";
if (type == SGX_PAGE_REG) {
if (prot & PROT_READ)
p[0] = 'R';
if (prot & PROT_WRITE)
p[1] = 'W';
if (prot & PROT_EXEC)
p[2] = 'X';
prot_flags_to_permissions_str(p, prot);
}

if (size == g_page_size)
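The hoisted `prot_flags_to_permissions_str()` helper keeps the permission-string logic in one place (it is reused from `sgx_main.c` below); a minimal usage example:

```c
char p[4] = "---"; /* buffer must start as "---": the helper only sets characters */
prot_flags_to_permissions_str(p, PROT_READ | PROT_EXEC);
/* p now reads "R-X" */
```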
1 change: 1 addition & 0 deletions Pal/src/host/Linux-SGX/sgx_internal.h
@@ -85,6 +85,7 @@ struct pal_enclave {

extern struct pal_enclave g_pal_enclave;

void prot_flags_to_permissions_str(char* p, int prot);
int open_sgx_driver(bool need_gsgx);
bool is_wrfsbase_supported(void);

11 changes: 2 additions & 9 deletions Pal/src/host/Linux-SGX/sgx_main.c
@@ -455,17 +455,10 @@ static int initialize_enclave(struct pal_enclave* enclave, const char* manifest_
assert(areas[i].data_src == ZERO);
}

/* skip adding free (heap) pages to the enclave */
/* skip adding free (heap) pages to the enclave if EDMM is enabled */
if (enclave->pal_sec.edmm_enable_heap && !strcmp(areas[i].desc, "free")) {
char p[4] = "---";
if (areas[i].type == SGX_PAGE_REG) {
if (areas[i].prot & PROT_READ)
p[0] = 'R';
if (areas[i].prot & PROT_WRITE)
p[1] = 'W';
if (areas[i].prot & PROT_EXEC)
p[2] = 'X';
}
prot_flags_to_permissions_str(p, areas[i].prot);
urts_log_debug("SKIP adding pages to enclave: %p-%p [%s:%s] (%s)%s\n",
(void *)areas[i].addr,
(void *)areas[i].addr + areas[i].size,
2 changes: 1 addition & 1 deletion python/graphenelibos/sgx_sign.py
@@ -525,7 +525,7 @@ def load_file(digest, file, offset, addr, filesize, memsize, desc, flags):
load_file(mrenclave, file, offset, baseaddr_ + addr, filesize, memsize,
desc, flags)
else:
# Skip EADDing of heap("free") pages when EDMM is enabled.
# Skip EADDing of heap ("free") pages when EDMM is enabled.
if edmm_enable_heap == 1 and area.desc == "free":
continue
for addr in range(area.addr, area.addr + area.size, offs.PAGESIZE):
