Interrupt unfinished Enclu on main enclave thread exit
Max K committed Jul 19, 2023
1 parent 3bf4f73 commit 8268018
Showing 2 changed files with 145 additions and 68 deletions.
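At its core, the change threads a shared `exiting` flag (`Arc<AtomicBool>`) from `EnclaveState` into every worker's enclave entry: a worker keeps re-entering the enclave only while the flag is clear and returns the new `CoResult::Abort` once it is set. Below is a minimal, standalone sketch of that flag-and-loop pattern only; it uses no SGX and no signals, and `fake_enclave_step` is a hypothetical stand-in for one ENCLU re-entry, not code from this commit.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

// Hypothetical stand-in for one ENCLU re-entry: returns true once the
// "enclave" has finished its work on its own.
fn fake_enclave_step(iterations_left: &mut u32) -> bool {
    thread::sleep(Duration::from_millis(10));
    *iterations_left = iterations_left.saturating_sub(1);
    *iterations_left == 0
}

fn main() {
    let exiting = Arc::new(AtomicBool::new(false));

    let worker = {
        let exiting = Arc::clone(&exiting);
        thread::spawn(move || {
            let mut iterations_left = u32::MAX; // effectively never finishes on its own
            // Keep re-entering until the work finishes or the runner asks us to stop,
            // mirroring the `while !exiting.load(..) && enclu_leaf != EEXIT` loop in tcs.rs.
            while !exiting.load(Ordering::SeqCst) && !fake_enclave_step(&mut iterations_left) {}
            if exiting.load(Ordering::SeqCst) {
                println!("worker: aborted because the runner is exiting");
            }
        })
    };

    // Main thread decides to shut down and tells the worker to stop re-entering.
    thread::sleep(Duration::from_millis(50));
    exiting.store(true, Ordering::SeqCst);
    worker.join().unwrap();
}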
147 changes: 88 additions & 59 deletions intel-sgx/enclave-runner/src/tcs.rs
@@ -14,13 +14,21 @@ use std::os::raw::c_void;

use sgx_isa::Enclu;
use sgxs::loader::Tcs;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::sync::atomic::Ordering;

thread_local! {
pub static EXITING: RefCell<Option<Arc<AtomicBool>>> = RefCell::new(None);
}

pub(crate) type DebugBuffer = [u8; 1024];

#[derive(Debug)]
pub enum CoResult<Y, R> {
Yield(Y),
Return(R),
Abort,
}

#[derive(Debug)]
@@ -39,58 +47,61 @@ impl<T: Tcs> Usercall<T> {
pub fn coreturn(
self,
retval: (u64, u64),
debug_buf: Option<&RefCell<DebugBuffer>>,
exiting: &Arc<AtomicBool>,
debug_buf: Option<&RefCell<DebugBuffer>>
) -> ThreadResult<T> {
coenter(self.tcs, 0, retval.0, retval.1, 0, 0, debug_buf)
coenter(self.tcs, 0, retval.0, retval.1, 0, 0, exiting, debug_buf)
}

pub fn tcs_address(&self) -> *mut c_void {
self.tcs.address()
}
}

pub(crate) fn coenter<T: Tcs>(
tcs: T,
mut p1: u64,
mut p2: u64,
mut p3: u64,
mut p4: u64,
mut p5: u64,
debug_buf: Option<&RefCell<DebugBuffer>>,
) -> ThreadResult<T> {
/// Check if __vdso_sgx_enter_enclave exists. We're using weak linkage, so
/// it might not.
#[cfg(target_os = "linux")]
fn has_vdso_sgx_enter_enclave() -> bool {
unsafe {
let addr: usize;
asm!("
/// Check if __vdso_sgx_enter_enclave exists. We're using weak linkage, so
/// it might not.
#[cfg(target_os = "linux")]
pub(crate) fn has_vdso_sgx_enter_enclave() -> bool {
unsafe {
let addr: usize;
asm!("
.weak __vdso_sgx_enter_enclave
.type __vdso_sgx_enter_enclave, function
mov __vdso_sgx_enter_enclave@GOTPCREL(%rip), {}
jmp 1f
// Strongly link to another symbol in the VDSO, so that the
// linker will include a DT_NEEDED entry for `linux-vdso.so.1`.
// This doesn't happen automatically because rustc passes
// `--as-needed` to the linker. This is never executed because
// of the unconditional jump above.
mov __vdso_sgx_enter_enclave@GOTPCREL(%rip), {}
jmp 1f
// Strongly link to another symbol in the VDSO, so that the
// linker will include a DT_NEEDED entry for `linux-vdso.so.1`.
// This doesn't happen automatically because rustc passes
// `--as-needed` to the linker. This is never executed because
// of the unconditional jump above.
.global __vdso_clock_gettime
.type __vdso_clock_gettime, function
call __vdso_clock_gettime@PLT
call __vdso_clock_gettime@PLT
1:
", out(reg) addr, options(nomem, nostack, att_syntax));
addr != 0
}
", out(reg) addr, options(nomem, nostack, att_syntax));
addr != 0
}
}

#[cfg(not(target_os = "linux"))]
fn has_vdso_sgx_enter_enclave() -> bool {
false
}
#[cfg(not(target_os = "linux"))]
pub(crate) fn has_vdso_sgx_enter_enclave() -> bool {
false
}

let sgx_result: u32;
pub(crate) fn coenter<T: Tcs>(
tcs: T,
mut p1: u64,
mut p2: u64,
mut p3: u64,
mut p4: u64,
mut p5: u64,
exiting: &Arc<AtomicBool>,
debug_buf: Option<&RefCell<DebugBuffer>>
) -> ThreadResult<T> {

let mut enclu_leaf = Enclu::EEnter as u32;

unsafe {
let mut uninit_debug_buf: std::mem::MaybeUninit<DebugBuffer>;
@@ -103,6 +114,7 @@ pub(crate) fn coenter<T: Tcs>(
}
};
if has_vdso_sgx_enter_enclave() {

#[repr(C)]
#[derive(Default)]
struct SgxEnclaveRun {
@@ -137,6 +149,9 @@ pub(crate) fn coenter<T: Tcs>(
..Default::default()
};
let ret: i32;

// Set the exiting flag in thread-local storage so the signal handler can check it if a signal arrives
EXITING.with(|cell| *cell.borrow_mut() = Some(exiting.clone()));
asm!("
sub $0x8, %rsp // align stack
push {} // push argument: run
@@ -162,11 +177,14 @@ pub(crate) fn coenter<T: Tcs>(
lateout("r15") _, // V
options(att_syntax)
);
// Enclave work is done - unset TLS variable
EXITING.with(|cell| { *cell.borrow_mut() = None;});

if ret == 0 {
sgx_result = run.function;
match sgx_result.try_into() {
Ok(Enclu::EExit) => { /* normal case */ },
Ok(Enclu::EResume) => {
enclu_leaf = run.function;
match enclu_leaf.try_into() {
Ok(Enclu::EExit) => { /* normal case, or enclave work was interrupted by SIGHUP */ },
Ok(Enclu::EResume) => { /* given the vdso code, this should never happen */
if let Some(mut debug_buf) = debug_buf {
let _ = write!(&mut debug_buf[..], "Enclave triggered exception: {:?}\0", run);
} else {
@@ -183,31 +201,42 @@ pub(crate) fn coenter<T: Tcs>(
panic!("Error entering enclave (VDSO): ret = {:?}, run = {:?}", std::io::Error::from_raw_os_error(-ret), run);
}
} else {
asm!("
lea 1f(%rip), %rcx // set SGX AEP
xchg {0}, %rbx
1: enclu
xchg %rbx, {0}
",
inout(reg) tcs.address() => _, // rbx is used internally by LLVM and cannot be used as an operand for inline asm (#84658)
inout("eax") Enclu::EEnter as u32 => sgx_result,
out("rcx") _,
inout("rdx") p3,
inout("rdi") p1,
inout("rsi") p2,
inout("r8") p4,
inout("r9") p5,
inout("r10") debug_buf_ptr => _,
out("r11") _,
options(nostack, att_syntax)
);
while !exiting.load(Ordering::SeqCst) && enclu_leaf != (Enclu::EExit as u32) {
asm!("
lea 1f(%rip), %rcx // set SGX AEP
push %rbx // rbx is used internally by LLVM and cannot be marked as clobbered, so save and restore it manually
mov {0}, %rbx
enclu
1: pop %rbx
",
inout(reg) tcs.address() => _, // rbx is used internally by LLVM and cannot be used as an operand for inline asm (#84658)
inout("eax") enclu_leaf as u32 => enclu_leaf,
out("rcx") _,
inout("rdx") p3,
inout("rdi") p1,
inout("rsi") p2,
inout("r8") p4,
inout("r9") p5,
inout("r10") debug_buf_ptr => _,
out("r11") _,
lateout("r12") _, // these may be clobbered in case of AEX
lateout("r13") _, // V
lateout("r14") _, // V
lateout("r15") _, // V
options(nostack, att_syntax)
);
}
}
};

if sgx_result != (Enclu::EExit as u32) {
panic!("Invalid return value in EAX! eax={}", sgx_result);
if exiting.load(Ordering::SeqCst) {
return CoResult::Abort
}

if enclu_leaf != (Enclu::EExit as u32) {
panic!("Invalid return value in EAX! eax={}", enclu_leaf);
}

if p1 == 0 {
CoResult::Return((tcs, p2, p3))
} else {
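Around each enclave entry, `tcs.rs` above also publishes the worker's `exiting` flag into the thread-local `EXITING` cell and clears it afterwards, so that a signal handler running on that thread can tell whether it interrupted enclave work. The following is a minimal non-SGX sketch of that publish/check/clear pattern; `run_guarded` and `thread_is_interruptible` are illustrative helpers, not part of the commit.

use std::cell::RefCell;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

thread_local! {
    // Mirrors tcs::EXITING: populated only while this thread is inside the enclave.
    static EXITING: RefCell<Option<Arc<AtomicBool>>> = RefCell::new(None);
}

// Hypothetical helper: publish the flag for the duration of `work`, then clear it.
fn run_guarded<R>(exiting: &Arc<AtomicBool>, work: impl FnOnce() -> R) -> R {
    EXITING.with(|cell| *cell.borrow_mut() = Some(Arc::clone(exiting)));
    let result = work();
    EXITING.with(|cell| *cell.borrow_mut() = None);
    result
}

// What a signal handler (or any code on this thread) would check: is the thread
// currently inside the guarded section, and has shutdown been requested?
fn thread_is_interruptible() -> bool {
    EXITING.with(|cell| {
        cell.borrow()
            .as_ref()
            .map_or(false, |flag| flag.load(Ordering::SeqCst))
    })
}

fn main() {
    let exiting = Arc::new(AtomicBool::new(true));
    let seen = run_guarded(&exiting, thread_is_interruptible);
    println!("flag visible inside guarded section: {seen}");
    println!("flag visible outside guarded section: {}", thread_is_interruptible());
}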
66 changes: 57 additions & 9 deletions intel-sgx/enclave-runner/src/usercalls/mod.rs
@@ -7,13 +7,15 @@
use std::alloc::{GlobalAlloc, Layout, System};
use std::cell::RefCell;
use std::collections::VecDeque;
use std::io::{self, ErrorKind as IoErrorKind, Read, Result as IoResult};
use std::io::{self, stderr, Write, ErrorKind as IoErrorKind, Read, Result as IoResult};
use std::pin::Pin;
use std::result::Result as StdResult;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll, Waker};
use std::thread::{self, JoinHandle};
#[cfg(unix)]
use std::os::unix::thread::JoinHandleExt;
use std::time::{self, Duration};
use std::{cmp, fmt, str};

@@ -38,9 +40,10 @@ use ipc_queue::{self, DescriptorGuard, Identified, QueueEvent};
use sgxs::loader::Tcs as SgxsTcs;

use crate::loader::{EnclavePanic, ErasedTcs};
use crate::tcs::{self, CoResult, ThreadResult};
use crate::tcs::{self, CoResult, ThreadResult, EXITING};
use self::abi::dispatch;
use self::interface::{Handler, OutputBuffer};
use sgx_isa::Enclu;

pub(crate) mod abi;
mod interface;
@@ -647,7 +650,7 @@ pub(crate) struct EnclaveState {
event_queues: FnvHashMap<TcsAddress, PendingEvents>,
fds: Mutex<FnvHashMap<Fd, Arc<AsyncFileDesc>>>,
last_fd: AtomicUsize,
exiting: AtomicBool,
exiting: Arc<AtomicBool>,
usercall_ext: Box<dyn UsercallExtension>,
threads_queue: crossbeam::queue::SegQueue<StoppedTcs>,
forward_panics: bool,
@@ -667,15 +670,15 @@ enum CoEntry {
}

impl Work {
fn do_work(self, io_send_queue: &mut tokio::sync::mpsc::UnboundedSender<UsercallSendData>) {
fn do_work(self, io_send_queue: &mut tokio::sync::mpsc::UnboundedSender<UsercallSendData>, exiting: &Arc<AtomicBool>) {
let buf = RefCell::new([0u8; 1024]);
let usercall_send_data = match self.entry {
CoEntry::Initial(erased_tcs, p1, p2, p3, p4, p5) => {
let coresult = tcs::coenter(erased_tcs, p1, p2, p3, p4, p5, Some(&buf));
let coresult = tcs::coenter(erased_tcs, p1, p2, p3, p4, p5, &exiting, Some(&buf));
UsercallSendData::Sync(coresult, self.tcs, buf)
}
CoEntry::Resume(usercall, coresult) => {
let coresult = usercall.coreturn(coresult, Some(&buf));
let coresult = usercall.coreturn(coresult, &exiting, Some(&buf));
UsercallSendData::Sync(coresult, self.tcs, buf)
}
};
@@ -739,7 +742,7 @@ impl EnclaveState {
event_queues,
fds: Mutex::new(fds),
last_fd,
exiting: AtomicBool::new(false),
exiting: Arc::new(AtomicBool::new(false)),
usercall_ext,
threads_queue,
forward_panics,
@@ -935,6 +938,9 @@ impl EnclaveState {
};
tokio::task::spawn_local(fut);
}
UsercallSendData::Sync(CoResult::Abort, _, _) => {
// enclave interruption - do nothing
}
};
}
unreachable!();
@@ -964,21 +970,28 @@
num_of_worker_threads: usize,
work_receiver: crossbeam::crossbeam_channel::Receiver<Work>,
io_queue_send: tokio::sync::mpsc::UnboundedSender<UsercallSendData>,
exiting: Arc<AtomicBool>
) -> Vec<JoinHandle<()>> {
let mut thread_handles = vec![];
for _ in 0..num_of_worker_threads {
let work_receiver = work_receiver.clone();
let mut io_queue_send = io_queue_send.clone();
let exiting = exiting.clone();

thread_handles.push(thread::spawn(move || {
while let Ok(work) = work_receiver.recv() {
work.do_work(&mut io_queue_send);
work.do_work(&mut io_queue_send, &exiting);
}
}));
}
thread_handles
}

#[cfg(unix)]
if tcs::has_vdso_sgx_enter_enclave() {
catch_sighup()
};

let (io_queue_send, io_queue_receive) = tokio::sync::mpsc::unbounded_channel();

let (work_sender, work_receiver) = crossbeam::crossbeam_channel::unbounded();
@@ -987,12 +1000,20 @@ impl EnclaveState {
.expect("Work sender couldn't send data to receiver");

let join_handlers =
create_worker_threads(num_of_worker_threads, work_receiver, io_queue_send.clone());
create_worker_threads(num_of_worker_threads, work_receiver, io_queue_send.clone(), enclave.exiting.clone());
// main syscall polling loop
let main_result =
EnclaveState::syscall_loop(enclave.clone(), io_queue_receive, io_queue_send, work_sender);

for handler in join_handlers {
#[cfg(unix)]
if tcs::has_vdso_sgx_enter_enclave() {
// The enclave thread may be stuck in a long-running AEX/ENCLU[ERESUME] loop.
// vdso case: control is returned to the enclave-runner worker thread by sending
// a signal and rewriting the IP in the signal handler.
unsafe { libc::pthread_kill(handler.as_pthread_t() as _, signal::SIGHUP as _); }
// non-vdso case: control returns via the AEP, which is set to the instruction right
// after ENCLU, breaking the AEX/ENCLU[ERESUME] loop (no signal required).
}
let _ = handler.join();
}
return main_result;
@@ -1214,6 +1235,33 @@ extern "C" fn handle_trap(_signo: c_int, _info: *mut siginfo_t, context: *mut c_
return;
}

#[cfg(unix)]
fn catch_sighup() {
unsafe {
extern "C" fn handle_sighup(_signo: c_int, _info: *mut siginfo_t, context: *mut c_void) {
eprintln!("SIGHUP triggered, thread_id: {:?}", std::thread::current().id());
let instruction_ptr = unsafe { (*(context as *mut ucontext_t)).uc_mcontext.gregs[REG_RIP as usize] as *const u8};
// enclu instruction code
const ENCLU: [u8; 3] = [0x0f, 0x01, 0xd7];
let is_enclu = ENCLU.iter().enumerate().all(|(idx, v)| {
unsafe { *instruction_ptr.offset(idx as isize) == *v }
});
if is_enclu && EXITING.with(|cell| cell.borrow().as_ref().is_some_and(|val| val.load(Ordering::SeqCst) == true)) {
// Interrupt enclave execution by setting IP to the instruction following the ENCLU, mimicking a normal ENCLU[EEXIT]
unsafe {
(*(context as *mut ucontext_t)).uc_mcontext.gregs[REG_RIP as usize] += ENCLU.len() as i64;
(*(context as *mut ucontext_t)).uc_mcontext.gregs[REG_RAX as usize] = Enclu::EExit as i64;
}
}
let _ = stderr().flush();
}

let hdl = signal::SigHandler::SigAction(handle_sighup);
let sig_action = signal::SigAction::new(hdl, signal::SaFlags::empty(), signal::SigSet::empty());
signal::sigaction(signal::SIGHUP, &sig_action).unwrap();
}
}

#[cfg(unix)]
/* Raising Sigtrap to allow debugger to take control.
* Here, we also store tcs in rbx, so that the debugger could read it, to
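The SIGHUP handler above only rewrites RIP and RAX when the thread's `EXITING` flag is set and the interrupted instruction really is ENCLU (encoded as `0f 01 d7`). Here is a standalone sketch of just that opcode check, run against local byte buffers rather than a real signal context; `points_at_enclu` is an illustrative helper, not part of the commit.

// ENCLU encoding, as checked in handle_sighup.
const ENCLU: [u8; 3] = [0x0f, 0x01, 0xd7];

/// Returns true if `ip` points at an ENCLU instruction.
///
/// Safety: `ip` must be valid for reading at least 3 bytes.
unsafe fn points_at_enclu(ip: *const u8) -> bool {
    std::slice::from_raw_parts(ip, ENCLU.len()) == &ENCLU[..]
}

fn main() {
    let enclu_bytes = ENCLU;
    let nop_bytes = [0x90u8, 0x90, 0x90];
    unsafe {
        assert!(points_at_enclu(enclu_bytes.as_ptr()));
        assert!(!points_at_enclu(nop_bytes.as_ptr()));
    }
    println!("only the 0f 01 d7 sequence is treated as ENCLU");
}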
