Merge pull request #14 from Meziu/tweak/remove-svcGetThreadList

Remove reliance on svcGetThreadList and do some refactoring
Meziu committed 3 years ago (via GitHub)
commit 2e5e1b5a1b
Changed files:

  1. src/condvar.rs      84
  2. src/lib.rs          748
  3. src/misc.rs         19
  4. src/mutex.rs        93
  5. src/rwlock.rs       171
  6. src/thread.rs       350
  7. src/thread/attr.rs  108
  8. src/thread_keys.rs  73

src/condvar.rs (84)

@@ -0,0 +1,84 @@
//! PThread condition variables implemented using libctru.
pub fn init() {}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_init(
cond: *mut libc::pthread_cond_t,
_attr: *const libc::pthread_condattr_t,
) -> libc::c_int {
ctru_sys::CondVar_Init(cond as _);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_signal(cond: *mut libc::pthread_cond_t) -> libc::c_int {
ctru_sys::CondVar_WakeUp(cond as _, 1);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_broadcast(cond: *mut libc::pthread_cond_t) -> libc::c_int {
ctru_sys::CondVar_WakeUp(cond as _, -1);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_wait(
cond: *mut libc::pthread_cond_t,
lock: *mut libc::pthread_mutex_t,
) -> libc::c_int {
ctru_sys::CondVar_Wait(cond as _, lock as _);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_timedwait(
cond: *mut libc::pthread_cond_t,
lock: *mut libc::pthread_mutex_t,
abstime: *const libc::timespec,
) -> libc::c_int {
// libctru expects a duration, but we have an absolute timestamp.
// Convert to a duration before calling libctru.
// Get the current time so we can make a duration
let mut now = libc::timeval {
tv_sec: 0,
tv_usec: 0,
};
let r = libc::gettimeofday(&mut now, std::ptr::null_mut());
if r != 0 {
return r;
}
// Calculate the duration
let duration_nsec = (*abstime)
.tv_sec
// Get the difference in seconds
.saturating_sub(now.tv_sec)
// Convert to nanoseconds
.saturating_mul(1_000_000_000)
// Add the difference in nanoseconds
.saturating_add((*abstime).tv_nsec as i64)
.saturating_sub(now.tv_usec as i64 * 1_000)
// Don't go negative
.max(0);
let r = ctru_sys::CondVar_WaitTimeout(cond as _, lock as _, duration_nsec);
// CondVar_WaitTimeout returns a boolean which is true (nonzero) if it timed out
if r == 0 {
0
} else {
libc::ETIMEDOUT
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_destroy(_cond: *mut libc::pthread_cond_t) -> libc::c_int {
0
}
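
The absolute-to-relative conversion in pthread_cond_timedwait above is easier to follow as a standalone function. The sketch below is editorial, not part of the diff; abs_to_relative_ns is a made-up name, and it assumes the same saturating i64 arithmetic as the code above.

fn abs_to_relative_ns(abs_sec: i64, abs_nsec: i64, now_sec: i64, now_usec: i64) -> i64 {
    abs_sec
        .saturating_sub(now_sec)          // whole seconds until the deadline
        .saturating_mul(1_000_000_000)    // converted to nanoseconds
        .saturating_add(abs_nsec)         // plus the deadline's nanosecond part
        .saturating_sub(now_usec * 1_000) // minus "now"'s sub-second part (us -> ns)
        .max(0)                           // clamp: never pass a negative duration
}

// Example: deadline at t = 1.5 s, current time t = 1.2 s
//   => (1 - 1) * 1e9 + 500_000_000 - 200_000 * 1_000 = 300_000_000 ns to wait.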

src/lib.rs (748)

@@ -1,745 +1,25 @@

The new src/lib.rs keeps only the crate attributes, the module declarations, and an init() that forces each new module to link:

#![feature(thread_local)]
#![feature(const_btree_new)]
#![feature(let_else)]
#![allow(non_camel_case_types)]
#![allow(clippy::missing_safety_doc)]

mod condvar;
mod misc;
mod mutex;
mod rwlock;
mod thread;
mod thread_keys;

/// Call this somewhere to force Rust to link this module.
/// The call doesn't need to execute, just exist.
///
/// See https://github.com/rust-lang/rust/issues/47384
pub fn init() {
    condvar::init();
    misc::init();
    mutex::init();
    rwlock::init();
    thread::init();
    thread_keys::init();
}

The rest of the old lib.rs, below, was removed; its contents were split up and reworked into the new modules shown further down.

use std::ptr;

// PTHREAD LAYER TO CALL LIBCTRU

/// The main thread's thread ID. It is "null" because libctru didn't spawn it.
const MAIN_THREAD_ID: libc::pthread_t = 0;
#[no_mangle]
pub unsafe extern "C" fn pthread_create(
native: *mut libc::pthread_t,
attr: *const libc::pthread_attr_t,
entrypoint: extern "C" fn(_: *mut libc::c_void) -> *mut libc::c_void,
value: *mut libc::c_void,
) -> libc::c_int {
let attr = attr as *const PThreadAttr;
let stack_size = (*attr).stack_size as ctru_sys::size_t;
let priority = (*attr).priority;
let processor_id = (*attr).processor_id;
extern "C" fn thread_start(main: *mut libc::c_void) {
unsafe {
Box::from_raw(main as *mut Box<dyn FnOnce() -> *mut libc::c_void>)();
}
}
// The closure needs a fat pointer (64 bits) to work since it captures a variable and is thus a
// trait object, but *mut void is only 32 bits. We need double indirection to pass along the
// full closure data.
// We make this closure in the first place because threadCreate expects a void return type, but
// entrypoint returns a pointer so the types are incompatible.
let main: *mut Box<dyn FnOnce() -> *mut libc::c_void> =
Box::into_raw(Box::new(Box::new(move || entrypoint(value))));
let thread = ctru_sys::threadCreate(
Some(thread_start),
main as *mut libc::c_void,
stack_size,
priority,
processor_id,
false,
);
if thread.is_null() {
// There was some error, but libctru doesn't expose the result.
// We assume there was a permissions issue (such as too low of a priority).
// We also need to clean up the closure at this time.
drop(Box::from_raw(main));
return libc::EPERM;
}
*native = thread as _;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_join(
native: libc::pthread_t,
_value: *mut *mut libc::c_void,
) -> libc::c_int {
if native == MAIN_THREAD_ID {
// This is not a valid thread to join on
return libc::EINVAL;
}
let result = ctru_sys::threadJoin(native as ctru_sys::Thread, u64::MAX);
if ctru_sys::R_FAILED(result) {
// TODO: improve the error code by checking the result further?
return libc::EINVAL;
}
ctru_sys::threadFree(native as ctru_sys::Thread);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_detach(thread: libc::pthread_t) -> libc::c_int {
if thread == MAIN_THREAD_ID {
// This is not a valid thread to detach
return libc::EINVAL;
}
ctru_sys::threadDetach(thread as ctru_sys::Thread);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_self() -> libc::pthread_t {
ctru_sys::threadGetCurrent() as libc::pthread_t
}
unsafe fn get_main_thread_handle() -> Result<ctru_sys::Handle, ctru_sys::Result> {
// Unfortunately I don't know of any better way to get the main thread's
// handle, other than via svcGetThreadList and svcOpenThread.
// Experimentally, the main thread ID is always the first in the list.
let mut thread_ids = [0; 1];
let mut thread_ids_count = 0;
let result = ctru_sys::svcGetThreadList(
&mut thread_ids_count,
thread_ids.as_mut_ptr(),
thread_ids.len() as i32,
ctru_sys::CUR_PROCESS_HANDLE,
);
if ctru_sys::R_FAILED(result) {
return Err(result);
}
// Get the main thread's handle
let main_thread_id = thread_ids[0];
let mut main_thread_handle = 0;
let result = ctru_sys::svcOpenThread(
&mut main_thread_handle,
ctru_sys::CUR_PROCESS_HANDLE,
main_thread_id,
);
if ctru_sys::R_FAILED(result) {
return Err(result);
}
Ok(main_thread_handle)
}
unsafe fn get_thread_handle(native: libc::pthread_t) -> Result<ctru_sys::Handle, ctru_sys::Result> {
if native == MAIN_THREAD_ID {
get_main_thread_handle()
} else {
Ok(ctru_sys::threadGetHandle(native as ctru_sys::Thread))
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_getschedparam(
native: libc::pthread_t,
policy: *mut libc::c_int,
param: *mut libc::sched_param,
) -> libc::c_int {
let handle = match get_thread_handle(native) {
Ok(handle) => handle,
Err(_) => return libc::ESRCH,
};
if handle == u32::MAX {
// The thread has already finished
return libc::ESRCH;
}
let mut priority = 0;
let result = ctru_sys::svcGetThreadPriority(&mut priority, handle);
if ctru_sys::R_FAILED(result) {
// Some error occurred. This is the only error defined for this
// function, so return it. Maybe the thread exited while this function
// was executing?
return libc::ESRCH;
}
(*param).sched_priority = priority;
// SCHED_FIFO is closest to how the cooperative app core works, while
// SCHED_RR is closest to how the preemptive sys core works.
// However, we don't have an API to get the current processor ID of a chosen
// thread (only the current), so we just always return SCHED_FIFO.
(*policy) = libc::SCHED_FIFO;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_setschedparam(
native: libc::pthread_t,
policy: libc::c_int,
param: *const libc::sched_param,
) -> libc::c_int {
if policy != libc::SCHED_FIFO {
// We only accept SCHED_FIFO. See the note in pthread_getschedparam.
return libc::EINVAL;
}
let handle = match get_thread_handle(native) {
Ok(handle) => handle,
Err(_) => return libc::EINVAL,
};
if handle == u32::MAX {
// The thread has already finished
return libc::ESRCH;
}
let result = ctru_sys::svcSetThreadPriority(handle, (*param).sched_priority);
if ctru_sys::R_FAILED(result) {
// Probably the priority is out of the permissible bounds
// TODO: improve the error code by checking the result further?
return libc::EPERM;
}
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_getprocessorid_np() -> libc::c_int {
ctru_sys::svcGetProcessorID()
}
/// Internal struct for storing pthread attribute data
/// Must be less than or equal to the size of `libc::pthread_attr_t`. We assert
/// this below via static_assertions.
struct PThreadAttr {
stack_size: libc::size_t,
priority: libc::c_int,
processor_id: libc::c_int,
}
impl Default for PThreadAttr {
fn default() -> Self {
// Note: we are ignoring the result here, but errors shouldn't occur
// since we're using a valid handle.
let mut priority = 0;
unsafe { ctru_sys::svcGetThreadPriority(&mut priority, ctru_sys::CUR_THREAD_HANDLE) };
PThreadAttr {
stack_size: libc::PTHREAD_STACK_MIN,
// If no priority value is specified, spawn with the same priority
// as the current thread
priority,
// If no processor is specified, spawn on the default core.
// (determined by the application's Exheader)
processor_id: -2,
}
}
}
static_assertions::const_assert!(
std::mem::size_of::<PThreadAttr>() <= std::mem::size_of::<libc::pthread_attr_t>()
);
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_init(attr: *mut libc::pthread_attr_t) -> libc::c_int {
let attr = attr as *mut PThreadAttr;
*attr = PThreadAttr::default();
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_destroy(attr: *mut libc::pthread_attr_t) -> libc::c_int {
ptr::drop_in_place(attr as *mut PThreadAttr);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_setstacksize(
attr: *mut libc::pthread_attr_t,
stack_size: libc::size_t,
) -> libc::c_int {
let attr = attr as *mut PThreadAttr;
(*attr).stack_size = stack_size;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_getschedparam(
attr: *const libc::pthread_attr_t,
param: *mut libc::sched_param,
) -> libc::c_int {
let attr = attr as *const PThreadAttr;
(*param).sched_priority = (*attr).priority;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_setschedparam(
attr: *mut libc::pthread_attr_t,
param: *const libc::sched_param,
) -> libc::c_int {
let attr = attr as *mut PThreadAttr;
(*attr).priority = (*param).sched_priority;
// TODO: we could validate the priority here if we wanted?
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_getprocessorid_np(
attr: *const libc::pthread_attr_t,
processor_id: *mut libc::c_int,
) -> libc::c_int {
let attr = attr as *mut PThreadAttr;
(*processor_id) = (*attr).processor_id;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_setprocessorid_np(
attr: *mut libc::pthread_attr_t,
processor_id: libc::c_int,
) -> libc::c_int {
let attr = attr as *mut PThreadAttr;
(*attr).processor_id = processor_id;
// TODO: we could validate the processor ID here if we wanted?
0
}
#[no_mangle]
pub unsafe extern "C" fn sched_yield() -> libc::c_int {
ctru_sys::svcSleepThread(0);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_init(
cond: *mut libc::pthread_cond_t,
_attr: *const libc::pthread_condattr_t,
) -> libc::c_int {
ctru_sys::CondVar_Init(cond as _);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_signal(cond: *mut libc::pthread_cond_t) -> libc::c_int {
ctru_sys::CondVar_WakeUp(cond as _, 1);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_broadcast(cond: *mut libc::pthread_cond_t) -> libc::c_int {
ctru_sys::CondVar_WakeUp(cond as _, -1);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_wait(
cond: *mut libc::pthread_cond_t,
lock: *mut libc::pthread_mutex_t,
) -> libc::c_int {
ctru_sys::CondVar_Wait(cond as _, lock as _);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_timedwait(
cond: *mut libc::pthread_cond_t,
lock: *mut libc::pthread_mutex_t,
abstime: *const libc::timespec,
) -> libc::c_int {
// libctru expects a duration, but we have an absolute timestamp.
// Convert to a duration before calling libctru.
// Get the current time so we can make a duration
let mut now = libc::timeval {
tv_sec: 0,
tv_usec: 0,
};
let r = libc::gettimeofday(&mut now, ptr::null_mut());
if r != 0 {
return r;
}
// Calculate the duration
let duration_nsec = (*abstime)
.tv_sec
// Get the difference in seconds
.saturating_sub(now.tv_sec)
// Convert to nanoseconds
.saturating_mul(1_000_000_000)
// Add the difference in nanoseconds
.saturating_add((*abstime).tv_nsec as i64)
.saturating_sub(now.tv_usec as i64 * 1_000)
// Don't go negative
.max(0);
let r = ctru_sys::CondVar_WaitTimeout(cond as _, lock as _, duration_nsec);
// CondVar_WaitTimeout returns a boolean which is true (nonzero) if it timed out
if r == 0 {
0
} else {
libc::ETIMEDOUT
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_cond_destroy(_cond: *mut libc::pthread_cond_t) -> libc::c_int {
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutexattr_init(
_attr: *mut libc::pthread_mutexattr_t,
) -> libc::c_int {
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutexattr_settype(
attr: *mut libc::pthread_mutexattr_t,
_type: libc::c_int,
) -> libc::c_int {
let attr: *mut libc::c_int = attr as _;
*attr = _type as _;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutex_init(
lock: *mut libc::pthread_mutex_t,
attr: *const libc::pthread_mutexattr_t,
) -> libc::c_int {
let lock = lock as *mut u8;
let attr: libc::c_int = *(attr as *const libc::c_int);
if attr == libc::PTHREAD_MUTEX_NORMAL {
ctru_sys::LightLock_Init(lock as _);
} else if attr == libc::PTHREAD_MUTEX_RECURSIVE {
ctru_sys::RecursiveLock_Init(lock as _)
}
*(lock.offset(39)) = attr as u8;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutex_destroy(_lock: *mut libc::pthread_mutex_t) -> libc::c_int {
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutex_lock(lock: *mut libc::pthread_mutex_t) -> libc::c_int {
let lock = lock as *const u8;
if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_NORMAL as u8 {
ctru_sys::LightLock_Lock(lock as _);
} else if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_RECURSIVE as u8 {
ctru_sys::RecursiveLock_Lock(lock as _);
}
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutex_trylock(lock: *mut libc::pthread_mutex_t) -> libc::c_int {
let lock = lock as *const u8;
if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_NORMAL as u8 {
return ctru_sys::LightLock_TryLock(lock as _);
} else if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_RECURSIVE as u8 {
return ctru_sys::RecursiveLock_TryLock(lock as _);
}
-1
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutex_unlock(lock: *mut libc::pthread_mutex_t) -> libc::c_int {
let lock = lock as *const u8;
if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_NORMAL as u8 {
ctru_sys::LightLock_Unlock(lock as _);
} else if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_RECURSIVE as u8 {
ctru_sys::RecursiveLock_Unlock(lock as _);
}
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutexattr_destroy(
_attr: *mut libc::pthread_mutexattr_t,
) -> libc::c_int {
0
}
struct rwlock_clear {
mutex: libc::pthread_mutex_t,
cvar: i32,
num_readers: i32,
writer_active: bool,
initialized: bool,
}
/// Initializes the rwlock internal members if they weren't already
fn init_rwlock(lock: *mut libc::pthread_rwlock_t) {
let lock = lock as *mut rwlock_clear;
unsafe {
if !(*lock).initialized {
let mut attr = std::mem::MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
pthread_mutexattr_init(attr.as_mut_ptr());
let mut attr = attr.assume_init();
pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_NORMAL);
pthread_mutex_init(&mut (*lock).mutex, &attr);
pthread_cond_init(&mut (*lock).cvar as *mut i32 as *mut _, core::ptr::null());
(*lock).num_readers = 0;
(*lock).writer_active = false;
(*lock).initialized = true;
}
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_init(
lock: *mut libc::pthread_rwlock_t,
_attr: *const libc::pthread_rwlockattr_t,
) -> libc::c_int {
init_rwlock(lock);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_destroy(_lock: *mut libc::pthread_rwlock_t) -> libc::c_int {
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_rdlock(lock: *mut libc::pthread_rwlock_t) -> libc::c_int {
init_rwlock(lock);
let lock = lock as *mut rwlock_clear;
pthread_mutex_lock(&mut (*lock).mutex);
while (*lock).writer_active {
pthread_cond_wait(&mut (*lock).cvar as *mut i32 as _, &mut (*lock).mutex);
}
(*lock).num_readers += 1;
pthread_mutex_unlock(&mut (*lock).mutex);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_tryrdlock(
lock: *mut libc::pthread_rwlock_t,
) -> libc::c_int {
init_rwlock(lock);
let lock = lock as *mut rwlock_clear;
if pthread_mutex_trylock(&mut (*lock).mutex) != 0 {
return -1;
}
while (*lock).writer_active {
pthread_cond_wait(&mut (*lock).cvar as *mut i32 as _, &mut (*lock).mutex);
}
(*lock).num_readers += 1;
pthread_mutex_unlock(&mut (*lock).mutex);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_wrlock(lock: *mut libc::pthread_rwlock_t) -> libc::c_int {
init_rwlock(lock);
let lock = lock as *mut rwlock_clear;
pthread_mutex_lock(&mut (*lock).mutex);
while (*lock).writer_active || (*lock).num_readers > 0 {
pthread_cond_wait(&mut (*lock).cvar as *mut i32 as _, &mut (*lock).mutex);
}
(*lock).writer_active = true;
pthread_mutex_unlock(&mut (*lock).mutex);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_trywrlock(
lock: *mut libc::pthread_rwlock_t,
) -> libc::c_int {
init_rwlock(lock);
let lock = lock as *mut rwlock_clear;
if pthread_mutex_trylock(&mut (*lock).mutex) != 0 {
return -1;
}
while (*lock).writer_active || (*lock).num_readers > 0 {
pthread_cond_wait(&mut (*lock).cvar as *mut i32 as _, &mut (*lock).mutex);
}
(*lock).writer_active = true;
pthread_mutex_unlock(&mut (*lock).mutex);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_unlock(lock: *mut libc::pthread_rwlock_t) -> libc::c_int {
init_rwlock(lock);
let lock = lock as *mut rwlock_clear;
pthread_mutex_lock(&mut (*lock).mutex);
// If there are readers and no writer => Must be a reader
if (*lock).num_readers > 0 && !(*lock).writer_active {
(*lock).num_readers -= 1;
// If there are no more readers, signal to a waiting writer
if (*lock).num_readers == 0 {
pthread_cond_signal(&mut (*lock).cvar as *mut i32 as _);
}
// If there are no readers and a writer => Must be a writer
} else if (*lock).num_readers == 0 && (*lock).writer_active {
(*lock).writer_active = false;
pthread_cond_broadcast(&mut (*lock).cvar as *mut i32 as _);
}
pthread_mutex_unlock(&mut (*lock).mutex);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlockattr_init(
_attr: *mut libc::pthread_rwlockattr_t,
) -> libc::c_int {
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlockattr_destroy(
_attr: *mut libc::pthread_rwlockattr_t,
) -> libc::c_int {
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_sigmask(
_how: ::libc::c_int,
_set: *const libc::sigset_t,
_oldset: *mut libc::sigset_t,
) -> ::libc::c_int {
-1
}
// THREAD KEYS IMPLEMENTATION FOR RUST STD
use spin::rwlock::RwLock;
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicUsize, Ordering};
type Key = usize;
type Destructor = unsafe extern "C" fn(*mut libc::c_void);
static NEXT_KEY: AtomicUsize = AtomicUsize::new(1);
// This is a spin-lock RwLock which yields the thread every loop
static KEYS: RwLock<BTreeMap<Key, Option<Destructor>>, spin::Yield> = RwLock::new(BTreeMap::new());
#[thread_local]
static mut LOCALS: BTreeMap<Key, *mut libc::c_void> = BTreeMap::new();
fn is_valid_key(key: Key) -> bool {
KEYS.read().contains_key(&(key as Key))
}
#[no_mangle]
pub unsafe extern "C" fn pthread_key_create(
key: *mut libc::pthread_key_t,
destructor: Option<Destructor>,
) -> libc::c_int {
let new_key = NEXT_KEY.fetch_add(1, Ordering::SeqCst);
KEYS.write().insert(new_key, destructor);
*key = new_key as libc::pthread_key_t;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_key_delete(key: libc::pthread_key_t) -> libc::c_int {
match KEYS.write().remove(&(key as Key)) {
// We had an entry, so it was a valid key.
// It's officially undefined behavior if they use the key after this,
// so don't worry about cleaning up LOCALS, especially since we can't
// clean up every thread's map.
Some(_) => 0,
// The key is unknown
None => libc::EINVAL,
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_getspecific(key: libc::pthread_key_t) -> *mut libc::c_void {
if let Some(&value) = LOCALS.get(&(key as Key)) {
value as _
} else {
// Note: we don't care if the key is invalid, we still return null
ptr::null_mut()
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_setspecific(
key: libc::pthread_key_t,
value: *const libc::c_void,
) -> libc::c_int {
let key = key as Key;
if !is_valid_key(key) {
return libc::EINVAL;
}
LOCALS.insert(key, value as *mut _);
0
}

src/misc.rs (19)

@@ -0,0 +1,19 @@
//! Miscellaneous pthread functions
pub fn init() {}
#[no_mangle]
pub unsafe extern "C" fn sched_yield() -> libc::c_int {
ctru_sys::svcSleepThread(0);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_sigmask(
_how: ::libc::c_int,
_set: *const libc::sigset_t,
_oldset: *mut libc::sigset_t,
) -> ::libc::c_int {
-1
}
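
Since sleeping for zero nanoseconds just gives other threads a chance to run, the sched_yield above can back a polite spin-wait. A small editorial sketch (not part of the diff), assuming it sits in this module so sched_yield is in scope:

use std::sync::atomic::{AtomicBool, Ordering};

unsafe fn spin_until_set(flag: &AtomicBool) {
    while !flag.load(Ordering::Acquire) {
        // Yield instead of burning the whole timeslice on this core.
        sched_yield();
    }
}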

src/mutex.rs (93)

@@ -0,0 +1,93 @@
//! PThread mutex implemented using libctru.
pub fn init() {}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutexattr_init(
_attr: *mut libc::pthread_mutexattr_t,
) -> libc::c_int {
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutexattr_settype(
attr: *mut libc::pthread_mutexattr_t,
_type: libc::c_int,
) -> libc::c_int {
let attr: *mut libc::c_int = attr as _;
*attr = _type as _;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutex_init(
lock: *mut libc::pthread_mutex_t,
attr: *const libc::pthread_mutexattr_t,
) -> libc::c_int {
let lock = lock as *mut u8;
let attr: libc::c_int = *(attr as *const libc::c_int);
if attr == libc::PTHREAD_MUTEX_NORMAL {
ctru_sys::LightLock_Init(lock as _);
} else if attr == libc::PTHREAD_MUTEX_RECURSIVE {
ctru_sys::RecursiveLock_Init(lock as _)
}
*(lock.offset(39)) = attr as u8;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutex_destroy(_lock: *mut libc::pthread_mutex_t) -> libc::c_int {
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutex_lock(lock: *mut libc::pthread_mutex_t) -> libc::c_int {
let lock = lock as *const u8;
if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_NORMAL as u8 {
ctru_sys::LightLock_Lock(lock as _);
} else if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_RECURSIVE as u8 {
ctru_sys::RecursiveLock_Lock(lock as _);
}
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutex_trylock(lock: *mut libc::pthread_mutex_t) -> libc::c_int {
let lock = lock as *const u8;
if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_NORMAL as u8 {
return ctru_sys::LightLock_TryLock(lock as _);
} else if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_RECURSIVE as u8 {
return ctru_sys::RecursiveLock_TryLock(lock as _);
}
-1
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutex_unlock(lock: *mut libc::pthread_mutex_t) -> libc::c_int {
let lock = lock as *const u8;
if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_NORMAL as u8 {
ctru_sys::LightLock_Unlock(lock as _);
} else if *(lock.offset(39)) as u8 == libc::PTHREAD_MUTEX_RECURSIVE as u8 {
ctru_sys::RecursiveLock_Unlock(lock as _);
}
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_mutexattr_destroy(
_attr: *mut libc::pthread_mutexattr_t,
) -> libc::c_int {
0
}
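
A usage sketch of the functions above (editorial, not part of the diff). It assumes it is compiled in this module so the #[no_mangle] functions are in scope, and it relies on pthread_mutex_init stashing the mutex type in byte 39 of the pthread_mutex_t, as the code above does:

unsafe fn recursive_mutex_demo() {
    let mut attr: libc::pthread_mutexattr_t = std::mem::zeroed();
    let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();

    pthread_mutexattr_init(&mut attr);
    pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mut mutex, &attr);

    // Backed by RecursiveLock, so the owning thread may re-lock without deadlocking.
    pthread_mutex_lock(&mut mutex);
    pthread_mutex_lock(&mut mutex);
    pthread_mutex_unlock(&mut mutex);
    pthread_mutex_unlock(&mut mutex);
}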

src/rwlock.rs (171)

@@ -0,0 +1,171 @@
//! PThread read-write lock implemented using libctru.
pub fn init() {}
use crate::{condvar, mutex};
struct rwlock_clear {
mutex: libc::pthread_mutex_t,
cvar: i32,
num_readers: i32,
writer_active: bool,
initialized: bool,
}
/// Initializes the rwlock internal members if they weren't already
fn init_rwlock(lock: *mut libc::pthread_rwlock_t) {
let lock = lock as *mut rwlock_clear;
unsafe {
if !(*lock).initialized {
let mut attr = std::mem::MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
mutex::pthread_mutexattr_init(attr.as_mut_ptr());
let mut attr = attr.assume_init();
mutex::pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_NORMAL);
mutex::pthread_mutex_init(&mut (*lock).mutex, &attr);
condvar::pthread_cond_init(&mut (*lock).cvar as *mut i32 as *mut _, core::ptr::null());
(*lock).num_readers = 0;
(*lock).writer_active = false;
(*lock).initialized = true;
}
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_init(
lock: *mut libc::pthread_rwlock_t,
_attr: *const libc::pthread_rwlockattr_t,
) -> libc::c_int {
init_rwlock(lock);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_destroy(_lock: *mut libc::pthread_rwlock_t) -> libc::c_int {
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_rdlock(lock: *mut libc::pthread_rwlock_t) -> libc::c_int {
init_rwlock(lock);
let lock = lock as *mut rwlock_clear;
mutex::pthread_mutex_lock(&mut (*lock).mutex);
while (*lock).writer_active {
condvar::pthread_cond_wait(&mut (*lock).cvar as *mut i32 as _, &mut (*lock).mutex);
}
(*lock).num_readers += 1;
mutex::pthread_mutex_unlock(&mut (*lock).mutex);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_tryrdlock(
lock: *mut libc::pthread_rwlock_t,
) -> libc::c_int {
init_rwlock(lock);
let lock = lock as *mut rwlock_clear;
if mutex::pthread_mutex_trylock(&mut (*lock).mutex) != 0 {
return -1;
}
while (*lock).writer_active {
condvar::pthread_cond_wait(&mut (*lock).cvar as *mut i32 as _, &mut (*lock).mutex);
}
(*lock).num_readers += 1;
mutex::pthread_mutex_unlock(&mut (*lock).mutex);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_wrlock(lock: *mut libc::pthread_rwlock_t) -> libc::c_int {
init_rwlock(lock);
let lock = lock as *mut rwlock_clear;
mutex::pthread_mutex_lock(&mut (*lock).mutex);
while (*lock).writer_active || (*lock).num_readers > 0 {
condvar::pthread_cond_wait(&mut (*lock).cvar as *mut i32 as _, &mut (*lock).mutex);
}
(*lock).writer_active = true;
mutex::pthread_mutex_unlock(&mut (*lock).mutex);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_trywrlock(
lock: *mut libc::pthread_rwlock_t,
) -> libc::c_int {
init_rwlock(lock);
let lock = lock as *mut rwlock_clear;
if mutex::pthread_mutex_trylock(&mut (*lock).mutex) != 0 {
return -1;
}
while (*lock).writer_active || (*lock).num_readers > 0 {
condvar::pthread_cond_wait(&mut (*lock).cvar as *mut i32 as _, &mut (*lock).mutex);
}
(*lock).writer_active = true;
mutex::pthread_mutex_unlock(&mut (*lock).mutex);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlock_unlock(lock: *mut libc::pthread_rwlock_t) -> libc::c_int {
init_rwlock(lock);
let lock = lock as *mut rwlock_clear;
mutex::pthread_mutex_lock(&mut (*lock).mutex);
// If there are readers and no writer => Must be a reader
if (*lock).num_readers > 0 && !(*lock).writer_active {
(*lock).num_readers -= 1;
// If there are no more readers, signal to a waiting writer
if (*lock).num_readers == 0 {
condvar::pthread_cond_signal(&mut (*lock).cvar as *mut i32 as _);
}
// If there are no readers and a writer => Must be a writer
} else if (*lock).num_readers == 0 && (*lock).writer_active {
(*lock).writer_active = false;
condvar::pthread_cond_broadcast(&mut (*lock).cvar as *mut i32 as _);
}
mutex::pthread_mutex_unlock(&mut (*lock).mutex);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlockattr_init(
_attr: *mut libc::pthread_rwlockattr_t,
) -> libc::c_int {
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_rwlockattr_destroy(
_attr: *mut libc::pthread_rwlockattr_t,
) -> libc::c_int {
0
}
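
A usage sketch (editorial, not part of the diff; assumes the functions above are in scope). Because every entry point calls init_rwlock, a zeroed pthread_rwlock_t is a valid starting state:

unsafe fn rwlock_demo() {
    let mut rw: libc::pthread_rwlock_t = std::mem::zeroed();

    // Any number of readers may hold the lock at the same time...
    pthread_rwlock_rdlock(&mut rw);
    pthread_rwlock_rdlock(&mut rw);
    pthread_rwlock_unlock(&mut rw);
    pthread_rwlock_unlock(&mut rw);

    // ...while a writer waits until there is no reader and no other writer.
    pthread_rwlock_wrlock(&mut rw);
    pthread_rwlock_unlock(&mut rw);
}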

src/thread.rs (350)

@@ -0,0 +1,350 @@
//! PThread threads implemented using libctru.
use attr::PThreadAttr;
use spin::rwlock::RwLock;
use std::collections::BTreeMap;
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering};
mod attr;
pub fn init() {
attr::init();
}
/// The main thread's pthread ID
const MAIN_THREAD_ID: libc::pthread_t = 0;
// We initialize to 1 since 0 is reserved for the main thread.
static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
// This is a spin-lock RwLock which yields the thread every loop.
static THREADS: RwLock<BTreeMap<libc::pthread_t, PThread>, spin::Yield> =
RwLock::new(BTreeMap::new());
// Initialize to zero (main thread ID) since the main thread will have the
// default value in this thread local.
#[thread_local]
static mut THREAD_ID: libc::pthread_t = MAIN_THREAD_ID;
#[derive(Copy, Clone)]
struct PThread {
thread: ShareablePtr<ctru_sys::Thread_tag>,
os_thread_id: u32,
is_detached: bool,
is_finished: bool,
result: ShareablePtr<libc::c_void>,
}
/// Pointers are not Send or Sync, though it's really just a lint. This struct
/// lets us ignore that "lint".
struct ShareablePtr<T>(*mut T);
unsafe impl<T> Send for ShareablePtr<T> {}
unsafe impl<T> Sync for ShareablePtr<T> {}
// We can't use the derives because they add an unnecessary T: Copy/Clone bound.
impl<T> Copy for ShareablePtr<T> {}
impl<T> Clone for ShareablePtr<T> {
fn clone(&self) -> Self {
*self
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_create(
native: *mut libc::pthread_t,
attr: *const libc::pthread_attr_t,
entrypoint: extern "C" fn(_: *mut libc::c_void) -> *mut libc::c_void,
value: *mut libc::c_void,
) -> libc::c_int {
let attr = attr as *const PThreadAttr;
let stack_size = (*attr).stack_size as ctru_sys::size_t;
let priority = (*attr).priority;
let processor_id = (*attr).processor_id;
let thread_id = NEXT_ID.fetch_add(1, Ordering::SeqCst) as libc::pthread_t;
extern "C" fn thread_start(main: *mut libc::c_void) {
unsafe {
Box::from_raw(main as *mut Box<dyn FnOnce()>)();
}
}
// The closure needs a fat pointer (64 bits) to work since it captures a variable and is thus a
// trait object, but *mut void is only 32 bits. We need double indirection to pass along the
// full closure data.
// We make this closure in the first place because threadCreate expects a void return type, but
// entrypoint returns a pointer so the types are incompatible.
let main: *mut Box<dyn FnOnce()> = Box::into_raw(Box::new(Box::new(move || {
THREAD_ID = thread_id;
let result = entrypoint(value);
// Update the threads map with the result, and remove this thread if
// it's detached.
// We hold the lock the whole time so there isn't a race condition.
// (ex. we copy out the thread data, pthread_detach is called, we
// check is_detached and it's still false, so thread is never
// removed from the map)
let mut thread_map = THREADS.write();
if let Some(mut pthread) = thread_map.get_mut(&thread_id) {
pthread.is_finished = true;
pthread.result.0 = result;
if pthread.is_detached {
// libctru will call threadFree once this thread dies
thread_map.remove(&thread_id);
}
}
})));
let thread = ctru_sys::threadCreate(
Some(thread_start),
main as *mut libc::c_void,
stack_size,
priority,
processor_id,
false,
);
if thread.is_null() {
// There was some error, but libctru doesn't expose the result.
// We assume there was a permissions issue (such as too low of a priority).
// We also need to clean up the closure at this time.
drop(Box::from_raw(main));
return libc::EPERM;
}
// Get the OS thread ID
let os_handle = ctru_sys::threadGetHandle(thread);
let mut os_thread_id = 0;
let result = ctru_sys::svcGetThreadId(&mut os_thread_id, os_handle);
if ctru_sys::R_FAILED(result) {
// TODO: improve error handling? Different codes?
return libc::EPERM;
}
// Insert the thread into our map
THREADS.write().insert(
thread_id,
PThread {
thread: ShareablePtr(thread),
os_thread_id,
is_detached: false,
is_finished: false,
result: ShareablePtr(ptr::null_mut()),
},
);
*native = thread_id;
0
}
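
The double-boxing described in the comments above can be seen in isolation in this editorial sketch (not part of the diff): the boxed closure is a fat pointer, so it is boxed a second time to obtain a thin pointer that fits in *mut c_void across the C boundary.

fn double_box_demo() {
    let closure: Box<dyn FnOnce()> = Box::new(|| println!("running on the new thread"));
    // Outer box: a thin pointer to the fat Box<dyn FnOnce()>.
    let raw = Box::into_raw(Box::new(closure)) as *mut libc::c_void;

    // What thread_start does on the other side of the C ABI:
    unsafe { Box::from_raw(raw as *mut Box<dyn FnOnce()>)() };
}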
#[no_mangle]
pub unsafe extern "C" fn pthread_join(
thread_id: libc::pthread_t,
return_value: *mut *mut libc::c_void,
) -> libc::c_int {
if thread_id == MAIN_THREAD_ID {
// This is not a valid thread to join on
return libc::EINVAL;
}
let thread_map = THREADS.read();
let Some(&pthread) = thread_map.get(&thread_id) else {
// This is not a valid thread ID
return libc::ESRCH
};
// We need to drop our read guard so it doesn't stay locked while joining
// the thread.
drop(thread_map);
if pthread.is_detached {
// Cannot join on a detached thread
return libc::EINVAL;
}
let result = ctru_sys::threadJoin(pthread.thread.0, u64::MAX);
if ctru_sys::R_FAILED(result) {
// TODO: improve the error code by checking the result further?
return libc::EINVAL;
}
ctru_sys::threadFree(pthread.thread.0);
let thread_data = THREADS.write().remove(&thread_id);
// This should always be Some, but we use an if let just in case.
if let Some(thread_data) = thread_data {
if !return_value.is_null() {
*return_value = thread_data.result.0;
}
}
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_detach(thread_id: libc::pthread_t) -> libc::c_int {
if thread_id == MAIN_THREAD_ID {
// This is not a valid thread to detach
return libc::EINVAL;
}
let mut thread_map = THREADS.write();
let Some(mut pthread) = thread_map.get_mut(&thread_id) else {
// This is not a valid thread ID
return libc::ESRCH
};
if pthread.is_detached {
// Cannot detach an already detached thread
return libc::EINVAL;
}
pthread.is_detached = true;
ctru_sys::threadDetach(pthread.thread.0);
if pthread.is_finished {
// threadFree was already called by threadDetach
thread_map.remove(&thread_id);
}
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_self() -> libc::pthread_t {
let thread_id = THREAD_ID;
if thread_id == MAIN_THREAD_ID {
// Take this opportunity to populate the main thread's data in the map.
// They can only "legally" get the main thread's ID by calling this
// function, so this will run before they do anything that needs this.
// We have to ignore the possible error from svcGetThreadId since
// pthread_self cannot fail... But there shouldn't be an error anyways.
let mut os_thread_id = 0;
ctru_sys::svcGetThreadId(&mut os_thread_id, ctru_sys::CUR_THREAD_HANDLE);
THREADS.write().insert(
MAIN_THREAD_ID,
PThread {
// This null pointer is safe because we return before ever using
// it (in pthread_join and pthread_detach).
thread: ShareablePtr(ptr::null_mut()),
os_thread_id,
is_detached: true,
is_finished: false,
result: ShareablePtr(ptr::null_mut()),
},
);
}
thread_id
}
/// Closes a kernel handle on drop.
struct Handle(ctru_sys::Handle);
impl TryFrom<libc::pthread_t> for Handle {
type Error = libc::c_int;
fn try_from(thread_id: libc::pthread_t) -> Result<Self, Self::Error> {
let Some(&pthread) = THREADS.read().get(&thread_id) else {
// This is not a valid thread ID
return Err(libc::ESRCH)
};
if pthread.is_finished {
return Err(libc::ESRCH);
}
let mut os_handle = 0;
let ret = unsafe {
ctru_sys::svcOpenThread(
&mut os_handle,
ctru_sys::CUR_PROCESS_HANDLE,
pthread.os_thread_id,
)
};
if ctru_sys::R_FAILED(ret) || os_handle == u32::MAX {
// Either there was an error or the thread already finished
Err(libc::ESRCH)
} else {
Ok(Self(os_handle))
}
}
}
impl Drop for Handle {
fn drop(&mut self) {
unsafe {
// We ignore the error because we can't return it or panic.
ctru_sys::svcCloseHandle(self.0);
}
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_getschedparam(
thread_id: libc::pthread_t,
policy: *mut libc::c_int,
param: *mut libc::sched_param,
) -> libc::c_int {
let handle = match Handle::try_from(thread_id) {
Ok(handle) => handle,
Err(code) => return code,
};
let mut priority = 0;
let result = ctru_sys::svcGetThreadPriority(&mut priority, handle.0);
if ctru_sys::R_FAILED(result) {
// Some error occurred. This is the only error defined for this
// function, so return it. Maybe the thread exited while this function
// was executing?
return libc::ESRCH;
}
(*param).sched_priority = priority;
// SCHED_FIFO is closest to how the cooperative app core works, while
// SCHED_RR is closest to how the preemptive sys core works.
// However, we don't have an API to get the current processor ID of a chosen
// thread (only the current), so we just always return SCHED_FIFO.
(*policy) = libc::SCHED_FIFO;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_setschedparam(
thread_id: libc::pthread_t,
policy: libc::c_int,
param: *const libc::sched_param,
) -> libc::c_int {
if policy != libc::SCHED_FIFO {
// We only accept SCHED_FIFO. See the note in pthread_getschedparam.
return libc::EINVAL;
}
let handle = match Handle::try_from(thread_id) {
Ok(handle) => handle,
Err(code) => return code,
};
let result = ctru_sys::svcSetThreadPriority(handle.0, (*param).sched_priority);
if ctru_sys::R_FAILED(result) {
// Probably the priority is out of the permissible bounds
// TODO: improve the error code by checking the result further?
return libc::EPERM;
}
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_getprocessorid_np() -> libc::c_int {
ctru_sys::svcGetProcessorID()
}
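
An end-to-end usage sketch of the thread shim (editorial, not part of the diff). worker and the argument value are made up; pthread_attr_init lives in the attr submodule shown in src/thread/attr.rs below:

extern "C" fn worker(arg: *mut libc::c_void) -> *mut libc::c_void {
    // Echo the argument back as the thread's result.
    arg
}

unsafe fn spawn_and_join_demo() {
    let mut attrs: libc::pthread_attr_t = std::mem::zeroed();
    attr::pthread_attr_init(&mut attrs); // defaults: current priority, default core

    let mut id: libc::pthread_t = 0;
    pthread_create(&mut id, &attrs, worker, 0x1234 as *mut libc::c_void);

    let mut result: *mut libc::c_void = ptr::null_mut();
    pthread_join(id, &mut result); // result is now 0x1234, the worker's return value
}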

src/thread/attr.rs (108)

@@ -0,0 +1,108 @@
use static_assertions::const_assert;
use std::mem;
pub fn init() {}
/// Internal struct for storing pthread attribute data
/// Must be less than or equal to the size of `libc::pthread_attr_t`. We assert
/// this below via static_assertions.
pub struct PThreadAttr {
pub(crate) stack_size: libc::size_t,
pub(crate) priority: libc::c_int,
pub(crate) processor_id: libc::c_int,
}
const_assert!(mem::size_of::<PThreadAttr>() <= mem::size_of::<libc::pthread_attr_t>());
impl Default for PThreadAttr {
fn default() -> Self {
// Note: we are ignoring the result here, but errors shouldn't occur
// since we're using a valid handle.
let mut priority = 0;
unsafe { ctru_sys::svcGetThreadPriority(&mut priority, ctru_sys::CUR_THREAD_HANDLE) };
PThreadAttr {
stack_size: libc::PTHREAD_STACK_MIN,
// If no priority value is specified, spawn with the same priority
// as the current thread
priority,
// If no processor is specified, spawn on the default core.
// (determined by the application's Exheader)
processor_id: -2,
}
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_init(attr: *mut libc::pthread_attr_t) -> libc::c_int {
let attr = attr as *mut PThreadAttr;
*attr = PThreadAttr::default();
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_destroy(attr: *mut libc::pthread_attr_t) -> libc::c_int {
std::ptr::drop_in_place(attr as *mut PThreadAttr);
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_setstacksize(
attr: *mut libc::pthread_attr_t,
stack_size: libc::size_t,
) -> libc::c_int {
let attr = attr as *mut PThreadAttr;
(*attr).stack_size = stack_size;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_getschedparam(
attr: *const libc::pthread_attr_t,
param: *mut libc::sched_param,
) -> libc::c_int {
let attr = attr as *const PThreadAttr;
(*param).sched_priority = (*attr).priority;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_setschedparam(
attr: *mut libc::pthread_attr_t,
param: *const libc::sched_param,
) -> libc::c_int {
let attr = attr as *mut PThreadAttr;
(*attr).priority = (*param).sched_priority;
// TODO: we could validate the priority here if we wanted?
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_getprocessorid_np(
attr: *const libc::pthread_attr_t,
processor_id: *mut libc::c_int,
) -> libc::c_int {
let attr = attr as *mut PThreadAttr;
(*processor_id) = (*attr).processor_id;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_attr_setprocessorid_np(
attr: *mut libc::pthread_attr_t,
processor_id: libc::c_int,
) -> libc::c_int {
let attr = attr as *mut PThreadAttr;
(*attr).processor_id = processor_id;
// TODO: we could validate the processor ID here if we wanted?
0
}
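
A sketch of configuring the 3DS-specific attributes before pthread_create (editorial, not part of the diff). The priority and core values are example numbers, and running on core 1 is subject to whatever the application's Exheader allows, which this sketch does not check:

unsafe fn configured_attr() -> libc::pthread_attr_t {
    let mut attr: libc::pthread_attr_t = std::mem::zeroed();
    pthread_attr_init(&mut attr);

    pthread_attr_setstacksize(&mut attr, 64 * 1024); // 64 KiB stack

    let mut param: libc::sched_param = std::mem::zeroed();
    param.sched_priority = 0x30; // example priority value
    pthread_attr_setschedparam(&mut attr, &param);

    pthread_attr_setprocessorid_np(&mut attr, 1); // example: request core 1
    attr
}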

src/thread_keys.rs (73)

@@ -0,0 +1,73 @@
//! Thread keys implementation for the standard library.
use spin::rwlock::RwLock;
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicUsize, Ordering};
pub fn init() {}
type Key = usize;
type Destructor = unsafe extern "C" fn(*mut libc::c_void);
static NEXT_KEY: AtomicUsize = AtomicUsize::new(1);
// This is a spin-lock RwLock which yields the thread every loop
static KEYS: RwLock<BTreeMap<Key, Option<Destructor>>, spin::Yield> = RwLock::new(BTreeMap::new());
#[thread_local]
static mut LOCALS: BTreeMap<Key, *mut libc::c_void> = BTreeMap::new();
fn is_valid_key(key: Key) -> bool {
KEYS.read().contains_key(&(key as Key))
}
#[no_mangle]
pub unsafe extern "C" fn pthread_key_create(
key: *mut libc::pthread_key_t,
destructor: Option<Destructor>,
) -> libc::c_int {
let new_key = NEXT_KEY.fetch_add(1, Ordering::SeqCst);
KEYS.write().insert(new_key, destructor);
*key = new_key as libc::pthread_key_t;
0
}
#[no_mangle]
pub unsafe extern "C" fn pthread_key_delete(key: libc::pthread_key_t) -> libc::c_int {
match KEYS.write().remove(&(key as Key)) {
// We had an entry, so it was a valid key.
// It's officially undefined behavior if they use the key after this,
// so don't worry about cleaning up LOCALS, especially since we can't
// clean up every thread's map.
Some(_) => 0,
// The key is unknown
None => libc::EINVAL,
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_getspecific(key: libc::pthread_key_t) -> *mut libc::c_void {
if let Some(&value) = LOCALS.get(&(key as Key)) {
value as _
} else {
// Note: we don't care if the key is invalid, we still return null
std::ptr::null_mut()
}
}
#[no_mangle]
pub unsafe extern "C" fn pthread_setspecific(
key: libc::pthread_key_t,
value: *const libc::c_void,
) -> libc::c_int {
let key = key as Key;
if !is_valid_key(key) {
return libc::EINVAL;
}
LOCALS.insert(key, value as *mut _);
0
}
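
A usage sketch of the key functions above (editorial, not part of the diff; assumes it is compiled in this module). Each thread has its own LOCALS map, so a value stored here is only visible to the storing thread:

unsafe fn thread_key_demo() {
    let mut key: libc::pthread_key_t = 0;
    pthread_key_create(&mut key, None); // no destructor for this sketch

    let value = Box::into_raw(Box::new(42u32)) as *mut libc::c_void;
    pthread_setspecific(key, value);
    assert_eq!(pthread_getspecific(key), value);

    // Clean up manually since no destructor was registered.
    drop(Box::from_raw(pthread_getspecific(key) as *mut u32));
    pthread_key_delete(key);
}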