diff --git a/Cargo.toml b/Cargo.toml index 0dff7af..b594834 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,7 @@ init_with = "1.1.0" lazy_static = "1.1.0" log = "0.4.1" memsec = "0.5.4" +page_size = "0.4.1" pairing = { version = "0.14.2", features = ["u128-support"] } rand = "0.4.2" rand_derive = "0.3.1" diff --git a/src/lib.rs b/src/lib.rs index e93d88b..bb1d535 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,6 +14,7 @@ extern crate lazy_static; #[macro_use] extern crate log; extern crate memsec; +extern crate page_size; extern crate pairing; extern crate rand; #[macro_use] diff --git a/src/secret.rs b/src/secret.rs index c68c776..84676ed 100644 --- a/src/secret.rs +++ b/src/secret.rs @@ -1,12 +1,15 @@ //! Utilities for working with secret values. This module includes functionality for locking (and //! unlocking) memory into RAM and overwriting memory with zeros. +use std::collections::HashMap; use std::env; use std::mem::{size_of, size_of_val}; use std::ops::{Deref, DerefMut}; +use std::sync::Mutex; use errno::errno; use memsec::{memzero, mlock, munlock}; +use page_size; use pairing::bls12_381::Fr; use error::{Error, Result}; @@ -22,13 +25,22 @@ lazy_static! { /// be swapped/core-dumped to disk, resulting in unmanaged copies of secrets to hang around in /// memory; this is significantly less secure than enabling memory locking (the default). Only /// set `MLOCK_SECRETS=false` in development/testing. - pub(crate) static ref SHOULD_MLOCK_SECRETS: bool = match env::var("MLOCK_SECRETS") { - Ok(s) => s.parse().unwrap_or(true), - _ => true, - }; + static ref SHOULD_MLOCK_SECRETS: bool = env::var("MLOCK_SECRETS") .map(|s| s.parse().expect("invalid value for `MLOCK_SECRETS`")) .unwrap_or(true); /// The size in bytes of a single field element. pub(crate) static ref FR_SIZE: usize = size_of::<Fr>(); + + /// Counts the number of secrets allocated in each page of memory. 
+ static ref MLOCK_MANAGER: Mutex<MlockManager> = Mutex::new(MlockManager::new()); + + /// The size in bytes of each page in memory. + static ref PAGE_SIZE: usize = get_page_size(); +} + +fn get_page_size() -> usize { + page_size::get() } /// Overwrites a single field element with zeros. @@ -36,6 +48,85 @@ pub(crate) fn clear_fr(fr_ptr: *mut u8) { unsafe { memzero(fr_ptr, *FR_SIZE) }; } +/// Round `ptr` down to the nearest page boundary (i.e. returns the first address in the page that +/// contains `ptr`). +fn get_page_addr(ptr: *const u8) -> *const u8 { + let offset = ptr as usize % *PAGE_SIZE; + unsafe { ptr.sub(offset) } +} + +/// Manages when each page in memory is locked and unlocked from RAM based on how many secrets are +/// allocated in each page. +/// +/// The `MlockManager` contains a `HashMap` counter, each key in the counter is the address for a +/// page in memory, each value is the number of secret values currently allocated in the +/// corresponding page. The first secret that is allocated in a page, results in a call to the +/// `mlock` syscall for that page. The final secret to be deallocated from a page, results in the +/// `munlock` syscall being called for that page. +/// +/// The `MlockManager` ensures that no pages are unlocked from RAM until all secrets from the +/// corresponding page have been dropped. The `MlockManager` also prevents unnecessary calls to +/// the `mlock` and `munlock` syscalls. +#[derive(Debug, Default)] +struct MlockManager(HashMap<usize, u8>); + +impl MlockManager { + fn new() -> Self { + MlockManager::default() + } + + /// Checks if the page that contains the value that `ptr` points to should be locked into RAM, + /// if so, the `mlock` syscall is called for that page. 
+ fn mlock(&mut self, ptr: *const u8) -> bool { + let page_addr = get_page_addr(ptr); + let should_mlock_page = { + let n_allocs = self.0.entry(page_addr as usize).or_insert(0); + let should_mlock = *n_allocs == 0; + *n_allocs += 1; + should_mlock + }; + if should_mlock_page { + unsafe { mlock(ptr as *mut u8, 1) } + } else { + true + } + } + + /// Checks if the page that contains the value that `ptr` points to should be unlocked from + /// RAM, if so, the `munlock` syscall is called for that page. + fn munlock(&mut self, ptr: *const u8) -> bool { + let page_addr = get_page_addr(ptr); + let should_munlock_page = { + let n_allocs = self.0.entry(page_addr as usize).or_insert(0); + let should_munlock = *n_allocs == 1; + *n_allocs = n_allocs.saturating_sub(1); + should_munlock + }; + if should_munlock_page { + unsafe { munlock(ptr as *mut u8, 1) } + } else { + true + } + } + + /// Returns the total number of pages currently locked into RAM. + #[cfg(test)] + fn n_pages_locked(&self) -> usize { + self.0.values().filter(|count| **count > 0).count() + } + + /// Returns the number of secrets allocated in a given page. + #[cfg(test)] + fn alloc_count(&self, page_ptr: *const u8) -> u8 { + let page_ptr = page_ptr as usize; + if let Some(n_allocs) = self.0.get(&page_ptr) { + *n_allocs + } else { + 0 + } + } +} + pub(crate) struct MemRange { pub ptr: *mut u8, pub n_bytes: usize, @@ -52,6 +143,9 @@ pub(crate) trait ContainsSecret { /// swap-to-disk or core dump. This method should be called upon instantiation of every type /// that implements `ContainsSecret`. /// + /// We do not attempt to lock zero-sized types into RAM because zero-sized types may not + /// contain a valid pointer. + /// /// Operating systems set a limit on the ammount of memory that a process may lock into RAM. /// Due to this limitation, this method returns a `Result` in the event that memory locking /// fails. 
@@ -68,7 +162,7 @@ pub(crate) trait ContainsSecret { if n_bytes == 0 { return Ok(()); } - let mlock_succeeded = unsafe { mlock(ptr, n_bytes) }; + let mlock_succeeded = MLOCK_MANAGER.lock().unwrap().mlock(ptr); if mlock_succeeded { Ok(()) } else { @@ -89,6 +183,9 @@ pub(crate) trait ContainsSecret { /// from being copied to disk. This method should be called upon destruction of every type that /// implements `ContainsSecret`. /// + /// We do not attempt to unlock zero-sized types from RAM because zero-sized types may not + /// contain a valid pointer. + /// /// # Errors /// /// An `Error::MlockFailed` is returned if we attempt to lock an invalid region memory. @@ -100,7 +197,7 @@ pub(crate) trait ContainsSecret { if n_bytes == 0 { return Ok(()); } - let munlock_succeeded = unsafe { munlock(ptr, n_bytes) }; + let munlock_succeeded = MLOCK_MANAGER.lock().unwrap().munlock(ptr); if munlock_succeeded { Ok(()) } else { @@ -185,3 +282,61 @@ where Ok(safe) } } + +#[cfg(test)] +mod tests { + use super::{get_page_addr, MlockManager, PAGE_SIZE}; + + #[test] + fn test_manager() { + let mut manager = MlockManager::new(); + + // We create a single `u64` on the stack; we then check that calling `mlock` on its + // address results in a call to the `mlock` syscall. We check this by asserting that the + // total number of locked pages is incremented from 0 (the default for each page) to 1. + let x = 5u64; + let x_ptr = &x as *const u64 as *mut u8; + let first_page = get_page_addr(x_ptr); + assert!(manager.mlock(x_ptr)); + assert_eq!(manager.n_pages_locked(), 1); + assert_eq!(manager.alloc_count(first_page), 1); + + // Check that allocating a second secret in the first page of memory does not result in a + // call to the `mlock` syscall. We check this by asserting that the total number of locked + // pages has not changed. 
+ assert!(manager.mlock(first_page)); + assert_eq!(manager.n_pages_locked(), 1); + assert_eq!(manager.alloc_count(first_page), 2); + + // Check that locking the first address in the page following `first_page`, results in a + // call to the `mlock` syscall. We check this by asserting that the total number of locked + // pages is incremented from 1 to 2. + let second_page = unsafe { first_page.offset(*PAGE_SIZE as isize) as *mut u8 }; + assert!(manager.mlock(second_page)); + assert_eq!(manager.n_pages_locked(), 2); + assert_eq!(manager.alloc_count(second_page), 1); + + // Check that calling `munlock` on the second page, which holds only a single secret, + // results in a call to the `munlock` syscall. We check this by asserting that the total + // number of locked pages is decremented from 2 (as asserted above) to 1. + assert!(manager.munlock(second_page)); + assert_eq!(manager.n_pages_locked(), 1); + assert_eq!(manager.alloc_count(second_page), 0); + + // We check that calling `munlock` on the page that contains `x` (i.e. the first page), + // does not result in a call to the `munlock` syscall, because there still exist secrets + // that are allocated in the first page. We do this by asserting that the allocation + // counter for the first page has not been decremented from 2 (as asserted above) to 0 + // (which would result in a call to the syscall). + assert!(manager.munlock(x_ptr)); + assert_eq!(manager.n_pages_locked(), 1); + assert_eq!(manager.alloc_count(first_page), 1); + + // Check that unlocking the remaining secret in the first page of memory results in a call + // to the `munlock` syscall. We do this by asserting that the number of locked pages has + // been decremented from 1 (as asserted above) to 0. + assert!(manager.munlock(first_page)); + assert_eq!(manager.n_pages_locked(), 0); + assert_eq!(manager.alloc_count(first_page), 0); + } +}