1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
// TODO: Remove this for the next major release
#![allow(non_upper_case_globals)]

use {os, page, query_range, Error, Region, Result};

/// Changes the memory protection of one or more pages.
///
/// All pages overlapping the range `[address, address + size)` are modified.
/// The previous protection flags are discarded, not preserved — use
/// `protect_with_handle` if they must be restored later.
///
/// - The address may not be null and is rounded down to the closest page
///   boundary.
/// - The size may not be zero and is rounded up to the closest page boundary,
///   relative to the address.
///
/// # Safety
///
/// This is unsafe since it can change read-only properties of constants and/or
/// modify the executable properties of any code segments.
///
/// # Examples
///
/// ```
/// # if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
/// use region::{Protection};
///
/// let ret5 = [0xB8, 0x05, 0x00, 0x00, 0x00, 0xC3];
/// let x: extern "C" fn() -> i32 = unsafe {
///   region::protect(ret5.as_ptr(), ret5.len(), Protection::READ_WRITE_EXECUTE).unwrap();
///   std::mem::transmute(ret5.as_ptr())
/// };
/// assert_eq!(x(), 5);
/// # }
/// ```
pub unsafe fn protect(address: *const u8, size: usize, protection: Protection) -> Result<()> {
  // Reject invalid input up front; the OS call below assumes a non-null,
  // non-empty range.
  if address.is_null() {
    return Err(Error::NullAddress);
  }
  if size == 0 {
    return Err(Error::EmptyRange);
  }

  // Align the range to page boundaries. The previous protection flags are
  // deliberately not preserved here.
  let base = page::floor(address as usize) as *const u8;
  let length = page::size_from_range(address, size);
  os::set_protection(base, length, protection)
}

/// Changes the memory protection of one or more pages temporarily.
///
/// The address range may overlap one or more pages, and if so, all pages within
/// the range will be modified. The protection flags will be reset when the
/// returned handle is dropped.
///
/// This function uses `query_range` internally and is therefore less performant
/// than `protect`. Prefer this function only if a memory protection reset is
/// desired.
///
/// - The range is `[address, address + size)`
/// - The address may not be null.
/// - The address is rounded down to the closest page boundary.
/// - The size may not be zero.
/// - The size is rounded up to the closest page boundary, relative to the
///   address.
///
/// # Safety
///
/// This is unsafe since it can change read-only properties of constants and/or
/// modify the executable properties of any code segments.
pub unsafe fn protect_with_handle(
  address: *const u8,
  size: usize,
  protection: Protection,
) -> Result<ProtectGuard> {
  // Snapshot the current region flags before altering them, so the guard can
  // restore them on drop.
  let mut regions = query_range(address, size)?;

  // Change the whole range to the desired protection
  protect(address, size, protection)?;

  let lower = page::floor(address as usize);
  let upper = page::ceil(address as usize + size);

  // Clamp the recorded regions to the affected page span, so the guard only
  // restores the pages that `protect` actually touched. Note that
  // `first_mut`/`last_mut` already yield `&mut Region`; binding with
  // `Some(ref mut region)` as before produced a needless `&mut &mut Region`.
  if let Some(region) = regions.first_mut() {
    // Advance the first region's base to the lowest affected page boundary
    let delta = lower - region.base as usize;
    region.base = lower as *mut u8;
    region.size -= delta;
  }

  if let Some(region) = regions.last_mut() {
    // Truncate the last region to the highest affected page boundary
    let delta = region.upper() - upper;
    region.size -= delta;
  }

  Ok(ProtectGuard::new(regions))
}

/// An RAII implementation of "scoped protection". When this structure is dropped
/// (falls out of scope), the memory region protection will be reset.
#[must_use]
pub struct ProtectGuard {
  // Snapshot of the regions (with their original protection flags) that were
  // altered; each one is re-applied in `Drop`.
  regions: Vec<Region>,
}

impl ProtectGuard {
  fn new(regions: Vec<Region>) -> Self {
    ProtectGuard { regions }
  }

  /// Releases the guards ownership of the memory protection.
  #[deprecated(since = "2.2.0", note = "Use std::mem::forget instead")]
  pub fn release(self) {
    ::std::mem::forget(self);
  }
}

impl Drop for ProtectGuard {
  /// Re-applies the saved protection flags, stopping at the first failure.
  fn drop(&mut self) {
    let mut restored = Ok(());
    for region in &self.regions {
      // SAFETY: these flags were previously in effect for this exact range,
      // as captured by `protect_with_handle`.
      restored = unsafe { protect(region.base, region.size, region.protection) };
      if restored.is_err() {
        break;
      }
    }
    debug_assert!(restored.is_ok(), "restoring region protection");
  }
}

// SAFETY(review): the guard only stores a `Vec<Region>`; its raw pointers are
// never dereferenced — `drop` merely hands them back to the OS protection
// call. Presumed free of thread-affine state; confirm `Region` carries none.
unsafe impl Send for ProtectGuard {}
unsafe impl Sync for ProtectGuard {}

bitflags! {
  /// Memory page protection constants.
  ///
  /// Determines the access rights for a specific page and/or region. Some
  /// combination of flags may not work depending on the OS (e.g macOS
  /// enforces pages to be readable).
  ///
  /// # Examples
  ///
  /// ```
  /// use region::Protection;
  ///
  /// let combine = Protection::READ | Protection::WRITE;
  /// let shorthand = Protection::READ_WRITE;
  /// ```
  pub struct Protection: usize {
    /// No access allowed at all.
    const NONE = 0;
    /// Read access; writing and/or executing data will panic.
    // NOTE(review): bit 0 is intentionally unused; the flag bits start at 1 << 1.
    const READ = (1 << 1);
    /// Write access; this flag alone may not be supported on all OSs.
    const WRITE = (1 << 2);
    /// Execute access; this may not be allowed depending on DEP.
    const EXECUTE = (1 << 3);
    /// Read and execute shorthand.
    const READ_EXECUTE = (Self::READ.bits | Self::EXECUTE.bits);
    /// Read and write shorthand.
    const READ_WRITE = (Self::READ.bits | Self::WRITE.bits);
    /// Read, write and execute shorthand.
    const READ_WRITE_EXECUTE = (Self::READ.bits | Self::WRITE.bits | Self::EXECUTE.bits);
    /// Write and execute shorthand.
    const WRITE_EXECUTE = (Self::WRITE.bits | Self::EXECUTE.bits);

    // Deprecated CamelCase aliases of the constants above, kept for backward
    // compatibility; they require the `allow(non_upper_case_globals)` at the
    // top of this file and are slated for removal in the next major release.

    /// No access allowed at all.
    #[deprecated(since = "2.2.0", note = "Use Protection::NONE instead")]
    const None = Self::NONE.bits;
    /// Read access; writing and/or executing data will panic.
    #[deprecated(since = "2.2.0", note = "Use Protection::READ instead")]
    const Read = Self::READ.bits;
    /// Write access; this flag alone may not be supported on all OSs.
    #[deprecated(since = "2.2.0", note = "Use Protection::WRITE instead")]
    const Write = Self::WRITE.bits;
    /// Execute access; this may not be allowed depending on DEP.
    #[deprecated(since = "2.2.0", note = "Use Protection::EXECUTE instead")]
    const Execute = Self::EXECUTE.bits;
    /// Read and execute shorthand.
    #[deprecated(since = "2.2.0", note = "Use Protection::READ_EXECUTE instead")]
    const ReadExecute = Self::READ_EXECUTE.bits;
    /// Read and write shorthand.
    #[deprecated(since = "2.2.0", note = "Use Protection::READ_WRITE instead")]
    const ReadWrite = Self::READ_WRITE.bits;
    /// Read, write and execute shorthand.
    #[deprecated(since = "2.2.0", note = "Use Protection::READ_WRITE_EXECUTE instead")]
    const ReadWriteExecute = Self::READ_WRITE_EXECUTE.bits;
    /// Write and execute shorthand.
    #[deprecated(since = "2.2.0", note = "Use Protection::WRITE_EXECUTE instead")]
    const WriteExecute = Self::WRITE_EXECUTE.bits;
  }
}

#[cfg(test)]
mod tests {
  use super::*;
  use tests::alloc_pages;

  // A null address must be rejected (the null check runs before the size check).
  #[test]
  fn protect_null() {
    assert!(unsafe { protect(::std::ptr::null(), 0, Protection::NONE) }.is_err());
  }

  // Making a code segment writable: writes a NOP (0x90) into the body of this
  // very test function after lifting its write protection.
  #[test]
  fn protect_code() {
    let address = &mut protect_code as *mut _ as *mut u8;
    unsafe {
      protect(address, 0x10, Protection::READ_WRITE_EXECUTE).unwrap();
      *address = 0x90;
    }
  }

  // Upgrading a read-only allocation to read-write must make it writable.
  #[test]
  fn protect_alloc() {
    let mut map = alloc_pages(&[Protection::READ]);
    unsafe {
      protect(map.as_ptr(), page::size(), Protection::READ_WRITE).unwrap();
      *map.as_mut_ptr() = 0x1;
    }
  }

  // A 2-byte write straddling a page boundary must alter both adjacent pages,
  // which should then be reported as one merged region.
  #[test]
  fn protect_overlap() {
    let pz = page::size();

    // Create a page boundary with different protection flags in the
    // upper and lower span, so the intermediate page sizes are fixed.
    let prots = [
      Protection::READ,
      Protection::READ_EXECUTE,
      Protection::READ_WRITE,
      Protection::READ,
    ];

    let map = alloc_pages(&prots);
    let base_exec = unsafe { map.as_ptr().offset(pz as isize) };
    let straddle = unsafe { base_exec.offset(pz as isize - 1) };

    // Change the protection over two page boundaries
    unsafe { protect(straddle, 2, Protection::READ_WRITE_EXECUTE).unwrap() };

    // Ensure that the pages have merged into one region
    let result = query_range(base_exec, pz * 2).unwrap();
    assert_eq!(result.len(), 1);
    assert_eq!(result[0].protection, Protection::READ_WRITE_EXECUTE);
    assert_eq!(result[0].size, pz * 2);
  }

  // The guard applies the new flags for its lifetime and restores the
  // original flags when it goes out of scope.
  #[test]
  fn protect_handle() {
    let map = alloc_pages(&[Protection::READ]);
    unsafe {
      let _handle =
        protect_with_handle(map.as_ptr(), page::size(), Protection::READ_WRITE).unwrap();
      assert_eq!(
        ::query(map.as_ptr()).unwrap().protection,
        Protection::READ_WRITE
      );
    };
    assert_eq!(::query(map.as_ptr()).unwrap().protection, Protection::READ);
  }
}