/*
Copyright 2025 The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

use crate::vm::{Mapping, MappingKind, TableOps};

/// Utility function to extract a bit range (inclusive on both ends)
/// from a quadword, shifted down so that `LOW_BIT` becomes bit 0.
///
/// `HIGH_BIT` must be less than 63 so that the mask computation below
/// does not overflow.
#[inline(always)]
fn bits<const HIGH_BIT: u8, const LOW_BIT: u8>(x: u64) -> u64 {
    (x & ((1 << (HIGH_BIT + 1)) - 1)) >> LOW_BIT
}
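
// A small sanity check of the helper above. The bit ranges are the
// 9-bit-per-level indices of x86-64 four-level paging, matching the
// instantiations in `map` and `vtop` below; the address is an arbitrary
// illustrative value, and the module assumes this crate builds with the
// standard test harness.
#[cfg(test)]
mod bits_tests {
    use super::bits;

    #[test]
    fn extracts_page_table_indices() {
        // 0x0000_7fff_8020_3000 decomposes into PML4 index 255,
        // PDPT index 510, PD index 1, and PT index 3.
        let va: u64 = 0x0000_7fff_8020_3000;
        assert_eq!(bits::<47, 39>(va), 255);
        assert_eq!(bits::<38, 30>(va), 510);
        assert_eq!(bits::<29, 21>(va), 1);
        assert_eq!(bits::<20, 12>(va), 3);
    }
}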

/// A helper structure describing a mapping operation that needs to be
/// performed at one level of the page-table hierarchy
struct MapRequest<T> {
    /// The base address of the table to modify
    table_base: T,
    /// The lowest virtual address covered by the request
    vmin: VirtAddr,
    /// The length of the request, in bytes
    len: u64,
}

/// A helper structure indicating that a particular page-table entry
/// needs to be modified
struct MapResponse<T> {
    /// The address of the entry to modify
    entry_ptr: T,
    /// The lowest virtual address covered by the entry
    vmin: VirtAddr,
    /// How many bytes of the original request fall under this entry
    len: u64,
}

/// An iterator over the entries at a single level of the page-table
/// hierarchy that a [`MapRequest`] touches, yielding one [`MapResponse`]
/// per entry.
struct ModifyPteIterator<const HIGH_BIT: u8, const LOW_BIT: u8, Op: TableOps> {
    request: MapRequest<Op::TableAddr>,
    n: u64,
}
impl<const HIGH_BIT: u8, const LOW_BIT: u8, Op: TableOps> Iterator
    for ModifyPteIterator<HIGH_BIT, LOW_BIT, Op>
{
    type Item = MapResponse<Op::TableAddr>;
    fn next(&mut self) -> Option<Self::Item> {
        let lower_bits_mask = (1 << LOW_BIT) - 1;
        // How many bytes of the request earlier iterations have already
        // covered. Chunk 0 starts at the (possibly unaligned) vmin
        // itself, so after n chunks only the distance from vmin to the
        // n'th chunk boundary has been covered, not n full chunks;
        // comparing `n << LOW_BIT` against `len` directly would drop the
        // tail of an unaligned request that crosses a chunk boundary.
        let covered = (self.n << LOW_BIT).saturating_sub(self.request.vmin & lower_bits_mask);
        if covered >= self.request.len {
            return None;
        }
        // next stage parameters
        let mut next_vmin = self.request.vmin + (self.n << LOW_BIT);
        if self.n > 0 {
            next_vmin &= !lower_bits_mask;
        }
        let entry_ptr = Op::entry_addr(
            self.request.table_base,
            // each entry is 8 bytes, so entry i lives at byte offset i << 3
            bits::<HIGH_BIT, LOW_BIT>(next_vmin) << 3,
        );
        let len_from_here = self.request.len - (next_vmin - self.request.vmin);
        let max_len = (1 << LOW_BIT) - (next_vmin & lower_bits_mask);
        let next_len = core::cmp::min(len_from_here, max_len);

        // update our state
        self.n += 1;

        Some(MapResponse {
            entry_ptr,
            vmin: next_vmin,
            len: next_len,
        })
    }
}
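
// For intuition, consider the iterator above at the 2 MiB level
// (HIGH_BIT = 29, LOW_BIT = 21) with an illustrative request of
// vmin = 0x1ff000, len = 0x3000, which crosses the 0x200000 boundary:
//   n = 0 yields the entry at PD index 0, vmin = 0x1ff000, len = 0x1000
//         (clipped at the 2 MiB boundary);
//   n = 1 yields the entry at PD index 1, vmin = 0x200000, len = 0x2000
//         (the remainder of the request);
//   n = 2 stops, since 0x400000 - 0x1ff000 >= 0x3000.
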
/// Build an iterator over the page-table entries at one level of the
/// hierarchy that the request `r` touches.
fn modify_ptes<const HIGH_BIT: u8, const LOW_BIT: u8, Op: TableOps>(
    r: MapRequest<Op::TableAddr>,
) -> ModifyPteIterator<HIGH_BIT, LOW_BIT, Op> {
    ModifyPteIterator { request: r, n: 0 }
}

/// Page-mapping callback to allocate a next-level page table if necessary.
/// # Safety
/// This function modifies page table data structures, and should not be called concurrently
/// with any other operations that modify the page tables.
unsafe fn alloc_pte_if_needed<Op: TableOps>(
    op: &Op,
    x: MapResponse<Op::TableAddr>,
) -> MapRequest<Op::TableAddr> {
    let pte = unsafe { op.read_entry(x.entry_ptr) };
    let present = pte & 0x1;
    if present != 0 {
        return MapRequest {
            table_base: Op::from_phys(pte & !0xfff),
            vmin: x.vmin,
            len: x.len,
        };
    }

    let page_addr = unsafe { op.alloc_table() };

    #[allow(clippy::identity_op)]
    #[allow(clippy::precedence)]
    let pte = Op::to_phys(page_addr) |
        1 << 5 | // A - we don't track accesses at table level
        0 << 4 | // PCD - leave caching enabled
        0 << 3 | // PWT - write-back
        1 << 2 | // U/S - allow user access to everything (for now)
        1 << 1 | // R/W - we don't use block-level permissions
        1 << 0; // P - this entry is present
    unsafe { op.write_entry(x.entry_ptr, pte) };
    MapRequest {
        table_base: page_addr,
        vmin: x.vmin,
        len: x.len,
    }
}
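
// For example, if `alloc_table` were to return a table at physical
// address 0x5000 (a hypothetical value), the entry written above would be
//   0x5000 | 1 << 5 | 1 << 2 | 1 << 1 | 1 << 0 = 0x5027,
// i.e. a present, writable, user-accessible, write-back table pointer
// with the accessed bit pre-set.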

/// Map a normal memory page
/// # Safety
/// This function modifies page table data structures, and should not be called concurrently
/// with any other operations that modify the page tables.
#[allow(clippy::identity_op)]
#[allow(clippy::precedence)]
unsafe fn map_page<Op: TableOps>(op: &Op, mapping: &Mapping, r: MapResponse<Op::TableAddr>) {
    let pte = match &mapping.kind {
        // TODO: Support non-readable mappings
        MappingKind::BasicMapping(bm) => {
            (mapping.phys_base + (r.vmin - mapping.virt_base)) |
                (!bm.executable as u64) << 63 | // NX - no execute unless allowed
                1 << 7 | // PAT - entry 4 of a default PAT is write-back, like entry 0
                1 << 6 | // D - we don't presently track dirty state for anything
                1 << 5 | // A - we don't presently track access for anything
                0 << 4 | // PCD - leave caching enabled
                0 << 3 | // PWT - write-back
                1 << 2 | // U/S - allow user access to everything (for now)
                (bm.writable as u64) << 1 | // R/W - for now make everything r/w
                1 << 0 // P - this entry is present
        }
    };
    unsafe {
        op.write_entry(r.entry_ptr, pte);
    }
}
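
// For example, a writable, non-executable page whose frame sits at
// physical address 0x6000 (a hypothetical value) gets the entry
//   0x6000 | 1 << 63 | 1 << 7 | 1 << 6 | 1 << 5 | 1 << 2 | 1 << 1 | 1 << 0
//     = 0x8000_0000_0000_60e7;
// the same page marked executable would instead be 0x60e7 (NX clear).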

// There are no notable architecture-specific safety considerations
// here, and the general conditions are documented in the
// architecture-independent re-export in vm.rs
#[allow(clippy::missing_safety_doc)]
pub unsafe fn map<Op: TableOps>(op: &Op, mapping: Mapping) {
    modify_ptes::<47, 39, Op>(MapRequest {
        table_base: op.root_table(),
        vmin: mapping.virt_base,
        len: mapping.len,
    })
    .map(|r| unsafe { alloc_pte_if_needed(op, r) })
    .flat_map(modify_ptes::<38, 30, Op>)
    .map(|r| unsafe { alloc_pte_if_needed(op, r) })
    .flat_map(modify_ptes::<29, 21, Op>)
    .map(|r| unsafe { alloc_pte_if_needed(op, r) })
    .flat_map(modify_ptes::<20, 12, Op>)
    .map(|r| unsafe { map_page(op, &mapping, r) })
    .for_each(drop);
}
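
// A usage sketch (the field values are hypothetical, and `ops` stands in
// for whatever `TableOps` implementation the embedder in vm.rs supplies):
//
//     let mapping = Mapping {
//         virt_base: 0x2_0000_0000,
//         phys_base: 0x4000_0000,
//         len: 0x3000, // three 4 KiB pages
//         kind: MappingKind::BasicMapping(bm), // bm: writable, not executable
//     };
//     unsafe { map(&ops, mapping) };
//
// For this request the chain above touches one PML4 entry, one PDPT
// entry, and one PD entry (allocating tables as needed), then writes
// three PTEs at the 4 KiB level.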

/// # Safety
/// This function traverses page table data structures, and should not
/// be called concurrently with any other operations that modify the
/// page table.
unsafe fn require_pte_exist<Op: TableOps>(
    op: &Op,
    x: MapResponse<Op::TableAddr>,
) -> Option<MapRequest<Op::TableAddr>> {
    let pte = unsafe { op.read_entry(x.entry_ptr) };
    let present = pte & 0x1;
    if present == 0 {
        return None;
    }
    Some(MapRequest {
        table_base: Op::from_phys(pte & !0xfff),
        vmin: x.vmin,
        len: x.len,
    })
}

// There are no notable architecture-specific safety considerations
// here, and the general conditions are documented in the
// architecture-independent re-export in vm.rs
#[allow(clippy::missing_safety_doc)]
pub unsafe fn vtop<Op: TableOps>(op: &Op, address: u64) -> Option<u64> {
    modify_ptes::<47, 39, Op>(MapRequest {
        table_base: op.root_table(),
        vmin: address,
        len: 1,
    })
    .filter_map(|r| unsafe { require_pte_exist::<Op>(op, r) })
    .flat_map(modify_ptes::<38, 30, Op>)
    .filter_map(|r| unsafe { require_pte_exist::<Op>(op, r) })
    .flat_map(modify_ptes::<29, 21, Op>)
    .filter_map(|r| unsafe { require_pte_exist::<Op>(op, r) })
    .flat_map(modify_ptes::<20, 12, Op>)
    .filter_map(|r| {
        let pte = unsafe { op.read_entry(r.entry_ptr) };
        if pte & 0x1 == 0 {
            return None;
        }
        // Strip the flag bits (NX above bit 51, attributes below bit
        // 12) from the entry to recover the frame address, then add the
        // page offset back in, so callers get a physical address rather
        // than a raw PTE.
        Some((pte & 0x000f_ffff_ffff_f000) | (address & 0xfff))
    })
    .next()
}
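
// Continuing the hypothetical sketch after `map` above:
//
//     let phys = unsafe { vtop(&ops, 0x2_0000_1234) };
//     assert_eq!(phys, Some(0x4000_1234));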

/// The size of a page
pub const PAGE_SIZE: usize = 4096;
/// The size of a single page table (one page of 512 8-byte entries)
pub const PAGE_TABLE_SIZE: usize = 4096;
pub type PageTableEntry = u64;
pub type VirtAddr = u64;
pub type PhysAddr = u64;