src/memory.cpp
| Line | Branch | Exec | Source |
|---|---|---|---|
| 1 | /** | ||
| 2 | * @file memory.cpp | ||
| 3 | * @brief Implementation of memory manipulation and validation utilities. | ||
| 4 | * | ||
| 5 | * Provides functions for checking memory readability and writability, writing bytes to memory, | ||
| 6 | * and managing a memory region cache for performance optimization. | ||
| 7 | * The cache uses sharded locks with SRWLOCK for high-concurrency read-heavy access. | ||
| 8 | * Uses monotonic counter-keyed map for O(log n) LRU eviction instead of O(n) scan. | ||
| 9 | * In-flight query coalescing prevents cache stampede under high concurrency. | ||
| 10 | * On-demand cleanup handles expired entry removal to avoid polluting the miss path. | ||
| 11 | * Epoch-based reader tracking prevents use-after-free during shutdown. | ||
| 12 | */ | ||
| 13 | |||
#include "DetourModKit/memory.hpp"
#include "DetourModKit/format.hpp"
#include "DetourModKit/logger.hpp"

#include <windows.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <map>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <sstream>
#include <stdexcept>
#include <thread>
#include <unordered_map>
#include <vector>
| 32 | |||
| 33 | using namespace DetourModKit; | ||
| 34 | |||
// Permission flags as constexpr for compile-time constants
namespace CachePermissions
{
    // Page protections that allow reading (plain and execute variants).
    constexpr DWORD READ_PERMISSION_FLAGS = PAGE_READONLY | PAGE_READWRITE | PAGE_WRITECOPY |
                                            PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY;
    // Page protections that allow writing (plain and execute variants).
    constexpr DWORD WRITE_PERMISSION_FLAGS = PAGE_READWRITE | PAGE_WRITECOPY |
                                             PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY;
    // Modifier flags that make a page untouchable regardless of the other bits.
    constexpr DWORD NOACCESS_GUARD_FLAGS = PAGE_NOACCESS | PAGE_GUARD;
}
| 44 | |||
| 45 | // Anonymous namespace for internal helpers and storage | ||
| 46 | namespace | ||
| 47 | { | ||
| 48 | /** | ||
| 49 | * @class SrwSharedMutex | ||
| 50 | * @brief Shared mutex backed by Windows SRWLOCK instead of pthread_rwlock_t. | ||
| 51 | * @details MinGW/winpthreads' pthread_rwlock_t corrupts internal state under | ||
| 52 | * high reader contention, causing assertion failures in lock_shared(). | ||
| 53 | * SRWLOCK is kernel-level, lock-free for uncontended cases, and does | ||
| 54 | * not suffer from this bug. | ||
| 55 | */ | ||
| 56 | class SrwSharedMutex | ||
| 57 | { | ||
| 58 | public: | ||
| 59 | 2360 | SrwSharedMutex() noexcept { InitializeSRWLock(&srw_); } | |
| 60 | |||
| 61 | SrwSharedMutex(const SrwSharedMutex &) = delete; | ||
| 62 | SrwSharedMutex &operator=(const SrwSharedMutex &) = delete; | ||
| 63 | |||
| 64 | 2891 | void lock() noexcept { AcquireSRWLockExclusive(&srw_); } | |
| 65 | 31 | bool try_lock() noexcept { return TryAcquireSRWLockExclusive(&srw_) != 0; } | |
| 66 | 2922 | void unlock() noexcept { ReleaseSRWLockExclusive(&srw_); } | |
| 67 | |||
| 68 | 91204 | void lock_shared() noexcept { AcquireSRWLockShared(&srw_); } | |
| 69 | 8 | bool try_lock_shared() noexcept { return TryAcquireSRWLockShared(&srw_) != 0; } | |
| 70 | 97361 | void unlock_shared() noexcept { ReleaseSRWLockShared(&srw_); } | |
| 71 | |||
| 72 | private: | ||
| 73 | SRWLOCK srw_; | ||
| 74 | }; | ||
| 75 | |||
| 76 | /** | ||
| 77 | * @struct CachedMemoryRegionInfo | ||
| 78 | * @brief Structure to hold cached memory region information. | ||
| 79 | * @details Uses timestamp for thread-safe updates and reduced memory footprint. | ||
| 80 | */ | ||
| 81 | struct CachedMemoryRegionInfo | ||
| 82 | { | ||
| 83 | uintptr_t baseAddress; | ||
| 84 | size_t regionSize; | ||
| 85 | DWORD protection; | ||
| 86 | uint64_t timestamp_ns; | ||
| 87 | uint64_t lru_key; | ||
| 88 | bool valid; | ||
| 89 | |||
| 90 | 135 | CachedMemoryRegionInfo() | |
| 91 | 135 | : baseAddress(0), regionSize(0), protection(0), timestamp_ns(0), lru_key(0), valid(false) | |
| 92 | { | ||
| 93 | 135 | } | |
| 94 | }; | ||
| 95 | |||
| 96 | /** | ||
| 97 | * @struct CacheShard | ||
| 98 | * @brief Individual cache shard with O(1) address lookup and O(log n) LRU eviction. | ||
| 99 | * @details Uses unordered_map keyed by region base address for fast lookup. | ||
| 100 | * std::map keyed by monotonic counter for efficient oldest-entry eviction. | ||
| 101 | * SrwSharedMutex allows multiple concurrent readers. | ||
| 102 | * in_flight flag prevents cache stampede by coalescing concurrent VirtualQuery calls. | ||
| 103 | * Mutex is stored separately to allow vector resize operations. | ||
| 104 | */ | ||
| 105 | struct CacheShard | ||
| 106 | { | ||
| 107 | // Map from baseAddress -> CachedMemoryRegionInfo for O(1) lookup by address | ||
| 108 | std::unordered_map<uintptr_t, CachedMemoryRegionInfo> entries; | ||
| 109 | // Map from monotonic counter -> baseAddress for O(log n) oldest-entry lookup (LRU) | ||
| 110 | // Monotonic counter guarantees insertion-order uniqueness for correct eviction | ||
| 111 | std::map<uint64_t, uintptr_t> lru_index; | ||
| 112 | uint64_t entry_counter{0}; | ||
| 113 | size_t capacity; | ||
| 114 | size_t max_capacity; | ||
| 115 | |||
| 116 | 2360 | CacheShard() : capacity(0), max_capacity(0) | |
| 117 | { | ||
| 118 |
1/2✓ Branch 4 → 5 taken 2360 times.
✗ Branch 4 → 6 not taken.
|
2360 | entries.reserve(64); |
| 119 | 2360 | } | |
| 120 | }; | ||
| 121 | |||
/**
 * @brief Returns the steady-clock time as nanoseconds since the clock epoch.
 * @return Monotonically non-decreasing nanosecond count.
 */
inline uint64_t current_time_ns() noexcept
{
    using namespace std::chrono;
    const auto since_epoch = steady_clock::now().time_since_epoch();
    return static_cast<uint64_t>(duration_cast<nanoseconds>(since_epoch).count());
}
| 131 | |||
/**
 * @brief Computes the shard index for a given address.
 * @param address The address to hash.
 * @param shard_count Total number of shards (must be non-zero).
 * @return Shard index in [0, shard_count).
 * @note Multiplies by the 64-bit golden-ratio constant and keeps the high
 *       16 bits so adjacent addresses spread across different shards.
 */
constexpr inline size_t compute_shard_index(uintptr_t address, size_t shard_count) noexcept
{
    const unsigned long long mixed = address * 0x9E3779B97F4A7C15ULL;
    return static_cast<size_t>(mixed >> 48) % shard_count;
}
| 143 | } | ||
| 144 | |||
| 145 | /** | ||
| 146 | * @namespace MemoryUtilsCacheInternal | ||
| 147 | * @brief Encapsulates internal static variables and helper functions for memory cache. | ||
| 148 | */ | ||
| 149 | namespace MemoryUtilsCacheInternal | ||
| 150 | { | ||
    // Shard storage. Mutexes live behind unique_ptr so the vectors can be
    // resized without moving SRWLOCK objects (which must stay at fixed addresses).
    std::vector<CacheShard> s_cacheShards;
    std::vector<std::unique_ptr<SrwSharedMutex>> s_shardMutexes;
    // One flag per shard; per the file header, used to coalesce in-flight queries.
    std::unique_ptr<std::atomic<char>[]> s_inFlight;
    std::atomic<size_t> s_shardCount{0};            // Number of shards after init.
    std::atomic<size_t> s_maxEntriesPerShard{0};    // Per-shard soft capacity.
    std::atomic<unsigned int> s_configuredExpiryMs{0}; // Entry expiry in milliseconds.
    std::atomic<bool> s_cacheInitialized{false};    // Set once init completes.

    // Global cache state mutex to serialize init/clear/shutdown transitions
    // Protects against concurrent state changes that could leave vectors in invalid state
    std::mutex s_cacheStateMutex;

    // Epoch-based reader tracking to prevent use-after-free during shutdown.
    // Readers increment on entry to is_readable/is_writable and decrement on exit.
    // shutdown_cache waits for this to reach zero before destroying data structures.
    std::atomic<int32_t> s_activeReaders{0};
| 167 | |||
| 168 | /** | ||
| 169 | * @class ActiveReaderGuard | ||
| 170 | * @brief RAII guard that increments s_activeReaders on construction and | ||
| 171 | * decrements on destruction, ensuring correct pairing on all exit paths. | ||
| 172 | */ | ||
| 173 | class ActiveReaderGuard | ||
| 174 | { | ||
| 175 | public: | ||
| 176 | 98029 | ActiveReaderGuard() noexcept | |
| 177 | { | ||
| 178 | s_activeReaders.fetch_add(1, std::memory_order_acq_rel); | ||
| 179 | 98029 | } | |
| 180 | |||
| 181 | 101372 | ~ActiveReaderGuard() noexcept | |
| 182 | { | ||
| 183 | s_activeReaders.fetch_sub(1, std::memory_order_release); | ||
| 184 | 101372 | } | |
| 185 | |||
| 186 | ActiveReaderGuard(const ActiveReaderGuard &) = delete; | ||
| 187 | ActiveReaderGuard &operator=(const ActiveReaderGuard &) = delete; | ||
| 188 | }; | ||
| 189 | |||
    // Background cleanup thread.
    // Uses std::thread (not jthread) because these are namespace-scope statics:
    // jthread's auto-join destructor would run after s_cleanupCv/s_cleanupMutex
    // are destroyed (reverse declaration order), causing UB. Manual join in
    // shutdown_cache() avoids this. DMK_Shutdown() guarantees proper teardown.
    std::atomic<bool> s_cleanupThreadRunning{false};
    std::thread s_cleanupThread;
    std::mutex s_cleanupMutex;
    std::condition_variable s_cleanupCv;
    std::atomic<bool> s_cleanupRequested{false};

    // On-demand cleanup fallback timer (used when background thread is disabled)
    std::atomic<uint64_t> s_lastCleanupTimeNs{0};
    constexpr uint64_t CLEANUP_INTERVAL_NS = 1'000'000'000ULL; // 1 second in nanoseconds

    // Always-available cache statistics.
    // All counters are bumped with relaxed atomics: pure telemetry, never used
    // to synchronize other data.
    struct CacheStats
    {
        std::atomic<uint64_t> cacheHits{0};        // presumably lookup hits — incremented outside this view
        std::atomic<uint64_t> cacheMisses{0};      // presumably lookup misses — incremented outside this view
        std::atomic<uint64_t> invalidations{0};    // entries removed by invalidate_range_internal
        std::atomic<uint64_t> coalescedQueries{0}; // presumably queries folded into an in-flight one — see s_inFlight
        std::atomic<uint64_t> onDemandCleanups{0}; // timer-based cleanup passes
    };
    CacheStats s_stats;
| 215 | |||
| 216 | /** | ||
| 217 | * @brief Checks if a cache entry covers the requested address range and is valid. | ||
| 218 | * @param entry The cache entry to check. | ||
| 219 | * @param address Start address of the query. | ||
| 220 | * @param size Size of the query range. | ||
| 221 | * @param current_time_ns Current timestamp in nanoseconds. | ||
| 222 | * @param expiry_ns Expiry time in nanoseconds. | ||
| 223 | * @return true if the entry is valid and covers the range. | ||
| 224 | */ | ||
| 225 | 95467 | constexpr inline bool is_entry_valid_and_covers(const CachedMemoryRegionInfo &entry, | |
| 226 | uintptr_t address, | ||
| 227 | size_t size, | ||
| 228 | uint64_t current_time_ns, | ||
| 229 | uint64_t expiry_ns) noexcept | ||
| 230 | { | ||
| 231 |
1/2✗ Branch 2 → 3 not taken.
✓ Branch 2 → 4 taken 95467 times.
|
95467 | if (!entry.valid) |
| 232 | ✗ | return false; | |
| 233 | |||
| 234 | 95467 | const uint64_t entry_age = current_time_ns - entry.timestamp_ns; | |
| 235 |
2/2✓ Branch 4 → 5 taken 4 times.
✓ Branch 4 → 6 taken 95463 times.
|
95467 | if (entry_age > expiry_ns) |
| 236 | 4 | return false; | |
| 237 | |||
| 238 | 95463 | const uintptr_t endAddress = address + size; | |
| 239 |
2/2✓ Branch 6 → 7 taken 4 times.
✓ Branch 6 → 8 taken 95459 times.
|
95463 | if (endAddress < address) |
| 240 | 4 | return false; | |
| 241 | |||
| 242 | 95459 | const uintptr_t entryEndAddress = entry.baseAddress + entry.regionSize; | |
| 243 |
1/2✗ Branch 8 → 9 not taken.
✓ Branch 8 → 10 taken 95459 times.
|
95459 | if (entryEndAddress < entry.baseAddress) |
| 244 | ✗ | return false; | |
| 245 | |||
| 246 |
2/4✓ Branch 10 → 11 taken 96160 times.
✗ Branch 10 → 13 not taken.
✓ Branch 11 → 12 taken 97296 times.
✗ Branch 11 → 13 not taken.
|
95459 | return address >= entry.baseAddress && endAddress <= entryEndAddress; |
| 247 | } | ||
| 248 | |||
| 249 | /** | ||
| 250 | * @brief Checks protection flags for read permission. | ||
| 251 | */ | ||
| 252 | 96016 | constexpr inline bool check_read_permission(DWORD protection) noexcept | |
| 253 | { | ||
| 254 |
1/2✓ Branch 2 → 3 taken 98139 times.
✗ Branch 2 → 5 not taken.
|
194155 | return (protection & CachePermissions::READ_PERMISSION_FLAGS) != 0 && |
| 255 |
1/2✓ Branch 3 → 4 taken 98926 times.
✗ Branch 3 → 5 not taken.
|
194155 | (protection & CachePermissions::NOACCESS_GUARD_FLAGS) == 0; |
| 256 | } | ||
| 257 | |||
| 258 | /** | ||
| 259 | * @brief Checks protection flags for write permission. | ||
| 260 | */ | ||
| 261 | 3704 | constexpr inline bool check_write_permission(DWORD protection) noexcept | |
| 262 | { | ||
| 263 |
1/2✓ Branch 2 → 3 taken 3734 times.
✗ Branch 2 → 5 not taken.
|
7438 | return (protection & CachePermissions::WRITE_PERMISSION_FLAGS) != 0 && |
| 264 |
1/2✓ Branch 3 → 4 taken 3742 times.
✗ Branch 3 → 5 not taken.
|
7438 | (protection & CachePermissions::NOACCESS_GUARD_FLAGS) == 0; |
| 265 | } | ||
| 266 | |||
| 267 | /** | ||
| 268 | * @brief Finds and validates a cache entry in a shard by scanning for range containment. | ||
| 269 | * @param shard The cache shard to search. | ||
| 270 | * @param address Address to look up. | ||
| 271 | * @param size Size of the query range. | ||
| 272 | * @param current_time_ns Current timestamp in nanoseconds. | ||
| 273 | * @param expiry_ns Expiry time in nanoseconds. | ||
| 274 | * @return Pointer to the matching entry, or nullptr if not found or expired. | ||
| 275 | * @note Must be called with shard mutex held (shared or exclusive). | ||
| 276 | * @note First attempts direct lookup by page-aligned base address for O(1) fast path, | ||
| 277 | * then falls back to linear scan for addresses within larger regions. | ||
| 278 | */ | ||
| 279 | 102264 | CachedMemoryRegionInfo *find_in_shard(CacheShard &shard, | |
| 280 | uintptr_t address, | ||
| 281 | size_t size, | ||
| 282 | uint64_t current_time_ns, | ||
| 283 | uint64_t expiry_ns) noexcept | ||
| 284 | { | ||
| 285 | // Fast path: direct lookup by page-aligned base address | ||
| 286 | 102264 | const uintptr_t base_addr = address & ~static_cast<uintptr_t>(0xFFF); | |
| 287 | 102264 | auto it = shard.entries.find(base_addr); | |
| 288 |
1/2✓ Branch 5 → 6 taken 97893 times.
✗ Branch 5 → 10 not taken.
|
97210 | if (it != shard.entries.end()) |
| 289 | { | ||
| 290 | 97893 | CachedMemoryRegionInfo &entry = it->second; | |
| 291 |
1/2✓ Branch 8 → 9 taken 97650 times.
✗ Branch 8 → 10 not taken.
|
96942 | if (is_entry_valid_and_covers(entry, address, size, current_time_ns, expiry_ns)) |
| 292 | { | ||
| 293 | 97650 | return &entry; | |
| 294 | } | ||
| 295 | } | ||
| 296 | |||
| 297 | // Slow path: scan all entries for a region that contains the queried range. | ||
| 298 | // This handles addresses that fall within a larger region whose base address | ||
| 299 | // differs from the queried page. Shard sizes are bounded so this is fast. | ||
| 300 | ✗ | for (auto &pair : shard.entries) | |
| 301 | { | ||
| 302 | 56 | CachedMemoryRegionInfo &entry = pair.second; | |
| 303 |
2/2✓ Branch 14 → 15 taken 2 times.
✓ Branch 14 → 16 taken 54 times.
|
56 | if (is_entry_valid_and_covers(entry, address, size, current_time_ns, expiry_ns)) |
| 304 | { | ||
| 305 | 2 | return &entry; | |
| 306 | } | ||
| 307 | } | ||
| 308 | |||
| 309 | 141 | return nullptr; | |
| 310 | } | ||
| 311 | |||
| 312 | /** | ||
| 313 | * @brief Evicts the oldest entry from the shard using O(log n) LRU lookup. | ||
| 314 | * @note Must be called with shard mutex held (exclusive). | ||
| 315 | * @return true if an entry was evicted, false if shard is empty. | ||
| 316 | */ | ||
| 317 | 8 | bool evict_oldest_entry(CacheShard &shard) noexcept | |
| 318 | { | ||
| 319 |
1/2✗ Branch 3 → 4 not taken.
✓ Branch 3 → 5 taken 8 times.
|
8 | if (shard.lru_index.empty()) |
| 320 | ✗ | return false; | |
| 321 | |||
| 322 | 8 | const auto lru_it = shard.lru_index.begin(); | |
| 323 | 8 | const uintptr_t oldest_base = lru_it->second; | |
| 324 | |||
| 325 | 8 | shard.lru_index.erase(lru_it); | |
| 326 | |||
| 327 | 8 | const auto entry_it = shard.entries.find(oldest_base); | |
| 328 |
1/2✓ Branch 11 → 12 taken 8 times.
✗ Branch 11 → 14 not taken.
|
8 | if (entry_it != shard.entries.end()) |
| 329 | { | ||
| 330 | 8 | shard.entries.erase(entry_it); | |
| 331 | 8 | return true; | |
| 332 | } | ||
| 333 | ✗ | return false; | |
| 334 | } | ||
| 335 | |||
| 336 | /** | ||
| 337 | * @brief Force-evicts entries until shard is at or below max_capacity. | ||
| 338 | * @note Must be called with shard mutex held (exclusive). | ||
| 339 | * @param shard The cache shard to trim. | ||
| 340 | */ | ||
| 341 | 20 | void trim_to_max_capacity(CacheShard &shard) noexcept | |
| 342 | { | ||
| 343 |
2/6✗ Branch 5 → 6 not taken.
✓ Branch 5 → 9 taken 20 times.
✗ Branch 7 → 8 not taken.
✗ Branch 7 → 9 not taken.
✗ Branch 10 → 3 not taken.
✓ Branch 10 → 11 taken 20 times.
|
20 | while (shard.entries.size() > shard.max_capacity && !shard.lru_index.empty()) |
| 344 | { | ||
| 345 | ✗ | evict_oldest_entry(shard); | |
| 346 | } | ||
| 347 | 20 | } | |
| 348 | |||
| 349 | /** | ||
| 350 | * @brief Updates or inserts a cache entry in a specific shard. | ||
| 351 | * @param shard The cache shard to update. | ||
| 352 | * @param mbi Memory basic information from VirtualQuery. | ||
| 353 | * @param current_time_ns Current timestamp in nanoseconds. | ||
| 354 | * @note Must be called with shard mutex held (exclusive). | ||
| 355 | */ | ||
| 356 | 139 | void update_shard_with_region(CacheShard &shard, const MEMORY_BASIC_INFORMATION &mbi, uint64_t current_time_ns) noexcept | |
| 357 | { | ||
| 358 | 139 | const uintptr_t base_addr = reinterpret_cast<uintptr_t>(mbi.BaseAddress); | |
| 359 | |||
| 360 | 139 | auto it = shard.entries.find(base_addr); | |
| 361 |
2/2✓ Branch 5 → 6 taken 4 times.
✓ Branch 5 → 19 taken 135 times.
|
139 | if (it != shard.entries.end()) |
| 362 | { | ||
| 363 | // Remove old entry from LRU index using stored lru_key | ||
| 364 | 4 | CachedMemoryRegionInfo &old_entry = it->second; | |
| 365 | 4 | const auto lru_it = shard.lru_index.find(old_entry.lru_key); | |
| 366 |
3/6✓ Branch 10 → 11 taken 4 times.
✗ Branch 10 → 14 not taken.
✓ Branch 12 → 13 taken 4 times.
✗ Branch 12 → 14 not taken.
✓ Branch 15 → 16 taken 4 times.
✗ Branch 15 → 17 not taken.
|
4 | if (lru_it != shard.lru_index.end() && lru_it->second == base_addr) |
| 367 | { | ||
| 368 | 4 | shard.lru_index.erase(lru_it); | |
| 369 | } | ||
| 370 | |||
| 371 | // Update existing entry with new monotonic LRU key | ||
| 372 | 4 | const uint64_t new_lru_key = shard.entry_counter++; | |
| 373 | 4 | old_entry.baseAddress = base_addr; | |
| 374 | 4 | old_entry.regionSize = mbi.RegionSize; | |
| 375 | 4 | old_entry.protection = mbi.Protect; | |
| 376 | 4 | old_entry.timestamp_ns = current_time_ns; | |
| 377 | 4 | old_entry.lru_key = new_lru_key; | |
| 378 | 4 | old_entry.valid = true; | |
| 379 | |||
| 380 | // Insert new composite key into LRU index | ||
| 381 | 4 | shard.lru_index.emplace(new_lru_key, base_addr); | |
| 382 | } | ||
| 383 | else | ||
| 384 | { | ||
| 385 | // Evict oldest if at capacity - O(log n) via map | ||
| 386 |
2/2✓ Branch 20 → 21 taken 8 times.
✓ Branch 20 → 22 taken 127 times.
|
135 | if (shard.entries.size() >= shard.capacity) |
| 387 | { | ||
| 388 | 8 | evict_oldest_entry(shard); | |
| 389 | } | ||
| 390 | |||
| 391 | // Hard upper bound: trim if exceeding max_capacity | ||
| 392 |
1/2✗ Branch 23 → 24 not taken.
✓ Branch 23 → 25 taken 135 times.
|
135 | if (shard.entries.size() >= shard.max_capacity) |
| 393 | { | ||
| 394 | ✗ | trim_to_max_capacity(shard); | |
| 395 | } | ||
| 396 | |||
| 397 | // Generate unique monotonic LRU key | ||
| 398 | 135 | const uint64_t new_lru_key = shard.entry_counter++; | |
| 399 | |||
| 400 | 135 | CachedMemoryRegionInfo new_entry; | |
| 401 | 135 | new_entry.baseAddress = base_addr; | |
| 402 | 135 | new_entry.regionSize = mbi.RegionSize; | |
| 403 | 135 | new_entry.protection = mbi.Protect; | |
| 404 | 135 | new_entry.timestamp_ns = current_time_ns; | |
| 405 | 135 | new_entry.lru_key = new_lru_key; | |
| 406 | 135 | new_entry.valid = true; | |
| 407 | |||
| 408 | 270 | shard.entries.insert_or_assign(base_addr, std::move(new_entry)); | |
| 409 | 135 | shard.lru_index.emplace(new_lru_key, base_addr); | |
| 410 | } | ||
| 411 | 139 | } | |
| 412 | |||
| 413 | /** | ||
| 414 | * @brief Removes expired entries from a shard. | ||
| 415 | * @note Must be called with shard mutex held (exclusive). | ||
| 416 | * @return Number of entries removed from this shard. | ||
| 417 | */ | ||
| 418 | 20 | size_t cleanup_expired_entries_in_shard(CacheShard &shard, | |
| 419 | uint64_t current_time_ns, | ||
| 420 | uint64_t expiry_ns) noexcept | ||
| 421 | { | ||
| 422 | 20 | size_t removed = 0; | |
| 423 | 20 | auto it = shard.entries.begin(); | |
| 424 |
2/2✓ Branch 24 → 4 taken 1 time.
✓ Branch 24 → 25 taken 20 times.
|
21 | while (it != shard.entries.end()) |
| 425 | { | ||
| 426 | 1 | const CachedMemoryRegionInfo &entry = it->second; | |
| 427 | 1 | const uint64_t entry_age = current_time_ns - entry.timestamp_ns; | |
| 428 | |||
| 429 |
2/4✓ Branch 5 → 6 taken 1 time.
✗ Branch 5 → 7 not taken.
✓ Branch 6 → 7 taken 1 time.
✗ Branch 6 → 20 not taken.
|
1 | if (!entry.valid || entry_age > expiry_ns) |
| 430 | { | ||
| 431 | // Remove from LRU index using stored lru_key | ||
| 432 | 1 | const auto lru_it = shard.lru_index.find(entry.lru_key); | |
| 433 |
3/6✓ Branch 10 → 11 taken 1 time.
✗ Branch 10 → 15 not taken.
✓ Branch 13 → 14 taken 1 time.
✗ Branch 13 → 15 not taken.
✓ Branch 16 → 17 taken 1 time.
✗ Branch 16 → 18 not taken.
|
1 | if (lru_it != shard.lru_index.end() && lru_it->second == it->first) |
| 434 | { | ||
| 435 | 1 | shard.lru_index.erase(lru_it); | |
| 436 | } | ||
| 437 | |||
| 438 | 1 | it = shard.entries.erase(it); | |
| 439 | 1 | ++removed; | |
| 440 | 1 | } | |
| 441 | else | ||
| 442 | { | ||
| 443 | ✗ | ++it; | |
| 444 | } | ||
| 445 | } | ||
| 446 | 20 | return removed; | |
| 447 | } | ||
| 448 | |||
| 449 | /** | ||
| 450 | * @brief Performs cleanup of expired cache entries across all shards. | ||
| 451 | * @details Called by the background cleanup thread or on-demand timer. | ||
| 452 | * @param force Force cleanup regardless of timing. | ||
| 453 | */ | ||
| 454 | 2 | void cleanup_expired_entries(bool force) noexcept | |
| 455 | { | ||
| 456 | // Always hold state mutex to prevent racing with shutdown_cache() | ||
| 457 | // which clears the shard vectors. try_lock for on-demand to avoid | ||
| 458 | // blocking the hot path; forced cleanup blocks to guarantee progress. | ||
| 459 | 2 | std::unique_lock<std::mutex> lock(s_cacheStateMutex, std::defer_lock); | |
| 460 |
1/2✓ Branch 3 → 4 taken 2 times.
✗ Branch 3 → 5 not taken.
|
2 | if (force) |
| 461 | { | ||
| 462 | 2 | lock.lock(); | |
| 463 | } | ||
| 464 | ✗ | else if (!lock.try_lock()) | |
| 465 | { | ||
| 466 | ✗ | return; // Shutdown or forced cleanup in progress, skip | |
| 467 | } | ||
| 468 | |||
| 469 |
1/2✗ Branch 9 → 10 not taken.
✓ Branch 9 → 11 taken 2 times.
|
2 | if (s_cacheShards.empty()) |
| 470 | ✗ | return; | |
| 471 | |||
| 472 | 2 | const size_t shard_count = s_shardCount.load(std::memory_order_acquire); | |
| 473 |
1/2✗ Branch 18 → 19 not taken.
✓ Branch 18 → 20 taken 2 times.
|
2 | if (shard_count == 0) |
| 474 | ✗ | return; | |
| 475 | |||
| 476 | 2 | const uint64_t current_ts = current_time_ns(); | |
| 477 | 2 | const uint64_t expiry_ns = static_cast<uint64_t>(s_configuredExpiryMs.load(std::memory_order_acquire)) * 1'000'000ULL; | |
| 478 | |||
| 479 |
2/2✓ Branch 40 → 29 taken 20 times.
✓ Branch 40 → 41 taken 2 times.
|
22 | for (size_t i = 0; i < shard_count; ++i) |
| 480 | { | ||
| 481 | 20 | std::unique_lock<SrwSharedMutex> shard_lock(*s_shardMutexes[i], std::try_to_lock); | |
| 482 |
1/2✓ Branch 33 → 34 taken 20 times.
✗ Branch 33 → 38 not taken.
|
20 | if (shard_lock.owns_lock()) |
| 483 | { | ||
| 484 | 20 | cleanup_expired_entries_in_shard(s_cacheShards[i], current_ts, expiry_ns); | |
| 485 | // Also trim to hard upper bound | ||
| 486 | 20 | trim_to_max_capacity(s_cacheShards[i]); | |
| 487 | } | ||
| 488 | 20 | } | |
| 489 |
1/2✓ Branch 43 → 44 taken 2 times.
✗ Branch 43 → 46 not taken.
|
2 | } |
| 490 | |||
| 491 | /** | ||
| 492 | * @brief Checks if on-demand cleanup should run based on elapsed time. | ||
| 493 | * @return true if cleanup was performed, false otherwise. | ||
| 494 | */ | ||
| 495 | ✗ | bool try_trigger_on_demand_cleanup() noexcept | |
| 496 | { | ||
| 497 | ✗ | if (!s_cacheInitialized.load(std::memory_order_acquire)) | |
| 498 | ✗ | return false; | |
| 499 | |||
| 500 | ✗ | const uint64_t now_ns = current_time_ns(); | |
| 501 | ✗ | const uint64_t last_cleanup = s_lastCleanupTimeNs.load(std::memory_order_acquire); | |
| 502 | ✗ | const uint64_t elapsed_ns = now_ns - last_cleanup; | |
| 503 | |||
| 504 | ✗ | if (elapsed_ns >= CLEANUP_INTERVAL_NS) | |
| 505 | { | ||
| 506 | // Atomically update last cleanup time to prevent multiple threads triggering | ||
| 507 | ✗ | uint64_t expected = last_cleanup; | |
| 508 | ✗ | if (s_lastCleanupTimeNs.compare_exchange_strong(expected, now_ns, std::memory_order_acq_rel)) | |
| 509 | { | ||
| 510 | ✗ | cleanup_expired_entries(false); | |
| 511 | s_stats.onDemandCleanups.fetch_add(1, std::memory_order_relaxed); | ||
| 512 | ✗ | return true; | |
| 513 | } | ||
| 514 | } | ||
| 515 | ✗ | return false; | |
| 516 | } | ||
| 517 | |||
| 518 | /** | ||
| 519 | * @brief Background cleanup thread function. | ||
| 520 | * @details Runs periodically to clean up expired entries without impacting the miss path. | ||
| 521 | */ | ||
| 522 | 167 | void cleanup_thread_func() noexcept | |
| 523 | { | ||
| 524 |
2/2✓ Branch 13 → 3 taken 13 times.
✓ Branch 13 → 14 taken 156 times.
|
169 | while (s_cleanupThreadRunning.load(std::memory_order_acquire)) |
| 525 | { | ||
| 526 | { | ||
| 527 | 13 | std::unique_lock<std::mutex> lock(s_cleanupMutex); | |
| 528 | 13 | s_cleanupCv.wait_for(lock, std::chrono::seconds(1), [&]() | |
| 529 |
4/4✓ Branch 3 → 4 taken 24 times.
✓ Branch 3 → 6 taken 1 time.
✓ Branch 5 → 6 taken 11 times.
✓ Branch 5 → 7 taken 13 times.
|
25 | { return s_cleanupRequested.load(std::memory_order_acquire) || !s_cleanupThreadRunning.load(std::memory_order_acquire); }); |
| 530 | 13 | } | |
| 531 | |||
| 532 |
2/2✓ Branch 8 → 9 taken 11 times.
✓ Branch 8 → 10 taken 2 times.
|
13 | if (!s_cleanupThreadRunning.load(std::memory_order_acquire)) |
| 533 | 11 | break; | |
| 534 | |||
| 535 | 2 | cleanup_expired_entries(true); // force=true to hold state mutex during vector iteration | |
| 536 | 2 | s_cleanupRequested.store(false, std::memory_order_relaxed); | |
| 537 | } | ||
| 538 | 167 | } | |
| 539 | |||
| 540 | /** | ||
| 541 | * @brief Signals the cleanup thread to run or triggers on-demand cleanup. | ||
| 542 | */ | ||
| 543 | 11 | void request_cleanup() noexcept | |
| 544 | { | ||
| 545 |
1/2✓ Branch 3 → 4 taken 11 times.
✗ Branch 3 → 6 not taken.
|
11 | if (s_cleanupThreadRunning.load(std::memory_order_acquire)) |
| 546 | { | ||
| 547 | 11 | s_cleanupRequested.store(true, std::memory_order_relaxed); | |
| 548 | 11 | s_cleanupCv.notify_one(); | |
| 549 | } | ||
| 550 | else | ||
| 551 | { | ||
| 552 | // Background thread disabled (MinGW) - use on-demand timer-based cleanup | ||
| 553 | ✗ | try_trigger_on_demand_cleanup(); | |
| 554 | } | ||
| 555 | 11 | } | |
| 556 | |||
    /**
     * @brief Invalidates cache entries in shards that overlap with the given range.
     * @details Only invalidates specific entries that overlap, not entire shards.
     *          Uses a bounded retry loop (with yields) to handle shards whose
     *          mutex is momentarily held by another writer.
     * @param address Start of the range to invalidate.
     * @param size Size of the range in bytes (no-op when zero).
     * @note NOTE(review): lookup is by exact page base only — a cached region
     *       whose base lies BELOW [address, address+size) but still covers it
     *       (which find_in_shard's slow path can match) is stored under its own
     *       base, possibly in a different shard, and is NOT removed here.
     *       Confirm callers tolerate that, or extend this to scan entries.
     */
    void invalidate_range_internal(uintptr_t address, size_t size) noexcept
    {
        if (s_cacheShards.empty() || size == 0)
            return;

        const uintptr_t endAddress = address + size;
        const size_t shard_count = s_shardCount.load(std::memory_order_acquire);

        // Convert the byte range into an inclusive range of 4 KiB page numbers.
        const uintptr_t start_page = address >> 12;
        // endAddress == 0 means the range wrapped; fall back to the start address.
        const uintptr_t end_page = (endAddress == 0 ? address : endAddress - 1) >> 12;

        constexpr size_t MAX_INVALIDATION_RETRIES = 3;

        for (uintptr_t page = start_page; page <= end_page; ++page)
        {
            // Shard is chosen from the page base, matching find_in_shard's keying.
            const size_t shard_idx = compute_shard_index(page << 12, shard_count);

            bool invalidated = false;
            for (size_t retry = 0; retry < MAX_INVALIDATION_RETRIES && !invalidated; ++retry)
            {
                std::unique_lock<SrwSharedMutex> lock(*s_shardMutexes[shard_idx], std::try_to_lock);
                if (!lock.owns_lock())
                {
                    // Shard is locked by another writer - yield and retry
                    if (retry < MAX_INVALIDATION_RETRIES - 1)
                    {
                        std::this_thread::yield();
                    }
                    continue;
                }

                CacheShard &shard = s_cacheShards[shard_idx];
                const uintptr_t page_base = page << 12;

                auto it = shard.entries.find(page_base);
                if (it != shard.entries.end())
                {
                    CachedMemoryRegionInfo &entry = it->second;
                    // An already-invalid entry needs no work; count as done.
                    if (!entry.valid)
                    {
                        invalidated = true;
                        continue;
                    }

                    const uintptr_t entryEndAddress = entry.baseAddress + entry.regionSize;
                    // Half-open interval overlap test.
                    const bool overlaps = address < entryEndAddress && endAddress > entry.baseAddress;
                    if (overlaps)
                    {
                        // Remove from LRU index using stored lru_key to avoid tombstone accumulation
                        const auto lru_it = shard.lru_index.find(entry.lru_key);
                        if (lru_it != shard.lru_index.end() && lru_it->second == page_base)
                        {
                            shard.lru_index.erase(lru_it);
                        }
                        // Erase entry immediately instead of leaving tombstone
                        shard.entries.erase(it);
                        s_stats.invalidations.fetch_add(1, std::memory_order_relaxed);
                        invalidated = true;
                    }
                }
                else
                {
                    // Nothing cached for this page base; treat as handled.
                    invalidated = true;
                }
            }

            // Single-page ranges are fully handled after the first iteration.
            if (start_page == end_page)
                break;
        }
    }
| 632 | |||
| 633 | /** | ||
| 634 | * @brief Performs one-time cache initialization. | ||
| 635 | */ | ||
| 636 | 167 | bool perform_cache_initialization(size_t cache_size, unsigned int expiry_ms, size_t shard_count) | |
| 637 | { | ||
| 638 |
1/2✗ Branch 2 → 3 not taken.
✓ Branch 2 → 4 taken 167 times.
|
167 | if (cache_size == 0) |
| 639 | ✗ | cache_size = 1; | |
| 640 |
1/2✗ Branch 4 → 5 not taken.
✓ Branch 4 → 6 taken 167 times.
|
167 | if (shard_count == 0) |
| 641 | ✗ | shard_count = 1; | |
| 642 | |||
| 643 | 167 | const size_t entries_per_shard = (cache_size + shard_count - 1) / shard_count; | |
| 644 | 167 | const size_t hard_max_per_shard = entries_per_shard * 2; // Hard upper bound: 2x capacity | |
| 645 | |||
| 646 | try | ||
| 647 | { | ||
| 648 |
1/2✓ Branch 6 → 7 taken 167 times.
✗ Branch 6 → 71 not taken.
|
167 | s_cacheShards.resize(shard_count); |
| 649 |
1/2✓ Branch 7 → 8 taken 167 times.
✗ Branch 7 → 71 not taken.
|
167 | s_shardMutexes.resize(shard_count); |
| 650 |
1/2✓ Branch 8 → 9 taken 167 times.
✗ Branch 8 → 69 not taken.
|
167 | s_inFlight = std::make_unique<std::atomic<char>[]>(shard_count); |
| 651 |
2/2✓ Branch 30 → 12 taken 2360 times.
✓ Branch 30 → 31 taken 167 times.
|
2527 | for (size_t i = 0; i < shard_count; ++i) |
| 652 | { | ||
| 653 |
1/2✓ Branch 13 → 14 taken 2360 times.
✗ Branch 13 → 71 not taken.
|
2360 | s_cacheShards[i].entries.reserve(entries_per_shard * 2); |
| 654 | 2360 | s_cacheShards[i].capacity = entries_per_shard; | |
| 655 | 2360 | s_cacheShards[i].max_capacity = hard_max_per_shard; | |
| 656 |
1/2✓ Branch 16 → 17 taken 2360 times.
✗ Branch 16 → 70 not taken.
|
2360 | s_shardMutexes[i] = std::make_unique<SrwSharedMutex>(); |
| 657 | 2360 | s_inFlight[i].store(0, std::memory_order_relaxed); | |
| 658 | } | ||
| 659 | } | ||
| 660 | ✗ | catch (const std::bad_alloc &) | |
| 661 | { | ||
| 662 | ✗ | Logger::get_instance().error("MemoryCache: Failed to allocate memory for cache shards."); | |
| 663 | ✗ | s_cacheShards.clear(); | |
| 664 | ✗ | s_shardMutexes.clear(); | |
| 665 | ✗ | s_inFlight.reset(); | |
| 666 | // Reset initialization flag so retry can work | ||
| 667 | ✗ | s_cacheInitialized.store(false, std::memory_order_relaxed); | |
| 668 | ✗ | return false; | |
| 669 | ✗ | } | |
| 670 | |||
| 671 | 167 | s_shardCount.store(shard_count, std::memory_order_release); | |
| 672 | 167 | s_maxEntriesPerShard.store(entries_per_shard, std::memory_order_release); | |
| 673 | 167 | s_configuredExpiryMs.store(expiry_ms, std::memory_order_release); | |
| 674 | 167 | s_lastCleanupTimeNs.store(current_time_ns(), std::memory_order_release); | |
| 675 | |||
| 676 |
2/4✓ Branch 64 → 65 taken 167 times.
✗ Branch 64 → 85 not taken.
✓ Branch 65 → 66 taken 167 times.
✗ Branch 65 → 84 not taken.
|
167 | Logger::get_instance().debug("MemoryCache: Initialized with {} shards ({} entries/shard, {}ms expiry, {} max).", |
| 677 | shard_count, entries_per_shard, expiry_ms, hard_max_per_shard); | ||
| 678 | |||
| 679 | 167 | return true; | |
| 680 | } | ||
| 681 | |||
| 682 | /** | ||
| 683 | * @brief Performs VirtualQuery and updates cache with coalescing support. | ||
| 684 | * @param shard_idx Index of the shard to update. | ||
| 685 | * @param address Address to query. | ||
| 686 | * @param mbi_out Output buffer for VirtualQuery result. | ||
| 687 | * @return true if VirtualQuery succeeded. | ||
| 688 | */ | ||
| 689 | 139 | bool query_and_update_cache(size_t shard_idx, LPCVOID address, MEMORY_BASIC_INFORMATION &mbi_out) noexcept | |
| 690 | { | ||
| 691 | 139 | CacheShard &shard = s_cacheShards[shard_idx]; | |
| 692 | |||
| 693 | // Try to claim in-flight status (stampede coalescing) | ||
| 694 | 139 | char expected = 0; | |
| 695 |
1/2✓ Branch 12 → 13 taken 139 times.
✗ Branch 12 → 32 not taken.
|
278 | if (s_inFlight[shard_idx].compare_exchange_strong(expected, 1, std::memory_order_acq_rel)) |
| 696 | { | ||
| 697 | // We are the leader - perform VirtualQuery | ||
| 698 | 139 | const bool result = VirtualQuery(address, &mbi_out, sizeof(mbi_out)) != 0; | |
| 699 | 139 | const uint64_t now_ns = current_time_ns(); | |
| 700 | |||
| 701 |
1/2✓ Branch 15 → 16 taken 139 times.
✗ Branch 15 → 22 not taken.
|
139 | if (result) |
| 702 | { | ||
| 703 | 139 | std::unique_lock<SrwSharedMutex> lock(*s_shardMutexes[shard_idx]); | |
| 704 | 139 | update_shard_with_region(shard, mbi_out, now_ns); | |
| 705 | 139 | } | |
| 706 | |||
| 707 | // Release in-flight status | ||
| 708 | 139 | s_inFlight[shard_idx].store(0, std::memory_order_release); | |
| 709 | 139 | return result; | |
| 710 | } | ||
| 711 | else | ||
| 712 | { | ||
| 713 | // We are a follower - VirtualQuery already in progress by another thread. | ||
| 714 | // Bounded wait to avoid stalling game threads on render-critical paths. | ||
| 715 | ✗ | const uint64_t expiry_ns = static_cast<uint64_t>(s_configuredExpiryMs.load(std::memory_order_acquire)) * 1'000'000ULL; | |
| 716 | ✗ | constexpr size_t MAX_FOLLOWER_YIELDS = 8; | |
| 717 | |||
| 718 | ✗ | for (size_t yield_count = 0; yield_count < MAX_FOLLOWER_YIELDS; ++yield_count) | |
| 719 | { | ||
| 720 | ✗ | if (s_inFlight[shard_idx].load(std::memory_order_acquire) == 0) | |
| 721 | { | ||
| 722 | // Query completed, check cache | ||
| 723 | ✗ | const uintptr_t addr_val = reinterpret_cast<uintptr_t>(address); | |
| 724 | ✗ | std::shared_lock<SrwSharedMutex> lock(*s_shardMutexes[shard_idx]); | |
| 725 | ✗ | CachedMemoryRegionInfo *cached = find_in_shard(shard, addr_val, 1, current_time_ns(), expiry_ns); | |
| 726 | ✗ | if (cached) | |
| 727 | { | ||
| 728 | s_stats.coalescedQueries.fetch_add(1, std::memory_order_relaxed); | ||
| 729 | // Copy cached info to output for consistency | ||
| 730 | ✗ | mbi_out.BaseAddress = reinterpret_cast<PVOID>(cached->baseAddress); | |
| 731 | ✗ | mbi_out.RegionSize = cached->regionSize; | |
| 732 | ✗ | mbi_out.Protect = cached->protection; | |
| 733 | ✗ | mbi_out.State = MEM_COMMIT; | |
| 734 | ✗ | return true; | |
| 735 | } | ||
| 736 | // Cache not populated, break to retry as leader | ||
| 737 | ✗ | break; | |
| 738 | ✗ | } | |
| 739 | |||
| 740 | // Yield to allow the leader thread to complete | ||
| 741 | ✗ | std::this_thread::yield(); | |
| 742 | } | ||
| 743 | |||
| 744 | // Retry as leader if follower wait timed out | ||
| 745 | ✗ | expected = 0; | |
| 746 | ✗ | if (s_inFlight[shard_idx].compare_exchange_strong(expected, 1, std::memory_order_acq_rel)) | |
| 747 | { | ||
| 748 | ✗ | const bool result = VirtualQuery(address, &mbi_out, sizeof(mbi_out)) != 0; | |
| 749 | ✗ | if (result) | |
| 750 | { | ||
| 751 | ✗ | std::unique_lock<SrwSharedMutex> lock(*s_shardMutexes[shard_idx]); | |
| 752 | ✗ | const uint64_t now_ns = current_time_ns(); | |
| 753 | ✗ | update_shard_with_region(shard, mbi_out, now_ns); | |
| 754 | ✗ | } | |
| 755 | ✗ | s_inFlight[shard_idx].store(0, std::memory_order_release); | |
| 756 | ✗ | return result; | |
| 757 | } | ||
| 758 | |||
| 759 | // Last resort: just do VirtualQuery without cache update | ||
| 760 | ✗ | return VirtualQuery(address, &mbi_out, sizeof(mbi_out)) != 0; | |
| 761 | } | ||
| 762 | } | ||
| 763 | |||
| 764 | /** | ||
| 765 | * @brief Shuts down the cleanup thread. | ||
| 766 | * @note Background cleanup thread is disabled on mingw due to pthreads compatibility issues. | ||
| 767 | * On-demand cleanup timer handles expiration in this case. | ||
| 768 | */ | ||
| 769 | ✗ | void shutdown_cleanup_thread() noexcept | |
| 770 | { | ||
| 771 | // Signal cleanup thread to stop | ||
| 772 | ✗ | s_cleanupThreadRunning.store(false, std::memory_order_release); | |
| 773 | ✗ | s_cleanupCv.notify_one(); | |
| 774 | |||
| 775 | // Wait for cleanup thread to finish if it was started | ||
| 776 | ✗ | if (s_cleanupThread.joinable()) | |
| 777 | { | ||
| 778 | ✗ | s_cleanupThread.join(); | |
| 779 | } | ||
| 780 | ✗ | } | |
| 781 | |||
| 782 | } // namespace MemoryUtilsCacheInternal | ||
| 783 | |||
| 784 | 169 | bool DetourModKit::Memory::init_cache(size_t cache_size, unsigned int expiry_ms, size_t shard_count) | |
| 785 | { | ||
| 786 | // Hold state mutex to prevent concurrent clear_cache or shutdown_cache | ||
| 787 | // This serializes init/clear/shutdown transitions to ensure vectors are not accessed while being resized or cleared | ||
| 788 |
1/2✓ Branch 2 → 3 taken 169 times.
✗ Branch 2 → 34 not taken.
|
169 | std::lock_guard<std::mutex> state_lock(MemoryUtilsCacheInternal::s_cacheStateMutex); |
| 789 | |||
| 790 | // Fast path: already initialized | ||
| 791 |
2/2✓ Branch 4 → 5 taken 2 times.
✓ Branch 4 → 6 taken 167 times.
|
169 | if (MemoryUtilsCacheInternal::s_cacheInitialized.load(std::memory_order_acquire)) |
| 792 | 2 | return true; | |
| 793 | |||
| 794 | // Try to initialize | ||
| 795 | 167 | bool expected = false; | |
| 796 |
1/2✓ Branch 7 → 8 taken 167 times.
✗ Branch 7 → 17 not taken.
|
167 | if (MemoryUtilsCacheInternal::s_cacheInitialized.compare_exchange_strong(expected, true, std::memory_order_acq_rel)) |
| 797 | { | ||
| 798 |
2/4✓ Branch 8 → 9 taken 167 times.
✗ Branch 8 → 32 not taken.
✗ Branch 9 → 10 not taken.
✓ Branch 9 → 11 taken 167 times.
|
167 | if (!MemoryUtilsCacheInternal::perform_cache_initialization(cache_size, expiry_ms, shard_count)) |
| 799 | { | ||
| 800 | // Initialization failed - s_cacheInitialized already reset to false in perform_cache_initialization | ||
| 801 | ✗ | return false; | |
| 802 | } | ||
| 803 | |||
| 804 | // Try to start background cleanup thread (may fail silently on MinGW) | ||
| 805 | 167 | MemoryUtilsCacheInternal::s_cleanupThreadRunning.store(true, std::memory_order_release); | |
| 806 | try | ||
| 807 | { | ||
| 808 |
1/2✓ Branch 12 → 13 taken 167 times.
✗ Branch 12 → 21 not taken.
|
167 | MemoryUtilsCacheInternal::s_cleanupThread = std::thread(MemoryUtilsCacheInternal::cleanup_thread_func); |
| 809 | } | ||
| 810 | ✗ | catch (const std::system_error &) | |
| 811 | { | ||
| 812 | // Background thread creation failed (MinGW pthreads issue) - use on-demand cleanup | ||
| 813 | ✗ | MemoryUtilsCacheInternal::s_cleanupThreadRunning.store(false, std::memory_order_release); | |
| 814 | ✗ | Logger::get_instance().debug("MemoryCache: Background cleanup thread unavailable, using on-demand cleanup."); | |
| 815 | ✗ | } | |
| 816 | |||
| 817 | 167 | return true; | |
| 818 | } | ||
| 819 | |||
| 820 | // Another thread initialized while we were waiting | ||
| 821 | ✗ | return true; | |
| 822 | 169 | } | |
| 823 | |||
| 824 | 26 | void DetourModKit::Memory::clear_cache() | |
| 825 | { | ||
| 826 | // Hold state mutex to serialize with shutdown and cleanup thread | ||
| 827 |
1/2✓ Branch 2 → 3 taken 26 times.
✗ Branch 2 → 100 not taken.
|
26 | std::lock_guard<std::mutex> state_lock(MemoryUtilsCacheInternal::s_cacheStateMutex); |
| 828 | |||
| 829 |
1/2✗ Branch 4 → 5 not taken.
✓ Branch 4 → 6 taken 26 times.
|
26 | if (!MemoryUtilsCacheInternal::s_cacheInitialized.load(std::memory_order_acquire)) |
| 830 | ✗ | return; | |
| 831 | |||
| 832 | 26 | const size_t shard_count = MemoryUtilsCacheInternal::s_shardCount.load(std::memory_order_acquire); | |
| 833 |
1/2✗ Branch 13 → 14 not taken.
✓ Branch 13 → 15 taken 26 times.
|
26 | if (shard_count == 0) |
| 834 | ✗ | return; | |
| 835 | |||
| 836 | // Acquire exclusive lock on each shard and clear entries. | ||
| 837 | // Uses blocking lock to guarantee all entries are cleared. | ||
| 838 | // The background cleanup thread uses try_to_lock on shard mutexes, | ||
| 839 | // so it will skip shards we hold without deadlocking. | ||
| 840 |
2/2✓ Branch 37 → 16 taken 392 times.
✓ Branch 37 → 38 taken 26 times.
|
418 | for (size_t i = 0; i < shard_count; ++i) |
| 841 | { | ||
| 842 | 392 | auto &mutex_ptr = MemoryUtilsCacheInternal::s_shardMutexes[i]; | |
| 843 |
1/2✓ Branch 18 → 19 taken 392 times.
✗ Branch 18 → 36 not taken.
|
392 | if (mutex_ptr) |
| 844 | { | ||
| 845 |
1/2✓ Branch 20 → 21 taken 392 times.
✗ Branch 20 → 96 not taken.
|
392 | std::unique_lock<SrwSharedMutex> shard_lock(*mutex_ptr); |
| 846 | 392 | MemoryUtilsCacheInternal::s_cacheShards[i].entries.clear(); | |
| 847 | 392 | MemoryUtilsCacheInternal::s_cacheShards[i].lru_index.clear(); | |
| 848 | 392 | MemoryUtilsCacheInternal::s_inFlight[i].store(0, std::memory_order_relaxed); | |
| 849 | 392 | } | |
| 850 | } | ||
| 851 | |||
| 852 | MemoryUtilsCacheInternal::s_stats.cacheHits.store(0, std::memory_order_relaxed); | ||
| 853 | MemoryUtilsCacheInternal::s_stats.cacheMisses.store(0, std::memory_order_relaxed); | ||
| 854 | MemoryUtilsCacheInternal::s_stats.invalidations.store(0, std::memory_order_relaxed); | ||
| 855 | MemoryUtilsCacheInternal::s_stats.coalescedQueries.store(0, std::memory_order_relaxed); | ||
| 856 | MemoryUtilsCacheInternal::s_stats.onDemandCleanups.store(0, std::memory_order_relaxed); | ||
| 857 | |||
| 858 | 26 | MemoryUtilsCacheInternal::s_lastCleanupTimeNs.store(current_time_ns(), std::memory_order_relaxed); | |
| 859 | |||
| 860 |
2/4✓ Branch 87 → 88 taken 26 times.
✗ Branch 87 → 98 not taken.
✓ Branch 88 → 89 taken 26 times.
✗ Branch 88 → 97 not taken.
|
26 | Logger::get_instance().debug("MemoryCache: All entries cleared."); |
| 861 |
1/2✓ Branch 91 → 92 taken 26 times.
✗ Branch 91 → 94 not taken.
|
26 | } |
| 862 | |||
/**
 * @brief Tears down the memory region cache completely.
 *
 * Order matters here: (1) stop and join the cleanup thread BEFORE taking
 * the state mutex (the thread itself acquires that mutex), (2) mark the
 * cache uninitialized so no new readers enter, (3) wait for in-flight
 * readers tracked by s_activeReaders, then (4) destroy shard data and
 * reset all statistics/configuration so init_cache can run again cleanly.
 */
void DetourModKit::Memory::shutdown_cache()
{
    // Signal and join cleanup thread BEFORE acquiring state mutex.
    // The cleanup thread acquires s_cacheStateMutex in cleanup_expired_entries(force=true),
    // so joining while holding the state mutex would deadlock.
    MemoryUtilsCacheInternal::s_cleanupThreadRunning.store(false, std::memory_order_release);
    MemoryUtilsCacheInternal::s_cleanupCv.notify_one();

    if (MemoryUtilsCacheInternal::s_cleanupThread.joinable())
    {
        MemoryUtilsCacheInternal::s_cleanupThread.join();
    }

    // Acquire state mutex to serialize with clear_cache and protect data teardown
    std::lock_guard<std::mutex> state_lock(MemoryUtilsCacheInternal::s_cacheStateMutex);

    // Mark as not initialized and zero shard count.
    // This prevents new readers from entering the critical section.
    // acquire/release is sufficient here because the state mutex provides the
    // cross-thread ordering guarantee. Readers that observe shard_count == 0
    // immediately exit without touching data structures.
    MemoryUtilsCacheInternal::s_cacheInitialized.store(false, std::memory_order_release);
    MemoryUtilsCacheInternal::s_shardCount.store(0, std::memory_order_release);

    // Wait for in-flight readers to finish before destroying data structures.
    // Readers increment s_activeReaders on entry and decrement on exit.
    // ActiveReaderGuard is RAII so readers always decrement; this loop is
    // bounded by the maximum time a single cache lookup can take.
    // Escalate from yield to sleep to avoid burning CPU if a reader is
    // preempted by the OS scheduler.
    constexpr int yield_spins = 4096;
    int spins = 0;
    while (MemoryUtilsCacheInternal::s_activeReaders.load(std::memory_order_acquire) > 0)
    {
        if (spins < yield_spins)
        {
            std::this_thread::yield();
        }
        else
        {
            std::this_thread::sleep_for(std::chrono::microseconds(100));
        }
        ++spins;
    }

    // All readers have exited - safe to destroy data structures.
    // Taking each shard lock once more drains any writer that slipped in
    // before the flag flip; the lock is released before the mutex itself
    // is destroyed by the vector clear below.
    const size_t shard_count = MemoryUtilsCacheInternal::s_cacheShards.size();
    for (size_t i = 0; i < shard_count; ++i)
    {
        if (MemoryUtilsCacheInternal::s_shardMutexes[i])
        {
            std::unique_lock<SrwSharedMutex> shard_lock(*MemoryUtilsCacheInternal::s_shardMutexes[i]);
            MemoryUtilsCacheInternal::s_cacheShards[i].entries.clear();
            MemoryUtilsCacheInternal::s_cacheShards[i].lru_index.clear();
        }
    }

    MemoryUtilsCacheInternal::s_cacheShards.clear();
    MemoryUtilsCacheInternal::s_shardMutexes.clear();
    MemoryUtilsCacheInternal::s_inFlight.reset();

    // Reset all stats and config so a subsequent init_cache starts from a clean state
    MemoryUtilsCacheInternal::s_stats.cacheHits.store(0, std::memory_order_relaxed);
    MemoryUtilsCacheInternal::s_stats.cacheMisses.store(0, std::memory_order_relaxed);
    MemoryUtilsCacheInternal::s_stats.invalidations.store(0, std::memory_order_relaxed);
    MemoryUtilsCacheInternal::s_stats.coalescedQueries.store(0, std::memory_order_relaxed);
    MemoryUtilsCacheInternal::s_stats.onDemandCleanups.store(0, std::memory_order_relaxed);
    MemoryUtilsCacheInternal::s_lastCleanupTimeNs.store(0, std::memory_order_relaxed);
    MemoryUtilsCacheInternal::s_configuredExpiryMs.store(0, std::memory_order_relaxed);
    MemoryUtilsCacheInternal::s_maxEntriesPerShard.store(0, std::memory_order_relaxed);
    MemoryUtilsCacheInternal::s_cleanupRequested.store(false, std::memory_order_relaxed);

    Logger::get_instance().debug("MemoryCache: Shutdown complete.");
}
| 937 | |||
| 938 | 21 | std::string DetourModKit::Memory::get_cache_stats() | |
| 939 | { | ||
| 940 | 21 | const uint64_t hits = MemoryUtilsCacheInternal::s_stats.cacheHits.load(std::memory_order_relaxed); | |
| 941 | 21 | const uint64_t misses = MemoryUtilsCacheInternal::s_stats.cacheMisses.load(std::memory_order_relaxed); | |
| 942 | 21 | const uint64_t invalidations = MemoryUtilsCacheInternal::s_stats.invalidations.load(std::memory_order_relaxed); | |
| 943 | 21 | const uint64_t coalesced = MemoryUtilsCacheInternal::s_stats.coalescedQueries.load(std::memory_order_relaxed); | |
| 944 | 21 | const uint64_t on_demand_cleanups = MemoryUtilsCacheInternal::s_stats.onDemandCleanups.load(std::memory_order_relaxed); | |
| 945 | 21 | const uint64_t total_queries = hits + misses; | |
| 946 | |||
| 947 | 21 | const size_t shard_count = MemoryUtilsCacheInternal::s_shardCount.load(std::memory_order_acquire); | |
| 948 | 21 | const size_t max_entries_per_shard = MemoryUtilsCacheInternal::s_maxEntriesPerShard.load(std::memory_order_acquire); | |
| 949 | 21 | const unsigned int expiry_ms = MemoryUtilsCacheInternal::s_configuredExpiryMs.load(std::memory_order_acquire); | |
| 950 | |||
| 951 | // Calculate total entries and hard max with reader guard | ||
| 952 | 21 | size_t total_entries = 0; | |
| 953 | 21 | size_t total_hard_max = 0; | |
| 954 | |||
| 955 | { | ||
| 956 | 21 | MemoryUtilsCacheInternal::ActiveReaderGuard reader_guard; | |
| 957 | 21 | const size_t active_shard_count = MemoryUtilsCacheInternal::s_shardCount.load(std::memory_order_acquire); | |
| 958 |
2/2✓ Branch 78 → 67 taken 99 times.
✓ Branch 78 → 79 taken 21 times.
|
120 | for (size_t i = 0; i < active_shard_count; ++i) |
| 959 | { | ||
| 960 | 99 | auto &mutex_ptr = MemoryUtilsCacheInternal::s_shardMutexes[i]; | |
| 961 |
1/2✓ Branch 69 → 70 taken 99 times.
✗ Branch 69 → 77 not taken.
|
99 | if (mutex_ptr) |
| 962 | { | ||
| 963 | 99 | std::shared_lock<SrwSharedMutex> shard_lock(*mutex_ptr); | |
| 964 | 99 | total_entries += MemoryUtilsCacheInternal::s_cacheShards[i].entries.size(); | |
| 965 | 99 | total_hard_max += MemoryUtilsCacheInternal::s_cacheShards[i].max_capacity; | |
| 966 | 99 | } | |
| 967 | } | ||
| 968 | 21 | } | |
| 969 | |||
| 970 |
1/2✓ Branch 80 → 81 taken 21 times.
✗ Branch 80 → 120 not taken.
|
21 | std::ostringstream oss; |
| 971 |
2/4✓ Branch 81 → 82 taken 21 times.
✗ Branch 81 → 118 not taken.
✓ Branch 82 → 83 taken 21 times.
✗ Branch 82 → 118 not taken.
|
21 | oss << "MemoryCache Stats (Shards: " << shard_count |
| 972 |
2/4✓ Branch 83 → 84 taken 21 times.
✗ Branch 83 → 118 not taken.
✓ Branch 84 → 85 taken 21 times.
✗ Branch 84 → 118 not taken.
|
21 | << ", Entries/Shard: " << max_entries_per_shard |
| 973 |
3/6✓ Branch 85 → 86 taken 21 times.
✗ Branch 85 → 118 not taken.
✓ Branch 86 → 87 taken 21 times.
✗ Branch 86 → 88 not taken.
✓ Branch 89 → 90 taken 21 times.
✗ Branch 89 → 118 not taken.
|
21 | << ", HardMax/Shard: " << (shard_count > 0 ? total_hard_max / shard_count : 0) |
| 974 |
2/4✓ Branch 90 → 91 taken 21 times.
✗ Branch 90 → 118 not taken.
✓ Branch 91 → 92 taken 21 times.
✗ Branch 91 → 118 not taken.
|
21 | << ", Expiry: " << expiry_ms << "ms) - " |
| 975 |
5/10✓ Branch 92 → 93 taken 21 times.
✗ Branch 92 → 118 not taken.
✓ Branch 93 → 94 taken 21 times.
✗ Branch 93 → 118 not taken.
✓ Branch 94 → 95 taken 21 times.
✗ Branch 94 → 118 not taken.
✓ Branch 95 → 96 taken 21 times.
✗ Branch 95 → 118 not taken.
✓ Branch 96 → 97 taken 21 times.
✗ Branch 96 → 118 not taken.
|
21 | << "Hits: " << hits << ", Misses: " << misses |
| 976 |
2/4✓ Branch 97 → 98 taken 21 times.
✗ Branch 97 → 118 not taken.
✓ Branch 98 → 99 taken 21 times.
✗ Branch 98 → 118 not taken.
|
21 | << ", Invalidations: " << invalidations |
| 977 |
2/4✓ Branch 99 → 100 taken 21 times.
✗ Branch 99 → 118 not taken.
✓ Branch 100 → 101 taken 21 times.
✗ Branch 100 → 118 not taken.
|
21 | << ", Coalesced: " << coalesced |
| 978 |
2/4✓ Branch 101 → 102 taken 21 times.
✗ Branch 101 → 118 not taken.
✓ Branch 102 → 103 taken 21 times.
✗ Branch 102 → 118 not taken.
|
21 | << ", OnDemandCleanups: " << on_demand_cleanups |
| 979 |
2/4✓ Branch 103 → 104 taken 21 times.
✗ Branch 103 → 118 not taken.
✓ Branch 104 → 105 taken 21 times.
✗ Branch 104 → 118 not taken.
|
21 | << ", TotalEntries: " << total_entries; |
| 980 | |||
| 981 |
2/2✓ Branch 105 → 106 taken 16 times.
✓ Branch 105 → 112 taken 5 times.
|
21 | if (total_queries > 0) |
| 982 | { | ||
| 983 | 16 | const double hit_rate_percent = (static_cast<double>(hits) / static_cast<double>(total_queries)) * 100.0; | |
| 984 |
4/8✓ Branch 106 → 107 taken 16 times.
✗ Branch 106 → 118 not taken.
✓ Branch 107 → 108 taken 16 times.
✗ Branch 107 → 118 not taken.
✓ Branch 110 → 111 taken 16 times.
✗ Branch 110 → 118 not taken.
✓ Branch 111 → 113 taken 16 times.
✗ Branch 111 → 118 not taken.
|
16 | oss << ", Hit Rate: " << std::fixed << std::setprecision(2) << hit_rate_percent << "%"; |
| 985 | } | ||
| 986 | else | ||
| 987 | { | ||
| 988 |
1/2✓ Branch 112 → 113 taken 5 times.
✗ Branch 112 → 118 not taken.
|
5 | oss << ", Hit Rate: N/A (no queries tracked)"; |
| 989 | } | ||
| 990 |
1/2✓ Branch 113 → 114 taken 21 times.
✗ Branch 113 → 118 not taken.
|
42 | return oss.str(); |
| 991 | 21 | } | |
| 992 | |||
| 993 | 13 | void DetourModKit::Memory::invalidate_range(const void *address, size_t size) | |
| 994 | { | ||
| 995 |
4/4✓ Branch 2 → 3 taken 12 times.
✓ Branch 2 → 4 taken 1 time.
✓ Branch 3 → 4 taken 1 time.
✓ Branch 3 → 5 taken 11 times.
|
13 | if (!address || size == 0) |
| 996 | 2 | return; | |
| 997 | |||
| 998 |
1/2✗ Branch 6 → 7 not taken.
✓ Branch 6 → 8 taken 11 times.
|
11 | if (!MemoryUtilsCacheInternal::s_cacheInitialized.load(std::memory_order_acquire)) |
| 999 | ✗ | return; | |
| 1000 | |||
| 1001 | 11 | MemoryUtilsCacheInternal::ActiveReaderGuard reader_guard; | |
| 1002 | |||
| 1003 | 11 | const size_t shard_count = MemoryUtilsCacheInternal::s_shardCount.load(std::memory_order_acquire); | |
| 1004 |
1/2✗ Branch 16 → 17 not taken.
✓ Branch 16 → 18 taken 11 times.
|
11 | if (shard_count == 0) |
| 1005 | ✗ | return; | |
| 1006 | |||
| 1007 | 11 | const uintptr_t addr_val = reinterpret_cast<uintptr_t>(address); | |
| 1008 | 11 | MemoryUtilsCacheInternal::invalidate_range_internal(addr_val, size); | |
| 1009 | |||
| 1010 | // request_cleanup may trigger on-demand cleanup_expired_entries(force=false) | ||
| 1011 | // which iterates shards without s_cacheStateMutex. Keep s_activeReaders > 0 | ||
| 1012 | // so shutdown_cache cannot destroy shards during the cleanup pass. | ||
| 1013 | 11 | MemoryUtilsCacheInternal::request_cleanup(); | |
| 1014 |
1/2✓ Branch 22 → 23 taken 11 times.
✗ Branch 22 → 25 not taken.
|
11 | } |
| 1015 | |||
| 1016 | 92744 | bool DetourModKit::Memory::is_readable(const void *address, size_t size) | |
| 1017 | { | ||
| 1018 |
2/4✓ Branch 2 → 3 taken 95020 times.
✗ Branch 2 → 4 not taken.
✗ Branch 3 → 4 not taken.
✓ Branch 3 → 5 taken 96141 times.
|
92744 | if (!address || size == 0) |
| 1019 | ✗ | return false; | |
| 1020 | |||
| 1021 | // Construct reader guard BEFORE checking s_cacheInitialized to prevent | ||
| 1022 | // shutdown_cache from destroying data structures between the check and access. | ||
| 1023 | 96141 | MemoryUtilsCacheInternal::ActiveReaderGuard reader_guard; | |
| 1024 | |||
| 1025 |
2/2✓ Branch 7 → 8 taken 5 times.
✓ Branch 7 → 24 taken 95412 times.
|
101336 | if (!MemoryUtilsCacheInternal::s_cacheInitialized.load(std::memory_order_acquire)) |
| 1026 | { | ||
| 1027 | // Cache not initialized - fall back to direct VirtualQuery | ||
| 1028 | MEMORY_BASIC_INFORMATION mbi; | ||
| 1029 |
2/4✓ Branch 8 → 9 taken 5 times.
✗ Branch 8 → 77 not taken.
✗ Branch 9 → 10 not taken.
✓ Branch 9 → 11 taken 5 times.
|
5 | if (!VirtualQuery(address, &mbi, sizeof(mbi))) |
| 1030 | ✗ | return false; | |
| 1031 |
2/2✓ Branch 11 → 12 taken 1 time.
✓ Branch 11 → 13 taken 4 times.
|
5 | if (mbi.State != MEM_COMMIT) |
| 1032 | 1 | return false; | |
| 1033 |
1/2✗ Branch 14 → 15 not taken.
✓ Branch 14 → 16 taken 4 times.
|
4 | if (!MemoryUtilsCacheInternal::check_read_permission(mbi.Protect)) |
| 1034 | ✗ | return false; | |
| 1035 | 4 | const uintptr_t query_addr_val = reinterpret_cast<uintptr_t>(address); | |
| 1036 | 4 | const uintptr_t region_start = reinterpret_cast<uintptr_t>(mbi.BaseAddress); | |
| 1037 | 4 | const uintptr_t query_end = query_addr_val + size; | |
| 1038 |
2/2✓ Branch 16 → 17 taken 2 times.
✓ Branch 16 → 18 taken 2 times.
|
4 | if (query_end < query_addr_val) |
| 1039 | 2 | return false; | |
| 1040 |
2/4✓ Branch 18 → 19 taken 2 times.
✗ Branch 18 → 21 not taken.
✓ Branch 19 → 20 taken 2 times.
✗ Branch 19 → 21 not taken.
|
2 | return query_addr_val >= region_start && query_end <= region_start + mbi.RegionSize; |
| 1041 | } | ||
| 1042 | |||
| 1043 | // Reader guard already active — safe to access cache data structures | ||
| 1044 | |||
| 1045 | 92599 | const size_t shard_count = MemoryUtilsCacheInternal::s_shardCount.load(std::memory_order_acquire); | |
| 1046 |
1/2✗ Branch 31 → 32 not taken.
✓ Branch 31 → 33 taken 92599 times.
|
92599 | if (shard_count == 0) |
| 1047 | ✗ | return false; | |
| 1048 | |||
| 1049 | 92599 | const uintptr_t query_addr_val = reinterpret_cast<uintptr_t>(address); | |
| 1050 | 92599 | const size_t shard_idx = compute_shard_index(query_addr_val, shard_count); | |
| 1051 | 90859 | const uint64_t now_ns = current_time_ns(); | |
| 1052 | 93052 | const uint64_t expiry_ns = static_cast<uint64_t>(MemoryUtilsCacheInternal::s_configuredExpiryMs.load(std::memory_order_acquire)) * 1'000'000ULL; | |
| 1053 | |||
| 1054 | // Fast path: blocking shared lock for concurrent read access (multiple readers allowed) | ||
| 1055 | { | ||
| 1056 | 93052 | std::shared_lock<SrwSharedMutex> lock(*MemoryUtilsCacheInternal::s_shardMutexes[shard_idx]); | |
| 1057 | 100786 | CachedMemoryRegionInfo *cached_info = MemoryUtilsCacheInternal::find_in_shard( | |
| 1058 | 104438 | MemoryUtilsCacheInternal::s_cacheShards[shard_idx], | |
| 1059 | query_addr_val, size, now_ns, expiry_ns); | ||
| 1060 |
2/2✓ Branch 47 → 48 taken 97707 times.
✓ Branch 47 → 52 taken 127 times.
|
97834 | if (cached_info) |
| 1061 | { | ||
| 1062 | MemoryUtilsCacheInternal::s_stats.cacheHits.fetch_add(1, std::memory_order_relaxed); | ||
| 1063 | 97707 | return MemoryUtilsCacheInternal::check_read_permission(cached_info->protection); | |
| 1064 | } | ||
| 1065 |
2/2✓ Branch 54 → 55 taken 127 times.
✓ Branch 54 → 60 taken 99643 times.
|
96766 | } |
| 1066 | |||
| 1067 | MemoryUtilsCacheInternal::s_stats.cacheMisses.fetch_add(1, std::memory_order_relaxed); | ||
| 1068 | |||
| 1069 | // Cache miss: call VirtualQuery with stampede coalescing | ||
| 1070 | MEMORY_BASIC_INFORMATION mbi; | ||
| 1071 |
1/2✗ Branch 59 → 61 not taken.
✓ Branch 59 → 62 taken 127 times.
|
127 | if (!MemoryUtilsCacheInternal::query_and_update_cache(shard_idx, address, mbi)) |
| 1072 | ✗ | return false; | |
| 1073 | |||
| 1074 |
2/2✓ Branch 62 → 63 taken 3 times.
✓ Branch 62 → 64 taken 124 times.
|
127 | if (mbi.State != MEM_COMMIT) |
| 1075 | 3 | return false; | |
| 1076 | |||
| 1077 |
2/2✓ Branch 65 → 66 taken 4 times.
✓ Branch 65 → 67 taken 120 times.
|
124 | if (!MemoryUtilsCacheInternal::check_read_permission(mbi.Protect)) |
| 1078 | 4 | return false; | |
| 1079 | |||
| 1080 | 120 | const uintptr_t region_start_addr = reinterpret_cast<uintptr_t>(mbi.BaseAddress); | |
| 1081 | 120 | const uintptr_t region_end_addr = region_start_addr + mbi.RegionSize; | |
| 1082 | 120 | const uintptr_t query_end_addr = query_addr_val + size; | |
| 1083 | |||
| 1084 |
2/2✓ Branch 67 → 68 taken 2 times.
✓ Branch 67 → 69 taken 118 times.
|
120 | if (query_end_addr < query_addr_val) |
| 1085 | 2 | return false; | |
| 1086 | |||
| 1087 |
3/4✓ Branch 69 → 70 taken 118 times.
✗ Branch 69 → 72 not taken.
✓ Branch 70 → 71 taken 117 times.
✓ Branch 70 → 72 taken 1 time.
|
118 | return query_addr_val >= region_start_addr && query_end_addr <= region_end_addr; |
| 1088 | 99775 | } | |
| 1089 | |||
| 1090 | 3721 | bool DetourModKit::Memory::is_writable(void *address, size_t size) | |
| 1091 | { | ||
| 1092 |
2/4✓ Branch 2 → 3 taken 3750 times.
✗ Branch 2 → 4 not taken.
✗ Branch 3 → 4 not taken.
✓ Branch 3 → 5 taken 3756 times.
|
3721 | if (!address || size == 0) |
| 1093 | ✗ | return false; | |
| 1094 | |||
| 1095 | // Construct reader guard BEFORE checking s_cacheInitialized to prevent | ||
| 1096 | // shutdown_cache from destroying data structures between the check and access. | ||
| 1097 | 3756 | MemoryUtilsCacheInternal::ActiveReaderGuard reader_guard; | |
| 1098 | |||
| 1099 |
2/2✓ Branch 7 → 8 taken 4 times.
✓ Branch 7 → 24 taken 3675 times.
|
3889 | if (!MemoryUtilsCacheInternal::s_cacheInitialized.load(std::memory_order_acquire)) |
| 1100 | { | ||
| 1101 | // Cache not initialized - fall back to direct VirtualQuery | ||
| 1102 | MEMORY_BASIC_INFORMATION mbi; | ||
| 1103 |
2/4✓ Branch 8 → 9 taken 4 times.
✗ Branch 8 → 77 not taken.
✗ Branch 9 → 10 not taken.
✓ Branch 9 → 11 taken 4 times.
|
4 | if (!VirtualQuery(address, &mbi, sizeof(mbi))) |
| 1104 | ✗ | return false; | |
| 1105 |
2/2✓ Branch 11 → 12 taken 1 time.
✓ Branch 11 → 13 taken 3 times.
|
4 | if (mbi.State != MEM_COMMIT) |
| 1106 | 1 | return false; | |
| 1107 |
2/2✓ Branch 14 → 15 taken 1 time.
✓ Branch 14 → 16 taken 2 times.
|
3 | if (!MemoryUtilsCacheInternal::check_write_permission(mbi.Protect)) |
| 1108 | 1 | return false; | |
| 1109 | 2 | const uintptr_t query_addr_val = reinterpret_cast<uintptr_t>(address); | |
| 1110 | 2 | const uintptr_t region_start = reinterpret_cast<uintptr_t>(mbi.BaseAddress); | |
| 1111 | 2 | const uintptr_t query_end = query_addr_val + size; | |
| 1112 |
2/2✓ Branch 16 → 17 taken 1 time.
✓ Branch 16 → 18 taken 1 time.
|
2 | if (query_end < query_addr_val) |
| 1113 | 1 | return false; | |
| 1114 |
2/4✓ Branch 18 → 19 taken 1 time.
✗ Branch 18 → 21 not taken.
✓ Branch 19 → 20 taken 1 time.
✗ Branch 19 → 21 not taken.
|
1 | return query_addr_val >= region_start && query_end <= region_start + mbi.RegionSize; |
| 1115 | } | ||
| 1116 | |||
| 1117 | // Reader guard already active — safe to access cache data structures | ||
| 1118 | |||
| 1119 | 3664 | const size_t shard_count = MemoryUtilsCacheInternal::s_shardCount.load(std::memory_order_acquire); | |
| 1120 |
1/2✗ Branch 31 → 32 not taken.
✓ Branch 31 → 33 taken 3664 times.
|
3664 | if (shard_count == 0) |
| 1121 | ✗ | return false; | |
| 1122 | |||
| 1123 | 3664 | const uintptr_t query_addr_val = reinterpret_cast<uintptr_t>(address); | |
| 1124 | 3664 | const size_t shard_idx = compute_shard_index(query_addr_val, shard_count); | |
| 1125 | 3625 | const uint64_t now_ns = current_time_ns(); | |
| 1126 | 3674 | const uint64_t expiry_ns = static_cast<uint64_t>(MemoryUtilsCacheInternal::s_configuredExpiryMs.load(std::memory_order_acquire)) * 1'000'000ULL; | |
| 1127 | |||
| 1128 | // Fast path: blocking shared lock for concurrent read access (multiple readers allowed) | ||
| 1129 | { | ||
| 1130 | 3674 | std::shared_lock<SrwSharedMutex> lock(*MemoryUtilsCacheInternal::s_shardMutexes[shard_idx]); | |
| 1131 | 3694 | CachedMemoryRegionInfo *cached_info = MemoryUtilsCacheInternal::find_in_shard( | |
| 1132 | 3778 | MemoryUtilsCacheInternal::s_cacheShards[shard_idx], | |
| 1133 | query_addr_val, size, now_ns, expiry_ns); | ||
| 1134 |
2/2✓ Branch 47 → 48 taken 3744 times.
✓ Branch 47 → 52 taken 12 times.
|
3756 | if (cached_info) |
| 1135 | { | ||
| 1136 | MemoryUtilsCacheInternal::s_stats.cacheHits.fetch_add(1, std::memory_order_relaxed); | ||
| 1137 | 3744 | return MemoryUtilsCacheInternal::check_write_permission(cached_info->protection); | |
| 1138 | } | ||
| 1139 |
2/2✓ Branch 54 → 55 taken 12 times.
✓ Branch 54 → 60 taken 3795 times.
|
3726 | } |
| 1140 | |||
| 1141 | MemoryUtilsCacheInternal::s_stats.cacheMisses.fetch_add(1, std::memory_order_relaxed); | ||
| 1142 | |||
| 1143 | MEMORY_BASIC_INFORMATION mbi; | ||
| 1144 |
1/2✗ Branch 59 → 61 not taken.
✓ Branch 59 → 62 taken 12 times.
|
12 | if (!MemoryUtilsCacheInternal::query_and_update_cache(shard_idx, address, mbi)) |
| 1145 | ✗ | return false; | |
| 1146 | |||
| 1147 |
2/2✓ Branch 62 → 63 taken 1 time.
✓ Branch 62 → 64 taken 11 times.
|
12 | if (mbi.State != MEM_COMMIT) |
| 1148 | 1 | return false; | |
| 1149 | |||
| 1150 |
2/2✓ Branch 65 → 66 taken 4 times.
✓ Branch 65 → 67 taken 7 times.
|
11 | if (!MemoryUtilsCacheInternal::check_write_permission(mbi.Protect)) |
| 1151 | 4 | return false; | |
| 1152 | |||
| 1153 | 7 | const uintptr_t region_start_addr = reinterpret_cast<uintptr_t>(mbi.BaseAddress); | |
| 1154 | 7 | const uintptr_t region_end_addr = region_start_addr + mbi.RegionSize; | |
| 1155 | 7 | const uintptr_t query_end_addr = query_addr_val + size; | |
| 1156 | |||
| 1157 |
2/2✓ Branch 67 → 68 taken 2 times.
✓ Branch 67 → 69 taken 5 times.
|
7 | if (query_end_addr < query_addr_val) |
| 1158 | 2 | return false; | |
| 1159 | |||
| 1160 |
2/4✓ Branch 69 → 70 taken 5 times.
✗ Branch 69 → 72 not taken.
✓ Branch 70 → 71 taken 5 times.
✗ Branch 70 → 72 not taken.
|
5 | return query_addr_val >= region_start_addr && query_end_addr <= region_end_addr; |
| 1161 | 3811 | } | |
| 1162 | |||
/**
 * @brief Writes numBytes from sourceBytes to targetAddress, temporarily making
 *        the destination PAGE_EXECUTE_READWRITE and restoring its protection after.
 *
 * On success the instruction cache is flushed and the memory-region cache is
 * invalidated for the written range. BUGFIX: the flush and cache invalidation
 * now happen even when restoring the original protection fails — the bytes
 * have already been written at that point, so skipping them (as the previous
 * code did) left a stale instruction cache and stale cached region info for
 * modified memory.
 *
 * @param targetAddress Destination (must be non-null).
 * @param sourceBytes   Source buffer (must be non-null when numBytes > 0).
 * @param numBytes      Byte count; zero is a no-op that logs a warning.
 * @param logger        Sink for diagnostics.
 * @return Empty on success, or the MemoryError describing the failure.
 */
std::expected<void, MemoryError> DetourModKit::Memory::write_bytes(std::byte *targetAddress, const std::byte *sourceBytes, size_t numBytes, Logger &logger)
{
    if (!targetAddress)
    {
        logger.error("write_bytes: Target address is null.");
        return std::unexpected(MemoryError::NullTargetAddress);
    }
    if (!sourceBytes && numBytes > 0)
    {
        logger.error("write_bytes: Source bytes pointer is null for non-zero numBytes.");
        return std::unexpected(MemoryError::NullSourceBytes);
    }
    if (numBytes == 0)
    {
        logger.warning("write_bytes: Number of bytes to write is zero. Operation has no effect.");
        return {};
    }

    DWORD old_protection_flags;
    if (!VirtualProtect(reinterpret_cast<LPVOID>(targetAddress), numBytes, PAGE_EXECUTE_READWRITE, &old_protection_flags))
    {
        logger.error("write_bytes: VirtualProtect failed to set PAGE_EXECUTE_READWRITE at address {}. Windows Error: {}",
                     DetourModKit::Format::format_address(reinterpret_cast<uintptr_t>(targetAddress)), GetLastError());
        return std::unexpected(MemoryError::ProtectionChangeFailed);
    }

    memcpy(reinterpret_cast<void *>(targetAddress), reinterpret_cast<const void *>(sourceBytes), numBytes);

    // The bytes are committed from here on: flush the CPU instruction cache and
    // drop any cached region info for the range BEFORE attempting the protection
    // restore, so a restore failure cannot leave stale state behind.
    if (!FlushInstructionCache(GetCurrentProcess(), reinterpret_cast<LPCVOID>(targetAddress), numBytes))
    {
        logger.warning("write_bytes: FlushInstructionCache failed for address {}. Windows Error: {}",
                       DetourModKit::Format::format_address(reinterpret_cast<uintptr_t>(targetAddress)), GetLastError());
    }
    Memory::invalidate_range(targetAddress, numBytes);

    DWORD temp_old_protect;
    if (!VirtualProtect(reinterpret_cast<LPVOID>(targetAddress), numBytes, old_protection_flags, &temp_old_protect))
    {
        logger.error("write_bytes: VirtualProtect failed to restore original protection ({}) at address {}. Windows Error: {}. Memory may remain writable!",
                     DetourModKit::Format::format_hex(static_cast<int>(old_protection_flags)),
                     DetourModKit::Format::format_address(reinterpret_cast<uintptr_t>(targetAddress)), GetLastError());
        return std::unexpected(MemoryError::ProtectionRestoreFailed);
    }

    logger.debug("write_bytes: Successfully wrote {} bytes to address {}.",
                 numBytes, DetourModKit::Format::format_address(reinterpret_cast<uintptr_t>(targetAddress)));
    return {};
}
| 1212 | |||
| 1213 | 12 | Memory::ReadableStatus DetourModKit::Memory::is_readable_nonblocking(const void *address, size_t size) | |
| 1214 | { | ||
| 1215 |
4/4✓ Branch 2 → 3 taken 11 times.
✓ Branch 2 → 4 taken 1 time.
✓ Branch 3 → 4 taken 1 time.
✓ Branch 3 → 5 taken 10 times.
|
12 | if (!address || size == 0) |
| 1216 | 2 | return ReadableStatus::NotReadable; | |
| 1217 | |||
| 1218 | 10 | MemoryUtilsCacheInternal::ActiveReaderGuard reader_guard; | |
| 1219 | |||
| 1220 |
2/2✓ Branch 7 → 8 taken 2 times.
✓ Branch 7 → 23 taken 8 times.
|
10 | if (!MemoryUtilsCacheInternal::s_cacheInitialized.load(std::memory_order_acquire)) |
| 1221 | { | ||
| 1222 | // Cache not initialized - fall back to direct VirtualQuery (blocking) | ||
| 1223 | MEMORY_BASIC_INFORMATION mbi; | ||
| 1224 |
2/4✓ Branch 8 → 9 taken 2 times.
✗ Branch 8 → 62 not taken.
✗ Branch 9 → 10 not taken.
✓ Branch 9 → 11 taken 2 times.
|
2 | if (!VirtualQuery(address, &mbi, sizeof(mbi))) |
| 1225 | ✗ | return ReadableStatus::NotReadable; | |
| 1226 |
2/2✓ Branch 11 → 12 taken 1 time.
✓ Branch 11 → 13 taken 1 time.
|
2 | if (mbi.State != MEM_COMMIT) |
| 1227 | 1 | return ReadableStatus::NotReadable; | |
| 1228 |
1/2✗ Branch 14 → 15 not taken.
✓ Branch 14 → 16 taken 1 time.
|
1 | if (!MemoryUtilsCacheInternal::check_read_permission(mbi.Protect)) |
| 1229 | ✗ | return ReadableStatus::NotReadable; | |
| 1230 | 1 | const uintptr_t query_addr_val = reinterpret_cast<uintptr_t>(address); | |
| 1231 | 1 | const uintptr_t region_start = reinterpret_cast<uintptr_t>(mbi.BaseAddress); | |
| 1232 | 1 | const uintptr_t query_end = query_addr_val + size; | |
| 1233 |
1/2✗ Branch 16 → 17 not taken.
✓ Branch 16 → 18 taken 1 time.
|
1 | if (query_end < query_addr_val) |
| 1234 | ✗ | return ReadableStatus::NotReadable; | |
| 1235 |
2/4✓ Branch 18 → 19 taken 1 time.
✗ Branch 18 → 21 not taken.
✓ Branch 19 → 20 taken 1 time.
✗ Branch 19 → 21 not taken.
|
1 | if (query_addr_val >= region_start && query_end <= region_start + mbi.RegionSize) |
| 1236 | 1 | return ReadableStatus::Readable; | |
| 1237 | ✗ | return ReadableStatus::NotReadable; | |
| 1238 | } | ||
| 1239 | |||
| 1240 | 8 | const size_t shard_count = MemoryUtilsCacheInternal::s_shardCount.load(std::memory_order_acquire); | |
| 1241 |
1/2✗ Branch 30 → 31 not taken.
✓ Branch 30 → 32 taken 8 times.
|
8 | if (shard_count == 0) |
| 1242 | ✗ | return ReadableStatus::Unknown; | |
| 1243 | |||
| 1244 | 8 | const uintptr_t query_addr_val = reinterpret_cast<uintptr_t>(address); | |
| 1245 | 8 | const size_t shard_idx = compute_shard_index(query_addr_val, shard_count); | |
| 1246 | 8 | const uint64_t now_ns = current_time_ns(); | |
| 1247 | 8 | const uint64_t expiry_ns = static_cast<uint64_t>(MemoryUtilsCacheInternal::s_configuredExpiryMs.load(std::memory_order_acquire)) * 1'000'000ULL; | |
| 1248 | |||
| 1249 | // Non-blocking: try_lock_shared to avoid stalling latency-sensitive threads | ||
| 1250 | 8 | std::shared_lock<SrwSharedMutex> lock(*MemoryUtilsCacheInternal::s_shardMutexes[shard_idx], std::try_to_lock); | |
| 1251 |
1/2✗ Branch 45 → 46 not taken.
✓ Branch 45 → 47 taken 8 times.
|
8 | if (!lock.owns_lock()) |
| 1252 | ✗ | return ReadableStatus::Unknown; | |
| 1253 | |||
| 1254 | 8 | CachedMemoryRegionInfo *cached_info = MemoryUtilsCacheInternal::find_in_shard( | |
| 1255 | 8 | MemoryUtilsCacheInternal::s_cacheShards[shard_idx], | |
| 1256 | query_addr_val, size, now_ns, expiry_ns); | ||
| 1257 |
2/2✓ Branch 49 → 50 taken 6 times.
✓ Branch 49 → 57 taken 2 times.
|
8 | if (cached_info) |
| 1258 | { | ||
| 1259 | MemoryUtilsCacheInternal::s_stats.cacheHits.fetch_add(1, std::memory_order_relaxed); | ||
| 1260 | 6 | return MemoryUtilsCacheInternal::check_read_permission(cached_info->protection) | |
| 1261 |
2/2✓ Branch 53 → 54 taken 3 times.
✓ Branch 53 → 55 taken 3 times.
|
6 | ? ReadableStatus::Readable |
| 1262 | 6 | : ReadableStatus::NotReadable; | |
| 1263 | } | ||
| 1264 | |||
| 1265 | // Cache miss with non-blocking semantics: return Unknown rather than issuing VirtualQuery | ||
| 1266 | 2 | return ReadableStatus::Unknown; | |
| 1267 | 10 | } | |
| 1268 | |||
| 1269 | 10 | uintptr_t DetourModKit::Memory::read_ptr_unsafe(uintptr_t base, ptrdiff_t offset) noexcept | |
| 1270 | { | ||
| 1271 | #ifdef _MSC_VER | ||
| 1272 | __try | ||
| 1273 | { | ||
| 1274 | return *reinterpret_cast<const uintptr_t *>(base + offset); | ||
| 1275 | } | ||
| 1276 | __except ((GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION || | ||
| 1277 | GetExceptionCode() == STATUS_GUARD_PAGE_VIOLATION) | ||
| 1278 | ? EXCEPTION_EXECUTE_HANDLER | ||
| 1279 | : EXCEPTION_CONTINUE_SEARCH) | ||
| 1280 | { | ||
| 1281 | return 0; | ||
| 1282 | } | ||
| 1283 | #else | ||
| 1284 | // MinGW/GCC lacks __try/__except. Use VirtualQuery as a lightweight | ||
| 1285 | // guard before the raw dereference. This is still faster than | ||
| 1286 | // is_readable() because it bypasses the entire cache machinery. | ||
| 1287 | 10 | const void *addr = reinterpret_cast<const void *>(base + offset); | |
| 1288 | MEMORY_BASIC_INFORMATION mbi; | ||
| 1289 |
1/2✗ Branch 3 → 4 not taken.
✓ Branch 3 → 5 taken 10 times.
|
10 | if (!VirtualQuery(addr, &mbi, sizeof(mbi))) |
| 1290 | ✗ | return 0; | |
| 1291 |
2/2✓ Branch 5 → 6 taken 3 times.
✓ Branch 5 → 7 taken 7 times.
|
10 | if (mbi.State != MEM_COMMIT) |
| 1292 | 3 | return 0; | |
| 1293 |
2/2✓ Branch 7 → 8 taken 6 times.
✓ Branch 7 → 9 taken 1 time.
|
7 | if ((mbi.Protect & CachePermissions::READ_PERMISSION_FLAGS) == 0 || |
| 1294 |
2/2✓ Branch 8 → 9 taken 1 time.
✓ Branch 8 → 10 taken 5 times.
|
6 | (mbi.Protect & CachePermissions::NOACCESS_GUARD_FLAGS) != 0) |
| 1295 | 2 | return 0; | |
| 1296 | 5 | return *reinterpret_cast<const uintptr_t *>(base + offset); | |
| 1297 | #endif | ||
| 1298 | } | ||
| 1299 |