Skip to content
Open
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
124 changes: 123 additions & 1 deletion src/core/memory.cpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
// SPDX-FileCopyrightText: Copyright 2025-2026 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <limits>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/config.h"
Expand Down Expand Up @@ -68,8 +70,25 @@ void MemoryManager::SetupMemoryRegions(u64 flexible_size, bool use_extended_mem1
fmem_map.emplace(total_direct_size,
PhysicalMemoryArea{total_direct_size, remaining_physical_space});

flexible_virtual_base = impl.SystemReservedVirtualBase();
const u64 flexible_virtual_size =
std::min<u64>(total_flexible_size, impl.SystemReservedVirtualSize());
flexible_virtual_end = flexible_virtual_base + flexible_virtual_size;

{
std::scoped_lock lk{mutex};
RecalculateFlexibleMappedUsageLocked();
}

LOG_INFO(Kernel_Vmm, "Configured memory regions: flexible size = {:#x}, direct size = {:#x}",
total_flexible_size, total_direct_size);
if (Config::debugDump()) {
LOG_DEBUG(
Kernel_Vmm,
"Flexible accounting region: [{:#x}, {:#x}), total = {:#x}, used = {:#x}, free = {:#x}",
flexible_virtual_base, flexible_virtual_end, total_flexible_size, flexible_mapped_usage,
GetAvailableFlexibleSize());
}
}

u64 MemoryManager::ClampRangeSize(VAddr virtual_addr, u64 size) {
Expand Down Expand Up @@ -557,6 +576,7 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo

// Acquire writer lock.
std::unique_lock lk2{mutex};
const u64 flexible_before = GetFlexibleMappedBytesInRangeLocked(virtual_addr, size);

// Create VMA representing this mapping.
auto new_vma_handle = CreateArea(virtual_addr, size, prot, flags, type, name, alignment);
Expand Down Expand Up @@ -643,6 +663,9 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
MergeAdjacent(vma_map, new_vma_handle);
}

const u64 flexible_after = GetFlexibleMappedBytesInRangeLocked(mapped_addr, size);
AdjustFlexibleMappedUsageLocked(flexible_before, flexible_after);

*out_addr = std::bit_cast<void*>(mapped_addr);
if (type != VMAType::Reserved && type != VMAType::PoolReserved) {
// Flexible address space mappings were performed while finding direct memory areas.
Expand Down Expand Up @@ -731,6 +754,7 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory

// Acquire writer lock
std::scoped_lock lk2{mutex};
const u64 flexible_before = GetFlexibleMappedBytesInRangeLocked(virtual_addr, size);

// Update VMA map and map to address space.
auto new_vma_handle = CreateArea(virtual_addr, size, prot, flags, VMAType::File, "anon", 0);
Expand All @@ -742,6 +766,9 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory

impl.MapFile(mapped_addr, size, phys_addr, std::bit_cast<u32>(prot), handle);

const u64 flexible_after = GetFlexibleMappedBytesInRangeLocked(mapped_addr, size);
AdjustFlexibleMappedUsageLocked(flexible_before, flexible_after);

*out_addr = std::bit_cast<void*>(mapped_addr);
return ORBIS_OK;
}
Expand Down Expand Up @@ -848,7 +875,13 @@ s32 MemoryManager::UnmapMemory(VAddr virtual_addr, u64 size) {

// Acquire writer lock.
std::scoped_lock lk2{mutex};
return UnmapMemoryImpl(virtual_addr, size);
const u64 flexible_before = GetFlexibleMappedBytesInRangeLocked(virtual_addr, size);
const s32 result = UnmapMemoryImpl(virtual_addr, size);
if (result == ORBIS_OK) {
const u64 flexible_after = GetFlexibleMappedBytesInRangeLocked(virtual_addr, size);
AdjustFlexibleMappedUsageLocked(flexible_before, flexible_after);
}
return result;
}

u64 MemoryManager::UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma_base, u64 size) {
Expand Down Expand Up @@ -1344,6 +1377,95 @@ void MemoryManager::InvalidateMemory(const VAddr addr, const u64 size) const {
}
}

void MemoryManager::RecalculateFlexibleUsageForDebug() {
    // Debug entry point: take the writer lock, then rebuild the cached
    // flexible-memory usage counter from the current VMA map.
    std::scoped_lock lock{mutex};
    RecalculateFlexibleMappedUsageLocked();
}

// A VMA counts against the flexible budget only when it is a flexible
// or code mapping.
bool MemoryManager::IsFlexibleCountedVmaType(VMAType type) const {
    switch (type) {
    case VMAType::Flexible:
    case VMAType::Code:
        return true;
    default:
        return false;
    }
}

// Decides whether a mapped VMA is actually committed (backed) rather than
// merely reserved, so it can be charged to the flexible budget.
bool MemoryManager::IsFlexibleCommittedVma(const VirtualMemoryArea& vma) const {
    if (!vma.IsMapped()) {
        return false;
    }
    switch (vma.type) {
    case VMAType::Direct:
    case VMAType::Flexible:
    case VMAType::Pooled:
        // Phys-tracked mappings should expose at least one physical
        // sub-area when committed.
        return !vma.phys_areas.empty();
    case VMAType::Code:
    case VMAType::Stack:
    case VMAType::File:
        // Non-phys-tracked mappings are committed through address-space
        // map calls.
        return true;
    default:
        return false;
    }
}

// Counts the bytes inside [virtual_addr, virtual_addr + size) that overlap the
// flexible accounting region and belong to committed, flexible-counted VMAs.
// Caller must hold `mutex`. Returns 0 when the flexible region is not
// configured, the request is empty, or the range does not intersect it.
u64 MemoryManager::GetFlexibleMappedBytesInRangeLocked(VAddr virtual_addr, u64 size) const {
if (!IsFlexibleRegionConfigured() || size == 0) {
return 0;
}

// Expand the query to 16 KB page granularity, since mappings are tracked
// at page granularity.
const VAddr aligned_start = Common::AlignDown(virtual_addr, 16_KB);
const u64 page_offset = virtual_addr - aligned_start;
// Guard against u64 overflow in size + page_offset below.
if (size > std::numeric_limits<u64>::max() - page_offset) {
return 0;
}
const u64 aligned_size = Common::AlignUp(size + page_offset, 16_KB);
// AlignUp can wrap to 0 when size + page_offset is within 16 KB of the
// u64 maximum; treat that as an empty range.
if (aligned_size == 0) {
return 0;
}

// Clamp the aligned range to the flexible accounting window.
const VAddr aligned_end = aligned_start + aligned_size;
const VAddr range_start = std::max(aligned_start, flexible_virtual_base);
const VAddr range_end = std::min(aligned_end, flexible_virtual_end);
if (range_start >= range_end) {
return 0;
}

u64 mapped_bytes = 0;
// Seed the walk at the VMA containing range_start: upper_bound yields the
// first VMA starting strictly after it, so step back one when possible.
auto it = vma_map.upper_bound(range_start);
if (it != vma_map.begin()) {
it = std::prev(it);
}
// Walk VMAs in address order until they start past the clamped range.
while (it != vma_map.end() && it->second.base < range_end) {
const auto& vma = it->second;
const VAddr vma_end = vma.base + vma.size;
const VAddr overlap_start = std::max(range_start, vma.base);
const VAddr overlap_end = std::min(range_end, vma_end);
const bool counted_type = IsFlexibleCountedVmaType(vma.type);
const bool committed = IsFlexibleCommittedVma(vma);

// Charge only the overlapping portion of committed flexible/code VMAs.
if (overlap_start < overlap_end && counted_type && committed) {
mapped_bytes += overlap_end - overlap_start;
}

++it;
}
return mapped_bytes;
}

// Applies the delta between a range's flexible byte count before and after a
// map/unmap operation to the cached usage counter. Caller must hold `mutex`.
// The counter saturates at zero so a shrink can never underflow it.
void MemoryManager::AdjustFlexibleMappedUsageLocked(u64 mapped_before, u64 mapped_after) {
    if (mapped_after >= mapped_before) {
        flexible_mapped_usage += mapped_after - mapped_before;
        return;
    }
    const u64 shrink = mapped_before - mapped_after;
    flexible_mapped_usage = shrink >= flexible_mapped_usage ? 0 : flexible_mapped_usage - shrink;
}

// Rebuilds the cached flexible usage counter by scanning the entire flexible
// accounting region. Caller must hold `mutex`. Resets the counter to zero
// when no flexible region has been configured.
void MemoryManager::RecalculateFlexibleMappedUsageLocked() {
    flexible_mapped_usage =
        IsFlexibleRegionConfigured()
            ? GetFlexibleMappedBytesInRangeLocked(flexible_virtual_base,
                                                  flexible_virtual_end - flexible_virtual_base)
            : 0;
}

VAddr MemoryManager::SearchFree(VAddr virtual_addr, u64 size, u32 alignment) {
// Calculate the minimum and maximum addresses present in our address space.
auto min_search_address = impl.SystemManagedVirtualBase();
Expand Down
39 changes: 37 additions & 2 deletions src/core/memory.h
Original file line number Diff line number Diff line change
Expand Up @@ -157,9 +157,11 @@ struct VirtualMemoryArea {
class MemoryManager {
using PhysMap = std::map<PAddr, PhysicalMemoryArea>;
using PhysHandle = PhysMap::iterator;
using PhysConstHandle = PhysMap::const_iterator;

using VMAMap = std::map<VAddr, VirtualMemoryArea>;
using VMAHandle = VMAMap::iterator;
using VMAConstHandle = VMAMap::const_iterator;

public:
explicit MemoryManager();
Expand All @@ -181,8 +183,17 @@ class MemoryManager {
return total_flexible_size;
}

// Returns the cached number of bytes currently charged against the flexible
// memory budget (maintained by AdjustFlexibleMappedUsageLocked).
u64 GetUsedFlexibleSize() const {
return flexible_mapped_usage;
}

// Returns the remaining flexible memory budget. The subtraction is clamped
// to zero so that transient accounting overshoot (used > total) can never
// underflow the unsigned result.
u64 GetAvailableFlexibleSize() const {
    const u64 used = GetUsedFlexibleSize();
    return used < total_flexible_size ? total_flexible_size - used : 0;
}

// True once SetupMemoryRegions has established a non-empty flexible
// accounting window [flexible_virtual_base, flexible_virtual_end).
bool IsFlexibleRegionConfigured() const {
return flexible_virtual_end > flexible_virtual_base;
}

VAddr SystemReservedVirtualBase() noexcept {
Expand Down Expand Up @@ -288,20 +299,31 @@ class MemoryManager {

void InvalidateMemory(VAddr addr, u64 size) const;

void RecalculateFlexibleUsageForDebug();

private:
// Returns the VMA with the greatest base address <= target.
// NOTE(review): assumes vma_map always contains an entry at or below
// `target` (std::prev on begin() would be UB) — presumably a sentinel VMA
// covers the low end of the address space; confirm against initialization.
VMAHandle FindVMA(VAddr target) {
return std::prev(vma_map.upper_bound(target));
}
// Const overload of the lookup above.
VMAConstHandle FindVMA(VAddr target) const {
return std::prev(vma_map.upper_bound(target));
}

// Returns the direct-memory area with the greatest base address <= target.
// NOTE(review): like FindVMA, relies on the map having an entry at or below
// `target`; std::prev(begin()) would be UB — confirm a sentinel exists.
PhysHandle FindDmemArea(PAddr target) {
return std::prev(dmem_map.upper_bound(target));
}
// Const overload of the lookup above.
PhysConstHandle FindDmemArea(PAddr target) const {
return std::prev(dmem_map.upper_bound(target));
}

// Returns the flexible-memory area with the greatest base address <= target.
PhysHandle FindFmemArea(PAddr target) {
return std::prev(fmem_map.upper_bound(target));
}
// Const overload of the lookup above.
PhysConstHandle FindFmemArea(PAddr target) const {
return std::prev(fmem_map.upper_bound(target));
}

bool HasPhysicalBacking(VirtualMemoryArea vma) {
bool HasPhysicalBacking(const VirtualMemoryArea& vma) const {
return vma.type == VMAType::Direct || vma.type == VMAType::Flexible ||
vma.type == VMAType::Pooled;
}
Expand All @@ -327,6 +349,16 @@ class MemoryManager {

s32 UnmapMemoryImpl(VAddr virtual_addr, u64 size);

bool IsFlexibleCountedVmaType(VMAType type) const;

bool IsFlexibleCommittedVma(const VirtualMemoryArea& vma) const;

u64 GetFlexibleMappedBytesInRangeLocked(VAddr virtual_addr, u64 size) const;

void AdjustFlexibleMappedUsageLocked(u64 mapped_before, u64 mapped_after);

void RecalculateFlexibleMappedUsageLocked();

private:
AddressSpace impl;
PhysMap dmem_map;
Expand All @@ -337,6 +369,9 @@ class MemoryManager {
u64 total_direct_size{};
u64 total_flexible_size{};
u64 flexible_usage{};
VAddr flexible_virtual_base{};
VAddr flexible_virtual_end{};
u64 flexible_mapped_usage{};
u64 pool_budget{};
s32 sdk_version{};
Vulkan::Rasterizer* rasterizer{};
Expand Down
9 changes: 8 additions & 1 deletion src/core/module.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -113,12 +113,19 @@ void Module::LoadModuleToMemory(u32& max_tls_index) {

// Map module segments (and possible TLS trampolines)
void** out_addr = reinterpret_cast<void**>(&base_virtual_addr);
memory->MapMemory(out_addr, ModuleLoadBase, aligned_base_size + TrampolineSize,
memory->MapMemory(out_addr, ModuleLoadBase, aligned_base_size,
MemoryProt::CpuReadWrite | MemoryProt::CpuExec, MemoryMapFlags::NoFlags,
VMAType::Code, name);
LOG_INFO(Core_Linker, "Loading module {} to {}", name, fmt::ptr(*out_addr));

#ifdef ARCH_X86_64
void* trampoline_region = std::bit_cast<void*>(base_virtual_addr + aligned_base_size);
const int tramp_ret = memory->MapMemory(
&trampoline_region, base_virtual_addr + aligned_base_size, TrampolineSize,
MemoryProt::CpuReadWrite | MemoryProt::CpuExec,
MemoryMapFlags::Fixed | MemoryMapFlags::NoOverwrite, VMAType::File, "Trampoline");
ASSERT_MSG(tramp_ret == 0, "Unable to map trampoline memory");

// Initialize trampoline generator.
void* trampoline_addr = std::bit_cast<void*>(base_virtual_addr + aligned_base_size);
RegisterPatchModule(*out_addr, aligned_base_size, trampoline_addr, TrampolineSize);
Expand Down