Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
188 changes: 183 additions & 5 deletions src/core/memory.cpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
// SPDX-FileCopyrightText: Copyright 2025-2026 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <limits>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/config.h"
Expand Down Expand Up @@ -68,8 +70,26 @@ void MemoryManager::SetupMemoryRegions(u64 flexible_size, bool use_extended_mem1
fmem_map.emplace(total_direct_size,
PhysicalMemoryArea{total_direct_size, remaining_physical_space});

flexible_virtual_base = impl.SystemReservedVirtualBase();
const u64 flexible_virtual_size =
std::min<u64>(total_flexible_size, impl.SystemReservedVirtualSize());
flexible_virtual_end = flexible_virtual_base + flexible_virtual_size;

{
std::scoped_lock lk{mutex};
InvalidateFlexibleMappedRangeCacheLocked();
RecalculateFlexibleMappedUsageLocked();
}

LOG_INFO(Kernel_Vmm, "Configured memory regions: flexible size = {:#x}, direct size = {:#x}",
total_flexible_size, total_direct_size);
if (Config::debugDump()) {
LOG_DEBUG(
Kernel_Vmm,
"Flexible accounting region: [{:#x}, {:#x}), total = {:#x}, used = {:#x}, free = {:#x}",
flexible_virtual_base, flexible_virtual_end, total_flexible_size, flexible_mapped_usage,
GetAvailableFlexibleSize());
}
}

u64 MemoryManager::ClampRangeSize(VAddr virtual_addr, u64 size) {
Expand Down Expand Up @@ -394,6 +414,7 @@ s32 MemoryManager::PoolCommit(VAddr virtual_addr, u64 size, MemoryProt prot, s32
new_vma.prot = prot;
new_vma.name = "anon";
new_vma.type = Core::VMAType::Pooled;
new_vma.is_system_module = false;
new_vma.phys_areas.clear();

// Find suitable physical addresses
Expand Down Expand Up @@ -445,7 +466,8 @@ s32 MemoryManager::PoolCommit(VAddr virtual_addr, u64 size, MemoryProt prot, s32

MemoryManager::VMAHandle MemoryManager::CreateArea(VAddr virtual_addr, u64 size, MemoryProt prot,
MemoryMapFlags flags, VMAType type,
std::string_view name, u64 alignment) {
std::string_view name, u64 alignment,
bool is_system_module) {
// Locate the VMA representing the requested region
auto vma = FindVMA(virtual_addr)->second;
if (True(flags & MemoryMapFlags::Fixed)) {
Expand All @@ -469,6 +491,7 @@ MemoryManager::VMAHandle MemoryManager::CreateArea(VAddr virtual_addr, u64 size,
// Create a memory area representing this mapping.
const auto new_vma_handle = CarveVMA(virtual_addr, size);
auto& new_vma = new_vma_handle->second;
InvalidateFlexibleMappedRangeCacheLocked();
const bool is_exec = True(prot & MemoryProt::CpuExec);
if (True(prot & MemoryProt::CpuWrite)) {
// On PS4, read is appended to write mappings.
Expand All @@ -480,13 +503,15 @@ MemoryManager::VMAHandle MemoryManager::CreateArea(VAddr virtual_addr, u64 size,
new_vma.prot = prot;
new_vma.name = name;
new_vma.type = type;
new_vma.is_system_module = is_system_module;
new_vma.phys_areas.clear();
return new_vma_handle;
}

s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, MemoryProt prot,
MemoryMapFlags flags, VMAType type, std::string_view name,
bool validate_dmem, PAddr phys_addr, u64 alignment) {
bool validate_dmem, PAddr phys_addr, u64 alignment,
bool is_system_module) {
// Certain games perform flexible mappings on loop to determine
// the available flexible memory size. Questionable but we need to handle this.
if (type == VMAType::Flexible && flexible_usage + size > total_flexible_size) {
Expand Down Expand Up @@ -557,9 +582,11 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo

// Acquire writer lock.
std::unique_lock lk2{mutex};
const u64 flexible_before = GetFlexibleMappedBytesInRangeLocked(virtual_addr, size);

// Create VMA representing this mapping.
auto new_vma_handle = CreateArea(virtual_addr, size, prot, flags, type, name, alignment);
auto new_vma_handle =
CreateArea(virtual_addr, size, prot, flags, type, name, alignment, is_system_module);
auto& new_vma = new_vma_handle->second;
auto mapped_addr = new_vma.base;
bool is_exec = True(prot & MemoryProt::CpuExec);
Expand Down Expand Up @@ -643,6 +670,14 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
MergeAdjacent(vma_map, new_vma_handle);
}

u64 flexible_after = 0;
if (type == VMAType::Flexible && !new_vma.phys_areas.empty()) {
flexible_after = GetFlexibleRangeOverlapBytesLocked(mapped_addr, size);
} else if (type == VMAType::Code && !is_system_module) {
flexible_after = GetFlexibleRangeOverlapBytesLocked(mapped_addr, size);
}
AdjustFlexibleMappedUsageLocked(flexible_before, flexible_after);

*out_addr = std::bit_cast<void*>(mapped_addr);
if (type != VMAType::Reserved && type != VMAType::PoolReserved) {
// Flexible address space mappings were performed while finding direct memory areas.
Expand Down Expand Up @@ -731,9 +766,11 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory

// Aquire writer lock
std::scoped_lock lk2{mutex};
const u64 flexible_before = GetFlexibleMappedBytesInRangeLocked(virtual_addr, size);

// Update VMA map and map to address space.
auto new_vma_handle = CreateArea(virtual_addr, size, prot, flags, VMAType::File, "anon", 0);
auto new_vma_handle =
CreateArea(virtual_addr, size, prot, flags, VMAType::File, "anon", 0, false);

auto& new_vma = new_vma_handle->second;
new_vma.fd = fd;
Expand All @@ -742,6 +779,8 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory

impl.MapFile(mapped_addr, size, phys_addr, std::bit_cast<u32>(prot), handle);

AdjustFlexibleMappedUsageLocked(flexible_before, 0);

*out_addr = std::bit_cast<void*>(mapped_addr);
return ORBIS_OK;
}
Expand Down Expand Up @@ -814,6 +853,7 @@ s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
vma.prot = MemoryProt::NoAccess;
vma.disallow_merge = false;
vma.name = "anon";
vma.is_system_module = false;
vma.phys_areas.clear();
MergeAdjacent(vma_map, new_it);

Expand Down Expand Up @@ -848,7 +888,12 @@ s32 MemoryManager::UnmapMemory(VAddr virtual_addr, u64 size) {

// Acquire writer lock.
std::scoped_lock lk2{mutex};
return UnmapMemoryImpl(virtual_addr, size);
const u64 flexible_before = GetFlexibleMappedBytesInRangeLocked(virtual_addr, size);
const s32 result = UnmapMemoryImpl(virtual_addr, size);
if (result == ORBIS_OK) {
AdjustFlexibleMappedUsageLocked(flexible_before, 0);
}
return result;
}

u64 MemoryManager::UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma_base, u64 size) {
Expand Down Expand Up @@ -904,11 +949,13 @@ u64 MemoryManager::UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma
// Mark region as free and attempt to coalesce it with neighbours.
const auto new_it = CarveVMA(virtual_addr, size_in_vma);
auto& vma = new_it->second;
InvalidateFlexibleMappedRangeCacheLocked();
vma.type = VMAType::Free;
vma.prot = MemoryProt::NoAccess;
vma.phys_areas.clear();
vma.disallow_merge = false;
vma.name = "";
vma.is_system_module = false;
MergeAdjacent(vma_map, new_it);

if (vma_type != VMAType::Reserved && vma_type != VMAType::PoolReserved) {
Expand Down Expand Up @@ -1344,6 +1391,137 @@ void MemoryManager::InvalidateMemory(const VAddr addr, const u64 size) const {
}
}

// Debug entry point: takes the memory lock, then rebuilds the flexible
// usage counter from the current VMA map.
void MemoryManager::RecalculateFlexibleUsageForDebug() {
    std::scoped_lock guard{mutex};
    RecalculateFlexibleMappedUsageLocked();
}

// Returns true for VMA types that participate in flexible-memory accounting.
bool MemoryManager::IsFlexibleCountedVmaType(VMAType type) const {
    switch (type) {
    case VMAType::Flexible:
    case VMAType::Code:
        return true;
    default:
        return false;
    }
}

// Returns true when this VMA currently counts against the flexible budget.
bool MemoryManager::IsFlexibleCommittedVma(const VirtualMemoryArea& vma) const {
    if (!vma.IsMapped()) {
        return false;
    }

    switch (vma.type) {
    case VMAType::Flexible:
        // A flexible area only consumes budget once physical pages back it.
        return !vma.phys_areas.empty();
    case VMAType::Code:
        // System modules should not consume the game's flexible memory.
        return !vma.is_system_module;
    default:
        return false;
    }
}

// Returns how many bytes of [virtual_addr, virtual_addr + size), padded out to
// 16 KiB page granularity, fall inside the flexible accounting window.
u64 MemoryManager::GetFlexibleRangeOverlapBytesLocked(VAddr virtual_addr, u64 size) const {
    if (size == 0 || !IsFlexibleRegionConfigured()) {
        return 0;
    }

    // Expand the request to whole 16 KiB pages.
    const VAddr page_start = Common::AlignDown(virtual_addr, 16_KB);
    const u64 lead_in = virtual_addr - page_start;
    if (size > std::numeric_limits<u64>::max() - lead_in) {
        // Padding the size would overflow u64; treat as no overlap.
        return 0;
    }
    const u64 padded_size = Common::AlignUp(size + lead_in, 16_KB);
    if (padded_size == 0) {
        return 0;
    }
    const VAddr page_end = page_start + padded_size;

    // Clamp the padded range to the flexible window and measure the remainder.
    const VAddr clamped_begin = std::max(page_start, flexible_virtual_base);
    const VAddr clamped_end = std::min(page_end, flexible_virtual_end);
    if (clamped_begin >= clamped_end) {
        return 0;
    }
    return clamped_end - clamped_begin;
}

// Sums the committed, flexible-counted bytes inside [virtual_addr,
// virtual_addr + size) after padding to 16 KiB pages and clamping to the
// flexible accounting window. Results are cached per (range, VMA revision).
u64 MemoryManager::GetFlexibleMappedBytesInRangeLocked(VAddr virtual_addr, u64 size) const {
    if (size == 0 || !IsFlexibleRegionConfigured()) {
        return 0;
    }

    // Expand the request to whole 16 KiB pages.
    const VAddr page_start = Common::AlignDown(virtual_addr, 16_KB);
    const u64 lead_in = virtual_addr - page_start;
    if (size > std::numeric_limits<u64>::max() - lead_in) {
        // Padding the size would overflow u64; treat as no overlap.
        return 0;
    }
    const u64 padded_size = Common::AlignUp(size + lead_in, 16_KB);
    if (padded_size == 0) {
        return 0;
    }
    const VAddr page_end = page_start + padded_size;

    // Clamp to the flexible window; an empty intersection contributes nothing.
    const VAddr range_start = std::max(page_start, flexible_virtual_base);
    const VAddr range_end = std::min(page_end, flexible_virtual_end);
    if (range_start >= range_end) {
        return 0;
    }

    // Serve repeated queries for an unchanged map and identical range from cache.
    const auto& cache = flexible_mapped_range_cache;
    if (cache.valid && cache.revision == vma_revision && cache.range_start == range_start &&
        cache.range_end == range_end) {
        return cache.mapped_bytes;
    }

    // Walk every VMA that can intersect the clamped range, starting from the
    // entry at or immediately before range_start.
    u64 total = 0;
    auto it = vma_map.upper_bound(range_start);
    if (it != vma_map.begin()) {
        --it;
    }
    for (; it != vma_map.end() && it->second.base < range_end; ++it) {
        const auto& vma = it->second;
        if (!IsFlexibleCountedVmaType(vma.type) || !IsFlexibleCommittedVma(vma)) {
            continue;
        }
        const VAddr overlap_begin = std::max(range_start, vma.base);
        const VAddr overlap_end = std::min(range_end, vma.base + vma.size);
        if (overlap_begin < overlap_end) {
            total += overlap_end - overlap_begin;
        }
    }

    // Remember this result for the current VMA revision.
    flexible_mapped_range_cache.range_start = range_start;
    flexible_mapped_range_cache.range_end = range_end;
    flexible_mapped_range_cache.mapped_bytes = total;
    flexible_mapped_range_cache.revision = vma_revision;
    flexible_mapped_range_cache.valid = true;
    return total;
}

// Called whenever the VMA layout changes: drops the cached range result and
// bumps the revision so any concurrent cached value is rejected on lookup.
void MemoryManager::InvalidateFlexibleMappedRangeCacheLocked() {
    flexible_mapped_range_cache.valid = false;
    ++vma_revision;
}

// Applies the delta between the before/after snapshots of mapped flexible
// bytes for a range to the global usage counter.
void MemoryManager::AdjustFlexibleMappedUsageLocked(u64 mapped_before, u64 mapped_after) {
    if (mapped_after >= mapped_before) {
        // Usage grew (or stayed the same).
        flexible_mapped_usage += mapped_after - mapped_before;
        return;
    }
    // Usage shrank: saturate at zero instead of underflowing the counter.
    const u64 removed = mapped_before - mapped_after;
    flexible_mapped_usage =
        (removed >= flexible_mapped_usage) ? 0 : flexible_mapped_usage - removed;
}

// Rebuilds the flexible usage counter from scratch by scanning the entire
// flexible accounting window; zero when no region is configured.
void MemoryManager::RecalculateFlexibleMappedUsageLocked() {
    flexible_mapped_usage =
        IsFlexibleRegionConfigured()
            ? GetFlexibleMappedBytesInRangeLocked(flexible_virtual_base,
                                                  flexible_virtual_end - flexible_virtual_base)
            : 0;
}

VAddr MemoryManager::SearchFree(VAddr virtual_addr, u64 size, u32 alignment) {
// Calculate the minimum and maximum addresses present in our address space.
auto min_search_address = impl.SystemManagedVirtualBase();
Expand Down
Loading