dolphin/Source/Core/Core/HW/Memmap.cpp
Dentomologist 7a45ede688
Merge pull request #14123 from JosJuice/fix-logical-page-mappings
Memmap: Fix populating m_logical_page_mappings
2026-03-13 12:11:01 -07:00

795 lines
26 KiB
C++

// Copyright 2008 Dolphin Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// NOTE:
// These functions are primarily used by the interpreter versions of the LoadStore instructions.
// However, if a JITed instruction (for example lwz) wants to access a bad memory area that call
// may be redirected here (for example to Read_U32()).
#include "Core/HW/Memmap.h"
#include <algorithm>
#include <array>
#include <bit>
#include <cstring>
#include <map>
#include <memory>
#include <set>
#include <span>
#include <tuple>
#include "Common/ChunkFile.h"
#include "Common/CommonTypes.h"
#include "Common/Logging/Log.h"
#include "Common/MemArena.h"
#include "Common/MsgHandler.h"
#include "Common/Swap.h"
#include "Core/Config/MainSettings.h"
#include "Core/Core.h"
#include "Core/HW/AudioInterface.h"
#include "Core/HW/DSP.h"
#include "Core/HW/DVD/DVDInterface.h"
#include "Core/HW/EXI/EXI.h"
#include "Core/HW/MMIO.h"
#include "Core/HW/MemoryInterface.h"
#include "Core/HW/ProcessorInterface.h"
#include "Core/HW/SI/SI.h"
#include "Core/HW/VideoInterface.h"
#include "Core/HW/WII_IPC.h"
#include "Core/PowerPC/JitCommon/JitBase.h"
#include "Core/PowerPC/PowerPC.h"
#include "Core/System.h"
#include "VideoCommon/CommandProcessor.h"
#include "VideoCommon/PixelEngine.h"
namespace Memory
{
// Derives host-page constants up front: the host page size, how many
// PowerPC::HW_PAGE_SIZE guest pages fit into one host page, and whether this
// host page size can be used for page-table fastmem mappings at all.
MemoryManager::MemoryManager(Core::System& system)
    : m_page_size(static_cast<u32>(m_arena.GetPageSize())),
      m_guest_pages_per_host_page(m_page_size / PowerPC::HW_PAGE_SIZE),
      m_host_page_type(GetHostPageTypeForPageSize(m_page_size)), m_system(system)
{
}
MemoryManager::~MemoryManager() = default;
// Classifies the host's page size for page-table fastmem purposes.
// Non-power-of-two sizes are unsupported; power-of-two sizes at most
// HW_PAGE_SIZE count as "small" pages, larger ones as "large" pages.
MemoryManager::HostPageType MemoryManager::GetHostPageTypeForPageSize(u32 page_size)
{
  if (!std::has_single_bit(page_size))
    return HostPageType::Unsupported;
  if (page_size > PowerPC::HW_PAGE_SIZE)
    return HostPageType::LargePages;
  return HostPageType::SmallPages;
}
// Creates the MMIO mapping and registers every hardware block's handlers
// with it. Wii-only devices get an additional set of registrations in the
// 0x0D000000 range.
void MemoryManager::InitMMIO(Core::System& system)
{
  m_mmio_mapping = std::make_unique<MMIO::Mapping>();
  MMIO::Mapping* const mmio = m_mmio_mapping.get();

  system.GetCommandProcessor().RegisterMMIO(mmio, 0x0C000000);
  system.GetPixelEngine().RegisterMMIO(mmio, 0x0C001000);
  system.GetVideoInterface().RegisterMMIO(mmio, 0x0C002000);
  system.GetProcessorInterface().RegisterMMIO(mmio, 0x0C003000);
  system.GetMemoryInterface().RegisterMMIO(mmio, 0x0C004000);
  system.GetDSP().RegisterMMIO(mmio, 0x0C005000);
  system.GetDVDInterface().RegisterMMIO(mmio, 0x0C006000, false);
  system.GetSerialInterface().RegisterMMIO(mmio, 0x0C006400);
  system.GetExpansionInterface().RegisterMMIO(mmio, 0x0C006800);
  system.GetAudioInterface().RegisterMMIO(mmio, 0x0C006C00);

  if (system.IsWii())
  {
    system.GetWiiIPC().RegisterMMIO(mmio, 0x0D000000);
    system.GetDVDInterface().RegisterMMIO(mmio, 0x0D006000, true);
    system.GetSerialInterface().RegisterMMIO(mmio, 0x0D006400);
    system.GetExpansionInterface().RegisterMMIO(mmio, 0x0D006800);
    system.GetAudioInterface().RegisterMMIO(mmio, 0x0D006C00);
  }
}
// Allocates all emulated physical memory regions and builds the physical
// page-mapping lookup table.
//
// MEM1/MEM2 sizes come from the config when the RAM override is enabled,
// otherwise retail sizes are used. Which regions are active depends on
// whether we're emulating a Wii (MEM2) and on the fake-VMEM hack (GameCube
// with MMU disabled). All active regions are backed by a single shared
// memory segment so they can also be mapped into the fastmem arena later.
void MemoryManager::Init()
{
  const auto get_mem1_size = [] {
    if (Config::Get(Config::MAIN_RAM_OVERRIDE_ENABLE))
      return Config::Get(Config::MAIN_MEM1_SIZE);
    return Memory::MEM1_SIZE_RETAIL;
  };
  const auto get_mem2_size = [] {
    if (Config::Get(Config::MAIN_RAM_OVERRIDE_ENABLE))
      return Config::Get(Config::MAIN_MEM2_SIZE);
    return Memory::MEM2_SIZE_RETAIL;
  };

  // Sizes are rounded up to the next power of two so that "address & mask"
  // wraps accesses within each region.
  m_ram_size_real = get_mem1_size();
  m_ram_size = MathUtil::NextPowerOf2(GetRamSizeReal());
  m_ram_mask = GetRamSize() - 1;
  m_fakevmem_size = 0x02000000;
  m_fakevmem_mask = GetFakeVMemSize() - 1;
  m_l1_cache_size = 0x00040000;
  m_l1_cache_mask = GetL1CacheSize() - 1;
  m_exram_size_real = get_mem2_size();
  m_exram_size = MathUtil::NextPowerOf2(GetExRamSizeReal());
  m_exram_mask = GetExRamSize() - 1;

  m_physical_regions[0] = PhysicalMemoryRegion{
      &m_ram, 0x00000000, GetRamSize(), PhysicalMemoryRegion::ALWAYS, 0, false};
  m_physical_regions[1] = PhysicalMemoryRegion{
      &m_l1_cache, 0xE0000000, GetL1CacheSize(), PhysicalMemoryRegion::ALWAYS, 0, false};
  m_physical_regions[2] = PhysicalMemoryRegion{
      &m_fake_vmem, 0x7E000000, GetFakeVMemSize(), PhysicalMemoryRegion::FAKE_VMEM, 0, false};
  m_physical_regions[3] = PhysicalMemoryRegion{
      &m_exram, 0x10000000, GetExRamSize(), PhysicalMemoryRegion::WII_ONLY, 0, false};

  const bool wii = m_system.IsWii();
  const bool mmu = m_system.IsMMUMode();
  // If MMU is turned off in GameCube mode, turn on fake VMEM hack.
  const bool fake_vmem = !wii && !mmu;

  // Activate the applicable regions and assign each its offset within the
  // shared memory segment.
  u32 mem_size = 0;
  for (PhysicalMemoryRegion& region : m_physical_regions)
  {
    if (!wii && (region.flags & PhysicalMemoryRegion::WII_ONLY))
      continue;
    if (!fake_vmem && (region.flags & PhysicalMemoryRegion::FAKE_VMEM))
      continue;
    region.shm_position = mem_size;
    region.active = true;
    mem_size += region.size;
  }
  m_arena.GrabSHMSegment(mem_size, "dolphin-emu");

  m_physical_page_mappings.fill(nullptr);

  // Create an anonymous view of the physical memory
  for (const PhysicalMemoryRegion& region : m_physical_regions)
  {
    if (!region.active)
      continue;
    // static_cast instead of a C-style cast; CreateView returns void*.
    *region.out_pointer = static_cast<u8*>(m_arena.CreateView(region.shm_position, region.size));
    if (!*region.out_pointer)
    {
      PanicAlertFmt(
          "Memory::Init(): Failed to create view for physical region at 0x{:08X} (size 0x{:08X}).",
          region.physical_address, region.size);
      exit(0);
    }
    // Record the host pointer for every BAT-sized page this region covers.
    for (u32 i = 0; i < region.size; i += PowerPC::BAT_PAGE_SIZE)
    {
      const size_t index = (i + region.physical_address) >> PowerPC::BAT_INDEX_SHIFT;
      m_physical_page_mappings[index] = *region.out_pointer + i;
    }
  }

  m_physical_page_mappings_base = reinterpret_cast<u8*>(m_physical_page_mappings.data());
  m_logical_page_mappings_base = reinterpret_cast<u8*>(m_logical_page_mappings.data());

  Clear();

  INFO_LOG_FMT(MEMMAP, "Memory system initialized. RAM at {}", fmt::ptr(m_ram));
  m_is_initialized = true;
}
bool MemoryManager::IsAddressInFastmemArea(const u8* address) const
{
return address >= m_fastmem_arena && address < m_fastmem_arena + m_fastmem_arena_size;
}
// Reserves the fastmem arena and maps every active physical region into the
// "address translation disabled" view. Returns false (after a panic alert)
// if the reservation or any mapping fails.
bool MemoryManager::InitFastmemArena()
{
  // Here we set up memory mappings for fastmem. The basic idea of fastmem is that we reserve 4 GiB
  // of virtual memory and lay out the addresses within that 4 GiB range just like the memory map of
  // the emulated system. This lets the JIT emulate PPC load/store instructions by translating a PPC
  // address to a host address as follows and then using a regular load/store instruction:
  //
  // RMEM = ppcState.msr.DR ? m_logical_base : m_physical_base
  // host_address = RMEM + u32(ppc_address_base + ppc_address_offset)
  //
  // If the resulting host address is backed by real memory, the memory access will simply work.
  // If not, a segfault handler will backpatch the JIT code to instead call functions in MMU.cpp.
  // This way, most memory accesses will be super fast. We do pay a performance penalty for memory
  // accesses that need special handling, but they're rare enough that it's very beneficial overall.
  //
  // Note: Jit64 (but not JitArm64) sometimes takes a shortcut when computing addresses and skips
  // the cast to u32 that you see in the pseudocode above. When this happens, ppc_address_base
  // is a 32-bit value stored in a 64-bit register (which effectively makes it behave like an
  // unsigned 32-bit value), and ppc_address_offset is a signed 32-bit integer encoded directly
  // into the load/store instruction. This can cause us to undershoot or overshoot the intended
  // 4 GiB range by at most 2 GiB in either direction. So, make sure we have 2 GiB of guard pages
  // on each side of each 4 GiB range.
  //
  // We need two 4 GiB ranges, one for PPC addresses with address translation disabled
  // (m_physical_base) and one for PPC addresses with address translation enabled (m_logical_base),
  // so our memory map ends up looking like this:
  //
  // 2 GiB guard
  // 4 GiB view for disabled address translation
  // 2 GiB guard
  // 4 GiB view for enabled address translation
  // 2 GiB guard
  constexpr size_t ppc_view_size = 0x1'0000'0000;
  constexpr size_t guard_size = 0x8000'0000;
  constexpr size_t memory_size = ppc_view_size * 2 + guard_size * 3;
  m_fastmem_arena = m_arena.ReserveMemoryRegion(memory_size);
  if (!m_fastmem_arena)
  {
    PanicAlertFmt("Memory::InitFastmemArena(): Failed finding a memory base.");
    return false;
  }
  // Carve the two 4 GiB views out of the reservation, per the layout above.
  m_physical_base = m_fastmem_arena + guard_size;
  m_logical_base = m_fastmem_arena + ppc_view_size + guard_size * 2;
  // Map each active physical region into the untranslated view at its
  // emulated physical address.
  for (const PhysicalMemoryRegion& region : m_physical_regions)
  {
    if (!region.active)
      continue;
    void* base = m_physical_base + region.physical_address;
    void* view = m_arena.MapInMemoryRegion(region.shm_position, region.size, base, true);
    // MapInMemoryRegion must place the view exactly at the requested base for
    // the fastmem address arithmetic to work.
    if (base != view)
    {
      PanicAlertFmt("Memory::InitFastmemArena(): Failed to map memory region at 0x{:08X} "
                    "(size 0x{:08X}) into physical fastmem region.",
                    region.physical_address, region.size);
      return false;
    }
  }
  m_is_fastmem_arena_initialized = true;
  m_fastmem_arena_size = memory_size;
  return true;
}
// Rebuilds the logical (translated-address) fastmem mappings and the
// m_logical_page_mappings lookup table from the given DBAT table.
// All previously created DBAT and page-table mappings are torn down first.
void MemoryManager::UpdateDBATMappings(const PowerPC::BatTable& dbat_table)
{
  // Tear down the mappings created for the previous DBAT configuration.
  for (const auto& [logical_address, entry] : m_dbat_mapped_entries)
  {
    m_arena.UnmapFromMemoryRegion(entry.mapped_pointer, entry.mapped_size);
  }
  m_dbat_mapped_entries.clear();
  RemoveAllPageTableMappings();
  m_logical_page_mappings.fill(nullptr);
  for (u32 i = 0; i < dbat_table.size(); ++i)
  {
    if (dbat_table[i] & PowerPC::BAT_PHYSICAL_BIT)
    {
      u32 logical_address = i << PowerPC::BAT_INDEX_SHIFT;
      u32 logical_size = PowerPC::BAT_PAGE_SIZE;
      u32 translated_address = dbat_table[i] & PowerPC::BAT_RESULT_MASK;
      // Coalesce adjacent BAT pages whose translations are physically
      // contiguous into one larger mapping.
      while (i + 1 < dbat_table.size())
      {
        if (!(dbat_table[i + 1] & PowerPC::BAT_PHYSICAL_BIT))
        {
          // The next page isn't BAT-mapped; consume it now so the outer loop
          // doesn't re-examine it.
          ++i;
          break;
        }
        if ((dbat_table[i + 1] & PowerPC::BAT_RESULT_MASK) != translated_address + logical_size)
          break;
        ++i;
        logical_size += PowerPC::BAT_PAGE_SIZE;
      }
      // Map every part of [translated_address, translated_address + logical_size)
      // that overlaps an active physical region.
      for (const auto& physical_region : m_physical_regions)
      {
        if (!physical_region.active)
          continue;
        u32 mapping_address = physical_region.physical_address;
        u32 mapping_end = mapping_address + physical_region.size;
        u32 intersection_start = std::max(mapping_address, translated_address);
        u32 intersection_end = std::min(mapping_end, translated_address + logical_size);
        if (intersection_start < intersection_end)
        {
          // Found an overlapping region; map it.
          u32 mapped_logical_address = logical_address + intersection_start - translated_address;
          u32 mapped_size = intersection_end - intersection_start;
          if (m_is_fastmem_arena_initialized)
          {
            u32 position = physical_region.shm_position + intersection_start - mapping_address;
            u8* base = m_logical_base + mapped_logical_address;
            void* mapped_pointer = m_arena.MapInMemoryRegion(position, mapped_size, base, true);
            if (!mapped_pointer)
            {
              PanicAlertFmt("Memory::UpdateDBATMappings(): Failed to map memory region at 0x{:08X} "
                            "(size 0x{:08X}) into logical fastmem region at 0x{:08X}.",
                            intersection_start, mapped_size, logical_address);
              continue;
            }
            m_dbat_mapped_entries.emplace(logical_address,
                                          LogicalMemoryView{mapped_pointer, mapped_size});
          }
          // Populate the page-mapping lookup table. Note that this is indexed
          // by mapped_logical_address, which can be greater than
          // logical_address when the intersection doesn't start at the
          // beginning of the coalesced mapping.
          u32 bat_index = mapped_logical_address / PowerPC::BAT_PAGE_SIZE;
          u8* target_address = *physical_region.out_pointer + intersection_start - mapping_address;
          for (u32 j = 0; j < mapped_size / PowerPC::BAT_PAGE_SIZE; ++j)
            m_logical_page_mappings[bat_index + j] = target_address + j * PowerPC::BAT_PAGE_SIZE;
        }
      }
    }
  }
}
// Dispatches a guest page-table mapping request to the strategy matching the
// host page size. Does nothing when the fastmem arena isn't initialized or
// the host page size is unsupported.
void MemoryManager::AddPageTableMapping(u32 logical_address, u32 translated_address, bool writeable)
{
  if (!m_is_fastmem_arena_initialized)
    return;
  if (m_host_page_type == HostPageType::SmallPages)
  {
    AddHostPageTableMapping(logical_address, translated_address, writeable, PowerPC::HW_PAGE_SIZE);
  }
  else if (m_host_page_type == HostPageType::LargePages)
  {
    TryAddLargePageTableMapping(logical_address, translated_address, writeable);
  }
}
// Records this guest page in the large-page bookkeeping and, once every guest
// page within the containing host page is mappable, creates (or re-creates)
// the host mapping for the whole host page.
void MemoryManager::TryAddLargePageTableMapping(u32 logical_address, u32 translated_address,
                                                bool writeable)
{
  const bool readable_ready =
      TryAddLargePageTableMapping(logical_address, translated_address, m_large_readable_pages);
  bool writeable_ready = false;
  if (writeable)
  {
    writeable_ready =
        TryAddLargePageTableMapping(logical_address, translated_address, m_large_writeable_pages);
  }
  if (!readable_ready && !writeable_ready)
    return;
  const u32 host_page_mask = m_page_size - 1;
  AddHostPageTableMapping(logical_address & ~host_page_mask, translated_address & ~host_page_mask,
                          writeable_ready, m_page_size);
}
// Stores the translation for one guest page inside the per-host-page entry
// table, creating the table (filled with INVALID_MAPPING) on first use.
// Returns whether the containing host page is now fully and contiguously
// translated.
bool MemoryManager::TryAddLargePageTableMapping(u32 logical_address, u32 translated_address,
                                                std::map<u32, std::vector<u32>>& map)
{
  const u32 host_page_base = logical_address & ~(m_page_size - 1);
  const u32 guest_page_index = (logical_address & (m_page_size - 1)) / PowerPC::HW_PAGE_SIZE;
  std::vector<u32>& entries = map[host_page_base];
  if (entries.empty())
    entries.assign(m_guest_pages_per_host_page, INVALID_MAPPING);
  entries[guest_page_index] = translated_address;
  return CanCreateHostMappingForGuestPages(entries);
}
bool MemoryManager::CanCreateHostMappingForGuestPages(const std::vector<u32>& entries) const
{
const u32 translated_address = entries[0];
if ((translated_address & (m_page_size - 1)) != 0)
return false;
for (size_t i = 1; i < m_guest_pages_per_host_page; ++i)
{
if (entries[i] != translated_address + i * PowerPC::HW_PAGE_SIZE)
return false;
}
return true;
}
// Maps logical_size bytes of the logical fastmem view at logical_address to
// the physical memory at translated_address, or updates the protection of an
// identical existing mapping. Callers pass either one guest page (small host
// pages) or one full host page (large host pages) at a time.
void MemoryManager::AddHostPageTableMapping(u32 logical_address, u32 translated_address,
                                            bool writeable, u32 logical_size)
{
  for (const auto& physical_region : m_physical_regions)
  {
    if (!physical_region.active)
      continue;
    // Intersect the requested range with this physical region.
    const u32 mapping_address = physical_region.physical_address;
    const u32 mapping_end = mapping_address + physical_region.size;
    const u32 intersection_start = std::max(mapping_address, translated_address);
    const u32 intersection_end = std::min(mapping_end, translated_address + logical_size);
    if (intersection_start >= intersection_end)
      continue;
    // Found an overlapping region; map it.
    const u32 position = physical_region.shm_position + intersection_start - mapping_address;
    u8* const base = m_logical_base + logical_address + intersection_start - translated_address;
    const u32 mapped_size = intersection_end - intersection_start;
    const auto it = m_page_table_mapped_entries.find(logical_address);
    if (it != m_page_table_mapped_entries.end())
    {
      // Update the protection of an existing mapping.
      // Only done when base and size match exactly; a recorded mapping with a
      // different base or size is left untouched.
      if (it->second.mapped_pointer == base && it->second.mapped_size == mapped_size)
      {
        if (!m_arena.ChangeMappingProtection(base, mapped_size, writeable))
        {
          PanicAlertFmt("Memory::AddPageTableMapping(): Failed to change protection for memory "
                        "region at 0x{:08X} (size 0x{:08X}, logical fastmem region at 0x{:08X}).",
                        intersection_start, mapped_size, logical_address);
        }
      }
    }
    else
    {
      // Create a new mapping.
      void* const mapped_pointer =
          m_arena.MapInMemoryRegion(position, mapped_size, base, writeable);
      if (!mapped_pointer)
      {
        PanicAlertFmt("Memory::AddPageTableMapping(): Failed to map memory region at 0x{:08X} "
                      "(size 0x{:08X}) into logical fastmem region at 0x{:08X}.",
                      intersection_start, mapped_size, logical_address);
        continue;
      }
      m_page_table_mapped_entries.emplace(logical_address,
                                          LogicalMemoryView{mapped_pointer, mapped_size});
    }
  }
}
void MemoryManager::RemovePageTableMappings(const std::set<u32>& mappings)
{
switch (m_host_page_type)
{
case HostPageType::SmallPages:
for (u32 logical_address : mappings)
RemoveHostPageTableMapping(logical_address);
return;
case HostPageType::LargePages:
for (u32 logical_address : mappings)
RemoveLargePageTableMapping(logical_address);
return;
default:
return;
}
}
// Invalidates this guest page in both large-page tracking tables and tears
// down the host mapping covering the containing host page.
void MemoryManager::RemoveLargePageTableMapping(u32 logical_address)
{
  const u32 host_page_base = logical_address & ~(m_page_size - 1);
  RemoveLargePageTableMapping(logical_address, m_large_readable_pages);
  RemoveLargePageTableMapping(logical_address, m_large_writeable_pages);
  RemoveHostPageTableMapping(host_page_base);
}
// Marks one guest page as unmapped within its host page's entry table, if
// such a table exists.
void MemoryManager::RemoveLargePageTableMapping(u32 logical_address,
                                                std::map<u32, std::vector<u32>>& map)
{
  const auto entry = map.find(logical_address & ~(m_page_size - 1));
  if (entry == map.end())
    return;
  const u32 guest_page_index = (logical_address & (m_page_size - 1)) / PowerPC::HW_PAGE_SIZE;
  entry->second[guest_page_index] = INVALID_MAPPING;
}
// Unmaps and forgets the page-table mapping that starts at the given logical
// address, if one exists.
void MemoryManager::RemoveHostPageTableMapping(u32 logical_address)
{
  const auto it = m_page_table_mapped_entries.find(logical_address);
  if (it == m_page_table_mapped_entries.end())
    return;
  m_arena.UnmapFromMemoryRegion(it->second.mapped_pointer, it->second.mapped_size);
  m_page_table_mapped_entries.erase(it);
}
// Drops every page-table fastmem mapping and resets the large-page
// bookkeeping tables.
void MemoryManager::RemoveAllPageTableMappings()
{
  for (const auto& [address, view] : m_page_table_mapped_entries)
    m_arena.UnmapFromMemoryRegion(view.mapped_pointer, view.mapped_size);
  m_page_table_mapped_entries.clear();
  m_large_readable_pages.clear();
  m_large_writeable_pages.clear();
}
// Serializes the memory contents to/from a savestate.
//
// The memory layout parameters are serialized first; when loading, a state
// whose layout differs from the current configuration aborts the load
// instead of applying partially.
void MemoryManager::DoState(PointerWrap& p)
{
  // Layout of the currently running configuration.
  const u32 current_ram_size = GetRamSize();
  const u32 current_l1_cache_size = GetL1CacheSize();
  const bool current_have_fake_vmem = !!m_fake_vmem;
  const u32 current_fake_vmem_size = current_have_fake_vmem ? GetFakeVMemSize() : 0;
  const bool current_have_exram = !!m_exram;
  const u32 current_exram_size = current_have_exram ? GetExRamSize() : 0;
  // Layout recorded in the state. Seeded with the current values so that
  // saving writes them out unchanged; loading overwrites them via p.Do().
  u32 state_ram_size = current_ram_size;
  u32 state_l1_cache_size = current_l1_cache_size;
  bool state_have_fake_vmem = current_have_fake_vmem;
  u32 state_fake_vmem_size = current_fake_vmem_size;
  bool state_have_exram = current_have_exram;
  u32 state_exram_size = current_exram_size;
  p.Do(state_ram_size);
  p.Do(state_l1_cache_size);
  p.Do(state_have_fake_vmem);
  p.Do(state_fake_vmem_size);
  p.Do(state_have_exram);
  p.Do(state_exram_size);
  // If we're loading a savestate and any of the above differs between the savestate and the current
  // state, cancel the load. This is technically possible to support but would require a bunch of
  // reinitialization of things that depend on these.
  if (std::tie(state_ram_size, state_l1_cache_size, state_have_fake_vmem, state_fake_vmem_size,
               state_have_exram, state_exram_size) !=
      std::tie(current_ram_size, current_l1_cache_size, current_have_fake_vmem,
               current_fake_vmem_size, current_have_exram, current_exram_size))
  {
    Core::DisplayMessage("State is incompatible with current memory settings (MMU and/or memory "
                         "overrides). Aborting load state.",
                         3000);
    p.SetVerifyMode();
    return;
  }
  // Bulk-serialize each allocated region, with markers to catch misaligned
  // state data.
  p.DoArray(m_ram, current_ram_size);
  p.DoArray(m_l1_cache, current_l1_cache_size);
  p.DoMarker("Memory RAM");
  if (current_have_fake_vmem)
    p.DoArray(m_fake_vmem, current_fake_vmem_size);
  p.DoMarker("Memory FakeVMEM");
  if (current_have_exram)
    p.DoArray(m_exram, current_exram_size);
  p.DoMarker("Memory EXRAM");
}
// Tears down everything Init() set up: the fastmem arena, every region view,
// the shared memory segment, and the MMIO mapping.
void MemoryManager::Shutdown()
{
  ShutdownFastmemArena();
  m_is_initialized = false;
  for (const PhysicalMemoryRegion& region : m_physical_regions)
  {
    if (region.active)
    {
      m_arena.ReleaseView(*region.out_pointer, region.size);
      *region.out_pointer = nullptr;
    }
  }
  m_arena.ReleaseSHMSegment();
  m_mmio_mapping.reset();
  INFO_LOG_FMT(MEMMAP, "Memory system shut down.");
}
// Unmaps every view placed inside the fastmem arena (physical regions, DBAT
// mappings, page-table mappings), releases the reserved region, and resets
// all fastmem-related state. No-op if the arena was never initialized.
void MemoryManager::ShutdownFastmemArena()
{
  if (!m_is_fastmem_arena_initialized)
    return;
  // Unmap the physical regions from the untranslated view.
  for (const PhysicalMemoryRegion& region : m_physical_regions)
  {
    if (!region.active)
      continue;
    u8* base = m_physical_base + region.physical_address;
    m_arena.UnmapFromMemoryRegion(base, region.size);
  }
  // Unmap all DBAT mappings from the logical view.
  for (const auto& [logical_address, entry] : m_dbat_mapped_entries)
  {
    m_arena.UnmapFromMemoryRegion(entry.mapped_pointer, entry.mapped_size);
  }
  m_dbat_mapped_entries.clear();
  // Unmap all page-table mappings from the logical view.
  for (const auto& [logical_address, entry] : m_page_table_mapped_entries)
  {
    m_arena.UnmapFromMemoryRegion(entry.mapped_pointer, entry.mapped_size);
  }
  m_page_table_mapped_entries.clear();
  m_arena.ReleaseMemoryRegion();
  m_large_readable_pages.clear();
  m_large_writeable_pages.clear();
  m_fastmem_arena = nullptr;
  m_fastmem_arena_size = 0;
  m_physical_base = nullptr;
  m_logical_base = nullptr;
  m_is_fastmem_arena_initialized = false;
}
void MemoryManager::Clear()
{
if (m_ram)
memset(m_ram, 0, GetRamSize());
if (m_l1_cache)
memset(m_l1_cache, 0, GetL1CacheSize());
if (m_fake_vmem)
memset(m_fake_vmem, 0, GetFakeVMemSize());
if (m_exram)
memset(m_exram, 0, GetExRamSize());
}
// Resolves address to a host pointer and verifies that size bytes starting
// there fit inside a single region. Returns nullptr on failure.
u8* MemoryManager::GetPointerForRange(u32 address, size_t size) const
{
  const std::span<u8> span = GetSpanForAddress(address);
  if (span.data() == nullptr)
  {
    // The address isn't in a valid memory region; GetSpanForAddress already
    // raised a panic alert, so don't raise another one here.
    return nullptr;
  }
  if (span.size() < size)
  {
    // Valid start address, but the requested range runs past the end of the
    // containing region.
    PanicAlertFmt("Oversized range in GetPointerForRange. {:x} bytes at {:#010x}", size, address);
    return nullptr;
  }
  return span.data();
}
void MemoryManager::CopyFromEmu(void* data, u32 address, size_t size) const
{
if (size == 0)
return;
void* pointer = GetPointerForRange(address, size);
if (!pointer)
{
PanicAlertFmt("Invalid range in CopyFromEmu. {:x} bytes from {:#010x}", size, address);
return;
}
memcpy(data, pointer, size);
}
// Copies size bytes from data into emulated memory starting at address.
// Raises a panic alert and writes nothing when the range isn't backed.
void MemoryManager::CopyToEmu(u32 address, const void* data, size_t size)
{
  if (size == 0)
    return;
  u8* const destination = GetPointerForRange(address, size);
  if (destination == nullptr)
  {
    PanicAlertFmt("Invalid range in CopyToEmu. {:x} bytes to {:#010x}", size, address);
    return;
  }
  memcpy(destination, data, size);
}
// Fills size bytes of emulated memory at address with value. Raises a panic
// alert and writes nothing when the range isn't backed.
void MemoryManager::Memset(u32 address, u8 value, size_t size)
{
  if (size == 0)
    return;
  u8* const destination = GetPointerForRange(address, size);
  if (destination == nullptr)
  {
    PanicAlertFmt("Invalid range in Memset. {:x} bytes at {:#010x}", size, address);
    return;
  }
  memset(destination, value, size);
}
// Reads a string from emulated memory. With size == 0, reads bytes until a
// null terminator is found; otherwise reads exactly size bytes and trims the
// result at the first null byte, if any.
std::string MemoryManager::GetString(u32 em_address, size_t size)
{
  std::string result;
  if (size != 0)
  {
    // Fixed size string, potentially null terminated or null padded.
    result.resize(size);
    CopyFromEmu(result.data(), em_address, size);
    result.resize(strnlen(result.data(), size));
    return result;
  }
  // Null terminated string.
  for (u32 address = em_address;; ++address)
  {
    const u8 value = Read_U8(address);
    if (value == 0)
      break;
    result.push_back(value);
  }
  return result;
}
// Maps a (masked) address to a host span covering the remainder of the
// containing region. Returns an empty span (after a panic alert) for
// addresses that hit neither MEM1 nor MEM2.
std::span<u8> MemoryManager::GetSpanForAddress(u32 address) const
{
  // TODO: Should we be masking off more bits here? Can all devices access
  // EXRAM?
  address &= 0x3FFFFFFF;
  if (address < GetRamSizeReal())
    return std::span(m_ram + address, GetRamSizeReal() - address);
  if (m_exram && (address >> 28) == 0x1 && (address & 0x0fffffff) < GetExRamSizeReal())
  {
    const u32 offset = address & GetExRamMask();
    return std::span(m_exram + offset, GetExRamSizeReal() - offset);
  }
  auto& ppc_state = m_system.GetPPCState();
  PanicAlertFmt("Unknown Pointer {:#010x} PC {:#010x} LR {:#010x}", address, ppc_state.pc,
                LR(ppc_state));
  return {};
}
// Reads one byte of emulated memory; 0 if the address is invalid (after a
// panic alert from CopyFromEmu).
u8 MemoryManager::Read_U8(u32 address) const
{
  u8 result{};
  CopyFromEmu(&result, address, sizeof(result));
  return result;
}
// Reads a u16 from emulated memory, byte-swapped from guest order.
u16 MemoryManager::Read_U16(u32 address) const
{
  u16 raw{};
  CopyFromEmu(&raw, address, sizeof(raw));
  return Common::swap16(raw);
}
// Reads a u32 from emulated memory, byte-swapped from guest order.
u32 MemoryManager::Read_U32(u32 address) const
{
  u32 raw{};
  CopyFromEmu(&raw, address, sizeof(raw));
  return Common::swap32(raw);
}
// Reads a u64 from emulated memory, byte-swapped from guest order.
u64 MemoryManager::Read_U64(u32 address) const
{
  u64 raw{};
  CopyFromEmu(&raw, address, sizeof(raw));
  return Common::swap64(raw);
}
// Reads a u32 from emulated memory without byte-swapping (raw guest-order
// bytes).
u32 MemoryManager::Read_U32_Swap(u32 address) const
{
  u32 raw{};
  CopyFromEmu(&raw, address, sizeof(raw));
  return raw;
}
// Writes one byte to emulated memory.
void MemoryManager::Write_U8(u8 value, u32 address)
{
  CopyToEmu(address, &value, sizeof(value));
}
// Writes a u16 to emulated memory, byte-swapped into guest order.
void MemoryManager::Write_U16(u16 value, u32 address)
{
  const u16 swapped = Common::swap16(value);
  CopyToEmu(address, &swapped, sizeof(swapped));
}
// Writes a u32 to emulated memory, byte-swapped into guest order.
void MemoryManager::Write_U32(u32 value, u32 address)
{
  const u32 swapped = Common::swap32(value);
  CopyToEmu(address, &swapped, sizeof(swapped));
}
// Writes a u64 to emulated memory, byte-swapped into guest order.
void MemoryManager::Write_U64(u64 value, u32 address)
{
  const u64 swapped = Common::swap64(value);
  CopyToEmu(address, &swapped, sizeof(swapped));
}
// Writes a u32 to emulated memory without byte-swapping (value is taken as
// already being in guest order).
void MemoryManager::Write_U32_Swap(u32 value, u32 address)
{
  CopyToEmu(address, &value, sizeof(value));
}
// Writes a u64 to emulated memory without byte-swapping (value is taken as
// already being in guest order).
void MemoryManager::Write_U64_Swap(u64 value, u32 address)
{
  CopyToEmu(address, &value, sizeof(value));
}
} // namespace Memory