gpu: Make memory_manager private
Makes the class interface consistent and provides accessors for obtaining a reference to the memory manager instance. Since the accessors return references, the more flimsy uses of const in the codebase also become apparent: const does not propagate through pointers the way one would typically expect, so this change makes our mutable state more apparent in some places.
commit 45fb74d262
parent ffe2336136
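
To make the intent concrete, here is a minimal, self-contained C++ sketch of the pattern this commit applies. The namespace and member names mirror yuzu's (Tegra::GPU, Tegra::MemoryManager), but the bodies are simplified placeholders rather than the project's actual implementation; the point is only to show why a private unique_ptr plus reference-returning accessors exposes const problems that a public pointer member hides.

// Minimal standalone sketch of the accessor pattern applied in this commit.
// Names mirror yuzu's, but the bodies are simplified stand-ins.
#include <memory>

namespace Tegra {

class MemoryManager {
public:
    void Map() {} // stand-in for the real mapping interface
};

class GPU {
public:
    GPU() : memory_manager{std::make_unique<Tegra::MemoryManager>()} {}

    // Non-const access hands out a mutable reference...
    Tegra::MemoryManager& MemoryManager() {
        return *memory_manager;
    }

    // ...while access through a const GPU only yields a const reference.
    const Tegra::MemoryManager& MemoryManager() const {
        return *memory_manager;
    }

private:
    // While this member was public, even code holding a `const GPU&` could call
    // gpu.memory_manager->Map(): const applies to the unique_ptr itself, not to
    // the MemoryManager it points to.
    std::unique_ptr<Tegra::MemoryManager> memory_manager;
};

} // namespace Tegra

int main() {
    Tegra::GPU gpu;
    gpu.MemoryManager().Map(); // mutable access through a non-const GPU

    const Tegra::GPU& const_gpu = gpu;
    const_gpu.MemoryManager();          // const access compiles...
    // const_gpu.MemoryManager().Map(); // ...but mutation no longer does.
    return 0;
}

In other words, callers that only hold a const GPU now get a const MemoryManager, which is exactly the "flimsy const" the commit message refers to.
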
@@ -56,9 +56,9 @@ u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>&
     auto& gpu = Core::System::GetInstance().GPU();
     const u64 size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
     if (params.flags & 1) {
-        params.offset = gpu.memory_manager->AllocateSpace(params.offset, size, 1);
+        params.offset = gpu.MemoryManager().AllocateSpace(params.offset, size, 1);
     } else {
-        params.offset = gpu.memory_manager->AllocateSpace(size, params.align);
+        params.offset = gpu.MemoryManager().AllocateSpace(size, params.align);
     }

     std::memcpy(output.data(), &params, output.size());
@@ -88,7 +88,7 @@ u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output)
         u64 size = static_cast<u64>(entry.pages) << 0x10;
         ASSERT(size <= object->size);

-        Tegra::GPUVAddr returned = gpu.memory_manager->MapBufferEx(object->addr, offset, size);
+        Tegra::GPUVAddr returned = gpu.MemoryManager().MapBufferEx(object->addr, offset, size);
         ASSERT(returned == offset);
     }
     std::memcpy(output.data(), entries.data(), output.size());
@@ -125,9 +125,9 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
     auto& gpu = Core::System::GetInstance().GPU();

     if (params.flags & 1) {
-        params.offset = gpu.memory_manager->MapBufferEx(object->addr, params.offset, object->size);
+        params.offset = gpu.MemoryManager().MapBufferEx(object->addr, params.offset, object->size);
     } else {
-        params.offset = gpu.memory_manager->MapBufferEx(object->addr, object->size);
+        params.offset = gpu.MemoryManager().MapBufferEx(object->addr, object->size);
     }

     // Create a new mapping entry for this operation.
@@ -161,7 +161,7 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou
                                         itr->second.size);

     auto& gpu = system_instance.GPU();
-    params.offset = gpu.memory_manager->UnmapBuffer(params.offset, itr->second.size);
+    params.offset = gpu.MemoryManager().UnmapBuffer(params.offset, itr->second.size);

     buffer_mappings.erase(itr->second.offset);

@@ -264,7 +264,7 @@ void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached)
     u64 num_pages = ((gpu_addr + size - 1) >> PAGE_BITS) - (gpu_addr >> PAGE_BITS) + 1;
     for (unsigned i = 0; i < num_pages; ++i, gpu_addr += PAGE_SIZE) {
         boost::optional<VAddr> maybe_vaddr =
-            Core::System::GetInstance().GPU().memory_manager->GpuToCpuAddress(gpu_addr);
+            Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
         // The GPU <-> CPU virtual memory mapping is not 1:1
         if (!maybe_vaddr) {
             LOG_ERROR(HW_Memory,
@@ -346,7 +346,7 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
         const VAddr overlap_end = std::min(end, region_end);

         const std::vector<Tegra::GPUVAddr> gpu_addresses =
-            system_instance.GPU().memory_manager->CpuToGpuAddress(overlap_start);
+            system_instance.GPU().MemoryManager().CpuToGpuAddress(overlap_start);

         if (gpu_addresses.empty()) {
             return;
@@ -22,7 +22,7 @@ u32 FramebufferConfig::BytesPerPixel(PixelFormat format) {
 }

 GPU::GPU(VideoCore::RasterizerInterface& rasterizer) {
-    memory_manager = std::make_unique<MemoryManager>();
+    memory_manager = std::make_unique<Tegra::MemoryManager>();
     maxwell_3d = std::make_unique<Engines::Maxwell3D>(rasterizer, *memory_manager);
     fermi_2d = std::make_unique<Engines::Fermi2D>(*memory_manager);
     maxwell_compute = std::make_unique<Engines::MaxwellCompute>();
@@ -31,12 +31,20 @@ GPU::GPU(VideoCore::RasterizerInterface& rasterizer) {

 GPU::~GPU() = default;

+Engines::Maxwell3D& GPU::Maxwell3D() {
+    return *maxwell_3d;
+}
+
 const Engines::Maxwell3D& GPU::Maxwell3D() const {
     return *maxwell_3d;
 }

-Engines::Maxwell3D& GPU::Maxwell3D() {
-    return *maxwell_3d;
+MemoryManager& GPU::MemoryManager() {
+    return *memory_manager;
+}
+
+const MemoryManager& GPU::MemoryManager() const {
+    return *memory_manager;
 }

 u32 RenderTargetBytesPerPixel(RenderTargetFormat format) {
@@ -117,18 +117,24 @@ public:
     /// Processes a command list stored at the specified address in GPU memory.
     void ProcessCommandList(GPUVAddr address, u32 size);

-    /// Returns a const reference to the Maxwell3D GPU engine.
-    const Engines::Maxwell3D& Maxwell3D() const;
-
     /// Returns a reference to the Maxwell3D GPU engine.
     Engines::Maxwell3D& Maxwell3D();

-    std::unique_ptr<MemoryManager> memory_manager;
+    /// Returns a const reference to the Maxwell3D GPU engine.
+    const Engines::Maxwell3D& Maxwell3D() const;
+
+    /// Returns a reference to the GPU memory manager.
+    Tegra::MemoryManager& MemoryManager();
+
+    /// Returns a const reference to the GPU memory manager.
+    const Tegra::MemoryManager& MemoryManager() const;

 private:
     /// Writes a single register in the engine bound to the specified subchannel
     void WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params);

+    std::unique_ptr<Tegra::MemoryManager> memory_manager;
+
     /// Mapping of command subchannels to their bound engine ids.
     std::unordered_map<u32, EngineID> bound_engines;

@@ -425,8 +425,8 @@ std::tuple<u8*, GLintptr, GLintptr> RasterizerOpenGL::UploadMemory(u8* buffer_pt
     std::tie(buffer_ptr, buffer_offset) = AlignBuffer(buffer_ptr, buffer_offset, alignment);
     GLintptr uploaded_offset = buffer_offset;

-    const auto& memory_manager = Core::System::GetInstance().GPU().memory_manager;
-    const boost::optional<VAddr> cpu_addr{memory_manager->GpuToCpuAddress(gpu_addr)};
+    auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager();
+    const boost::optional<VAddr> cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
     Memory::ReadBlock(*cpu_addr, buffer_ptr, size);

     buffer_ptr += size;
@@ -168,8 +168,8 @@ static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType
 }

 VAddr SurfaceParams::GetCpuAddr() const {
-    const auto& gpu = Core::System::GetInstance().GPU();
-    return *gpu.memory_manager->GpuToCpuAddress(addr);
+    auto& gpu = Core::System::GetInstance().GPU();
+    return *gpu.MemoryManager().GpuToCpuAddress(addr);
 }

 static bool IsPixelFormatASTC(PixelFormat format) {
@@ -220,14 +220,14 @@ void MortonCopy(u32 stride, u32 block_height, u32 height, std::vector<u8>& gl_bu
                 Tegra::GPUVAddr addr) {
     constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / CHAR_BIT;
     constexpr u32 gl_bytes_per_pixel = CachedSurface::GetGLBytesPerPixel(format);
-    const auto& gpu = Core::System::GetInstance().GPU();
+    auto& gpu = Core::System::GetInstance().GPU();

     if (morton_to_gl) {
         // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual
         // pixel values.
         const u32 tile_size{IsFormatBCn(format) ? 4U : 1U};
         const std::vector<u8> data =
-            Tegra::Texture::UnswizzleTexture(*gpu.memory_manager->GpuToCpuAddress(addr), tile_size,
+            Tegra::Texture::UnswizzleTexture(*gpu.MemoryManager().GpuToCpuAddress(addr), tile_size,
                                              bytes_per_pixel, stride, height, block_height);
         const size_t size_to_copy{std::min(gl_buffer.size(), data.size())};
         gl_buffer.assign(data.begin(), data.begin() + size_to_copy);
@@ -237,7 +237,7 @@ void MortonCopy(u32 stride, u32 block_height, u32 height, std::vector<u8>& gl_bu
         LOG_WARNING(Render_OpenGL, "need to use correct swizzle/GOB parameters!");
         VideoCore::MortonCopyPixels128(
             stride, height, bytes_per_pixel, gl_bytes_per_pixel,
-            Memory::GetPointer(*gpu.memory_manager->GpuToCpuAddress(addr)), gl_buffer.data(),
+            Memory::GetPointer(*gpu.MemoryManager().GpuToCpuAddress(addr)), gl_buffer.data(),
             morton_to_gl);
     }
 }
@@ -754,9 +754,9 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool pres
         return {};
     }

-    const auto& gpu = Core::System::GetInstance().GPU();
+    auto& gpu = Core::System::GetInstance().GPU();
     // Don't try to create any entries in the cache if the address of the texture is invalid.
-    if (gpu.memory_manager->GpuToCpuAddress(params.addr) == boost::none)
+    if (gpu.MemoryManager().GpuToCpuAddress(params.addr) == boost::none)
         return {};

     // Look up surface in the cache based on address
@@ -848,7 +848,7 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& surface,
                 "reinterpretation but the texture is tiled.");
     }
     size_t remaining_size = new_params.SizeInBytes() - params.SizeInBytes();
-    auto address = Core::System::GetInstance().GPU().memory_manager->GpuToCpuAddress(
+    auto address = Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(
         new_params.addr + params.SizeInBytes());
     std::vector<u8> data(remaining_size);
     Memory::ReadBlock(*address, data.data(), data.size());
@@ -382,7 +382,7 @@ void GraphicsSurfaceWidget::OnUpdate() {
     // TODO: Implement a good way to visualize alpha components!

     QImage decoded_image(surface_width, surface_height, QImage::Format_ARGB32);
-    boost::optional<VAddr> address = gpu.memory_manager->GpuToCpuAddress(surface_address);
+    boost::optional<VAddr> address = gpu.MemoryManager().GpuToCpuAddress(surface_address);

     // TODO(bunnei): Will not work with BCn formats that swizzle 4x4 tiles.
     // Needs to be fixed if we plan to use this feature more, otherwise we may remove it.
@@ -443,7 +443,7 @@ void GraphicsSurfaceWidget::SaveSurface() {
         pixmap->save(&file, "PNG");
     } else if (selectedFilter == bin_filter) {
         auto& gpu = Core::System::GetInstance().GPU();
-        boost::optional<VAddr> address = gpu.memory_manager->GpuToCpuAddress(surface_address);
+        boost::optional<VAddr> address = gpu.MemoryManager().GpuToCpuAddress(surface_address);

         const u8* buffer = Memory::GetPointer(*address);
         ASSERT_MSG(buffer != nullptr, "Memory not accessible");