gl_rasterizer_cache: Refactor to only call GetRegionEnd on surface creation.
parent 949d7832fa
commit 0e59291310
@@ -62,12 +62,12 @@ static std::pair<u32, u32> GetASTCBlockSize(PixelFormat format) {
     }
 }
 
-void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr) {
+void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr_) {
     auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()};
-    const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
-    const auto max_size{memory_manager.GetRegionEnd(gpu_addr) - gpu_addr};
+    const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr_)};
 
     addr = cpu_addr ? *cpu_addr : 0;
+    gpu_addr = gpu_addr_;
     size_in_bytes = SizeInBytesRaw();
 
     if (IsPixelFormatASTC(pixel_format)) {
@@ -76,15 +76,6 @@ void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr) {
     } else {
         size_in_bytes_gl = SizeInBytesGL();
     }
-
-    // Clamp size to mapped GPU memory region
-    // TODO(bunnei): Super Mario Odyssey maps a 0x40000 byte region and then uses it for a 0x80000
-    // R32F render buffer. We do not yet know if this is a game bug or something else, but this
-    // check is necessary to prevent flushing from overwriting unmapped memory.
-    if (size_in_bytes > max_size) {
-        LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", size_in_bytes, max_size);
-        size_in_bytes = max_size;
-    }
 }
 
 /*static*/ SurfaceParams SurfaceParams::CreateForTexture(
@@ -719,7 +710,8 @@ static void CopySurface(const Surface& src_surface, const Surface& dst_surface,
 }
 
 CachedSurface::CachedSurface(const SurfaceParams& params)
-    : params(params), gl_target(SurfaceTargetToGL(params.target)) {
+    : params(params), gl_target(SurfaceTargetToGL(params.target)),
+      cached_size_in_bytes(params.size_in_bytes) {
     texture.Create();
     const auto& rect{params.GetRect()};
 
@@ -769,6 +761,18 @@ CachedSurface::CachedSurface(const SurfaceParams& params)
 
     VideoCore::LabelGLObject(GL_TEXTURE, texture.handle, params.addr,
                              SurfaceParams::SurfaceTargetName(params.target));
+
+    // Clamp size to mapped GPU memory region
+    // TODO(bunnei): Super Mario Odyssey maps a 0x40000 byte region and then uses it for a 0x80000
+    // R32F render buffer. We do not yet know if this is a game bug or something else, but this
+    // check is necessary to prevent flushing from overwriting unmapped memory.
+
+    auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()};
+    const u64 max_size{memory_manager.GetRegionEnd(params.gpu_addr) - params.gpu_addr};
+    if (cached_size_in_bytes > max_size) {
+        LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", params.size_in_bytes, max_size);
+        cached_size_in_bytes = max_size;
+    }
 }
 
 static void ConvertS8Z24ToZ24S8(std::vector<u8>& data, u32 width, u32 height, bool reverse) {
@@ -912,7 +916,7 @@ void CachedSurface::FlushGLBuffer() {
     ASSERT_MSG(!IsPixelFormatASTC(params.pixel_format), "Unimplemented");
 
     // OpenGL temporary buffer needs to be big enough to store raw texture size
-    gl_buffer.resize(params.size_in_bytes);
+    gl_buffer.resize(GetSizeInBytes());
 
     const FormatTuple& tuple = GetFormatTuple(params.pixel_format, params.component_type);
     // Ensure no bad interactions with GL_UNPACK_ALIGNMENT

@@ -783,6 +783,7 @@ struct SurfaceParams {
 
     // Parameters used for caching
     VAddr addr;
+    Tegra::GPUVAddr gpu_addr;
     std::size_t size_in_bytes;
     std::size_t size_in_bytes_gl;
 
@@ -802,7 +803,8 @@ struct SurfaceReserveKey : Common::HashableStruct<OpenGL::SurfaceParams> {
     static SurfaceReserveKey Create(const OpenGL::SurfaceParams& params) {
         SurfaceReserveKey res;
         res.state = params;
-        res.state.rt = {}; // Ignore rt config in caching
+        res.state.gpu_addr = {}; // Ignore GPU vaddr in caching
+        res.state.rt = {};       // Ignore rt config in caching
         return res;
     }
 };
@@ -826,7 +828,7 @@ public:
     }
 
     std::size_t GetSizeInBytes() const {
-        return params.size_in_bytes;
+        return cached_size_in_bytes;
     }
 
     void Flush() {
@@ -865,6 +867,7 @@ private:
     std::vector<u8> gl_buffer;
     SurfaceParams params;
     GLenum gl_target;
+    std::size_t cached_size_in_bytes;
     bool dirty = false;
 };
 