// Copyright 2022 Simon Boyé

#include <algorithm>
#include <cassert>
#include <stdexcept>
#include <utility>

// NOTE: "memory.h" is a placeholder; substitute the project header that
// declares Context, MemoryBlock, MemoryPage and Allocator.
#include "memory.h"


namespace vk {


MemoryBlock::MemoryBlock() noexcept {
}


MemoryBlock::MemoryBlock(
    VkDeviceMemory device_memory,
    VkDeviceSize size,
    VkDeviceSize offset,
    MemoryPage* memory_page,
    uint32_t memory_type
) noexcept
    : m_size(size)
    , m_offset(offset)
    , m_device_memory(device_memory)
    , m_memory_page(memory_page)
    , m_memory_type(memory_type)
{}


MemoryBlock::MemoryBlock(MemoryBlock&& other) noexcept
    : m_size(other.m_size)
    , m_offset(other.m_offset)
    , m_device_memory(other.m_device_memory)
    , m_memory_page(other.m_memory_page)
    , m_memory_type(other.m_memory_type)
{
    other.m_size = 0;
    other.m_offset = 0;
    other.m_device_memory = VK_NULL_HANDLE;
    other.m_memory_page = nullptr;
    other.m_memory_type = 0;
}


MemoryBlock::~MemoryBlock() noexcept {
    if(!is_null())
        free();
}


MemoryBlock& MemoryBlock::operator=(MemoryBlock&& other) noexcept {
    if(&other != this) {
        using std::swap;
        swap(m_size, other.m_size);
        swap(m_offset, other.m_offset);
        swap(m_device_memory, other.m_device_memory);
        swap(m_memory_page, other.m_memory_page);
        swap(m_memory_type, other.m_memory_type);
    }
    return *this;
}


VkMemoryType MemoryBlock::memory_type_info(Context& context) const noexcept {
    assert(!is_null());
    assert(context);

    return context.memory_properties().memoryTypes[m_memory_type];
}


void MemoryBlock::free() noexcept {
    assert(!is_null());

    m_memory_page->p_free(*this);

    m_size = 0;
    m_offset = 0;
    m_device_memory = VK_NULL_HANDLE;
    m_memory_page = nullptr;
    m_memory_type = 0;
}


void* MemoryBlock::map(Context& context) {
    return map(context, 0, m_size);
}


void* MemoryBlock::map(Context& context, VkDeviceSize offset, VkDeviceSize size) {
    assert(!is_null());
    assert(context);
    assert(offset + size <= m_size);

    void* ptr;
    if(vkMapMemory(
        context.device(),
        m_device_memory,
        m_offset + offset,
        size,
        0,
        &ptr
    ) != VK_SUCCESS)
        throw std::runtime_error("failed to map memory");

    return ptr;
}


void MemoryBlock::unmap(Context& context) noexcept {
    assert(!is_null());

    vkUnmapMemory(
        context.device(),
        m_device_memory
    );
}


void MemoryBlock::flush(Context& context) {
    assert(!is_null());

    VkMappedMemoryRange range {
        .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
        .memory = m_device_memory,
        .offset = m_offset,
        .size = m_size,
    };
    if(vkFlushMappedMemoryRanges(
        context.device(),
        1,
        &range
    ) != VK_SUCCESS)
        throw std::runtime_error("failed to flush memory");
}


void MemoryBlock::invalidate(Context& context) {
    assert(!is_null());

    VkMappedMemoryRange range {
        .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
        .memory = m_device_memory,
        .offset = m_offset,
        .size = m_size,
    };
    if(vkInvalidateMappedMemoryRanges(
        context.device(),
        1,
        &range
    ) != VK_SUCCESS)
        throw std::runtime_error("failed to invalidate memory");
}


MemoryPage::MemoryPage(
    VkDeviceSize size,
    VkDeviceMemory device_memory,
    uint32_t memory_type
) noexcept
    : m_size(size)
    , m_device_memory(device_memory)
    , m_blocks{
        Block{ 0, true },
        Block{ m_size, false },
    }
    , m_memory_type(memory_type)
{}


MemoryPage::MemoryPage(MemoryPage&&) = default;


MemoryPage::~MemoryPage() = default;


MemoryPage& MemoryPage::operator=(MemoryPage&&) = default;


MemoryBlock MemoryPage::allocate(VkDeviceSize size) noexcept {
    const auto block_it = find_free_block(size);
    if(block_it == m_blocks.end())
        return MemoryBlock();

    const auto offset = block_it->offset;
    block_it[0].is_free = false;
    // Split the free block if the allocation does not fill it completely.
    if (block_it[0].offset + size != block_it[1].offset) {
        m_blocks.emplace(std::next(block_it), Block{ block_it[0].offset + size, true });
    }

    return MemoryBlock(m_device_memory, size, offset, this, m_memory_type);
}

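// The block list is kept sorted by offset and always ends with a non-free
// sentinel entry at m_size, so each entry's extent runs from its own offset to
// the next entry's offset. Freeing therefore reduces to locating the entry at
// block.offset() with a binary search and coalescing it with any free
// neighbour on either side.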
void MemoryPage::p_free(MemoryBlock& block) noexcept {
    assert(block.device_memory() == m_device_memory);

    const auto offset = block.offset();
    const auto block_it = std::lower_bound(
        m_blocks.begin(),
        m_blocks.end(),
        Block{ offset, false },
        [] (const Block& lhs, const Block& rhs) {
            return lhs.offset < rhs.offset;
        }
    );
    assert(block_it != m_blocks.end() && block_it->offset == offset);

    // Merge with next block if also free
    if (block_it[1].is_free) {
        m_blocks.erase(std::next(block_it));
    }
    // Merge with previous block if also free
    if (block_it != m_blocks.begin() && block_it[-1].is_free) {
        m_blocks.erase(block_it);
    } else {
        block_it->is_free = true;
    }
}


void MemoryPage::free_device_memory(Context& context) noexcept {
    vkFreeMemory(
        context.device(),
        m_device_memory,
        nullptr
    );

    m_size = 0;
    m_device_memory = VK_NULL_HANDLE;
    m_blocks.clear();
    m_memory_type = 0;
}


MemoryPage::BlockList::iterator MemoryPage::find_free_block(VkDeviceSize size) {
    const auto block_end = std::prev(m_blocks.end());
    for(auto block_it = m_blocks.begin(); block_it != block_end; ++block_it) {
        if(block_it[0].is_free && block_it[1].offset - block_it[0].offset >= size)
            return block_it;
    }
    return m_blocks.end();
}


Allocator::Allocator(Context* context) noexcept
    : m_context(context)
    , m_memory_properties{}
    , m_page_map{}
{
    vkGetPhysicalDeviceMemoryProperties(
        m_context->physical_device(),
        &m_memory_properties
    );

    for(auto& next_page_size: m_next_page_sizes)
        next_page_size = BasePageSize;
}


Allocator::~Allocator() = default;


int32_t Allocator::find_memory_type(
    uint32_t type_mask,
    VkMemoryPropertyFlags required_properties
) noexcept {
    for(uint32_t type_index = 0;
        type_index < m_memory_properties.memoryTypeCount;
        type_index += 1
    ) {
        if((type_mask & (1u << type_index)) == 0)
            continue;

        const auto memory_properties =
            m_memory_properties.memoryTypes[type_index].propertyFlags;
        if((memory_properties & required_properties) == required_properties)
            return type_index;
    }
    return -1;
}


int32_t Allocator::find_memory_type(
    uint32_t type_mask,
    std::initializer_list<VkMemoryPropertyFlags> properties_list
) noexcept {
    for(const auto& properties: properties_list) {
        const auto memory_type = find_memory_type(type_mask, properties);
        if(memory_type >= 0)
            return memory_type;
    }
    return -1;
}


MemoryBlock Allocator::allocate(
    VkDeviceSize size,
    uint32_t memory_type
) noexcept {
    assert(memory_type < VK_MAX_MEMORY_TYPES);

    auto& pages = m_page_map[memory_type];
    for(auto& page: pages) {
        auto block = page.allocate(size);
        if(block)
            return block;
    }

    const VkDeviceSize new_page_size = std::max(
        size,
        m_next_page_sizes[memory_type]
    );
    VkMemoryAllocateInfo allocate_info {
        .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        .allocationSize = new_page_size,
        .memoryTypeIndex = memory_type,
    };
    VkDeviceMemory device_memory;
    if(vkAllocateMemory(
        m_context->device(),
        &allocate_info,
        nullptr,
        &device_memory
    ) != VK_SUCCESS)
        return MemoryBlock();

    pages.emplace_back(
        new_page_size,
        device_memory,
        memory_type
    );
    if(new_page_size == m_next_page_sizes[memory_type])
        m_next_page_sizes[memory_type] *= 2;

    return pages.back().allocate(size);
}


MemoryBlock Allocator::allocate(
    VkDeviceSize size,
    uint32_t type_mask,
    VkMemoryPropertyFlags properties
) noexcept {
    const auto memory_type = find_memory_type(
        type_mask,
        properties
    );
    if(memory_type < 0)
        return MemoryBlock();
    return allocate(size, uint32_t(memory_type));
}


MemoryBlock Allocator::allocate(
    VkDeviceSize size,
    uint32_t type_mask,
    std::initializer_list<VkMemoryPropertyFlags> properties_list
) noexcept {
    const auto memory_type = find_memory_type(
        type_mask,
        properties_list
    );
    if(memory_type < 0)
        return MemoryBlock();
    return allocate(size, uint32_t(memory_type));
}

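// Frees the VkDeviceMemory behind every page of every memory type and clears
// the pages' block lists. Any MemoryBlock still alive at this point must not
// be mapped, flushed or freed afterwards, since its page no longer owns the
// memory it refers to.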
void Allocator::free_all_pages() noexcept {
    for(uint32_t memory_type = 0; memory_type < VK_MAX_MEMORY_TYPES; memory_type += 1) {
        for(auto& page: m_page_map[memory_type])
            page.free_device_memory(*m_context);
    }
}


}  // namespace vk
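
// Usage sketch (illustrative only, not part of the allocator): assuming a
// valid vk::Context `context`, a vk::Allocator `allocator` built from it and
// an already created VkBuffer `buffer`, a host-visible allocation for the
// buffer could be obtained and filled roughly like this:
//
//     VkMemoryRequirements requirements;
//     vkGetBufferMemoryRequirements(context.device(), buffer, &requirements);
//
//     // Prefer device-local host-visible memory, fall back to host-coherent.
//     vk::MemoryBlock block = allocator.allocate(
//         requirements.size,
//         requirements.memoryTypeBits,
//         {
//             VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
//             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
//         }
//     );
//     if(!block)
//         throw std::runtime_error("no suitable memory type");
//
//     vkBindBufferMemory(context.device(), buffer, block.device_memory(), block.offset());
//
//     void* ptr = block.map(context);
//     // ... write data through ptr ...
//     block.flush(context);  // needed unless the chosen type is HOST_COHERENT
//     block.unmap(context);
//
//     // The block returns its range to its MemoryPage when it is destroyed,
//     // or earlier via block.free().
//
// Note that allocate() takes no alignment parameter, so the caller must check
// that block.offset() satisfies requirements.alignment where that matters.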