 
 namespace llvm {
 
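+// Returns true if the given memory group already contains a single
+// contiguous free block of at least Size bytes.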
+bool SectionMemoryManager::hasSpace(const MemoryGroup &MemGroup,
+                                    uintptr_t Size) const {
+  for (const FreeMemBlock &FreeMB : MemGroup.FreeMem) {
+    if (FreeMB.Free.allocatedSize() >= Size)
+      return true;
+  }
+  return false;
+}
+
+void SectionMemoryManager::reserveAllocationSpace(
+    uintptr_t CodeSize, Align CodeAlign, uintptr_t RODataSize,
+    Align RODataAlign, uintptr_t RWDataSize, Align RWDataAlign) {
+  if (CodeSize == 0 && RODataSize == 0 && RWDataSize == 0)
+    return;
+
+  static const size_t PageSize = sys::Process::getPageSizeEstimate();
+
+  // Code alignment needs to be at least the stub alignment; we have no easy
+  // way to query that here, so as a workaround we assume 8, the largest
+  // value observed across all platforms.
+  constexpr uint64_t StubAlign = 8;
+  CodeAlign = Align(std::max(CodeAlign.value(), StubAlign));
+  RODataAlign = Align(std::max(RODataAlign.value(), StubAlign));
+  RWDataAlign = Align(std::max(RWDataAlign.value(), StubAlign));
+
+  // Get space required for each section. Use the same calculation as
+  // allocateSection because we need to be able to satisfy it.
+  uint64_t RequiredCodeSize = alignTo(CodeSize, CodeAlign) + CodeAlign.value();
+  uint64_t RequiredRODataSize =
+      alignTo(RODataSize, RODataAlign) + RODataAlign.value();
+  uint64_t RequiredRWDataSize =
+      alignTo(RWDataSize, RWDataAlign) + RWDataAlign.value();
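+  // For example, CodeSize = 100 with CodeAlign = 16 reserves
+  // alignTo(100, 16) + 16 = 112 + 16 = 128 bytes, leaving slack to realign
+  // the block's base address.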
+
+  if (hasSpace(CodeMem, RequiredCodeSize) &&
+      hasSpace(RODataMem, RequiredRODataSize) &&
+      hasSpace(RWDataMem, RequiredRWDataSize)) {
+    // Sufficient space in contiguous block already available.
+    return;
+  }
+
+  // MemoryManager has no functions for releasing memory once it has been
+  // allocated. Normally it tries to reuse any excess blocks created by page
+  // alignment, but when free memory is insufficient for this request, that
+  // reuse can produce disparate allocations that violate the ARM ABI. Clear
+  // the free lists so that only the new allocations are used, but do not
+  // release the allocated memory, as it may still be in use.
+  CodeMem.FreeMem.clear();
+  RODataMem.FreeMem.clear();
+  RWDataMem.FreeMem.clear();
+
+  // Round up to the nearest page size. Blocks must be page-aligned.
+  RequiredCodeSize = alignTo(RequiredCodeSize, PageSize);
+  RequiredRODataSize = alignTo(RequiredRODataSize, PageSize);
+  RequiredRWDataSize = alignTo(RequiredRWDataSize, PageSize);
+  uint64_t RequiredSize =
+      RequiredCodeSize + RequiredRODataSize + RequiredRWDataSize;
+
+  std::error_code ec;
+  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
+      AllocationPurpose::RWData, RequiredSize, nullptr,
+      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
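+  // If the reservation fails, leave the free lists empty; subsequent
+  // allocateSection calls will fall back to mapping their own (possibly
+  // non-contiguous) blocks.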
+  if (ec) {
+    return;
+  }
+  // CodeMem will arbitrarily own this MemoryBlock to handle cleanup.
+  CodeMem.AllocatedMem.push_back(MB);
+  uintptr_t Addr = (uintptr_t)MB.base();
+  FreeMemBlock FreeMB;
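+  // (unsigned)-1 indicates there is no pending allocation immediately
+  // preceding these free blocks.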
+  FreeMB.PendingPrefixIndex = (unsigned)-1;
+
+  if (CodeSize > 0) {
+    assert(isAddrAligned(CodeAlign, (void *)Addr));
+    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredCodeSize);
+    CodeMem.FreeMem.push_back(FreeMB);
+    Addr += RequiredCodeSize;
+  }
+
+  if (RODataSize > 0) {
+    assert(isAddrAligned(RODataAlign, (void *)Addr));
+    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRODataSize);
+    RODataMem.FreeMem.push_back(FreeMB);
+    Addr += RequiredRODataSize;
+  }
+
+  if (RWDataSize > 0) {
+    assert(isAddrAligned(RWDataAlign, (void *)Addr));
+    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRWDataSize);
+    RWDataMem.FreeMem.push_back(FreeMB);
+  }
+}
+
 uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
                                                    unsigned Alignment,
                                                    unsigned SectionID,
@@ -264,8 +355,10 @@ class DefaultMMapper final : public SectionMemoryManager::MemoryMapper { |
 };
 } // namespace
 
-SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM)
-    : MMapper(UnownedMM), OwnedMMapper(nullptr) {
+SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM,
+                                           bool ReserveAlloc)
+    : MMapper(UnownedMM), OwnedMMapper(nullptr),
+      ReserveAllocation(ReserveAlloc) {
   if (!MMapper) {
     OwnedMMapper = std::make_unique<DefaultMMapper>();
     MMapper = OwnedMMapper.get();
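
A minimal usage sketch (assuming the accompanying header change makes
needsToReserveAllocationSpace() return ReserveAllocation; the variable name
below is illustrative):

  // Opting in makes RuntimeDyld call reserveAllocationSpace() with the
  // total section sizes before any individual section is allocated, so
  // all sections land in one contiguous mapping.
  SectionMemoryManager MemMgr(/*UnownedMM=*/nullptr, /*ReserveAlloc=*/true);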
|