|
Lines 1-5
Source/JavaScriptCore/wtf/OSAllocatorSymbian.cpp_sec1
|
| 1 |
/* |
1 |
/* |
| 2 |
* Copyright (C) 2010 Apple Inc. All rights reserved. |
2 |
* Copyright (C) 2010 Apple Inc. All rights reserved. |
|
|
3 |
* Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies). All rights reserved. |
| 3 |
* |
4 |
* |
| 4 |
* Redistribution and use in source and binary forms, with or without |
5 |
* Redistribution and use in source and binary forms, with or without |
| 5 |
* modification, are permitted provided that the following conditions |
6 |
* modification, are permitted provided that the following conditions |
|
Lines 26-56
Source/JavaScriptCore/wtf/OSAllocatorSymbian.cpp_sec2
|
| 26 |
#include "config.h" |
27 |
#include "config.h" |
| 27 |
#include "OSAllocator.h" |
28 |
#include "OSAllocator.h" |
| 28 |
|
29 |
|
| 29 |
#include <wtf/FastMalloc.h> |
30 |
#include "PageAllocatorSymbian.h" |
| 30 |
|
31 |
|
| 31 |
namespace WTF { |
32 |
namespace WTF { |
| 32 |
|
33 |
|
| 33 |
void* OSAllocator::reserveUncommitted(size_t bytes, Usage, bool, bool) |
34 |
// Array to store code chunks used by JIT engine(s) |
|
|
35 |
static RPointerArray<ChunkWithMetadata> codeChunksContainer; |
| 36 |
|
| 37 |
// Pointer to the data (non code) allocator |
| 38 |
static PageAllocatorSymbian* allocator = 0; |
| 39 |
|
| 40 |
_LIT(KErrorStringInternalConsistency, "OSAllocator:ConsistencyError"); |
| 41 |
_LIT(KErrorStringChunkCreation, "OSAllocator:ChunkInitError"); |
| 42 |
_LIT(KErrorStringPageSize, "OSAllocator:WrongPageSize"); |
| 43 |
|
| 44 |
// Makes a new code chunk for a JIT engine with everything in committed state |
| 45 |
static void* allocateCodeChunk(size_t bytes) |
| 46 |
{ |
| 47 |
RChunk c; |
| 48 |
TInt error = c.CreateLocalCode(bytes, bytes); |
| 49 |
__ASSERT_ALWAYS(error == KErrNone, User::Panic(KErrorStringChunkCreation, error)); |
| 50 |
|
| 51 |
codeChunksContainer.Append(new ChunkWithMetadata(c.Handle())); |
| 52 |
return static_cast<void*>(c.Base()); |
| 53 |
} |
| 54 |
|
| 55 |
// Frees the _entire_ code chunk in which this address resides. |
| 56 |
static bool deallocateCodeChunk(void* address) |
| 57 |
{ |
| 58 |
bool found = false; |
| 59 |
for (int i = 0; i < codeChunksContainer.Count(); i++) { |
| 60 |
ChunkWithMetadata* p = codeChunksContainer[i]; |
| 61 |
if (p && p->contains(address)) { |
| 62 |
codeChunksContainer.Remove(i); |
| 63 |
delete p; |
| 64 |
found = true; |
| 65 |
} |
| 66 |
} |
| 67 |
return found; |
| 68 |
} |
| 69 |
|
| 70 |
// Return the (singleton) object that manages all non-code VM operations |
| 71 |
static PageAllocatorSymbian* dataAllocatorInstance() |
| 34 |
{ |
72 |
{ |
| 35 |
return fastMalloc(bytes); |
73 |
if (!allocator) |
|
|
74 |
allocator = new PageAllocatorSymbian(); |
| 75 |
|
| 76 |
return allocator; |
| 36 |
} |
77 |
} |
| 37 |
|
78 |
|
| 38 |
void* OSAllocator::reserveAndCommit(size_t bytes, Usage, bool, bool) |
79 |
// Reserve memory and return the base address of the region |
|
|
80 |
void* OSAllocator::reserveUncommitted(size_t reservationSize, Usage usage, bool , bool executable) |
| 39 |
{ |
81 |
{ |
| 40 |
return fastMalloc(bytes); |
82 |
void* base = 0; |
|
|
83 |
if (executable) |
| 84 |
base = allocateCodeChunk(reservationSize); |
| 85 |
else |
| 86 |
base = dataAllocatorInstance()->reserve(reservationSize); |
| 87 |
return base; |
| 41 |
} |
88 |
} |
| 42 |
|
89 |
|
| 43 |
void OSAllocator::commit(void*, size_t, bool, bool) |
90 |
// Inverse operation of reserveUncommitted() |
|
|
91 |
void OSAllocator::releaseDecommitted(void* parkedBase, size_t bytes) |
| 44 |
{ |
92 |
{ |
|
|
93 |
if (dataAllocatorInstance()->contains(parkedBase)) |
| 94 |
dataAllocatorInstance()->release(parkedBase, bytes); |
| 95 |
|
| 96 |
// NOOP for code chunks (JIT) because we released them in decommit() |
| 45 |
} |
97 |
} |
| 46 |
|
98 |
|
| 47 |
void OSAllocator::decommit(void*, size_t) |
99 |
// Commit what was previously reserved via reserveUncommitted() |
|
|
100 |
void OSAllocator::commit(void* address, size_t bytes, bool, bool executable) |
| 48 |
{ |
101 |
{ |
|
|
102 |
// For code chunks, we commit (early) in reserveUncommitted(), so NOOP |
| 103 |
// For data regions, do real work |
| 104 |
if (!executable) |
| 105 |
dataAllocatorInstance()->commit(address, bytes); |
| 106 |
} |
| 107 |
|
| 108 |
void OSAllocator::decommit(void* address, size_t bytes) |
| 109 |
{ |
| 110 |
if (dataAllocatorInstance()->contains(address)) |
| 111 |
dataAllocatorInstance()->decommit(address, bytes); |
| 112 |
else |
| 113 |
deallocateCodeChunk(address); // for code chunk, decommit AND release |
| 114 |
} |
| 115 |
|
| 116 |
void* OSAllocator::reserveAndCommit(size_t bytes, Usage usage, bool writable, bool executable) |
| 117 |
{ |
| 118 |
void* base = reserveUncommitted(bytes, usage, writable, executable); |
| 119 |
commit(base, bytes, writable, executable); |
| 120 |
return base; |
| 121 |
} |
| 122 |
|
| 123 |
|
| 124 |
// The PageAllocatorSymbian class helps map OSAllocator calls for reserve/commit/decommit |
| 125 |
// to a single large Symbian chunk. Only works with multiples of page size, and as a corollary |
| 126 |
// all addresses accepted or returned by it are also page-sized aligned. |
| 127 |
// Design notes: |
| 128 |
// - We initialize a chunk up-front with a large reservation size |
| 129 |
// - The entire reserved region is logically divided into pageSized blocks (4K on Symbian) |
| 130 |
// - The map maintains 1 bit for each of the 4K-sized region in our address space |
| 131 |
// - OSAllocator::reserveUncommitted() requests lead to 1 or more bits being set in map |
| 132 |
// to indicate internally reserved state. The VM address corresponding to the first bit is returned. |
| 133 |
// - OSAllocator::commit() actually calls RChunk.commit() and commits *all or part* of the region |
| 134 |
// reserved via reserveUncommitted() previously. |
| 135 |
// - OSAllocator::decommit() calls RChunk.decommit() |
| 136 |
// - OSAllocator::releaseDecommitted() unparks all the bits in the map, but trusts that a previous |
| 137 |
// call to decommit() would have returned the memory to the OS |
| 138 |
PageAllocatorSymbian::PageAllocatorSymbian() |
| 139 |
{ |
| 140 |
__ASSERT_ALWAYS(m_pageSize == WTF::pageSize(), User::Panic(KErrorStringPageSize, m_pageSize)); |
| 141 |
|
| 142 |
RChunk chunk; |
| 143 |
TInt error = chunk.CreateDisconnectedLocal(0, 0, TInt(largeReservationSize)); |
| 144 |
__ASSERT_ALWAYS(error == KErrNone, User::Panic(KErrorStringChunkCreation, error)); |
| 145 |
|
| 146 |
m_chunkWithMeta = new ChunkWithMetadata(chunk.Handle()); // takes ownership of chunk |
| 147 |
} |
| 148 |
|
| 149 |
PageAllocatorSymbian::~PageAllocatorSymbian() |
| 150 |
{ |
| 151 |
delete m_chunkWithMeta; |
| 152 |
} |
| 153 |
|
| 154 |
// Reserves a region internally in the bitmap |
| 155 |
void* PageAllocatorSymbian::reserve(size_t bytes) |
| 156 |
{ |
| 157 |
// Find first available region |
| 158 |
const size_t nPages = bytes / m_pageSize; |
| 159 |
const int64_t startIdx = m_map.findRunOfZeros(nPages); |
| 160 |
|
| 161 |
// Pseudo OOM |
| 162 |
if (startIdx < 0) |
| 163 |
return 0; |
| 164 |
|
| 165 |
for (size_t i = startIdx; i < startIdx + nPages ; i++) |
| 166 |
m_map.set(i); |
| 167 |
|
| 168 |
return static_cast<void*>( m_chunkWithMeta->m_base + (TUint)(m_pageSize * startIdx) ); |
| 169 |
} |
| 170 |
|
| 171 |
// Reverses the effects of a reserve() call |
| 172 |
void PageAllocatorSymbian::release(void* address, size_t bytes) |
| 173 |
{ |
| 174 |
const size_t startIdx = (static_cast<char*>(address) - m_chunkWithMeta->m_base) / m_pageSize; |
| 175 |
const size_t nPages = bytes / m_pageSize; |
| 176 |
for (size_t i = startIdx; i < startIdx + nPages ; i++) |
| 177 |
m_map.clear(i); |
| 178 |
} |
| 179 |
|
| 180 |
// Actually commit memory from the OS, after a previous call to reserve() |
| 181 |
bool PageAllocatorSymbian::commit(void* address, size_t bytes) |
| 182 |
{ |
| 183 |
// sanity check that bits were previously set |
| 184 |
const size_t idx = (static_cast<char*>(address) - m_chunkWithMeta->m_base) / m_pageSize; |
| 185 |
const size_t nPages = bytes / m_pageSize; |
| 186 |
__ASSERT_ALWAYS(m_map.get(idx), User::Panic(KErrorStringInternalConsistency, idx)); |
| 187 |
__ASSERT_ALWAYS(m_map.get(idx+nPages-1), User::Panic(KErrorStringInternalConsistency, idx+nPages-1)); |
| 188 |
|
| 189 |
TInt error = m_chunkWithMeta->Commit(static_cast<char*>(address) - m_chunkWithMeta->m_base, bytes); |
| 190 |
return (error == KErrNone); |
| 191 |
} |
| 192 |
|
| 193 |
// Inverse operation of commit(), a release() should follow later |
| 194 |
bool PageAllocatorSymbian::decommit(void* address, size_t bytes) |
| 195 |
{ |
| 196 |
TInt error = m_chunkWithMeta->Decommit(static_cast<char*>(address) - m_chunkWithMeta->m_base, bytes); |
| 197 |
return (error == KErrNone); |
| 49 |
} |
198 |
} |
| 50 |
|
199 |
|
| 51 |
void OSAllocator::releaseDecommitted(void* address, size_t) |
200 |
bool PageAllocatorSymbian::contains(const void* address) const |
| 52 |
{ |
201 |
{ |
| 53 |
fastFree(address); |
202 |
return m_chunkWithMeta->contains(address); |
| 54 |
} |
203 |
} |
| 55 |
|
204 |
|
| 56 |
} // namespace WTF |
205 |
} // namespace WTF |