src/Spaces.h
author David Anderson <dvander@alliedmods.net>
Sun Jan 06 13:52:21 2013 -0800 (2013-01-06)
changeset 256 3c184218462d
parent 91 5f4402180204
child 262 c4b1341297e5
permissions -rw-r--r--
Initial implementation of module imports.
     1 /* vim: set ts=4 sw=4 tw=99 et:
     2  *
     3  * Copyright (C) 2012 David Anderson
     4  *
     5  * This file is part of JITCraft.
     6  *
     7  * JITCraft is free software: you can redistribute it and/or modify it under
     8  * the terms of the GNU General Public License as published by the Free
     9  * Software Foundation, either version 3 of the License, or (at your option)
    10  * any later version.
    11  * 
    12  * JITCraft is distributed in the hope that it will be useful, but WITHOUT ANY
    13  * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    14  * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
    15  *
    16  * You should have received a copy of the GNU General Public License along with
    17  * JITCraft. If not, see http://www.gnu.org/licenses/.
    18  */
    19 #ifndef _include_jitcraft_spaces_h_
    20 #define _include_jitcraft_spaces_h_
    21 
    22 #include <limits.h>
    23 #include "Utility.h"
    24 #include "Platform.h"
    25 
    26 namespace ke {
    27 
// Forward declarations; these types are defined in other headers.
class Zone;
class Object;
class Map;

// Objects are aligned on pointer-size boundaries.
static const size_t kObjectAlignment = sizeof(Object *);
// Every object occupies at least two words; HeapChunk relies on this to
// reserve only one mark-bit pair per alignment unit rather than per word.
static const size_t kMinObjectSize   = sizeof(Object *) * 2;
    34 
    35 // Every object gets two successive mark bits. These bits represent various
    36 // states during garbage collection:
    37 //
    38 //  WHITE (00): The object is dead or unmarked.
    39 //  GREY  (11): The object is live but its children have not been marked.
    40 //  BLACK (10): The object and all its children have been marked.
    41 //
    42 // These colors assist in maintaining the "tri-color invariant", explained
    43 // in Heap.h. The bits are organized such that transitioning an object from
    44 // White to Black requires one bit change, and Black to Grey requires another.
    45 class MarkBit
    46 {
    47     uint32 *word_;
    48     uint32 bit_;
    49 
    50   public:
    51     MarkBit(uint32 *word, uint32 bit)
    52       : word_(word),
    53         bit_(bit)
    54     {
    55         assert(bit_ < sizeof(*word_) * 8);
    56     }
    57 
    58     bool isSet() const {
    59         return !!(*word_ & (1 << bit_));
    60     }
    61     void set() {
    62         *word_ |= (1 << bit_);
    63     }
    64     void unset() {
    65         *word_ &= ~(1 << bit_);
    66     }
    67     uint32 *word() {
    68         return word_;
    69     }
    70     MarkBit next() const {
    71         if (bit_ == (sizeof(*word_) * 8) - 1)
    72             return MarkBit(word_ + 1, 0);
    73         return MarkBit(word_, bit_ + 1);
    74     }
    75 };
    76 
// A HeapChunk is a kAlignment-aligned region of heap memory. The header (the
// fields below) sits at the chunk's aligned start, followed by the mark
// bitmap, followed by object storage; see the layout math further down.
class HeapChunk
{
    // Next chunk in the owning space's singly-linked list.
    HeapChunk *next_;

    // Base address, from OS::AlignableMapping.
    // NOTE(review): kept separately from address() (== this), presumably so
    // the original, possibly-unaligned mapping can be released -- confirm in
    // HeapChunk::New/Destroy.
    Address base_;

    // End of the allocated region.
    Address end_;

    // Bitwise OR of Flag values below.
    uintptr_t flags_;

  public:
    enum Flag {
        HAS_OVERFLOWED_OBJECT = (1 << 0),
        EMPTY = (1 << 1)
    };

  public:
    static const size_t kDefaultSize = 1 * kMB;
    static const size_t kAlignment = 1 * kMB;

    // Maximum size of an object that will fit in a page. We limit this
    // specifically to help JITs: the computation of a[i] has a special
    // instruction form on most CPUs, but the index might not be sign-
    // extended on 64-bit architectures. We limit the size such that:
    //
    //    index * sizeof(Object *)
    //
    // Is guaranteed to not overflow a signed 32-bit integer.
    static const size_t kMaxObjectSize = INT_MAX / sizeof(Object *);

    // Size of a HeapChunk header. flags_ (a uintptr_t) is counted here as
    // sizeof(Address); StaticAsserts() below verifies the sum equals
    // sizeof(HeapChunk), i.e. the sizes match and there is no padding.
    static const size_t kHeaderSize =
        sizeof(HeapChunk *) +
        sizeof(Address) +
        sizeof(Address) +
        sizeof(Address);

    // All objects must be allocated in the first kAlignment bytes of the
    // chunk. We need two mark bits for every allocatable object, since there
    // are three possible states: White (00), Grey (11), and Black (10). These
    // bits are stored immediately after the HeapChunk header, and before the
    // first allocatable object.
    //
    // Although object alignment is only at word boundaries, we guarantee that
    // every object is at least two words. This means we don't need to reserve
    // two mark bits for every object, just one mark bit for every allocatable
    // address. That also means a pair of mark bits can cross a byte boundary.
    //
    // --- Math ---
    //
    // The number of mark bits needed is:
    //
    // MaxObjectsPerChunk = (kAlignment / kObjectAlignment)
    //                    -> 256K bits (128K on 64-bit)
    //
    // The number of bytes needed for these mark bits is:
    //
    // NumMarkBytes       = MaxObjectsPerChunk / 8
    //                    = 32KB (16KB on 64-bit)
    //
    // The number of uint32s needed to store these bytes is:
    //
    // NumMarkWords       = NumMarkBytes / sizeof(uint32)
    //                    = 8K words (4K on 64-bit)
    //
    static const size_t kMaxObjectsPerChunk = kAlignment / kObjectAlignment;
    static const size_t kMarkBits = kMaxObjectsPerChunk;
    static const size_t kMarkBytes = kMarkBits / 8;
    static const size_t kMarkWords = kMarkBytes / sizeof(uint32);
    static const size_t kMarkBitsPerWord = sizeof(uint32) * 8;

    // The bitmap starts right after the header; objects start right after
    // the bitmap.
    static const size_t kMarkBitsOffset = kHeaderSize;
    static const size_t kFirstObject = kMarkBitsOffset + kMarkBytes;

    // Threshold for allocating an object in the large object space.
    static const size_t kLargeObjectThreshold = kAlignment - kFirstObject;

    // Compile-time checks of the layout assumptions documented above.
    static inline void StaticAsserts() {
        STATIC_ASSERT(kHeaderSize == sizeof(HeapChunk));
        STATIC_ASSERT(IS_ALIGNED(kHeaderSize, kObjectAlignment));
        STATIC_ASSERT(IS_ALIGNED(kFirstObject, kObjectAlignment));
    }

  public:
    // Allocation/teardown; defined out of line.
    static HeapChunk *New(HeapChunk *prev, size_t size);
    static void Destroy(HeapChunk *chunk);

  public:
    HeapChunk *next() {
        return next_;
    }
    Address base() {
        return base_;
    }
    // Aligned start of the chunk (the header itself).
    Address address() {
        return reinterpret_cast<Address>(this);
    }
    Address end() {
        return end_;
    }
    size_t size() {
        return end() - base();
    }
    // Address of the first allocatable object slot.
    Address first() {
        return reinterpret_cast<Address>(this) + kFirstObject;
    }
    HeapChunk **nextp() {
        return &next_;
    }
    // Start of this chunk's mark bitmap.
    uint32 *markBits() {
        return reinterpret_cast<uint32 *>(reinterpret_cast<Address>(this) + kMarkBitsOffset);
    }
    void resetMarkBits();

    // Marking functions.
    // Finds the first mark bit for |obj|: mask off the low kAlignment-1 bits
    // to recover the owning chunk (chunks are kAlignment-aligned), then index
    // the bitmap by the object's word offset within the chunk.
    static inline MarkBit GetMarkBit(Object *obj) {
        uintptr_t address = reinterpret_cast<uintptr_t>(obj);

        HeapChunk *chunk = reinterpret_cast<HeapChunk *>(address & ~(kAlignment - 1));
        assert(Address(obj) >= chunk->first() && Address(obj) < chunk->end());

        size_t index = (address & (kAlignment - 1)) / kObjectAlignment;
        assert(index < kMaxObjectsPerChunk);

        uint32 *word = chunk->markBits() + (index / kMarkBitsPerWord);
        uint32 bit = index & (kMarkBitsPerWord - 1);
        return MarkBit(word, bit);
    }
    // Inverse of GetMarkBit: recovers the object address that a (word, bit)
    // bitmap position describes.
    static inline Address FromMarkBit(HeapChunk *chunk, uint32 *word, uint32 bit) {
        // word = bits + (index / kMarkBitsPerWord)
        // word - bits = index / kMarkBitsPerWord
        // (word - bits) * kMarkBitsPerWord = index
        assert(word >= chunk->markBits() && word < chunk->markBits() + kMarkWords);
        size_t index = ((word - chunk->markBits()) * kMarkBitsPerWord) + bit;
        return chunk->address() + (index * kObjectAlignment);
    }
    // Color predicates over an object's (bit, bit.next()) pair:
    // White = 00, Black = 10, Grey = 11. The pattern 01 is invalid, which
    // IsWhite asserts.
    static inline bool IsWhite(const MarkBit &bit) {
        assert(bit.isSet() || !bit.next().isSet());
        return !bit.isSet();
    }
    static inline bool IsBlack(const MarkBit &bit) {
        return bit.isSet() && !bit.next().isSet();
    }
    static inline bool IsGrey(const MarkBit &bit) {
        return bit.isSet() && bit.next().isSet();
    }
    static inline bool IsBlackOrGrey(const MarkBit &bit) {
        return bit.isSet();
    }
    // 00 -> 10: one bit change, per the scheme described above MarkBit.
    static inline void WhiteToBlack(MarkBit &bit) {
        assert(IsWhite(bit));
        bit.set();
    }
    // 10 -> 11: sets the second bit of the pair.
    static inline void BlackToGrey(MarkBit &bit) {
        assert(IsBlack(bit));
        bit.next().set();
    }
    // 11 -> 10: clears the second bit of the pair.
    static inline void GreyToBlack(MarkBit &bit) {
        assert(IsGrey(bit));
        bit.next().unset();
    }
    // Any color -> White (00).
    static inline void Clear(MarkBit &bit) {
        bit.unset();
        bit.next().unset();
    }

    // Flag accessors. Note there is no way to clear a flag once set.
    bool hasOverflowedObject() {
        return !!(flags_ & HAS_OVERFLOWED_OBJECT);
    }
    void setHasOverflowedObject() {
        flags_ |= HAS_OVERFLOWED_OBJECT;
    }
    bool isEmpty() {
        return !!(flags_ & EMPTY);
    }
    void setEmpty() {
        flags_ |= EMPTY;
    }
};
   258 
   259 template <typename SizeEvaluator>
   260 class ChunkObjectIteratorBase
   261 {
   262     HeapChunk *chunk_;
   263     Address top_;
   264     SizeEvaluator evaluator_;
   265     Address current_;
   266 
   267   public:
   268     ChunkObjectIteratorBase(HeapChunk *start, Address top, const SizeEvaluator &evaluator)
   269       : chunk_(start),
   270         top_(top),
   271         evaluator_(evaluator),
   272         current_(start ? start->first() : NULL)
   273     {
   274     }
   275 
   276     Object *next();
   277 };
   278 
   279 class FreedObject
   280 {
   281     Map *map_;
   282     size_t bytes_;
   283     FreedObject *next_;
   284 
   285   public:
   286     FreedObject(Map *map, size_t bytes)
   287       : map_(map),
   288         bytes_(bytes)
   289     {
   290         assert(bytes_ >= sizeof(Map *) + sizeof(size_t));
   291         if (bytes_ >= sizeof(FreedObject))
   292             next_ = NULL;
   293     }
   294 
   295     void setNext(FreedObject *obj) {
   296         assert(bytes_ >= sizeof(FreedObject));
   297         next_ = obj;
   298     }
   299     FreedObject *next() {
   300         assert(bytes_ >= sizeof(FreedObject));
   301         return next_;
   302     }
   303     size_t size() {
   304         return bytes_;
   305     }
   306 };
   307 
   308 // Free-region list used in old spaces. Based on V8's. Each object pool
   309 // is used to allocate objects of the previous pool's size. For example,
   310 // the medium object pool is used to allocate small objects.
class FreeList
{
  private:
    // Size-class boundaries for the four pools below.
    // NOTE(review): this kMinObjectSize (32) shadows the file-level
    // kMinObjectSize (two words); here it appears to be the smallest region
    // the free list will track -- confirm against FreeList::add in the .cpp.
    static const size_t kMinObjectSize = 32;
    static const size_t kMaxSmallObject = 255;
    static const size_t kMaxMediumObject = (2 * kKB) - 1;
    static const size_t kMaxLargeObject = (16 * kKB) - 1;

  public:
    FreeList();

  public:
    // Adds a freed region; picks a region of at least |size| bytes, or
    // presumably NULL if none fits; reset() drops all entries. All defined
    // out of line.
    void add(FreedObject *obj);
    FreedObject *pick(size_t size);
    void reset();

  private:
    // Singly-linked pools, one per size class.
    FreedObject *small_objects_;
    FreedObject *medium_objects_;
    FreedObject *large_objects_;
    FreedObject *huge_objects_;
};
   333 
   334 // The heap stack is designed for structures which are known to not escape the
   335 // program stack, and can thus be quickly allocated in a LIFO manner.
   336 // 
class HeapStack
{
    OS::AlignableMapping map_;  // Backing OS memory mapping.
    HeapChunk *chunk_;          // Chunk carved out of map_.
    Address cursor_;            // Current allocation top (LIFO).
    uintptr_t mask_;            // Address mask used by contains(); presumably
                                // ~(alignment - 1) -- confirm in initialize().

  private:
    // 1 MB of stack-allocatable memory.
    static const size_t kStackSize = 1024 * 1024;

  public:
    HeapStack();
    ~HeapStack();
    bool initialize();

  public:
    // Current top-of-stack; pass this to unwind() to pop back to it.
    Address position() const {
        return cursor_;
    }
    inline Address allocate(size_t nbytes);
    inline void unwind(Address to);

    // True if |obj| lives in this stack's chunk. Assumes chunk_ sits at an
    // alignment boundary selected by mask_, so masking any interior address
    // yields the chunk pointer itself.
    inline bool contains(Object *obj) {
        return (reinterpret_cast<uintptr_t>(obj) & mask_) == reinterpret_cast<uintptr_t>(chunk_);
    }

    // Iterates the stack's objects from the chunk start up to the cursor.
    template <typename Evaluator>
    ChunkObjectIteratorBase<Evaluator> begin(const Evaluator &evaluator) {
        return ChunkObjectIteratorBase<Evaluator>(chunk_, cursor_, evaluator);
    }
};
   368 
// Tenured-object space: a linked list of HeapChunks plus a FreeList of
// reclaimed regions. Allocation and sweeping support are defined out of line.
class OldSpace
{
    Zone *zone_;            // Owning zone (not owned here).
    HeapChunk *first_;      // Head of the chunk list.
    HeapChunk *last_;       // Tail of the chunk list.
    Address cursor_;        // Bump-allocation position in the last chunk.
    FreeList freelist_;     // Reclaimed regions, by size class.

  public:
    OldSpace(Zone *zone);
    ~OldSpace();

    bool initialize();

  public:
    // allocate() is the inline fast path; allocateSlow() is the fallback
    // (presumably grows the space). markRegionFree/freeUnusedPages support
    // sweeping. All defined elsewhere.
    inline Address allocate(size_t nbytes);
    Address allocateSlow(size_t nbytes);
    void markRegionFree(Address start, size_t size);
    void freeUnusedPages();

    // Iterates every object from the first chunk up to the cursor.
    template <typename Evaluator>
    ChunkObjectIteratorBase<Evaluator> begin(const Evaluator &evaluator) {
        return ChunkObjectIteratorBase<Evaluator>(first_, cursor_, evaluator);
    }

    HeapChunk *first() {
        return first_;
    }
    void resetFreeList();
};
   399 
// Space for objects above HeapChunk::kLargeObjectThreshold; maintains its
// own chunk list. Allocation and sweeping are defined out of line.
class LargeObjectSpace
{
    HeapChunk *first_;      // Head of the chunk list.
    HeapChunk *last_;       // Tail of the chunk list.

  public:
    LargeObjectSpace();
    ~LargeObjectSpace();

    bool initialize();

  public:
    Address allocate(size_t nbytes);

    HeapChunk *first() {
        return first_;
    }
    // Reclaims dead large objects; defined elsewhere.
    void sweep();
};
   419 
   420 }
   421 
   422 #endif // _include_jitcraft_spaces_h_