#ifndef JSONCPP_BATCHALLOCATOR_H_INCLUDED
# define JSONCPP_BATCHALLOCATOR_H_INCLUDED

# include <stdlib.h>
# include <assert.h>

# ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION

namespace Json {

/* Fast memory allocator.
 *
 * This memory allocator allocates memory for a batch of objects at a time
 * (the page size is the number of objects in each page).
 *
 * It does not allow the destruction of a single object. All the allocated
 * objects can only be destroyed at once; the memory can then either be
 * released or reused for future allocations.
 *
 * The placement new operator must be used to construct each object using the
 * pointer returned by allocate().
 */
template<typename AllocatedType
        ,const unsigned int objectPerAllocation>
class BatchAllocator
{
public:
   typedef AllocatedType Type;

   BatchAllocator( unsigned int objectsPerPage = 255 )
      : freeHead_( 0 )
      , objectsPerPage_( objectsPerPage )
   {
//      printf( "Size: %d => %s\n", sizeof(AllocatedType), typeid(AllocatedType).name() );
      assert( sizeof(AllocatedType) * objectPerAllocation >= sizeof(AllocatedType *) ); // We must be able to store a slist in the object free space.
      assert( objectsPerPage >= 16 );
      batches_ = allocateBatch( 0 );   // allocate a dummy page
      currentBatch_ = batches_;
   }

   ~BatchAllocator()
   {
      for ( BatchInfo *batch = batches_; batch; )
      {
         BatchInfo *nextBatch = batch->next_;
         free( batch );
         batch = nextBatch;
      }
   }

   /// Allocates space for an array of objectPerAllocation objects.
   /// @warning it is the responsibility of the caller to call the objects' constructors.
   AllocatedType *allocate()
   {
      if ( freeHead_ ) // returns node from free list.
      {
         AllocatedType *object = freeHead_;
         freeHead_ = *(AllocatedType **)object;
         return object;
      }
      if ( currentBatch_->used_ == currentBatch_->end_ )
      {
         currentBatch_ = currentBatch_->next_;
         while ( currentBatch_  &&  currentBatch_->used_ == currentBatch_->end_ )
            currentBatch_ = currentBatch_->next_;

         if ( !currentBatch_ ) // no free batch found, allocate a new one
         {
            currentBatch_ = allocateBatch( objectsPerPage_ );
            currentBatch_->next_ = batches_; // insert at the head of the list
            batches_ = currentBatch_;
         }
      }
      AllocatedType *allocated = currentBatch_->used_;
      currentBatch_->used_ += objectPerAllocation;
      return allocated;
   }

   /// Releases the object's storage by pushing it onto the free list.
   /// @warning it is the responsibility of the caller to actually destruct the object.
   void release( AllocatedType *object )
   {
      assert( object != 0 );
      *(AllocatedType **)object = freeHead_;
      freeHead_ = object;
   }

private:
   struct BatchInfo
   {
      BatchInfo *next_;
      AllocatedType *used_;
      AllocatedType *end_;
      AllocatedType buffer_[objectPerAllocation];
   };

   // disabled copy constructor and assignment operator.
   BatchAllocator( const BatchAllocator & );
   void operator =( const BatchAllocator & );

   static BatchInfo *allocateBatch( unsigned int objectsPerPage )
   {
      const unsigned int mallocSize = sizeof(BatchInfo) - sizeof(AllocatedType) * objectPerAllocation
                                      + sizeof(AllocatedType) * objectPerAllocation * objectsPerPage;
      BatchInfo *batch = static_cast<BatchInfo*>( malloc( mallocSize ) );
      assert( batch != 0 );   // out of memory
      batch->next_ = 0;
      batch->used_ = batch->buffer_;
      batch->end_ = batch->buffer_ + objectsPerPage;
      return batch;
   }

   BatchInfo *batches_;
   BatchInfo *currentBatch_;
   /// Head of a singly linked list of released objects, threaded through their own storage.
   AllocatedType *freeHead_;
   unsigned int objectsPerPage_;
};


} // namespace Json

# endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION

#endif // JSONCPP_BATCHALLOCATOR_H_INCLUDED
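
/* A minimal usage sketch (illustrative only; `Node` and `example()` are
 * hypothetical names, not part of this header). allocate() hands back raw
 * storage, so the object is constructed with placement new and must be
 * destructed by the caller before its storage is recycled with release().
 *
 *   #include <new>
 *
 *   struct Node { int value; Node *next; };
 *
 *   void example()
 *   {
 *      Json::BatchAllocator<Node, 1> allocator( 256 );    // 256 objects per page
 *      Node *node = new ( allocator.allocate() ) Node();  // construct in place
 *      node->value = 42;
 *      node->~Node();                                      // caller destructs...
 *      allocator.release( node );                          // ...then returns the storage to the free list
 *   }
 */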