pool_allocator.h

Source listing of the libstdc++-v3 extension header <ext/pool_allocator.h> (Doxygen-generated page).
00001 // Allocators -*- C++ -*- 00002 00003 // Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. 00004 // 00005 // This file is part of the GNU ISO C++ Library. This library is free 00006 // software; you can redistribute it and/or modify it under the 00007 // terms of the GNU General Public License as published by the 00008 // Free Software Foundation; either version 2, or (at your option) 00009 // any later version. 00010 00011 // This library is distributed in the hope that it will be useful, 00012 // but WITHOUT ANY WARRANTY; without even the implied warranty of 00013 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 00014 // GNU General Public License for more details. 00015 00016 // You should have received a copy of the GNU General Public License along 00017 // with this library; see the file COPYING. If not, write to the Free 00018 // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, 00019 // USA. 00020 00021 // As a special exception, you may use this file as part of a free software 00022 // library without restriction. Specifically, if other files instantiate 00023 // templates or use macros or inline functions from this file, or you compile 00024 // this file and link it with other files to produce an executable, this 00025 // file does not by itself cause the resulting executable to be covered by 00026 // the GNU General Public License. This exception does not however 00027 // invalidate any other reasons why the executable file might be covered by 00028 // the GNU General Public License. 00029 00030 /* 00031 * Copyright (c) 1996-1997 00032 * Silicon Graphics Computer Systems, Inc. 
00033 * 00034 * Permission to use, copy, modify, distribute and sell this software 00035 * and its documentation for any purpose is hereby granted without fee, 00036 * provided that the above copyright notice appear in all copies and 00037 * that both that copyright notice and this permission notice appear 00038 * in supporting documentation. Silicon Graphics makes no 00039 * representations about the suitability of this software for any 00040 * purpose. It is provided "as is" without express or implied warranty. 00041 */ 00042 00043 /** @file ext/pool_allocator.h 00044 * This file is a GNU extension to the Standard C++ Library. 00045 * You should only include this header if you are using GCC 3 or later. 00046 */ 00047 #ifndef _POOL_ALLOCATOR_H 00048 #define _POOL_ALLOCATOR_H 1 00049 00050 #include <bits/c++config.h> 00051 #include <new> 00052 #include <bits/functexcept.h> 00053 #include <bits/stl_threads.h> 00054 #include <bits/atomicity.h> 00055 00056 namespace __gnu_cxx 00057 { 00058 using std::__throw_bad_alloc; 00059 00060 /** 00061 * @if maint 00062 * Uses various allocators to fulfill underlying requests (and makes as 00063 * few requests as possible when in default high-speed pool mode). 00064 * 00065 * Important implementation properties: 00066 * 0. If globally mandated, then allocate objects from new 00067 * 1. If the clients request an object of size > _S_max_bytes, the resulting 00068 * object will be obtained directly from new 00069 * 2. In all other cases, we allocate an object of size exactly 00070 * _S_round_up(requested_size). Thus the client has enough size 00071 * information that we can return the object to the proper free list 00072 * without permanently losing part of the object. 00073 * 00074 * The template parameter specifies whether more than one thread may use 00075 * this allocator. It is safe to allocate an object from one instance 00076 * of the allocator and deallocate it with another one. 
This effectively 00077 * transfers its ownership to the second one. This may have undesirable 00078 * effects on reference locality. 00079 * 00080 * @endif 00081 * (See @link Allocators allocators info @endlink for more.) 00082 */ 00083 template<bool __threads> 00084 struct __pool_base 00085 { 00086 enum { _S_align = 8 }; 00087 enum { _S_max_bytes = 128 }; 00088 enum { _S_freelists = _S_max_bytes / _S_align }; 00089 00090 union _Obj 00091 { 00092 union _Obj* _M_free_list_link; 00093 char _M_client_data[1]; // The client sees this. 00094 }; 00095 00096 static _Obj* volatile _S_free_list[_S_freelists]; 00097 00098 // Chunk allocation state. 00099 static char* _S_start_free; 00100 static char* _S_end_free; 00101 static size_t _S_heap_size; 00102 00103 static _STL_mutex_lock _S_lock; 00104 static _Atomic_word _S_force_new; 00105 00106 static size_t 00107 _S_round_up(size_t __bytes) 00108 { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); } 00109 00110 static size_t 00111 _S_freelist_index(size_t __bytes) 00112 { return ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1); } 00113 00114 // Returns an object of size __n, and optionally adds to size __n 00115 // free list. 00116 static void* 00117 _S_refill(size_t __n); 00118 00119 // Allocates a chunk for nobjs of size size. nobjs may be reduced 00120 // if it is inconvenient to allocate the requested number. 00121 static char* 00122 _S_chunk_alloc(size_t __n, int& __nobjs); 00123 00124 // It would be nice to use _STL_auto_lock here. But we need a 00125 // test whether threads are in use. 
00126 struct _Lock 00127 { 00128 _Lock() { if (__threads) _S_lock._M_acquire_lock(); } 00129 ~_Lock() { if (__threads) _S_lock._M_release_lock(); } 00130 } __attribute__ ((__unused__)); 00131 friend struct _Lock; 00132 }; 00133 00134 typedef __pool_base<true> __pool_alloc_base; 00135 00136 template<typename _Tp> 00137 class __pool_alloc : private __pool_alloc_base 00138 { 00139 public: 00140 typedef size_t size_type; 00141 typedef ptrdiff_t difference_type; 00142 typedef _Tp* pointer; 00143 typedef const _Tp* const_pointer; 00144 typedef _Tp& reference; 00145 typedef const _Tp& const_reference; 00146 typedef _Tp value_type; 00147 00148 template<typename _Tp1> 00149 struct rebind 00150 { typedef __pool_alloc<_Tp1> other; }; 00151 00152 __pool_alloc() throw() { } 00153 00154 __pool_alloc(const __pool_alloc&) throw() { } 00155 00156 template<typename _Tp1> 00157 __pool_alloc(const __pool_alloc<_Tp1>&) throw() { } 00158 00159 ~__pool_alloc() throw() { } 00160 00161 pointer 00162 address(reference __x) const { return &__x; } 00163 00164 const_pointer 00165 address(const_reference __x) const { return &__x; } 00166 00167 size_type 00168 max_size() const throw() 00169 { return size_t(-1) / sizeof(_Tp); } 00170 00171 // _GLIBCXX_RESOLVE_LIB_DEFECTS 00172 // 402. 
wrong new expression in [some_] allocator::construct 00173 void 00174 construct(pointer __p, const _Tp& __val) 00175 { ::new(__p) _Tp(__val); } 00176 00177 void 00178 destroy(pointer __p) { __p->~_Tp(); } 00179 00180 pointer 00181 allocate(size_type __n, const void* = 0); 00182 00183 void 00184 deallocate(pointer __p, size_type __n); 00185 }; 00186 00187 template<typename _Tp> 00188 inline bool 00189 operator==(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&) 00190 { return true; } 00191 00192 template<typename _Tp> 00193 inline bool 00194 operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&) 00195 { return false; } 00196 00197 // Allocate memory in large chunks in order to avoid fragmenting the 00198 // heap too much. Assume that __n is properly aligned. We hold 00199 // the allocation lock. 00200 template<bool __threads> 00201 char* 00202 __pool_base<__threads>::_S_chunk_alloc(size_t __n, int& __nobjs) 00203 { 00204 char* __result; 00205 size_t __total_bytes = __n * __nobjs; 00206 size_t __bytes_left = _S_end_free - _S_start_free; 00207 00208 if (__bytes_left >= __total_bytes) 00209 { 00210 __result = _S_start_free; 00211 _S_start_free += __total_bytes; 00212 return __result ; 00213 } 00214 else if (__bytes_left >= __n) 00215 { 00216 __nobjs = (int)(__bytes_left / __n); 00217 __total_bytes = __n * __nobjs; 00218 __result = _S_start_free; 00219 _S_start_free += __total_bytes; 00220 return __result; 00221 } 00222 else 00223 { 00224 size_t __bytes_to_get = (2 * __total_bytes 00225 + _S_round_up(_S_heap_size >> 4)); 00226 // Try to make use of the left-over piece. 
00227 if (__bytes_left > 0) 00228 { 00229 _Obj* volatile* __free_list = (_S_free_list 00230 + _S_freelist_index(__bytes_left)); 00231 00232 ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list; 00233 *__free_list = (_Obj*)(void*)_S_start_free; 00234 } 00235 00236 _S_start_free = static_cast<char*>(::operator new(__bytes_to_get)); 00237 if (_S_start_free == 0) 00238 { 00239 size_t __i; 00240 _Obj* volatile* __free_list; 00241 _Obj* __p; 00242 // Try to make do with what we have. That can't hurt. We 00243 // do not try smaller requests, since that tends to result 00244 // in disaster on multi-process machines. 00245 __i = __n; 00246 for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align) 00247 { 00248 __free_list = _S_free_list + _S_freelist_index(__i); 00249 __p = *__free_list; 00250 if (__p != 0) 00251 { 00252 *__free_list = __p -> _M_free_list_link; 00253 _S_start_free = (char*)__p; 00254 _S_end_free = _S_start_free + __i; 00255 return _S_chunk_alloc(__n, __nobjs); 00256 // Any leftover piece will eventually make it to the 00257 // right free list. 00258 } 00259 } 00260 _S_end_free = 0; // In case of exception. 00261 _S_start_free = static_cast<char*>(::operator new(__bytes_to_get)); 00262 // This should either throw an exception or remedy the situation. 00263 // Thus we assume it succeeded. 00264 } 00265 _S_heap_size += __bytes_to_get; 00266 _S_end_free = _S_start_free + __bytes_to_get; 00267 return _S_chunk_alloc(__n, __nobjs); 00268 } 00269 } 00270 00271 // Returns an object of size __n, and optionally adds to "size 00272 // __n"'s free list. We assume that __n is properly aligned. We 00273 // hold the allocation lock. 
00274 template<bool __threads> 00275 void* 00276 __pool_base<__threads>::_S_refill(size_t __n) 00277 { 00278 int __nobjs = 20; 00279 char* __chunk = _S_chunk_alloc(__n, __nobjs); 00280 _Obj* volatile* __free_list; 00281 _Obj* __result; 00282 _Obj* __current_obj; 00283 _Obj* __next_obj; 00284 int __i; 00285 00286 if (1 == __nobjs) 00287 return __chunk; 00288 __free_list = _S_free_list + _S_freelist_index(__n); 00289 00290 // Build free list in chunk. 00291 __result = (_Obj*)(void*)__chunk; 00292 *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n); 00293 for (__i = 1; ; __i++) 00294 { 00295 __current_obj = __next_obj; 00296 __next_obj = (_Obj*)(void*)((char*)__next_obj + __n); 00297 if (__nobjs - 1 == __i) 00298 { 00299 __current_obj -> _M_free_list_link = 0; 00300 break; 00301 } 00302 else 00303 __current_obj -> _M_free_list_link = __next_obj; 00304 } 00305 return __result; 00306 } 00307 00308 template<typename _Tp> 00309 _Tp* 00310 __pool_alloc<_Tp>::allocate(size_type __n, const void*) 00311 { 00312 pointer __ret = 0; 00313 if (__n) 00314 { 00315 if (__n <= max_size()) 00316 { 00317 const size_t __bytes = __n * sizeof(_Tp); 00318 // If there is a race through here, assume answer from getenv 00319 // will resolve in same direction. Inspired by techniques 00320 // to efficiently support threading found in basic_string.h. 00321 if (_S_force_new == 0) 00322 { 00323 if (getenv("GLIBCXX_FORCE_NEW")) 00324 __atomic_add(&_S_force_new, 1); 00325 else 00326 __atomic_add(&_S_force_new, -1); 00327 } 00328 00329 if ((__bytes > (size_t) _S_max_bytes) || (_S_force_new > 0)) 00330 __ret = static_cast<_Tp*>(::operator new(__bytes)); 00331 else 00332 { 00333 _Obj* volatile* __free_list = (_S_free_list 00334 + _S_freelist_index(__bytes)); 00335 // Acquire the lock here with a constructor call. This 00336 // ensures that it is released in exit or during stack 00337 // unwinding. 
00338 _Lock __lock_instance; 00339 _Obj* __restrict__ __result = *__free_list; 00340 if (__builtin_expect(__result == 0, 0)) 00341 __ret = static_cast<_Tp*>(_S_refill(_S_round_up(__bytes))); 00342 else 00343 { 00344 *__free_list = __result->_M_free_list_link; 00345 __ret = reinterpret_cast<_Tp*>(__result); 00346 } 00347 if (__builtin_expect(__ret == 0, 0)) 00348 __throw_bad_alloc(); 00349 } 00350 } 00351 else 00352 __throw_bad_alloc(); 00353 } 00354 return __ret; 00355 } 00356 00357 template<typename _Tp> 00358 void 00359 __pool_alloc<_Tp>::deallocate(pointer __p, size_type __n) 00360 { 00361 if (__n) 00362 { 00363 const size_t __bytes = __n * sizeof(_Tp); 00364 if ((__bytes > (size_t) _S_max_bytes) || (_S_force_new > 0)) 00365 ::operator delete(__p); 00366 else 00367 { 00368 _Obj* volatile* __free_list = (_S_free_list 00369 + _S_freelist_index(__bytes)); 00370 _Obj* __q = (_Obj*)__p; 00371 00372 // Acquire the lock here with a constructor call. This 00373 // ensures that it is released in exit or during stack 00374 // unwinding. 00375 _Lock __lock_instance; 00376 __q -> _M_free_list_link = *__free_list; 00377 *__free_list = __q; 00378 } 00379 } 00380 } 00381 00382 template<bool __threads> 00383 typename __pool_base<__threads>::_Obj* volatile 00384 __pool_base<__threads>::_S_free_list[_S_freelists]; 00385 00386 template<bool __threads> 00387 char* __pool_base<__threads>::_S_start_free = 0; 00388 00389 template<bool __threads> 00390 char* __pool_base<__threads>::_S_end_free = 0; 00391 00392 template<bool __threads> 00393 size_t __pool_base<__threads>::_S_heap_size = 0; 00394 00395 template<bool __threads> 00396 _STL_mutex_lock 00397 __pool_base<__threads>::_S_lock __STL_MUTEX_INITIALIZER; 00398 00399 template<bool __threads> 00400 _Atomic_word 00401 __pool_base<__threads>::_S_force_new = 0; 00402 } // namespace __gnu_cxx 00403 00404 #endif

Generated on Wed Jun 9 11:18:42 2004 for libstdc++-v3 Source by doxygen 1.3.7