From: igaztanaga_at_[hidden]
Date: 2008-01-20 06:54:59
Author: igaztanaga
Date: 2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
New Revision: 42878
URL: http://svn.boost.org/trac/boost/changeset/42878
Log:
Updated Interprocess and Intrusive:
-> Added linear slist algorithms to Intrusive
-> Updated all Interprocess allocators to version 2 allocators
-> Optimized rbtree_best_fit size overhead to 1 std::size_t.
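
Note: the following snippet is an illustrative usage sketch only and is not part of
the changeset. It shows how the updated version 2 adaptive_pool might be used; the
segment name "MySHM", its size, and the explicit template arguments (64, 2 and 5,
matching the defaults used elsewhere in this commit) are assumptions.

#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/allocators/adaptive_pool.hpp>
#include <boost/interprocess/containers/list.hpp>

using namespace boost::interprocess;

int main()
{
   //Remove any stale segment and create a new one (name and size are illustrative)
   shared_memory_object::remove("MySHM");
   managed_shared_memory segment(create_only, "MySHM", 65536);

   //Version 2 adaptive pool: NodesPerChunk = 64, MaxFreeChunks = 2, OverheadPercent = 5
   typedef adaptive_pool<int, managed_shared_memory::segment_manager, 64, 2, 5> int_pool;
   int_pool alloc_inst(segment.get_segment_manager());

   //STL-compatible: an Interprocess list that takes its nodes from the shared pool
   typedef list<int, int_pool> shm_list;
   shm_list *mylist = segment.construct<shm_list>("MyList")(alloc_inst);
   mylist->push_back(1);

   segment.destroy<shm_list>("MyList");
   shared_memory_object::remove("MySHM");
   return 0;
}
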
Added:
   trunk/boost/interprocess/allocators/detail/allocator_common.hpp   (contents, props changed)
   trunk/boost/intrusive/linear_slist_algorithms.hpp   (contents, props changed)
   trunk/libs/interprocess/test/vector_test.hpp   (contents, props changed)
Text files modified: 
   trunk/boost/interprocess/allocators/adaptive_pool.hpp                    |   476 ++++++++++++++++++---------             
   trunk/boost/interprocess/allocators/allocation_type.hpp                  |     2                                         
   trunk/boost/interprocess/allocators/allocator.hpp                        |    93 ++--                                    
   trunk/boost/interprocess/allocators/cached_adaptive_pool.hpp             |   515 +++++++++++++---------------            
   trunk/boost/interprocess/allocators/cached_node_allocator.hpp            |   474 +++++++++++---------------              
   trunk/boost/interprocess/allocators/detail/adaptive_node_pool.hpp        |   701 ++++++++++++++++++++------------------- 
   trunk/boost/interprocess/allocators/detail/node_pool.hpp                 |   342 +++++++-----------                      
   trunk/boost/interprocess/allocators/detail/node_tools.hpp                |     5                                         
   trunk/boost/interprocess/allocators/node_allocator.hpp                   |   481 +++++++++++++++++---------              
   trunk/boost/interprocess/allocators/private_adaptive_pool.hpp            |   417 ++++++++++++++++++----                  
   trunk/boost/interprocess/allocators/private_node_allocator.hpp           |   456 +++++++++++++++++++++++++               
   trunk/boost/interprocess/containers/detail/flat_tree.hpp                 |     2                                         
   trunk/boost/interprocess/containers/detail/node_alloc_holder.hpp         |    46 ++                                      
   trunk/boost/interprocess/containers/detail/tree.hpp                      |     8                                         
   trunk/boost/interprocess/containers/flat_map.hpp                         |     2                                         
   trunk/boost/interprocess/containers/flat_set.hpp                         |     2                                         
   trunk/boost/interprocess/containers/list.hpp                             |     9                                         
   trunk/boost/interprocess/containers/map.hpp                              |     2                                         
   trunk/boost/interprocess/containers/set.hpp                              |     2                                         
   trunk/boost/interprocess/containers/slist.hpp                            |     4                                         
   trunk/boost/interprocess/containers/string.hpp                           |     4                                         
   trunk/boost/interprocess/containers/vector.hpp                           |    29 +                                       
   trunk/boost/interprocess/creation_tags.hpp                               |     2                                         
   trunk/boost/interprocess/detail/algorithms.hpp                           |     2                                         
   trunk/boost/interprocess/detail/atomic.hpp                               |     2                                         
   trunk/boost/interprocess/detail/cast_tags.hpp                            |     2                                         
   trunk/boost/interprocess/detail/config_begin.hpp                         |     1                                         
   trunk/boost/interprocess/detail/in_place_interface.hpp                   |     2                                         
   trunk/boost/interprocess/detail/interprocess_tester.hpp                  |     2                                         
   trunk/boost/interprocess/detail/iterators.hpp                            |     8                                         
   trunk/boost/interprocess/detail/managed_memory_impl.hpp                  |     2                                         
   trunk/boost/interprocess/detail/math_functions.hpp                       |     2                                         
   trunk/boost/interprocess/detail/min_max.hpp                              |     2                                         
   trunk/boost/interprocess/detail/mpl.hpp                                  |     2                                         
   trunk/boost/interprocess/detail/named_proxy.hpp                          |     2                                         
   trunk/boost/interprocess/detail/os_file_functions.hpp                    |     2                                         
   trunk/boost/interprocess/detail/os_thread_functions.hpp                  |     2                                         
   trunk/boost/interprocess/detail/pointer_type.hpp                         |     2                                         
   trunk/boost/interprocess/detail/posix_time_types_wrk.hpp                 |     2                                         
   trunk/boost/interprocess/detail/segment_manager_helper.hpp               |     2                                         
   trunk/boost/interprocess/detail/tmp_dir_helpers.hpp                      |     2                                         
   trunk/boost/interprocess/detail/type_traits.hpp                          |     2                                         
   trunk/boost/interprocess/detail/utilities.hpp                            |   145 +++++++-                                
   trunk/boost/interprocess/detail/version_type.hpp                         |     2                                         
   trunk/boost/interprocess/detail/win32_api.hpp                            |     2                                         
   trunk/boost/interprocess/detail/workaround.hpp                           |     6                                         
   trunk/boost/interprocess/errors.hpp                                      |     2                                         
   trunk/boost/interprocess/exceptions.hpp                                  |     2                                         
   trunk/boost/interprocess/file_mapping.hpp                                |     2                                         
   trunk/boost/interprocess/indexes/flat_map_index.hpp                      |     2                                         
   trunk/boost/interprocess/indexes/iset_index.hpp                          |     2                                         
   trunk/boost/interprocess/indexes/iunordered_set_index.hpp                |     2                                         
   trunk/boost/interprocess/indexes/map_index.hpp                           |     2                                         
   trunk/boost/interprocess/indexes/null_index.hpp                          |     2                                         
   trunk/boost/interprocess/indexes/unordered_map_index.hpp                 |     2                                         
   trunk/boost/interprocess/interprocess_fwd.hpp                            |    18                                         
   trunk/boost/interprocess/ipc/message_queue.hpp                           |     2                                         
   trunk/boost/interprocess/managed_external_buffer.hpp                     |     7                                         
   trunk/boost/interprocess/managed_heap_memory.hpp                         |     2                                         
   trunk/boost/interprocess/managed_mapped_file.hpp                         |     2                                         
   trunk/boost/interprocess/managed_shared_memory.hpp                       |     2                                         
   trunk/boost/interprocess/managed_windows_shared_memory.hpp               |     2                                         
   trunk/boost/interprocess/mapped_region.hpp                               |     2                                         
   trunk/boost/interprocess/mem_algo/detail/mem_algo_common.hpp             |   214 +++++++++++-                            
   trunk/boost/interprocess/mem_algo/detail/simple_seq_fit_impl.hpp         |    60 ++                                      
   trunk/boost/interprocess/mem_algo/rbtree_best_fit.hpp                    |   613 ++++++++++++++++++++--------------      
   trunk/boost/interprocess/mem_algo/simple_seq_fit.hpp                     |     2                                         
   trunk/boost/interprocess/offset_ptr.hpp                                  |     6                                         
   trunk/boost/interprocess/segment_manager.hpp                             |    27 +                                       
   trunk/boost/interprocess/shared_memory_object.hpp                        |     2                                         
   trunk/boost/interprocess/smart_ptr/deleter.hpp                           |     2                                         
   trunk/boost/interprocess/smart_ptr/detail/shared_count.hpp               |     2                                         
   trunk/boost/interprocess/smart_ptr/detail/sp_counted_base_atomic.hpp     |     2                                         
   trunk/boost/interprocess/smart_ptr/shared_ptr.hpp                        |     2                                         
   trunk/boost/interprocess/smart_ptr/weak_ptr.hpp                          |     2                                         
   trunk/boost/interprocess/streams/bufferstream.hpp                        |     2                                         
   trunk/boost/interprocess/streams/vectorstream.hpp                        |     2                                         
   trunk/boost/interprocess/sync/emulation/interprocess_condition.hpp       |     2                                         
   trunk/boost/interprocess/sync/emulation/interprocess_mutex.hpp           |     2                                         
   trunk/boost/interprocess/sync/emulation/interprocess_recursive_mutex.hpp |     2                                         
   trunk/boost/interprocess/sync/emulation/interprocess_semaphore.hpp       |     2                                         
   trunk/boost/interprocess/sync/emulation/named_creation_functor.hpp       |     2                                         
   trunk/boost/interprocess/sync/file_lock.hpp                              |     2                                         
   trunk/boost/interprocess/sync/interprocess_barrier.hpp                   |     2                                         
   trunk/boost/interprocess/sync/interprocess_condition.hpp                 |     2                                         
   trunk/boost/interprocess/sync/interprocess_mutex.hpp                     |     2                                         
   trunk/boost/interprocess/sync/interprocess_recursive_mutex.hpp           |     2                                         
   trunk/boost/interprocess/sync/interprocess_semaphore.hpp                 |     2                                         
   trunk/boost/interprocess/sync/interprocess_upgradable_mutex.hpp          |     2                                         
   trunk/boost/interprocess/sync/lock_options.hpp                           |     2                                         
   trunk/boost/interprocess/sync/mutex_family.hpp                           |     2                                         
   trunk/boost/interprocess/sync/named_condition.hpp                        |     2                                         
   trunk/boost/interprocess/sync/named_mutex.hpp                            |     2                                         
   trunk/boost/interprocess/sync/named_recursive_mutex.hpp                  |     2                                         
   trunk/boost/interprocess/sync/named_semaphore.hpp                        |     2                                         
   trunk/boost/interprocess/sync/named_upgradable_mutex.hpp                 |     2                                         
   trunk/boost/interprocess/sync/null_mutex.hpp                             |     2                                         
   trunk/boost/interprocess/sync/posix/interprocess_condition.hpp           |     2                                         
   trunk/boost/interprocess/sync/posix/interprocess_mutex.hpp               |     2                                         
   trunk/boost/interprocess/sync/posix/interprocess_recursive_mutex.hpp     |     2                                         
   trunk/boost/interprocess/sync/posix/interprocess_semaphore.hpp           |     2                                         
   trunk/boost/interprocess/sync/posix/pthread_helpers.hpp                  |     2                                         
   trunk/boost/interprocess/sync/posix/ptime_to_timespec.hpp                |     2                                         
   trunk/boost/interprocess/sync/posix/semaphore_wrapper.hpp                |     2                                         
   trunk/boost/interprocess/sync/scoped_lock.hpp                            |     2                                         
   trunk/boost/interprocess/sync/sharable_lock.hpp                          |     2                                         
   trunk/boost/interprocess/sync/upgradable_lock.hpp                        |     2                                         
   trunk/boost/interprocess/windows_shared_memory.hpp                       |     2                                         
   trunk/boost/intrusive/circular_list_algorithms.hpp                       |   109 +++++                                   
   trunk/boost/intrusive/circular_slist_algorithms.hpp                      |   165 ++++++++                                
   trunk/boost/intrusive/detail/tree_algorithms.hpp                         |     4                                         
   trunk/boost/intrusive/detail/utilities.hpp                               |     4                                         
   trunk/boost/intrusive/intrusive_fwd.hpp                                  |     1                                         
   trunk/boost/intrusive/list.hpp                                           |    51 --                                      
   trunk/boost/intrusive/options.hpp                                        |    27 +                                       
   trunk/boost/intrusive/slist.hpp                                          |   236 ++++++-------                           
   trunk/boost/intrusive/slist_hook.hpp                                     |     2                                         
   trunk/libs/interprocess/doc/Jamfile.v2                                   |     1                                         
   trunk/libs/interprocess/doc/interprocess.qbk                             |     2                                         
   trunk/libs/interprocess/proj/vc7ide/interprocesslib.vcproj               |     6                                         
   trunk/libs/interprocess/test/adaptive_node_pool_test.cpp                 |     2                                         
   trunk/libs/interprocess/test/adaptive_pool_test.cpp                      |    24 +                                       
   trunk/libs/interprocess/test/cached_adaptive_pool_test.cpp               |    24 +                                       
   trunk/libs/interprocess/test/cached_node_allocator_test.cpp              |    23 +                                       
   trunk/libs/interprocess/test/file_mapping_test.cpp                       |     1                                         
   trunk/libs/interprocess/test/map_test.hpp                                |     2                                         
   trunk/libs/interprocess/test/memory_algorithm_test.cpp                   |    96 +++--                                   
   trunk/libs/interprocess/test/memory_algorithm_test_template.hpp          |   110 +++++                                   
   trunk/libs/interprocess/test/node_allocator_test.cpp                     |    19 +                                       
   trunk/libs/interprocess/test/node_pool_test.hpp                          |     8                                         
   trunk/libs/interprocess/test/private_adaptive_pool_test.cpp              |    19 +                                       
   trunk/libs/interprocess/test/private_node_allocator_test.cpp             |    21 +                                       
   trunk/libs/interprocess/test/set_test.hpp                                |     2                                         
   trunk/libs/interprocess/test/vector_test.cpp                             |   235 +------------                           
   trunk/libs/intrusive/example/doc_list_algorithms.cpp                     |     2                                         
   trunk/libs/intrusive/example/doc_slist_algorithms.cpp                    |     2                                         
   trunk/libs/intrusive/proj/vc7ide/_intrusivelib/_intrusivelib.vcproj      |     3                                         
   trunk/libs/intrusive/test/list_test.cpp                                  |    67 ++-                                     
   trunk/libs/intrusive/test/slist_test.cpp                                 |   169 +++++++--                               
   139 files changed, 4226 insertions(+), 2538 deletions(-)
Modified: trunk/boost/interprocess/allocators/adaptive_pool.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/adaptive_pool.hpp	(original)
+++ trunk/boost/interprocess/allocators/adaptive_pool.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -25,6 +25,7 @@
 #include <boost/interprocess/detail/type_traits.hpp>
 #include <boost/interprocess/allocators/detail/adaptive_node_pool.hpp>
 #include <boost/interprocess/exceptions.hpp>
+#include <boost/interprocess/allocators/detail/allocator_common.hpp>
 #include <memory>
 #include <algorithm>
 #include <cstddef>
@@ -35,35 +36,38 @@
 namespace boost {
 namespace interprocess {
 
-//!An STL node allocator that uses a segment manager as memory 
-//!source. The internal pointer type will of the same type (raw, smart) as
-//!"typename SegmentManager::void_pointer" type. This allows
-//!placing the allocator in shared memory, memory mapped-files, etc...
-//!This node allocator shares a segregated storage between all instances 
-//!of adaptive_pool with equal sizeof(T) placed in the same segment 
-//!group. NodesPerChunk is the number of nodes allocated at once when the allocator
-//!needs runs out of nodes. MaxFreeChunks is the number of free nodes
-//!in the adaptive node pool that will trigger the deallocation of
-template<class T, class SegmentManager, std::size_t NodesPerChunk, std::size_t MaxFreeChunks>
-class adaptive_pool
+/// @cond
+
+namespace detail{
+
+template < unsigned int Version
+         , class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk
+         , std::size_t MaxFreeChunks
+         , unsigned char OverheadPercent
+         >
+class adaptive_pool_base
+   : public node_pool_allocation_impl
+   < adaptive_pool_base
+      < Version, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent>
+   , Version
+   , T
+   , SegmentManager
+   >
 {
    public:
    typedef typename SegmentManager::void_pointer         void_pointer;
-   typedef typename detail::
-      pointer_to_other<void_pointer, const void>::type   cvoid_pointer;
    typedef SegmentManager                                segment_manager;
-   typedef typename detail::
-      pointer_to_other<void_pointer, char>::type         char_pointer;
-   typedef typename SegmentManager::
-      mutex_family::mutex_type                           mutex_type;
-   typedef adaptive_pool
-      <T, SegmentManager, NodesPerChunk, MaxFreeChunks>  self_t;
+   typedef adaptive_pool_base
+      <Version, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent>   self_t;
    typedef detail::shared_adaptive_node_pool
-      < SegmentManager, mutex_type
-      , sizeof(T), NodesPerChunk, MaxFreeChunks>         node_pool_t;
+      < SegmentManager, sizeof(T), NodesPerChunk, MaxFreeChunks, OverheadPercent>   node_pool_t;
    typedef typename detail::
       pointer_to_other<void_pointer, node_pool_t>::type  node_pool_ptr;
 
+   BOOST_STATIC_ASSERT((Version <=2));
+
    public:
    //-------
    typedef typename detail::
@@ -78,52 +82,60 @@
    typedef std::size_t                                   size_type;
    typedef std::ptrdiff_t                                difference_type;
 
-   //!Obtains adaptive_pool from 
-   //!adaptive_pool
+   typedef detail::version_type<adaptive_pool_base, Version>   version;
+   typedef transform_iterator
+      < typename SegmentManager::
+         multiallocation_iterator
+      , detail::cast_functor <T> >              multiallocation_iterator;
+   typedef typename SegmentManager::
+      multiallocation_chain                     multiallocation_chain;
+
+   //!Obtains adaptive_pool_base from 
+   //!adaptive_pool_base
    template<class T2>
    struct rebind
    {  
-      typedef adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks>       other;
+      typedef adaptive_pool_base<Version, T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent>       other;
    };
 
    /// @cond
    private:
-   //!Not assignable from related adaptive_pool
-   template<class T2, class SegmentManager2, std::size_t N2, std::size_t F2>
-   adaptive_pool& operator=
-      (const adaptive_pool<T2, SegmentManager2, N2, F2>&);
+   //!Not assignable from related adaptive_pool_base
+   template<unsigned int Version2, class T2, class SegmentManager2, std::size_t N2, std::size_t F2, unsigned char O2>
+   adaptive_pool_base& operator=
+      (const adaptive_pool_base<Version2, T2, SegmentManager2, N2, F2, O2>&);
 
-   //!Not assignable from other adaptive_pool
-   adaptive_pool& operator=(const adaptive_pool&);
+   //!Not assignable from other adaptive_pool_base
+   adaptive_pool_base& operator=(const adaptive_pool_base&);
    /// @endcond
 
    public:
    //!Constructor from a segment manager. If not present, constructs a node
    //!pool. Increments the reference count of the associated node pool.
    //!Can throw boost::interprocess::bad_alloc
-   adaptive_pool(segment_manager *segment_mngr) 
-      : mp_node_pool(priv_get_or_create(segment_mngr)) { }
+   adaptive_pool_base(segment_manager *segment_mngr) 
+      : mp_node_pool(detail::get_or_create_node_pool<node_pool_t>(segment_mngr)) { }
 
-   //!Copy constructor from other adaptive_pool. Increments the reference 
+   //!Copy constructor from other adaptive_pool_base. Increments the reference 
    //!count of the associated node pool. Never throws
-   adaptive_pool(const adaptive_pool &other) 
+   adaptive_pool_base(const adaptive_pool_base &other) 
       : mp_node_pool(other.get_node_pool()) 
    {  
       mp_node_pool->inc_ref_count();   
    }
 
-   //!Copy constructor from related adaptive_pool. If not present, constructs
+   //!Copy constructor from related adaptive_pool_base. If not present, constructs
    //!a node pool. Increments the reference count of the associated node pool.
    //!Can throw boost::interprocess::bad_alloc
    template<class T2>
-   adaptive_pool
-      (const adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks> &other)
-      : mp_node_pool(priv_get_or_create(other.get_segment_manager())) { }
+   adaptive_pool_base
+      (const adaptive_pool_base<Version, T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
+      : mp_node_pool(detail::get_or_create_node_pool<node_pool_t>(other.get_segment_manager())) { }
 
    //!Destructor, removes node_pool_t from memory
    //!if its reference count reaches to zero. Never throws
-   ~adaptive_pool() 
-      {     priv_destroy_if_last_link();   }
+   ~adaptive_pool_base() 
+   {  detail::destroy_node_pool_if_last_link(detail::get_pointer(mp_node_pool));   }
 
    //!Returns a pointer to the node pool.
    //!Never throws
@@ -135,156 +147,300 @@
    segment_manager* get_segment_manager()const
    {  return mp_node_pool->get_segment_manager();  }
 
+   //!Swaps allocators. Does not throw. If each allocator is placed in a
+   //!different memory segment, the result is undefined.
+   friend void swap(self_t &alloc1, self_t &alloc2)
+   {  detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool);  }
+
+   /// @cond
+   private:
+   node_pool_ptr   mp_node_pool;
+   /// @endcond
+};
+
+//!Equality test for same type
+//!of adaptive_pool_base
+template<unsigned int V, class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
+bool operator==(const adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc1, 
+                const adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc2)
+   {  return alloc1.get_node_pool() == alloc2.get_node_pool(); }
+
+//!Inequality test for same type
+//!of adaptive_pool_base
+template<unsigned int V, class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
+bool operator!=(const adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc1, 
+                const adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc2)
+   {  return alloc1.get_node_pool() != alloc2.get_node_pool(); }
+
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk = 64
+         , std::size_t MaxFreeChunks = 2
+         , unsigned char OverheadPercent = 5
+         >
+class adaptive_pool_v1
+   :  public adaptive_pool_base
+         < 1
+         , T
+         , SegmentManager
+         , NodesPerChunk
+         , MaxFreeChunks
+         , OverheadPercent
+         >
+{
+   public:
+   typedef detail::adaptive_pool_base
+         < 1, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef adaptive_pool_v1<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent>  other;
+   };
+
+   adaptive_pool_v1(SegmentManager *segment_mngr) 
+      : base_t(segment_mngr)
+   {}
+
+   template<class T2>
+   adaptive_pool_v1
+      (const adaptive_pool_v1<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
+      : base_t(other)
+   {}
+};
+
+}  //namespace detail{
+
+/// @endcond
+
+//!An STL node allocator that uses a segment manager as memory 
+//!source. The internal pointer type will be of the same type (raw, smart) as
+//!"typename SegmentManager::void_pointer" type. This allows
+//!placing the allocator in shared memory, memory mapped-files, etc...
+//!
+//!This node allocator shares a segregated storage between all instances 
+//!of adaptive_pool with equal sizeof(T) placed in the same segment 
+//!group. NodesPerChunk is the number of nodes allocated at once when the allocator
+//!needs runs out of nodes. MaxFreeChunks is the maximum number of totally free chunks
+//!that the adaptive node pool will hold. The rest of the totally free chunks will be
+//!deallocated with the segment manager.
+//!
+//!OverheadPercent is the (approximated) maximum size overhead (1-20%) of the allocator:
+//!(memory usable for nodes / total memory allocated from the segment manager)
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk
+         , std::size_t MaxFreeChunks
+         , unsigned char OverheadPercent
+         >
+class adaptive_pool
+   /// @cond
+   :  public detail::adaptive_pool_base
+         < 2
+         , T
+         , SegmentManager
+         , NodesPerChunk
+         , MaxFreeChunks
+         , OverheadPercent
+         >
+   /// @endcond
+{
+
+   #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
+   typedef detail::adaptive_pool_base
+         < 2, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t;
+   public:
+   typedef detail::version_type<adaptive_pool, 2>   version;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent>  other;
+   };
+
+   adaptive_pool(SegmentManager *segment_mngr) 
+      : base_t(segment_mngr)
+   {}
+
+   template<class T2>
+   adaptive_pool
+      (const adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
+      : base_t(other)
+   {}
+
+   #else //BOOST_INTERPROCESS_DOXYGEN_INVOKED
+   public:
+   typedef implementation_defined::segment_manager       segment_manager;
+   typedef segment_manager::void_pointer                 void_pointer;
+   typedef implementation_defined::pointer               pointer;
+   typedef implementation_defined::const_pointer         const_pointer;
+   typedef T                                             value_type;
+   typedef typename detail::add_reference
+                     <value_type>::type                  reference;
+   typedef typename detail::add_reference
+                     <const value_type>::type            const_reference;
+   typedef std::size_t                                   size_type;
+   typedef std::ptrdiff_t                                difference_type;
+
+   //!Obtains adaptive_pool from 
+   //!adaptive_pool
+   template<class T2>
+   struct rebind
+   {  
+      typedef adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
+   };
+
+   private:
+   //!Not assignable from
+   //!related adaptive_pool
+   template<class T2, class SegmentManager2, std::size_t N2, std::size_t F2, unsigned char OP2>
+   adaptive_pool& operator=
+      (const adaptive_pool<T2, SegmentManager2, N2, F2, OP2>&);
+
+   //!Not assignable from 
+   //!other adaptive_pool
+   adaptive_pool& operator=(const adaptive_pool&);
+
+   public:
+   //!Constructor from a segment manager. If not present, constructs a node
+   //!pool. Increments the reference count of the associated node pool.
+   //!Can throw boost::interprocess::bad_alloc
+   adaptive_pool(segment_manager *segment_mngr);
+
+   //!Copy constructor from other adaptive_pool. Increments the reference 
+   //!count of the associated node pool. Never throws
+   adaptive_pool(const adaptive_pool &other);
+
+   //!Copy constructor from related adaptive_pool. If not present, constructs
+   //!a node pool. Increments the reference count of the associated node pool.
+   //!Can throw boost::interprocess::bad_alloc
+   template<class T2>
+   adaptive_pool
+      (const adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other);
+
+   //!Destructor, removes node_pool_t from memory
+   //!if its reference count reaches zero. Never throws
+   ~adaptive_pool();
+
+   //!Returns a pointer to the node pool.
+   //!Never throws
+   node_pool_t* get_node_pool() const;
+
+   //!Returns the segment manager.
+   //!Never throws
+   segment_manager* get_segment_manager()const;
+
    //!Returns the number of elements that could be allocated.
    //!Never throws
-   size_type max_size() const
-   {  return this->get_segment_manager()->get_size()/sizeof(value_type);  }
+   size_type max_size() const;
 
    //!Allocate memory for an array of count elements. 
-   //!Throws boost::interprocess::bad_alloc if there is no enough memory*/
-   pointer allocate(size_type count, cvoid_pointer = 0)
-   {  
-      if(count > ((size_type)-1)/sizeof(value_type))
-         throw bad_alloc();
-      return pointer(static_cast<T*>(mp_node_pool->allocate(count)));
-   }
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate(size_type count, cvoid_pointer hint = 0);
 
    //!Deallocate allocated memory.
    //!Never throws
-   void deallocate(const pointer &ptr, size_type count)
-   {  mp_node_pool->deallocate(detail::get_pointer(ptr), count);  }
+   void deallocate(const pointer &ptr, size_type count);
 
-   //!Deallocates all free chunks of the pool
-   void deallocate_free_chunks()
-   {  mp_node_pool->deallocate_free_chunks();   }
+   //!Deallocates all free chunks
+   //!of the pool
+   void deallocate_free_chunks();
 
    //!Swaps allocators. Does not throw. If each allocator is placed in a
    //!different memory segment, the result is undefined.
-   friend void swap(self_t &alloc1, self_t &alloc2)
-   {  detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool);  }
-
-   //These functions are obsolete. These are here to conserve
-   //backwards compatibility with containers using them...
+   friend void swap(self_t &alloc1, self_t &alloc2);
 
    //!Returns address of mutable object.
    //!Never throws
-   pointer address(reference value) const
-   {  return pointer(boost::addressof(value));  }
+   pointer address(reference value) const;
 
    //!Returns address of non mutable object.
    //!Never throws
-   const_pointer address(const_reference value) const
-   {  return const_pointer(boost::addressof(value));  }
+   const_pointer address(const_reference value) const;
 
    //!Default construct an object. 
-   //!Throws if T's default constructor throws*/
-   void construct(const pointer &ptr)
-   {  new(detail::get_pointer(ptr)) value_type;  }
+   //!Throws if T's default constructor throws
+   void construct(const pointer &ptr);
 
    //!Destroys object. Throws if object's
    //!destructor throws
-   void destroy(const pointer &ptr)
-   {  BOOST_ASSERT(ptr != 0); (*ptr).~value_type();  }
-
-   /// @cond
-   private:
-   //!Object function that creates the node allocator if it is not created and
-   //!increments reference count if it is already created
-   struct get_or_create_func
-   {
-      typedef detail::shared_adaptive_node_pool
-               <SegmentManager, mutex_type, sizeof(T), NodesPerChunk, MaxFreeChunks>   node_pool_t;
-
-      //!This connects or constructs the unique instance of node_pool_t
-      //!Can throw boost::interprocess::bad_alloc
-      void operator()()
-      {
-         //Find or create the node_pool_t
-         mp_node_pool =    mp_named_alloc->template find_or_construct
-                           <node_pool_t>(unique_instance)(mp_named_alloc);
-         //If valid, increment link count
-         if(mp_node_pool != 0)
-            mp_node_pool->inc_ref_count();
-      }
-
-      //!Constructor. Initializes function
-      //!object parameters
-      get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){}
-      
-      node_pool_t      *mp_node_pool;
-      segment_manager     *mp_named_alloc;
-   };
-
-   //!Initialization function, creates an executes atomically the 
-   //!initialization object functions. Can throw boost::interprocess::bad_alloc
-   node_pool_t *priv_get_or_create(segment_manager *named_alloc)
-   {
-      get_or_create_func func(named_alloc);
-      named_alloc->atomic_func(func);
-      return func.mp_node_pool;
-   }
-
-   //!Object function that decrements the reference count. If the count 
-   //!reaches to zero destroys the node allocator from memory. 
-   //!Never throws
-   struct destroy_if_last_link_func
-   {
-      typedef detail::shared_adaptive_node_pool
-               <SegmentManager, mutex_type,sizeof(T), NodesPerChunk, MaxFreeChunks>   node_pool_t;
-
-      //!Decrements reference count and destroys the object if there is no 
-      //!more attached allocators. Never throws
-      void operator()()
-      {
-         //If not the last link return
-         if(mp_node_pool->dec_ref_count() != 0) return;
-
-         //Last link, let's destroy the segment_manager
-         mp_named_alloc->template destroy<node_pool_t>(unique_instance); 
-      }  
-
-      //!Constructor. Initializes function
-      //!object parameters
-      destroy_if_last_link_func(segment_manager    *nhdr,
-                                node_pool_t *phdr) 
-                            : mp_named_alloc(nhdr), mp_node_pool(phdr){}
-
-      segment_manager   *mp_named_alloc;     
-      node_pool_t       *mp_node_pool;
-   };
+   void destroy(const pointer &ptr);
 
-   //!Destruction function, initializes and executes destruction function 
-   //!object. Never throws
-   void priv_destroy_if_last_link()
-   {
-      typedef detail::shared_adaptive_node_pool
-               <SegmentManager, mutex_type,sizeof(T), NodesPerChunk, MaxFreeChunks>   node_pool_t;
-      //Get segment manager
-      segment_manager *named_segment_mngr = this->get_segment_manager();
-      //Execute destruction functor atomically
-      destroy_if_last_link_func func(named_segment_mngr, detail::get_pointer(mp_node_pool));
-      named_segment_mngr->atomic_func(func);
-   }
-
-   private:
-   node_pool_ptr   mp_node_pool;
-   /// @endcond
+   //!Returns the maximum number of objects the previously allocated memory
+   //!pointed to by p can hold. This size only works for memory allocated with
+   //!allocate, allocation_command and allocate_many.
+   size_type size(const pointer &p) const;
+
+   std::pair<pointer, bool>
+      allocation_command(allocation_type command,
+                         size_type limit_size, 
+                         size_type preferred_size,
+                         size_type &received_size, const pointer &reuse = 0);
+
+   //!Allocates many elements of size elem_size in a contiguous chunk
+   //!of memory. The minimum number to be allocated is min_elements,
+   //!the preferred and maximum number is
+   //!preferred_elements. The number of actually allocated elements
+   //!will be assigned to received_size. The elements must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);
+
+   //!Allocates n_elements elements, each one of size elem_sizes[i] in a
+   //!contiguous chunk
+   //!of memory. The elements must be deallocated
+   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);
+
+   //!Deallocates elements previously allocated with allocate_many(...),
+   //!passing the multiallocation iterator obtained from that call
+   void deallocate_many(multiallocation_iterator it);
+
+   //!Allocates just one object. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate_one();
+
+   //!Allocates many elements of size == 1 in a contiguous chunk
+   //!of memory. The minimum number to be allocated is min_elements,
+   //!the preferred and maximum number is
+   //!preferred_elements. The number of actually allocated elements
+   //!will be assigned to received_size. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   multiallocation_iterator allocate_individual(std::size_t num_elements);
+
+   //!Deallocates memory previously allocated with allocate_one().
+   //!You should never use deallocate_one to deallocate memory allocated
+   //!with other functions different from allocate_one(). Never throws
+   void deallocate_one(const pointer &p);
+
+   //!Deallocates elements previously allocated with allocate_individual(...),
+   //!passing the multiallocation iterator obtained from that call
+   void deallocate_individual(multiallocation_iterator it);
+   #endif
 };
 
+#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
+
 //!Equality test for same type
 //!of adaptive_pool
-template<class T, class S, std::size_t NodesPerChunk, std::size_t F> inline
-bool operator==(const adaptive_pool<T, S, NodesPerChunk, F> &alloc1, 
-                const adaptive_pool<T, S, NodesPerChunk, F> &alloc2)
-   {  return alloc1.get_node_pool() == alloc2.get_node_pool(); }
+template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
+bool operator==(const adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1, 
+                const adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
 
 //!Inequality test for same type
 //!of adaptive_pool
-template<class T, class S, std::size_t NodesPerChunk, std::size_t F> inline
-bool operator!=(const adaptive_pool<T, S, NodesPerChunk, F> &alloc1, 
-                const adaptive_pool<T, S, NodesPerChunk, F> &alloc2)
-   {  return alloc1.get_node_pool() != alloc2.get_node_pool(); }
+template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
+bool operator!=(const adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1, 
+                const adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
 
+#endif
 
 }  //namespace interprocess {
 }  //namespace boost {
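
Note: the sketch below is illustrative only and not part of the changeset. It exercises
the node-oriented part of the version 2 interface declared above (allocate_one,
deallocate_one and deallocate_free_chunks); the helper function name and the assumption
that a managed_shared_memory segment already exists are hypothetical.

#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/allocators/adaptive_pool.hpp>

using namespace boost::interprocess;

//Hypothetical helper: 'segment' is assumed to be an already constructed segment
void one_node_at_a_time(managed_shared_memory &segment)
{
   typedef adaptive_pool<int, managed_shared_memory::segment_manager, 64, 2, 5> pool_t;
   pool_t alloc(segment.get_segment_manager());

   //allocate_one() serves a single node from the pooled, segregated storage...
   pool_t::pointer p = alloc.allocate_one();
   *p = 42;
   //...and must be returned with deallocate_one(), not with deallocate()
   alloc.deallocate_one(p);

   //Completely free chunks cached by the pool can be returned
   //to the segment manager explicitly
   alloc.deallocate_free_chunks();
}
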
Modified: trunk/boost/interprocess/allocators/allocation_type.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/allocation_type.hpp	(original)
+++ trunk/boost/interprocess/allocators/allocation_type.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 ///////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/allocators/allocator.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/allocator.hpp	(original)
+++ trunk/boost/interprocess/allocators/allocator.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 ///////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -51,13 +51,6 @@
    /// @cond
    private:
 
-   struct cast_functor
-   {
-      typedef typename detail::add_reference<T>::type result_type;
-      result_type operator()(char &ptr) const
-      {  return *static_cast<T*>(static_cast<void*>(&ptr));  }
-   };
-
    //Self type
    typedef allocator<T, SegmentManager>   self_t;
 
@@ -108,7 +101,9 @@
    typedef transform_iterator
       < typename SegmentManager::
          multiallocation_iterator
-      , cast_functor>                           multiallocation_iterator;
+      , detail::cast_functor <T> >          multiallocation_iterator;
+   typedef typename SegmentManager::
+      multiallocation_chain                     multiallocation_chain;
 
    /// @endcond
 
@@ -146,7 +141,7 @@
    pointer allocate(size_type count, cvoid_ptr hint = 0)
    {
       (void)hint;
-      if(count > ((size_type)-1)/sizeof(T))
+      if(count > this->max_size())
          throw bad_alloc();
       return pointer((value_type*)mp_mngr->allocate(count*sizeof(T)));
    }
@@ -166,7 +161,13 @@
    friend void swap(self_t &alloc1, self_t &alloc2)
    {  detail::do_swap(alloc1.mp_mngr, alloc2.mp_mngr);   }
 
-   //Experimental version 2 allocator functions
+   //!Returns the maximum number of objects the previously allocated memory
+   //!pointed to by p can hold. This size only works for memory allocated with
+   //!allocate, allocation_command and allocate_many.
+   size_type size(const pointer &p) const
+   {  
+      return (size_type)mp_mngr->size(detail::get_pointer(p))/sizeof(T);
+   }
 
    std::pair<pointer, bool>
       allocation_command(allocation_type command,
@@ -178,23 +179,42 @@
          (command, limit_size, preferred_size, received_size, detail::get_pointer(reuse));
    }
 
-   //!Returns maximum the number of objects the previously allocated memory
-   //!pointed by p can hold.
-   size_type size(const pointer &p) const
-   {  
-      return (size_type)mp_mngr->size(detail::get_pointer(p))/sizeof(T);
+   //!Allocates many elements of size elem_size in a contiguous chunk
+   //!of memory. The minimum number to be allocated is min_elements,
+   //!the preferred and maximum number is
+   //!preferred_elements. The number of actually allocated elements
+   //!will be assigned to received_size. The elements must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements)
+   {
+      return multiallocation_iterator
+         (mp_mngr->allocate_many(sizeof(T)*elem_size, num_elements));
    }
 
+   //!Allocates n_elements elements, each one of size elem_sizes[i] in a
+   //!contiguous chunk
+   //!of memory. The elements must be deallocated
+   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements)
+   {
+      return multiallocation_iterator
+         (mp_mngr->allocate_many(elem_sizes, n_elements, sizeof(T)));
+   }
+
+   //!Deallocates elements previously allocated with allocate_many(...),
+   //!passing the multiallocation iterator obtained from that call
+   void deallocate_many(multiallocation_iterator it)
+   {  return mp_mngr->deallocate_many(it.base()); }
+
    //!Allocates just one object. Memory allocated with this function
    //!must be deallocated only with deallocate_one().
    //!Throws boost::interprocess::bad_alloc if there is not enough memory
    pointer allocate_one()
    {  return this->allocate(1);  }
 
-   /// @cond
-
-   //Experimental. Don't use.
-
    //!Allocates many elements of size == 1 in a contiguous chunk
    //!of memory. The minimum number to be allocated is min_elements,
    //!the preferred and maximum number is
@@ -204,41 +224,20 @@
    multiallocation_iterator allocate_individual(std::size_t num_elements)
    {  return this->allocate_many(1, num_elements); }
 
-   /// @endcond
-
    //!Deallocates memory previously allocated with allocate_one().
    //!You should never use deallocate_one to deallocate memory allocated
    //!with other functions different from allocate_one(). Never throws
    void deallocate_one(const pointer &p)
    {  return this->deallocate(p, 1);  }
 
-   /// @cond
-
-   //!Allocates many elements of size elem_size in a contiguous chunk
+   //!Allocates many elements of size == 1 in a contiguous chunk
    //!of memory. The minimum number to be allocated is min_elements,
    //!the preferred and maximum number is
    //!preferred_elements. The number of actually allocated elements is
-   //!will be assigned to received_size. The elements must be deallocated
-   //!with deallocate(...)
-   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements)
-   {
-      return multiallocation_iterator
-         (mp_mngr->allocate_many(sizeof(T)*elem_size, num_elements));
-   }
-
-   //!Allocates n_elements elements, each one of size elem_sizes[i]in a
-   //!contiguous chunk
-   //!of memory. The elements must be deallocated
-   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements)
-   {
-      return multiallocation_iterator
-         (mp_mngr->allocate_many(elem_sizes, n_elements, sizeof(T)));
-   }
-
-   /// @endcond
-
-   //These functions are obsolete. These are here to conserve
-   //backwards compatibility with containers using them...
+   //!will be assigned to received_size. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   void deallocate_individual(multiallocation_iterator it)
+   {  return this->deallocate_many(it); }
 
    //!Returns address of mutable object.
    //!Never throws
@@ -251,7 +250,7 @@
    {  return const_pointer(boost::addressof(value));  }
 
    //!Default construct an object. 
-   //!Throws if T's default constructor throws*/
+   //!Throws if T's default constructor throws
    void construct(const pointer &ptr)
    {  new(detail::get_pointer(ptr)) value_type;  }
 
Modified: trunk/boost/interprocess/allocators/cached_adaptive_pool.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/cached_adaptive_pool.hpp	(original)
+++ trunk/boost/interprocess/allocators/cached_adaptive_pool.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -19,56 +19,155 @@
 #include <boost/interprocess/detail/workaround.hpp>
 
 #include <boost/interprocess/interprocess_fwd.hpp>
-#include <boost/interprocess/detail/utilities.hpp>
-#include <boost/assert.hpp>
-#include <boost/utility/addressof.hpp>
 #include <boost/interprocess/allocators/detail/adaptive_node_pool.hpp>
+#include <boost/interprocess/allocators/detail/allocator_common.hpp>
 #include <boost/interprocess/detail/workaround.hpp>
-#include <boost/interprocess/exceptions.hpp>
+#include <boost/interprocess/detail/version_type.hpp>
 #include <boost/interprocess/allocators/detail/node_tools.hpp>
-#include <memory>
-#include <algorithm>
 #include <cstddef>
 
 //!\file
-//!Describes cached_cached_node_allocator pooled shared memory STL compatible allocator 
+//!Describes cached_adaptive_pool pooled shared memory STL compatible allocator 
 
 namespace boost {
 namespace interprocess {
 
+/// @cond
+
+namespace detail {
+
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk = 64
+         , std::size_t MaxFreeChunks = 2
+         , unsigned char OverheadPercent = 5
+         >
+class cached_adaptive_pool_v1
+   :  public detail::cached_allocator_impl
+         < T
+         , detail::shared_adaptive_node_pool
+            < SegmentManager
+            , sizeof(T)
+            , NodesPerChunk
+            , MaxFreeChunks
+            , OverheadPercent
+            >
+         , 1>
+{
+   public:
+   typedef detail::cached_allocator_impl
+         < T
+         , detail::shared_adaptive_node_pool
+            < SegmentManager
+            , sizeof(T)
+            , NodesPerChunk
+            , MaxFreeChunks
+            , OverheadPercent
+            >
+         , 1> base_t;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef cached_adaptive_pool_v1
+         <T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent>  other;
+   };
+
+   cached_adaptive_pool_v1(SegmentManager *segment_mngr,
+                         std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES) 
+      : base_t(segment_mngr, max_cached_nodes)
+   {}
+
+   template<class T2>
+   cached_adaptive_pool_v1
+      (const cached_adaptive_pool_v1
+         <T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
+      : base_t(other)
+   {}
+};
+
+}  //namespace detail{
+
+/// @endcond
+
 //!An STL node allocator that uses a segment manager as memory 
 //!source. The internal pointer type will be of the same type (raw, smart) as
 //!"typename SegmentManager::void_pointer" type. This allows
 //!placing the allocator in shared memory, memory mapped-files, etc...
+//!
 //!This node allocator shares a segregated storage between all instances of 
-//!cached_adaptive_pool with equal sizeof(T) placed in the same fixed size 
+//!cached_adaptive_pool with equal sizeof(T) placed in the same
 //!memory segment. But also caches some nodes privately to
 //!avoid some synchronization overhead.
-template<class T, class SegmentManager, std::size_t NodesPerChunk, std::size_t MaxFreeChunks>
+//!
+//!NodesPerChunk is the minimum number of nodes allocated at once when
+//!the allocator runs out of nodes. MaxFreeChunks is the maximum number of totally free chunks
+//!that the adaptive node pool will hold. The rest of the totally free chunks will be
+//!deallocated with the segment manager.
+//!
+//!OverheadPercent is the (approximated) maximum size overhead (1-20%) of the allocator:
+//!(memory usable for nodes / total memory allocated from the segment manager)
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk
+         , std::size_t MaxFreeChunks
+         , unsigned char OverheadPercent
+         >
 class cached_adaptive_pool
-{
    /// @cond
-   typedef typename SegmentManager::void_pointer         void_pointer;
-   typedef typename detail::
-      pointer_to_other<void_pointer, const void>::type   cvoid_pointer;
-   typedef SegmentManager                                segment_manager;
-   typedef typename detail::
-      pointer_to_other<void_pointer, char>::type         char_pointer;
-   typedef typename SegmentManager::mutex_family::mutex_type mutex_type;
-   typedef cached_adaptive_pool
-      <T, SegmentManager, NodesPerChunk, MaxFreeChunks>              self_t;
-   enum { DEFAULT_MAX_CACHED_NODES = 64 };
-
-   typedef typename detail::node_slist<void_pointer>::node_t         node_t;
-   typedef typename detail::node_slist<void_pointer>::node_slist_t   cached_list_t;
+   :  public detail::cached_allocator_impl
+         < T
+         , detail::shared_adaptive_node_pool
+            < SegmentManager
+            , sizeof(T)
+            , NodesPerChunk
+            , MaxFreeChunks
+            , OverheadPercent
+            >
+         , 2>
    /// @endcond
+{
+
+   #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
+   public:
+   typedef detail::cached_allocator_impl
+         < T
+         , detail::shared_adaptive_node_pool
+            < SegmentManager
+            , sizeof(T)
+            , NodesPerChunk
+            , MaxFreeChunks
+            , OverheadPercent
+            >
+         , 2> base_t;
 
    public:
-   //-------
-   typedef typename detail::
-      pointer_to_other<void_pointer, T>::type            pointer;
-   typedef typename detail::
-      pointer_to_other<void_pointer, const T>::type      const_pointer;
+   typedef detail::version_type<cached_adaptive_pool, 2>   version;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef cached_adaptive_pool
+         <T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent>  other;
+   };
+
+   cached_adaptive_pool(SegmentManager *segment_mngr,
+                         std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES) 
+      : base_t(segment_mngr, max_cached_nodes)
+   {}
+
+   template<class T2>
+   cached_adaptive_pool
+      (const cached_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
+      : base_t(other)
+   {}
+
+   #else
+   public:
+   typedef implementation_defined::segment_manager       segment_manager;
+   typedef segment_manager::void_pointer                 void_pointer;
+   typedef implementation_defined::pointer               pointer;
+   typedef implementation_defined::const_pointer         const_pointer;
    typedef T                                             value_type;
    typedef typename detail::add_reference
                      <value_type>::type                  reference;
@@ -76,312 +175,178 @@
                      <const value_type>::type            const_reference;
    typedef std::size_t                                   size_type;
    typedef std::ptrdiff_t                                difference_type;
-   typedef detail::shared_adaptive_node_pool
-      < SegmentManager, mutex_type
-      , sizeof(T), NodesPerChunk, MaxFreeChunks>         node_pool_t;
-   typedef typename detail::
-      pointer_to_other<void_pointer, node_pool_t>::type  node_pool_ptr;
 
-   //!Obtains cached_adaptive_pool from other
+   //!Obtains cached_adaptive_pool from 
    //!cached_adaptive_pool
    template<class T2>
    struct rebind
    {  
-      typedef cached_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks>   other;
+      typedef cached_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
    };
 
-   /// @cond
    private:
-
-   //!Not assignable from related cached_adaptive_pool
-   template<class T2, class SegmentManager2, std::size_t N2, std::size_t MaxFreeChunks2>
+   //!Not assignable from
+   //!related cached_adaptive_pool
+   template<class T2, class SegmentManager2, std::size_t N2, std::size_t F2, unsigned char OP2>
    cached_adaptive_pool& operator=
-      (const cached_adaptive_pool<T2, SegmentManager2, N2, MaxFreeChunks2>&);
+      (const cached_adaptive_pool<T2, SegmentManager2, N2, F2, OP2>&);
 
-   //!Not assignable from other cached_adaptive_pool
+   //!Not assignable from 
+   //!other cached_adaptive_pool
    cached_adaptive_pool& operator=(const cached_adaptive_pool&);
-   /// @endcond
-   
+
    public:
-   //!Constructor from a segment manager. If not present, constructs
-   //!a node pool. Increments the reference count of the node pool.
+   //!Constructor from a segment manager. If not present, constructs a node
+   //!pool. Increments the reference count of the associated node pool.
    //!Can throw boost::interprocess::bad_alloc
-   cached_adaptive_pool(segment_manager *segment_mngr,
-                         std::size_t max_cached_nodes = DEFAULT_MAX_CACHED_NODES) 
-      : mp_node_pool(priv_get_or_create(segment_mngr)),
-        m_max_cached_nodes(max_cached_nodes)
-   {}
+   cached_adaptive_pool(segment_manager *segment_mngr);
 
-   //!Copy constructor from other cached_adaptive_pool. Increments the 
-   //!reference count of the associated node pool. Never throws
-   cached_adaptive_pool(const cached_adaptive_pool &other) 
-      : mp_node_pool(other.get_node_pool()),
-        m_max_cached_nodes(other.get_max_cached_nodes())
-   {     mp_node_pool->inc_ref_count();   }
+   //!Copy constructor from other cached_adaptive_pool. Increments the reference 
+   //!count of the associated node pool. Never throws
+   cached_adaptive_pool(const cached_adaptive_pool &other);
 
    //!Copy constructor from related cached_adaptive_pool. If not present, constructs
    //!a node pool. Increments the reference count of the associated node pool.
    //!Can throw boost::interprocess::bad_alloc
    template<class T2>
    cached_adaptive_pool
-      (const cached_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks> &other)
-      : mp_node_pool(priv_get_or_create(other.get_segment_manager())),
-        m_max_cached_nodes(other.get_max_cached_nodes())
-         { }
+      (const cached_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other);
 
    //!Destructor, removes node_pool_t from memory
    //!if its reference count reaches zero. Never throws
-   ~cached_adaptive_pool() 
-   {     
-      priv_deallocate_all_cached_nodes();
-      priv_destroy_if_last_link();   
-   }
+   ~cached_adaptive_pool();
 
    //!Returns a pointer to the node pool.
    //!Never throws
-   node_pool_t* get_node_pool() const
-      {  return detail::get_pointer(mp_node_pool);   }
+   node_pool_t* get_node_pool() const;
 
    //!Returns the segment manager.
    //!Never throws
-   segment_manager* get_segment_manager()const
-   {  return mp_node_pool->get_segment_manager();  }
-
-   //!Sets the new max cached nodes value. This can provoke deallocations
-   //!if "newmax" is less than current cached nodes. Never throws
-   void set_max_cached_nodes(std::size_t newmax)
-   {
-      m_max_cached_nodes = newmax;
-      priv_deallocate_remaining_nodes();
-   }
+   segment_manager* get_segment_manager()const;
 
-   //!Returns the max cached nodes parameter.
+   //!Returns the number of elements that could be allocated.
    //!Never throws
-   std::size_t get_max_cached_nodes() const
-      {  return m_max_cached_nodes;  }
-
-   //!Returns the number of elements that could be
-   //!allocated. Never throws
-   size_type max_size() const
-      {  return this->get_segment_manager()->get_size()/sizeof(value_type);  }
+   size_type max_size() const;
 
    //!Allocate memory for an array of count elements. 
    //!Throws boost::interprocess::bad_alloc if there is not enough memory
-   pointer allocate(size_type count, cvoid_pointer hint = 0)
-   {  
-      (void)hint;
-      if(count > ((size_type)-1)/sizeof(value_type))
-         throw bad_alloc();
-      typedef detail::shared_adaptive_node_pool
-               <SegmentManager, mutex_type, sizeof(T), NodesPerChunk, MaxFreeChunks>   node_pool_t;
-      
-      void * ret;
-      
-      if(count == 1){
-         //If don't have any cached node, we have to get a new list of free nodes from the pool
-         if(m_cached_nodes.empty()){
-            mp_node_pool->allocate_nodes(m_max_cached_nodes/2, m_cached_nodes);
-         }
-         ret = &m_cached_nodes.front();
-         m_cached_nodes.pop_front();
-      }
-      else{
-         ret = mp_node_pool->allocate(count);
-      }   
-      return pointer(static_cast<T*>(ret));
-   }
-
-   //!Deallocate allocated memory. Never throws
-   void deallocate(const pointer &ptr, size_type count)
-   {
-      typedef detail::shared_adaptive_node_pool
-               <SegmentManager, mutex_type, sizeof(T), NodesPerChunk, MaxFreeChunks>   node_pool_t;
-
-      if(count == 1){
-         //Check if cache is full
-         if(m_cached_nodes.size() >= m_max_cached_nodes){
-            //This only occurs if this allocator deallocate memory allocated
-            //with other equal allocator. Since the cache is full, and more 
-            //deallocations are probably coming, we'll make some room in cache
-            //in a single, efficient multi node deallocation.
-            priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
-         }
-         m_cached_nodes.push_front(*(node_t*)detail::char_ptr_cast(detail::get_pointer(ptr)));
-      }
-      else{
-         mp_node_pool->deallocate(detail::get_pointer(ptr), count);
-      }
-   }
-
-   //!Deallocates all free chunks of the pool
-   void deallocate_free_chunks()
-   {  mp_node_pool->deallocate_free_chunks();   }
+   pointer allocate(size_type count, cvoid_pointer hint = 0);
 
-   //!Swaps allocators. Does not throw. If each allocator is placed in a
-   //!different shared memory segments, the result is undefined.
-   friend void swap(self_t &alloc1, self_t &alloc2)
-   {
-      detail::do_swap(alloc1.mp_node_pool,       alloc2.mp_node_pool);
-      alloc1.m_cached_nodes.swap(alloc2.m_cached_nodes);
-      detail::do_swap(alloc1.m_max_cached_nodes, alloc2.m_max_cached_nodes);
-   }
+   //!Deallocate allocated memory.
+   //!Never throws
+   void deallocate(const pointer &ptr, size_type count);
 
-   void deallocate_cache()
-   {  this->priv_deallocate_all_cached_nodes(); }
+   //!Deallocates all free chunks
+   //!of the pool
+   void deallocate_free_chunks();
 
-   //These functions are obsolete. These are here to conserve
-   //backwards compatibility with containers using them...
+   //!Swaps allocators. Does not throw. If each allocator is placed in a
+   //!different memory segment, the result is undefined.
+   friend void swap(self_t &alloc1, self_t &alloc2);
 
    //!Returns address of mutable object.
    //!Never throws
-   pointer address(reference value) const
-   {  return pointer(boost::addressof(value));  }
+   pointer address(reference value) const;
 
    //!Returns address of non mutable object.
    //!Never throws
-   const_pointer address(const_reference value) const
-   {  return const_pointer(boost::addressof(value));  }
+   const_pointer address(const_reference value) const;
 
    //!Default construct an object. 
-   //!Throws if T's default constructor throws*/
-   void construct(const pointer &ptr)
-   {  new(detail::get_pointer(ptr)) value_type;  }
+   //!Throws if T's default constructor throws
+   void construct(const pointer &ptr);
 
    //!Destroys object. Throws if object's
    //!destructor throws
-   void destroy(const pointer &ptr)
-   {  BOOST_ASSERT(ptr != 0); (*ptr).~value_type();  }
-
-   /// @cond
-   private:
+   void destroy(const pointer &ptr);
 
-   //!Object function that creates the node allocator if it is not created and
-   //!increments reference count if it is already created
-   struct get_or_create_func
-   {
-      typedef detail::shared_adaptive_node_pool
-               <SegmentManager, mutex_type, sizeof(T), NodesPerChunk, MaxFreeChunks>   node_pool_t;
-
-      //!This connects or constructs the unique instance of node_pool_t
-      //!Can throw boost::interprocess::bad_alloc
-      void operator()()
-      {
-         //Find or create the node_pool_t
-         mp_node_pool =    mp_named_alloc->template find_or_construct
-                           <node_pool_t>(unique_instance)(mp_named_alloc);
-         //If valid, increment link count
-         if(mp_node_pool != 0)
-            mp_node_pool->inc_ref_count();
-      }
-
-      //!Constructor. Initializes function
-      //!object parameters
-      get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){}
-      
-      node_pool_t      *mp_node_pool;
-      segment_manager     *mp_named_alloc;
-   };
+   //!Returns the maximum number of objects that the previously allocated memory
+   //!pointed to by p can hold. This size only works for memory allocated with
+   //!allocate, allocation_command and allocate_many.
+   size_type size(const pointer &p) const;
+
+   std::pair<pointer, bool>
+      allocation_command(allocation_type command,
+                         size_type limit_size, 
+                         size_type preferred_size,
+                         size_type &received_size, const pointer &reuse = 0);
+
+   //!Allocates num_elements buffers, each one holding elem_size elements,
+   //!in a contiguous chunk of memory. The buffers must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);
+
+   //!Allocates n_elements buffers, each one of size elem_sizes[i], in a
+   //!contiguous chunk of memory. The buffers must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);
+
+   //!Deallocates the buffers pointed to by the multiallocation iterator,
+   //!previously allocated with allocate_many(...)
+   void deallocate_many(multiallocation_iterator it);
 
-   //!Frees all cached nodes.
-   //!Never throws
-   void priv_deallocate_all_cached_nodes()
-   {
-      if(m_cached_nodes.empty()) return;
-      mp_node_pool->deallocate_nodes(m_cached_nodes);
-   }
+   //!Allocates just one object. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate_one();
 
-   //!Frees all cached nodes at once.
-   //!Never throws
-   void priv_deallocate_remaining_nodes()
-   {
-      if(m_cached_nodes.size() > m_max_cached_nodes){
-         priv_deallocate_n_nodes(m_cached_nodes.size()-m_max_cached_nodes);
-      }
-   }
-
-   //!Frees n cached nodes at once. Never throws
-   void priv_deallocate_n_nodes(std::size_t n)
-   {
-      //Deallocate all new linked list at once
-      mp_node_pool->deallocate_nodes(m_cached_nodes, n);
-   }   
-
-   //!Initialization function, creates an executes atomically the 
-   //!initialization object functions. Can throw boost::interprocess::bad_alloc
-   node_pool_t *priv_get_or_create(segment_manager *named_alloc)
-   {
-      get_or_create_func func(named_alloc);
-      named_alloc->atomic_func(func);
-      return func.mp_node_pool;
-   }
+   //!Allocates num_elements elements of size == 1 (single objects).
+   //!Memory allocated with this function must be deallocated only with
+   //!deallocate_one() or deallocate_individual()
+   multiallocation_iterator allocate_individual(std::size_t num_elements);
+
+   //!Deallocates memory previously allocated with allocate_one().
+   //!Never use deallocate_one to deallocate memory allocated
+   //!with functions other than allocate_one(). Never throws
+   void deallocate_one(const pointer &p);
+
+   //!Deallocates the elements pointed to by the multiallocation iterator,
+   //!previously allocated with allocate_one() or allocate_individual().
+   //!Never throws
+   void deallocate_individual(multiallocation_iterator it);
+
+   //!Sets the new max cached nodes value. This can provoke deallocations
+   //!if "newmax" is less than current cached nodes. Never throws
+   void set_max_cached_nodes(std::size_t newmax);
 
-   //!Object function that decrements the reference count. If the count 
-   //!reaches to zero destroys the node allocator from memory. 
+   //!Returns the max cached nodes parameter.
    //!Never throws
-   struct destroy_if_last_link_func
-   {
-      typedef detail::shared_adaptive_node_pool
-               <SegmentManager, mutex_type,sizeof(T), NodesPerChunk, MaxFreeChunks>   node_pool_t;
-
-      //!Decrements reference count and destroys the object if there is no 
-      //!more attached allocators. Never throws
-      void operator()()
-      {
-         //If not the last link return
-         if(mp_node_pool->dec_ref_count() != 0) return;
-
-         //Last link, let's destroy the segment_manager
-         mp_named_alloc->template destroy<node_pool_t>(unique_instance); 
-      }  
-
-      //!Constructor. Initializes function
-      //!object parameters
-      destroy_if_last_link_func(segment_manager    *nhdr,
-                                node_pool_t *phdr) 
-                            : mp_named_alloc(nhdr), mp_node_pool(phdr){}
-
-      segment_manager     *mp_named_alloc;     
-      node_pool_t      *mp_node_pool;
-   };
+   std::size_t get_max_cached_nodes() const;
+   #endif
+};
 
-   //!Destruction function, initializes and executes destruction function 
-   //!object. Never throws
-   void priv_destroy_if_last_link()
-   {
-      typedef detail::shared_adaptive_node_pool
-               <SegmentManager, mutex_type,sizeof(T), NodesPerChunk, MaxFreeChunks>   node_pool_t;
-      //Get segment manager
-      segment_manager *segment_mngr = this->get_segment_manager();
-      //Execute destruction functor atomically
-      destroy_if_last_link_func func(segment_mngr, detail::get_pointer(mp_node_pool));
-      segment_mngr->atomic_func(func);
-   }
+#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
 
-   private:
-   node_pool_ptr  mp_node_pool;
-   cached_list_t  m_cached_nodes;
-   std::size_t    m_max_cached_nodes;
-   /// @endcond
-};
+//!Equality test for same type
+//!of cached_adaptive_pool
+template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
+bool operator==(const cached_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1, 
+                const cached_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
+
+//!Inequality test for same type
+//!of cached_adaptive_pool
+template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
+bool operator!=(const cached_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1, 
+                const cached_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
 
-//!Equality test for same type of
-//!cached_adaptive_pool
-template<class T, class S, std::size_t NodesPerChunk, std::size_t M> inline
-bool operator==(const cached_adaptive_pool<T, S, NodesPerChunk, M> &alloc1, 
-                const cached_adaptive_pool<T, S, NodesPerChunk, M> &alloc2)
-   {  return alloc1.get_node_pool() == alloc2.get_node_pool(); }
-
-//!Inequality test for same type of
-//!cached_adaptive_pool
-template<class T, class S, std::size_t NodesPerChunk, std::size_t M> inline
-bool operator!=(const cached_adaptive_pool<T, S, NodesPerChunk, M> &alloc1, 
-                const cached_adaptive_pool<T, S, NodesPerChunk, M> &alloc2)
-   {  return alloc1.get_node_pool() != alloc2.get_node_pool(); }
+#endif
 
 }  //namespace interprocess {
-
 }  //namespace boost {
 
+
 #include <boost/interprocess/detail/config_end.hpp>
 
 #endif   //#ifndef BOOST_INTERPROCESS_CACHED_ADAPTIVE_POOL_HPP
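For reference, a minimal usage sketch of the updated cached_adaptive_pool interface described above. The shared memory name, the segment size and the explicit <64, 2, 2> values for NodesPerChunk, MaxFreeChunks and OverheadPercent are illustrative assumptions, not taken from this changeset.

//Minimal sketch, assuming a managed_shared_memory segment; names and sizes are illustrative
#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/shared_memory_object.hpp>
#include <boost/interprocess/allocators/cached_adaptive_pool.hpp>
#include <boost/interprocess/containers/list.hpp>

using namespace boost::interprocess;

int main()
{
   //Remove any stale segment and create a fresh one (name and size are assumptions)
   shared_memory_object::remove("MySharedMemory");
   managed_shared_memory segment(create_only, "MySharedMemory", 65536);

   //At least 64 nodes per chunk, keep at most 2 totally free chunks,
   //and cap the allocator size overhead at roughly 2%
   typedef cached_adaptive_pool
      <int, managed_shared_memory::segment_manager, 64, 2, 2> pool_allocator_t;

   pool_allocator_t alloc(segment.get_segment_manager());
   alloc.set_max_cached_nodes(128);   //enlarge the private node cache

   //Use the allocator with an Interprocess container placed in the segment
   typedef list<int, pool_allocator_t> shm_list_t;
   shm_list_t *mylist = segment.construct<shm_list_t>("MyList")(alloc);
   for(int i = 0; i < 100; ++i)
      mylist->push_back(i);

   segment.destroy<shm_list_t>("MyList");
   shared_memory_object::remove("MySharedMemory");
   return 0;
}

Containers placed in the segment then draw their nodes from the shared adaptive pool, while each allocator instance's private cache absorbs part of the locking overhead, as described in the class documentation above.
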
Modified: trunk/boost/interprocess/allocators/cached_node_allocator.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/cached_node_allocator.hpp	(original)
+++ trunk/boost/interprocess/allocators/cached_node_allocator.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -19,14 +19,11 @@
 #include <boost/interprocess/detail/workaround.hpp>
 
 #include <boost/interprocess/interprocess_fwd.hpp>
-#include <boost/interprocess/detail/utilities.hpp>
-#include <boost/assert.hpp>
-#include <boost/utility/addressof.hpp>
 #include <boost/interprocess/allocators/detail/node_pool.hpp>
+#include <boost/interprocess/allocators/detail/allocator_common.hpp>
 #include <boost/interprocess/detail/workaround.hpp>
-#include <boost/interprocess/exceptions.hpp>
-#include <memory>
-#include <algorithm>
+#include <boost/interprocess/detail/version_type.hpp>
+#include <boost/interprocess/allocators/detail/node_tools.hpp>
 #include <cstddef>
 
 //!\file
@@ -35,37 +32,113 @@
 namespace boost {
 namespace interprocess {
 
-//!An STL node allocator that uses a segment manager as memory 
-//!source. The internal pointer type will of the same type (raw, smart) as
-//!"typename SegmentManager::void_pointer" type. This allows
-//!placing the allocator in shared memory, memory mapped-files, etc...
-//!This node allocator shares a segregated storage between all instances of 
-//!cached_node_allocator with equal sizeof(T) placed in the same fixed size 
-//!memory segment. But also caches some nodes privately to
-//!avoid some synchronization overhead.
-template<class T, class SegmentManager, std::size_t NodesPerChunk>
-class cached_node_allocator
+
+/// @cond
+
+namespace detail {
+
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk = 64
+         >
+class cached_node_allocator_v1
+   :  public detail::cached_allocator_impl
+         < T
+         , detail::shared_node_pool
+            < SegmentManager
+            , sizeof(T)
+            , NodesPerChunk
+            >
+         , 1>
 {
+   public:
+   typedef detail::cached_allocator_impl
+         < T
+         , detail::shared_node_pool
+            < SegmentManager
+            , sizeof(T)
+            , NodesPerChunk
+            >
+         , 1> base_t;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef cached_node_allocator_v1
+         <T2, SegmentManager, NodesPerChunk>  other;
+   };
+
+   cached_node_allocator_v1(SegmentManager *segment_mngr,
+                         std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES) 
+      : base_t(segment_mngr, max_cached_nodes)
+   {}
+
+   template<class T2>
+   cached_node_allocator_v1
+      (const cached_node_allocator_v1
+         <T2, SegmentManager, NodesPerChunk> &other)
+      : base_t(other)
+   {}
+};
+
+}  //namespace detail{
+
+/// @endcond
+
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk
+         >
+class cached_node_allocator
    /// @cond
-   typedef typename SegmentManager::void_pointer          void_pointer;
-   typedef typename detail::
-      pointer_to_other<void_pointer, const void>::type    cvoid_pointer;
-   typedef SegmentManager                                 segment_manager;
-   typedef typename detail::
-      pointer_to_other<void_pointer, char>::type          char_pointer;
-   typedef typename SegmentManager::mutex_family::mutex_type mutex_type;
-   typedef cached_node_allocator<T, SegmentManager, NodesPerChunk>   self_t;
-   enum { DEFAULT_MAX_CACHED_NODES = 64 };
-   typedef typename detail::node_slist<void_pointer>::node_t         node_t;
-   typedef typename detail::node_slist<void_pointer>::node_slist_t   cached_list_t;
+   :  public detail::cached_allocator_impl
+         < T
+         , detail::shared_node_pool
+            < SegmentManager
+            , sizeof(T)
+            , NodesPerChunk
+            >
+         , 2>
    /// @endcond
+{
+
+   #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
+   public:
+   typedef detail::cached_allocator_impl
+         < T
+         , detail::shared_node_pool
+            < SegmentManager
+            , sizeof(T)
+            , NodesPerChunk
+            >
+         , 2> base_t;
 
    public:
-   //-------
-   typedef typename detail::
-      pointer_to_other<void_pointer, T>::type            pointer;
-   typedef typename detail::
-      pointer_to_other<void_pointer, const T>::type      const_pointer;
+   typedef detail::version_type<cached_node_allocator, 2>   version;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef cached_node_allocator<T2, SegmentManager, NodesPerChunk>  other;
+   };
+
+   cached_node_allocator(SegmentManager *segment_mngr,
+                         std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES) 
+      : base_t(segment_mngr, max_cached_nodes)
+   {}
+
+   template<class T2>
+   cached_node_allocator
+      (const cached_node_allocator<T2, SegmentManager, NodesPerChunk> &other)
+      : base_t(other)
+   {}
+
+   #else
+   public:
+   typedef implementation_defined::segment_manager       segment_manager;
+   typedef segment_manager::void_pointer                 void_pointer;
+   typedef implementation_defined::pointer               pointer;
+   typedef implementation_defined::const_pointer         const_pointer;
    typedef T                                             value_type;
    typedef typename detail::add_reference
                      <value_type>::type                  reference;
@@ -73,302 +146,173 @@
                      <const value_type>::type            const_reference;
    typedef std::size_t                                   size_type;
    typedef std::ptrdiff_t                                difference_type;
-   typedef detail::shared_node_pool
-      < SegmentManager, mutex_type
-      , sizeof(T), NodesPerChunk>                        node_pool_t;
-   typedef typename detail::
-      pointer_to_other<void_pointer, node_pool_t>::type  node_pool_ptr;
 
-   //!Obtains cached_node_allocator from other cached_node_allocator
+   //!Obtains cached_node_allocator from 
+   //!cached_node_allocator
    template<class T2>
    struct rebind
    {  
-      typedef cached_node_allocator<T2, SegmentManager, NodesPerChunk>   other;
+      typedef cached_node_allocator<T2, SegmentManager, NodesPerChunk> other;
    };
 
-   /// @cond
    private:
-
-   //!Not assignable from related cached_node_allocator
+   //!Not assignable from
+   //!related cached_node_allocator
    template<class T2, class SegmentManager2, std::size_t N2>
    cached_node_allocator& operator=
       (const cached_node_allocator<T2, SegmentManager2, N2>&);
 
-   //!Not assignable from other cached_node_allocator
+   //!Not assignable from 
+   //!other cached_node_allocator
    cached_node_allocator& operator=(const cached_node_allocator&);
-   /// @endcond
 
    public:
-   //!Constructor from a segment manager. If not present, constructs
-   //!a node pool. Increments the reference count of the node pool.
+   //!Constructor from a segment manager. If not present, constructs a node
+   //!pool. Increments the reference count of the associated node pool.
    //!Can throw boost::interprocess::bad_alloc
-   cached_node_allocator(segment_manager *segment_mngr,
-                         std::size_t max_cached_nodes = DEFAULT_MAX_CACHED_NODES) 
-      : mp_node_pool(priv_get_or_create(segment_mngr)),
-        m_max_cached_nodes(max_cached_nodes)
-   {}
+   cached_node_allocator(segment_manager *segment_mngr);
 
-   //!Copy constructor from other cached_node_allocator. Increments the 
-   //!reference count of the associated node pool. Never throws
-   cached_node_allocator(const cached_node_allocator &other) 
-      : mp_node_pool(other.get_node_pool()),
-        m_max_cached_nodes(other.get_max_cached_nodes())
-   {  mp_node_pool->inc_ref_count();   }
+   //!Copy constructor from other cached_node_allocator. Increments the reference 
+   //!count of the associated node pool. Never throws
+   cached_node_allocator(const cached_node_allocator &other);
 
    //!Copy constructor from related cached_node_allocator. If not present, constructs
    //!a node pool. Increments the reference count of the associated node pool.
    //!Can throw boost::interprocess::bad_alloc
    template<class T2>
    cached_node_allocator
-      (const cached_node_allocator<T2, SegmentManager, NodesPerChunk> &other)
-      : mp_node_pool(priv_get_or_create(other.get_segment_manager())),
-        m_max_cached_nodes(other.get_max_cached_nodes())
-         { }
+      (const cached_node_allocator<T2, SegmentManager, NodesPerChunk> &other);
 
    //!Destructor, removes node_pool_t from memory
    //!if its reference count reaches zero. Never throws
-   ~cached_node_allocator() 
-   {     
-      priv_deallocate_all_cached_nodes();
-      priv_destroy_if_last_link();   
-   }
+   ~cached_node_allocator();
 
    //!Returns a pointer to the node pool.
    //!Never throws
-   node_pool_t* get_node_pool() const
-   {  return detail::get_pointer(mp_node_pool);   }
+   node_pool_t* get_node_pool() const;
 
    //!Returns the segment manager.
    //!Never throws
-   segment_manager* get_segment_manager()const
-   {  return mp_node_pool->get_segment_manager();  }
-
-   //!Sets the new max cached nodes value. This can provoke deallocations
-   //!if "newmax" is less than current cached nodes. Never throws
-   void set_max_cached_nodes(std::size_t newmax)
-   {
-      m_max_cached_nodes = newmax;
-      priv_deallocate_remaining_nodes();
-   }
+   segment_manager* get_segment_manager()const;
 
-   //!Returns the max cached nodes parameter.
+   //!Returns the number of elements that could be allocated.
    //!Never throws
-   std::size_t get_max_cached_nodes() const
-      {  return m_max_cached_nodes;  }
-
-   //!Returns the number of elements that could be allocated. Never throws
-   size_type max_size() const
-      {  return this->get_segment_manager()->get_size()/sizeof(value_type);  }
+   size_type max_size() const;
 
    //!Allocate memory for an array of count elements. 
    //!Throws boost::interprocess::bad_alloc if there is not enough memory
-   pointer allocate(size_type count, cvoid_pointer hint = 0)
-   {
-      (void)hint;
-      if(count > ((size_type)-1)/sizeof(value_type))
-         throw bad_alloc();
-      typedef detail::shared_node_pool
-               <SegmentManager, mutex_type, sizeof(T), NodesPerChunk>   node_pool_t;
-      
-      void * ret;
-      
-      if(count == 1){
-         //If don't have any cached node, we have to get a new list of free nodes from the pool
-         if(m_cached_nodes.empty()){
-            mp_node_pool->allocate_nodes(m_max_cached_nodes/2, m_cached_nodes);
-         }
-         ret = &m_cached_nodes.front();
-         m_cached_nodes.pop_front();
-      }
-      else{
-         ret = mp_node_pool->allocate(count);
-      }   
-      return pointer(static_cast<T*>(ret));
-   }
+   pointer allocate(size_type count, cvoid_pointer hint = 0);
 
    //!Deallocate allocated memory.
    //!Never throws
-   void deallocate(const pointer &ptr, size_type count)
-   {
-      typedef detail::shared_node_pool
-               <SegmentManager, mutex_type, sizeof(T), NodesPerChunk>   node_pool_t;
-
-      if(count == 1){
-         //Check if cache is full
-         if(m_cached_nodes.size() >= m_max_cached_nodes){
-            //This only occurs if this allocator deallocate memory allocated
-            //with other equal allocator. Since the cache is full, and more 
-            //deallocations are probably coming, we'll make some room in cache
-            //in a single, efficient multi node deallocation.
-            priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
-         }
-         m_cached_nodes.push_front(*(node_t*)detail::char_ptr_cast(detail::get_pointer(ptr)));
-      }
-      else{
-         mp_node_pool->deallocate(detail::get_pointer(ptr), count);
-      }
-   }
+   void deallocate(const pointer &ptr, size_type count);
 
-   //!Swaps allocators. Does not throw. If each allocator is placed in a
-   //!different shared memory segments, the result is undefined.
-   friend void swap(self_t &alloc1, self_t &alloc2)
-   {
-      detail::do_swap(alloc1.mp_node_pool,       alloc2.mp_node_pool);
-      alloc1.m_cached_nodes.swap(alloc2.m_cached_nodes);
-      detail::do_swap(alloc1.m_max_cached_nodes, alloc2.m_max_cached_nodes);
-   }
-
-   //!Returns the cached nodes to the shared pool
-   void deallocate_cache()
-   {  this->priv_deallocate_all_cached_nodes(); }
-
-   //!Deallocates all free chunks of the pool
-   void deallocate_free_chunks()
-   {  mp_node_pool->deallocate_free_chunks();   }
+   //!Deallocates all free chunks
+   //!of the pool
+   void deallocate_free_chunks();
 
-   //These functions are obsolete. These are here to conserve
-   //backwards compatibility with containers using them...
+   //!Swaps allocators. Does not throw. If each allocator is placed in a
+   //!different memory segment, the result is undefined.
+   friend void swap(self_t &alloc1, self_t &alloc2);
 
    //!Returns address of mutable object.
    //!Never throws
-   pointer address(reference value) const
-   {  return pointer(boost::addressof(value));  }
+   pointer address(reference value) const;
 
    //!Returns address of non mutable object.
    //!Never throws
-   const_pointer address(const_reference value) const
-   {  return const_pointer(boost::addressof(value));  }
+   const_pointer address(const_reference value) const;
 
    //!Default construct an object. 
-   //!Throws if T's default constructor throws*/
-   void construct(const pointer &ptr)
-   {  new(detail::get_pointer(ptr)) value_type;  }
+   //!Throws if T's default constructor throws
+   void construct(const pointer &ptr);
 
    //!Destroys object. Throws if object's
    //!destructor throws
-   void destroy(const pointer &ptr)
-   {  BOOST_ASSERT(ptr != 0); (*ptr).~value_type();  }
+   void destroy(const pointer &ptr);
 
-   /// @cond
-   private:
-
-   //!Object function that creates the node allocator if it is not created and
-   //!increments reference count if it is already created
-   struct get_or_create_func
-   {
-      typedef detail::shared_node_pool
-               <SegmentManager, mutex_type, sizeof(T), NodesPerChunk>   node_pool_t;
-
-      //!This connects or constructs the unique instance of node_pool_t
-      //!Can throw boost::interprocess::bad_alloc
-      void operator()()
-      {
-         //Find or create the node_pool_t
-         mp_node_pool =    mp_named_alloc->template find_or_construct
-                           <node_pool_t>(unique_instance)(mp_named_alloc);
-         //If valid, increment link count
-         if(mp_node_pool != 0)
-            mp_node_pool->inc_ref_count();
-      }
-
-      //!Constructor. Initializes function
-      //!object parameters
-      get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){}
-      
-      node_pool_t      *mp_node_pool;
-      segment_manager  *mp_named_alloc;
-   };
+   //!Returns the maximum number of objects that the previously allocated memory
+   //!pointed to by p can hold. This size only works for memory allocated with
+   //!allocate, allocation_command and allocate_many.
+   size_type size(const pointer &p) const;
+
+   std::pair<pointer, bool>
+      allocation_command(allocation_type command,
+                         size_type limit_size, 
+                         size_type preferred_size,
+                         size_type &received_size, const pointer &reuse = 0);
+
+   //!Allocates num_elements buffers, each one holding elem_size elements,
+   //!in a contiguous chunk of memory. The buffers must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);
+
+   //!Allocates n_elements buffers, each one of size elem_sizes[i], in a
+   //!contiguous chunk of memory. The buffers must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);
+
+   //!Deallocates the buffers pointed to by the multiallocation iterator,
+   //!previously allocated with allocate_many(...)
+   void deallocate_many(multiallocation_iterator it);
 
-   //!Frees all cached nodes.
-   //!Never throws
-   void priv_deallocate_all_cached_nodes()
-   {  mp_node_pool->deallocate_nodes(m_cached_nodes); }
-
-   //!Frees all cached nodes at once.
-   //!Never throws
-   void priv_deallocate_remaining_nodes()
-   {
-      if(m_cached_nodes.size() > m_max_cached_nodes){
-         priv_deallocate_n_nodes(m_cached_nodes.size()-m_max_cached_nodes);
-      }
-   }
-
-   //!Frees n cached nodes at once.
-   //!Never throws
-   void priv_deallocate_n_nodes(std::size_t n)
-   {  mp_node_pool->deallocate_nodes(m_cached_nodes, n); }   
+   //!Allocates just one object. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate_one();
 
-   //!Initialization function, creates an executes atomically the 
-   //!initialization object functions. Can throw boost::interprocess::bad_alloc
-   node_pool_t *priv_get_or_create(segment_manager *named_alloc)
-   {
-      get_or_create_func func(named_alloc);
-      named_alloc->atomic_func(func);
-      return func.mp_node_pool;
-   }
+   //!Allocates num_elements elements of size == 1 (single objects).
+   //!Memory allocated with this function must be deallocated only with
+   //!deallocate_one() or deallocate_individual()
+   multiallocation_iterator allocate_individual(std::size_t num_elements);
+
+   //!Deallocates memory previously allocated with allocate_one().
+   //!Never use deallocate_one to deallocate memory allocated
+   //!with functions other than allocate_one(). Never throws
+   void deallocate_one(const pointer &p);
+
+   //!Deallocates the elements pointed to by the multiallocation iterator,
+   //!previously allocated with allocate_one() or allocate_individual().
+   //!Never throws
+   void deallocate_individual(multiallocation_iterator it);
+
+   //!Sets the new max cached nodes value. This can provoke deallocations
+   //!if "newmax" is less than current cached nodes. Never throws
+   void set_max_cached_nodes(std::size_t newmax);
 
-   //!Object function that decrements the reference count. If the count 
-   //!reaches to zero destroys the node allocator from memory. 
+   //!Returns the max cached nodes parameter.
    //!Never throws
-   struct destroy_if_last_link_func
-   {
-      typedef detail::shared_node_pool
-               <SegmentManager, mutex_type,sizeof(T), NodesPerChunk>   node_pool_t;
-
-      //!Decrements reference count and destroys the object if there is no 
-      //!more attached allocators. Never throws
-      void operator()()
-      {
-         //If not the last link return
-         if(mp_node_pool->dec_ref_count() != 0) return;
-
-         //Last link, let's destroy the segment_manager
-         mp_named_alloc->template destroy<node_pool_t>(unique_instance); 
-      }  
-
-      //!Constructor. Initializes function object
-      //!parameters
-      destroy_if_last_link_func(segment_manager    *nhdr,
-                                node_pool_t *phdr) 
-                            : mp_named_alloc(nhdr), mp_node_pool(phdr){}
-
-      segment_manager     *mp_named_alloc;     
-      node_pool_t      *mp_node_pool;
-   };
-
-   //!Destruction function, initializes and executes destruction function 
-   //!object. Never throws
-   void priv_destroy_if_last_link()
-   {
-      typedef detail::shared_node_pool
-               <SegmentManager, mutex_type,sizeof(T), NodesPerChunk>   node_pool_t;
-      //Get segment manager
-      segment_manager *segment_mngr = this->get_segment_manager();
-      //Execute destruction functor atomically
-      destroy_if_last_link_func func(segment_mngr, detail::get_pointer(mp_node_pool));
-      segment_mngr->atomic_func(func);
-   }
-
-   private:
-   node_pool_ptr  mp_node_pool;
-   cached_list_t  m_cached_nodes;
-   std::size_t    m_max_cached_nodes;
-   /// @endcond
+   std::size_t get_max_cached_nodes() const;
+   #endif
 };
 
-//!Equality test for same type of
-//!cached_node_allocator
+#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
+
+//!Equality test for same type
+//!of cached_node_allocator
 template<class T, class S, std::size_t NodesPerChunk> inline
 bool operator==(const cached_node_allocator<T, S, NodesPerChunk> &alloc1, 
-                const cached_node_allocator<T, S, NodesPerChunk> &alloc2)
-   {  return alloc1.get_node_pool() == alloc2.get_node_pool(); }
+                const cached_node_allocator<T, S, NodesPerChunk> &alloc2);
 
-//!Inequality test for same type of
-//!cached_node_allocator
+//!Inequality test for same type
+//!of cached_node_allocator
 template<class T, class S, std::size_t NodesPerChunk> inline
 bool operator!=(const cached_node_allocator<T, S, NodesPerChunk> &alloc1, 
-                const cached_node_allocator<T, S, NodesPerChunk> &alloc2)
-   {  return alloc1.get_node_pool() != alloc2.get_node_pool(); }
+                const cached_node_allocator<T, S, NodesPerChunk> &alloc2);
+
+#endif
 
 }  //namespace interprocess {
 }  //namespace boost {
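For reference, a minimal sketch of the version-2 bulk interface that both cached allocators now declare (allocate_one/deallocate_one, allocate_many/deallocate_many, allocate_individual/deallocate_individual). The segment name and sizes are illustrative, and the member typedefs used (pointer, multiallocation_iterator) are assumed to be exposed through the new cached_allocator_impl base.

//Minimal sketch, assuming a managed_shared_memory segment; names and sizes are illustrative
#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/shared_memory_object.hpp>
#include <boost/interprocess/allocators/cached_node_allocator.hpp>

using namespace boost::interprocess;

int main()
{
   shared_memory_object::remove("MySharedMemory");
   managed_shared_memory segment(create_only, "MySharedMemory", 65536);

   typedef cached_node_allocator
      <int, managed_shared_memory::segment_manager, 64> node_allocator_t;
   node_allocator_t alloc(segment.get_segment_manager());

   //Single-object interface: allocate_one/deallocate_one
   node_allocator_t::pointer p = alloc.allocate_one();
   alloc.deallocate_one(p);

   //Bulk interface: 10 buffers of 5 ints each in one call,
   //returned through a multiallocation_iterator
   node_allocator_t::multiallocation_iterator buffers = alloc.allocate_many(5, 10);
   alloc.deallocate_many(buffers);

   //Bulk single-object interface: 10 individual nodes in one call
   node_allocator_t::multiallocation_iterator nodes = alloc.allocate_individual(10);
   alloc.deallocate_individual(nodes);

   shared_memory_object::remove("MySharedMemory");
   return 0;
}

The cached_adaptive_pool above declares the same bulk members, so the calls read identically apart from the extra MaxFreeChunks and OverheadPercent template parameters.
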
Modified: trunk/boost/interprocess/allocators/detail/adaptive_node_pool.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/detail/adaptive_node_pool.hpp	(original)
+++ trunk/boost/interprocess/allocators/detail/adaptive_node_pool.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -17,20 +17,21 @@
 
 #include <boost/interprocess/detail/config_begin.hpp>
 #include <boost/interprocess/detail/workaround.hpp>
+#include <boost/interprocess/interprocess_fwd.hpp>
 #include <boost/interprocess/sync/interprocess_mutex.hpp>
 #include <boost/interprocess/detail/utilities.hpp>
 #include <boost/interprocess/detail/min_max.hpp>
 #include <boost/interprocess/detail/math_functions.hpp>
 #include <boost/interprocess/exceptions.hpp>
-#include <boost/intrusive/list.hpp>
+#include <boost/intrusive/set.hpp>
 #include <boost/intrusive/slist.hpp>
 #include <boost/math/common_factor_ct.hpp>
 #include <boost/interprocess/detail/type_traits.hpp>
 #include <boost/interprocess/allocators/detail/node_tools.hpp>
+#include <boost/interprocess/allocators/detail/allocator_common.hpp>
 #include <cstddef>
 #include <cmath>
 #include <cassert>
-#include <cassert>
 
 //!\file
 //!Describes the real adaptive pool shared by many Interprocess pool allocators
@@ -39,10 +40,6 @@
 namespace interprocess {
 namespace detail {
 
-//!Pooled shared memory allocator using an smart adaptive pool. Includes
-//!a reference count but the class does not delete itself, this is  
-//!responsibility of user classes. Node size (NodeSize) and the number of
-//!nodes allocated per chunk (NodesPerChunk) are known at compile time.
 template<class SegmentManagerBase>
 class private_adaptive_node_pool_impl
 {
@@ -56,19 +53,85 @@
    public:
    typedef typename node_slist<void_pointer>::node_t node_t;
    typedef typename node_slist<void_pointer>::node_slist_t free_nodes_t;
+   typedef typename SegmentManagerBase::multiallocation_iterator  multiallocation_iterator;
+   typedef typename SegmentManagerBase::multiallocation_chain     multiallocation_chain;
 
    private:
-   //This hook will be used to chain the memory chunks
-   typedef typename bi::make_list_base_hook
-      <bi::void_pointer<void_pointer>, bi::link_mode<bi::normal_link> >::type list_hook_t;
+   typedef typename bi::make_set_base_hook
+      < bi::void_pointer<void_pointer>
+      , bi::optimize_size<true>
+      , bi::constant_time_size<false>
+      , bi::link_mode<bi::normal_link> >::type multiset_hook_t;
+
+   struct hdr_offset_holder
+   {
+      hdr_offset_holder(std::size_t offset = 0)
+         : hdr_offset(offset)
+      {}
+      std::size_t hdr_offset;
+   };
 
    struct chunk_info_t
-      :  public list_hook_t
+      :  
+         public hdr_offset_holder,
+         public multiset_hook_t
    {
       //An intrusive list of free node from this chunk
       free_nodes_t free_nodes;
+      friend bool operator <(const chunk_info_t &l, const chunk_info_t &r)
+      {
+//      {  return l.free_nodes.size() < r.free_nodes.size();   }
+         //Let's order blocks first by free nodes and then by address
+         //so that highest address fully free chunks are deallocated.
+         //This improves returning memory to the OS (trimming).
+         const bool is_less  = l.free_nodes.size() < r.free_nodes.size();
+         const bool is_equal = l.free_nodes.size() == r.free_nodes.size();
+         return is_less || (is_equal && (&l < &r));
+      }
    };
-   typedef typename bi::make_list<chunk_info_t, bi::base_hook<list_hook_t> >::type  chunk_list_t;
+   typedef typename bi::make_multiset
+      <chunk_info_t, bi::base_hook<multiset_hook_t> >::type  chunk_multiset_t;
+   typedef typename chunk_multiset_t::iterator               chunk_iterator;
+
+   static const std::size_t MaxAlign = alignment_of<node_t>::value;
+   static const std::size_t HdrSize  = ((sizeof(chunk_info_t)-1)/MaxAlign+1)*MaxAlign;
+   static const std::size_t HdrOffsetSize = ((sizeof(hdr_offset_holder)-1)/MaxAlign+1)*MaxAlign;
+   static std::size_t calculate_alignment
+      (std::size_t overhead_percent, std::size_t real_node_size)
+   {
+      //to-do: handle real_node_size != node_size
+      const std::size_t divisor  = overhead_percent*real_node_size;
+      const std::size_t dividend = HdrOffsetSize*100;
+      std::size_t elements_per_subchunk = (dividend - 1)/divisor + 1;
+      std::size_t candidate_power_of_2 = 
+         upper_power_of_2(elements_per_subchunk*real_node_size + HdrOffsetSize);
+      bool overhead_satisfied = false;
+      while(!overhead_satisfied){
+         elements_per_subchunk = (candidate_power_of_2 - HdrOffsetSize)/real_node_size;
+         std::size_t overhead_size = candidate_power_of_2 - elements_per_subchunk*real_node_size;
+         if(overhead_size*100/candidate_power_of_2 < overhead_percent){
+            overhead_satisfied = true;
+         }
+         else{
+            candidate_power_of_2 <<= 1;
+         }
+      }
+      return candidate_power_of_2;
+   }
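A worked numeric example may help here (editorial sketch, not part of the changeset): assuming HdrOffsetSize == 16, real_node_size == 24 and overhead_percent == 2, the search above settles on 1024-byte subchunks.

//Editorial sketch of the alignment search above, replayed with assumed
//concrete numbers; HdrOffsetSize, the node size and the overhead are assumptions
#include <cstddef>

std::size_t upper_power_of_2_sketch(std::size_t n)
{  std::size_t p = 1; while(p < n) p <<= 1; return p;  }

std::size_t calculate_alignment_sketch()
{
   const std::size_t HdrOffsetSize = 16, real_node_size = 24, overhead_percent = 2;
   //Smallest node count for which the per-subchunk header alone stays
   //within the overhead limit: ceil(16*100 / (2*24)) = 34 nodes
   std::size_t elements_per_subchunk =
      (HdrOffsetSize*100 - 1)/(overhead_percent*real_node_size) + 1;
   //First power-of-two candidate: upper_power_of_2(34*24 + 16) = 1024
   std::size_t candidate =
      upper_power_of_2_sketch(elements_per_subchunk*real_node_size + HdrOffsetSize);
   //Verify the real overhead of the candidate, doubling it until it fits
   for(;;){
      elements_per_subchunk = (candidate - HdrOffsetSize)/real_node_size;        //42 nodes
      std::size_t overhead  = candidate - elements_per_subchunk*real_node_size;  //16 bytes
      if(overhead*100/candidate < overhead_percent)                              //1% < 2%
         break;
      candidate <<= 1;
   }
   return candidate;   //1024 under these assumptions
}

calculate_num_subchunks below then derives how many of these fixed-size subchunks make up one chunk holding at least the requested number of nodes.
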
+
+   static void calculate_num_subchunks
+      (std::size_t alignment, std::size_t real_node_size, std::size_t elements_per_chunk
+      ,std::size_t &num_subchunks, std::size_t &real_num_node)
+   {
+      std::size_t elements_per_subchunk = (alignment - HdrOffsetSize)/real_node_size;
+      std::size_t possible_num_subchunk = (elements_per_chunk - 1)/elements_per_subchunk + 1;
+      std::size_t hdr_subchunk_elements   = (alignment - HdrSize - SegmentManagerBase::PayloadPerAllocation)/real_node_size;
+      while(((possible_num_subchunk-1)*elements_per_subchunk + hdr_subchunk_elements) < elements_per_chunk){
+         ++possible_num_subchunk;
+      }
+      num_subchunks = possible_num_subchunk;
+      real_num_node = (possible_num_subchunk-1)*elements_per_subchunk + hdr_subchunk_elements;
+   }
 
    public:
    //!Segment manager typedef
@@ -77,26 +140,25 @@
    //!Constructor from a segment manager. Never throws
    private_adaptive_node_pool_impl
       ( segment_manager_base_type *segment_mngr_base, std::size_t node_size
-      , std::size_t nodes_per_chunk, std::size_t max_free_chunks)
-   :  m_node_size(node_size)
-   ,  m_max_free_chunks(max_free_chunks)
-   ,  m_real_node_size(lcm(m_node_size, sizeof(node_t)))
-   ,  m_header_size(min_value(get_rounded_size(sizeof(chunk_info_t), alignment_of<max_align>::value)
-                             ,get_rounded_size(sizeof(chunk_info_t), m_real_node_size)))
-      //Round the size to a power of two value.
-      //This is the total memory size (including payload) that we want to
-      //allocate from the general-purpose allocator
-   ,  m_real_chunk_alignment(upper_power_of_2(m_header_size + m_real_node_size*nodes_per_chunk))
+      , std::size_t nodes_per_chunk, std::size_t max_free_chunks
+      , unsigned char overhead_percent
+      )
+   :  m_max_free_chunks(max_free_chunks)
+   ,  m_real_node_size(lcm(node_size, std::size_t(alignment_of<node_t>::value)))
+   //Round the size to a power of two value.
+   //This is the total memory size (including payload) that we want to
+   //allocate from the general-purpose allocator
+   ,  m_real_chunk_alignment(calculate_alignment(overhead_percent, m_real_node_size))
       //This is the real number of nodes per chunk
-   ,  m_real_num_node((m_real_chunk_alignment - SegmentManagerBase::PayloadPerAllocation - m_header_size)/m_real_node_size)
+   ,  m_num_subchunks(0)
+   ,  m_real_num_node(0)
       //General purpose allocator
    ,  mp_segment_mngr_base(segment_mngr_base)
-   ,  m_chunklist()
-   ,  m_first_free_chunk(m_chunklist.end())
-      //Debug node count
-   ,  m_allocated(0)
-   ,  m_free_chunks(0)
-   {}
+   ,  m_chunk_multiset()
+   ,  m_totally_free_chunks(0)
+   {
+      calculate_num_subchunks(m_real_chunk_alignment, m_real_node_size, nodes_per_chunk, m_num_subchunks, m_real_num_node);
+   }
 
    //!Destructor. Deallocates all allocated chunks. Never throws
    ~private_adaptive_node_pool_impl()
@@ -110,60 +172,118 @@
    {  return detail::get_pointer(mp_segment_mngr_base);  }
 
    //!Allocates array of count elements. Can throw boost::interprocess::bad_alloc
-   void *allocate(std::size_t count)
+   void *allocate_node()
    {
-      std::size_t bytes = count*m_node_size;
-      if(bytes > m_real_node_size){//Normal allocation, no pooling used
-         void *addr = mp_segment_mngr_base->allocate(bytes);
-         if(!addr)   throw bad_alloc();
-         return addr;
+      priv_invariants();
+      //If there are no free nodes we allocate a new block
+      if (m_chunk_multiset.empty()){ 
+         priv_alloc_chunk(1);
       }
-      else                    //Node allocation, pooling used
-         return priv_alloc_node();
+      //We take the first free node; the multiset can't be empty here
+      return priv_take_first_node();
    }
-   
+
    //!Deallocates an array pointed by ptr. Never throws
-   void deallocate(void *ptr, std::size_t count)
+   void deallocate_node(void *pElem)
    {
-      std::size_t bytes = count*m_node_size;
-      if(bytes > m_real_node_size)//Normal allocation was used
-         mp_segment_mngr_base->deallocate(ptr);
-      else                    //Node allocation was used
-         priv_dealloc_node(ptr);
+      priv_invariants();
+      chunk_info_t *chunk_info = priv_chunk_from_node(pElem);
+      assert(chunk_info->free_nodes.size() < m_real_num_node);
+      //We put the node at the beginning of the free node list
+      node_t * to_deallocate = static_cast<node_t*>(pElem);
+      chunk_info->free_nodes.push_front(*to_deallocate);
+
+      chunk_iterator this_chunk(chunk_multiset_t::s_iterator_to(*chunk_info));
+      chunk_iterator next_chunk(this_chunk);
+      ++next_chunk;
+
+      //Cache the free nodes from the chunk
+      std::size_t this_chunk_free_nodes = this_chunk->free_nodes.size();
+
+      if(this_chunk_free_nodes == 1){
+         m_chunk_multiset.insert(m_chunk_multiset.begin(), *chunk_info);
+      }
+      else{
+         chunk_iterator next_chunk(this_chunk);
+         ++next_chunk;
+         if(next_chunk != m_chunk_multiset.end()){
+            std::size_t next_free_nodes = next_chunk->free_nodes.size();
+            if(this_chunk_free_nodes > next_free_nodes){
+               //Now move the chunk to the new position
+               m_chunk_multiset.erase(this_chunk);
+               m_chunk_multiset.insert(*chunk_info);
+            }
+         }
+      }
+      //Update free chunk count
+      if(this_chunk_free_nodes == m_real_num_node){
+         ++m_totally_free_chunks;
+         priv_deallocate_free_chunks(m_max_free_chunks);
+      }
+      priv_invariants();
    }
 
    //!Allocates a singly linked list of n nodes ending in null pointer. 
    //!can throw boost::interprocess::bad_alloc
-   void allocate_nodes(const std::size_t n, free_nodes_t &nodes)
+   void allocate_nodes(multiallocation_chain &nodes, const std::size_t n)
    {
-      std::size_t i = 0;
+      std::size_t old_node_count = nodes.size();
       try{
-         for(; i < n; ++i){
-            nodes.push_front(*priv_alloc_node());
+         priv_invariants();
+         for(std::size_t i = 0; i != n; ++i){
+            //If there are no free nodes we allocate all needed chunks
+            if (m_chunk_multiset.empty()){
+               priv_alloc_chunk(((n - i) - 1)/m_real_num_node + 1);
+            }
+            nodes.push_front(priv_take_first_node());
          }
       }
       catch(...){
-         priv_deallocate_nodes(nodes, i);
+         priv_deallocate_nodes(nodes, nodes.size());
+         priv_deallocate_free_chunks(m_max_free_chunks);
          throw;
       }
+      //remove me
+      assert((n+old_node_count) == (std::size_t)std::distance(nodes.get_it(), multiallocation_iterator()));
+      priv_invariants();
+   }
+
+   //!Allocates n nodes, pointed to by the returned multiallocation_iterator. 
+   //!Can throw boost::interprocess::bad_alloc
+   multiallocation_iterator allocate_nodes(const std::size_t n)
+   {
+      multiallocation_chain chain;
+      this->allocate_nodes(chain, n);
+      return chain.get_it();
    }
 
    //!Deallocates a linked list of nodes. Never throws
-   void deallocate_nodes(free_nodes_t &nodes)
+   void deallocate_nodes(multiallocation_chain &nodes)
    {  priv_deallocate_nodes(nodes, nodes.size());  }
 
    //!Deallocates the first n nodes of a linked list of nodes. Never throws
-   void deallocate_nodes(free_nodes_t &nodes, std::size_t n)
+   void deallocate_nodes(multiallocation_chain &nodes, std::size_t n)
    {  priv_deallocate_nodes(nodes, n);  }
 
+   //!Deallocates the nodes pointed to by the multiallocation iterator. Never throws
+   void deallocate_nodes(multiallocation_iterator it)
+   {
+      multiallocation_iterator itend;
+      while(it != itend){
+         void *addr = &*it;
+         ++it;
+         deallocate_node(addr);
+      }
+   }
+
    void deallocate_free_chunks()
    {  priv_deallocate_free_chunks(0);  }
 
    std::size_t num_free_nodes()
    {
-      typedef typename chunk_list_t::const_iterator citerator;
+      typedef typename chunk_multiset_t::const_iterator citerator;
       std::size_t count = 0;
-      citerator it (m_first_free_chunk), itend(m_chunklist.end());
+      citerator it (m_chunk_multiset.begin()), itend(m_chunk_multiset.end());
       for(; it != itend; ++it){
          count += it->free_nodes.size();
       }
@@ -172,22 +292,40 @@
 
    void swap(private_adaptive_node_pool_impl &other)
    {
+      assert(m_max_free_chunks == other.m_max_free_chunks);
+      assert(m_real_node_size == other.m_real_node_size);
+      assert(m_real_chunk_alignment == other.m_real_chunk_alignment);
+      assert(m_real_num_node == other.m_real_num_node);
       std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base);
-      m_chunklist.swap(other.m_chunklist);
-      std::swap(m_first_free_chunk, other.m_first_free_chunk);
-      std::swap(m_allocated, other.m_allocated);
-      std::swap(m_free_chunks, other.m_allocated);
+      std::swap(m_totally_free_chunks, other.m_totally_free_chunks);
+      m_chunk_multiset.swap(other.m_chunk_multiset);
    }
 
    private:
+   node_t *priv_take_first_node()
+   {
+      assert(m_chunk_multiset.begin() != m_chunk_multiset.end());
+      //We take the first free node; the multiset can't be empty here
+      free_nodes_t &free_nodes = m_chunk_multiset.begin()->free_nodes;
+      node_t *first_node = &free_nodes.front();
+      const std::size_t free_nodes_count = free_nodes.size();
+      assert(0 != free_nodes_count);
+      free_nodes.pop_front();
+      if(free_nodes_count == 1){
+         m_chunk_multiset.erase(m_chunk_multiset.begin());
+      }
+      else if(free_nodes_count == m_real_num_node){
+         --m_totally_free_chunks;
+      }
+      priv_invariants();
+      return first_node;
+   }
 
-   void priv_deallocate_nodes(free_nodes_t &nodes, const std::size_t num)
+   void priv_deallocate_nodes(multiallocation_chain &nodes, const std::size_t num)
    {
       assert(nodes.size() >= num);
       for(std::size_t i = 0; i < num; ++i){
-         node_t *to_deallocate = &nodes.front();
-         nodes.pop_front();
-         deallocate(to_deallocate, 1);
+         deallocate_node(nodes.pop_front());
       }
    }
 
@@ -197,71 +335,75 @@
    class chunk_destroyer
    {
       public:
-      chunk_destroyer(segment_manager_base_type *mngr, std::size_t real_num_node)
-         :  mngr_(mngr), m_real_num_node(real_num_node)
+      chunk_destroyer(const private_adaptive_node_pool_impl *impl)
+         :  mp_impl(impl)
       {}
 
-      void operator()(typename chunk_list_t::pointer to_deallocate)
+      void operator()(typename chunk_multiset_t::pointer to_deallocate)
       {
          std::size_t free_nodes = to_deallocate->free_nodes.size();
          (void)free_nodes;
-         assert(free_nodes == m_real_num_node);
-         mngr_->deallocate(detail::get_pointer(to_deallocate));
+         assert(free_nodes == mp_impl->m_real_num_node);
+         assert(0 == to_deallocate->hdr_offset);
+         hdr_offset_holder *hdr_off_holder = mp_impl->priv_first_subchunk_from_chunk((chunk_info_t*)detail::get_pointer(to_deallocate));
+         mp_impl->mp_segment_mngr_base->deallocate(hdr_off_holder);
       }
-      segment_manager_base_type *mngr_;
-      const std::size_t m_real_num_node;
+      const private_adaptive_node_pool_impl *mp_impl;
    };
 
    //This macro will activate invariant checking. Slow, but helpful for debugging the code.
-   //#define BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
+   #define BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
    void priv_invariants()
    #ifdef BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
    #undef BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
    {
-      typedef typename chunk_list_t::iterator chunk_iterator;
-      //We iterate though the chunk list to free the memory
-      chunk_iterator it(m_chunklist.begin()), 
-                     itend(m_chunklist.end()), to_deallocate;
-      for(++it; it != itend; ++it){
-         chunk_iterator prev(it);
-         --prev;
-         std::size_t sp = prev->free_nodes.size(),
-                     si = it->free_nodes.size();
-         assert(sp <= si);
-         (void)sp;   (void)si;
-      }
-
-      //Check that the total free nodes are correct
-      it    = m_chunklist.begin();
-      itend = m_chunklist.end();
-      std::size_t total_free = 0;
-      for(; it != itend; ++it){
-         total_free += it->free_nodes.size();
+      //We iterate through the chunk multiset to check the ordering invariant
+      chunk_iterator it(m_chunk_multiset.begin()), 
+                     itend(m_chunk_multiset.end()), to_deallocate;
+      if(it != itend){
+         for(++it; it != itend; ++it){
+            chunk_iterator prev(it);
+            --prev;
+            std::size_t sp = prev->free_nodes.size(),
+                        si = it->free_nodes.size();
+            assert(sp <= si);
+            (void)sp;   (void)si;
+         }
       }
-      assert(total_free >= m_free_chunks*m_real_num_node);
 
-      //Check that the total totally free chunks are correct
-      it    = m_chunklist.begin();
-      itend = m_chunklist.end();
-      total_free = 0;
-      for(; it != itend; ++it){
-         total_free += it->free_nodes.size() == m_real_num_node;
-      }
-      assert(total_free >= m_free_chunks);
-      
-      //The chunk pointed by m_first_free_chunk should point
-      //to end or to a non-empty chunk
-      if(m_first_free_chunk != m_chunklist.end()){
-         std::size_t s = m_first_free_chunk->free_nodes.size();
-         assert(s != 0);
+      {
+         //Check that the total free nodes are correct
+         it    = m_chunk_multiset.begin();
+         itend = m_chunk_multiset.end();
+         std::size_t total_free_nodes = 0;
+         for(; it != itend; ++it){
+            total_free_nodes += it->free_nodes.size();
+         }
+         assert(total_free_nodes >= m_totally_free_chunks*m_real_num_node);
       }
 
-      //All previous nodes of m_first_free_chunk should be 0
-      it    = m_chunklist.begin();
-      itend = m_first_free_chunk;
+      {
+         //Check that the total totally free chunks are correct
+         it    = m_chunk_multiset.begin();
+         itend = m_chunk_multiset.end();
+         std::size_t total_free_chunks = 0;
+         for(; it != itend; ++it){
+            total_free_chunks += (it->free_nodes.size() == m_real_num_node);
+         }
+         assert(total_free_chunks == m_totally_free_chunks);
+      }
+      {
+      //Check that header offsets are correct
+      it = m_chunk_multiset.begin();
       for(; it != itend; ++it){
-         std::size_t s = it->free_nodes.size();
-         assert(s == 0);
+         hdr_offset_holder *hdr_off_holder = priv_first_subchunk_from_chunk(&*it);
+         for(std::size_t i = 0, max = m_num_subchunks; i < max; ++i){
+            assert(hdr_off_holder->hdr_offset == std::size_t((char*)&*it - (char*)hdr_off_holder));
+            assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1)));
+            assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1)));
+            hdr_off_holder = (hdr_offset_holder *)((char*)hdr_off_holder + m_real_chunk_alignment);
+         }
+      }
       }
    }
    #else
@@ -271,165 +413,136 @@
    //!Deallocates all used memory. Never throws
    void priv_clear()
    {
-      //Check for memory leaks
-      assert(m_allocated==0);
-      priv_invariants();
-      m_first_free_chunk = m_chunklist.end();
-      m_chunklist.clear_and_dispose
-         (chunk_destroyer(detail::get_pointer(mp_segment_mngr_base), m_real_num_node));
-      m_free_chunks = 0;
-   }
-
-   chunk_info_t *priv_chunk_from_node(void *node)
-   {
-      return (chunk_info_t *)((std::size_t)node & std::size_t(~(m_real_chunk_alignment - 1)));
-   }
-
-   //!Allocates one node, using the adaptive pool algorithm.
-   //!Never throws
-   node_t *priv_alloc_node()
-   {
-      priv_invariants();
-      //If there are no free nodes we allocate a new block
-      if (m_first_free_chunk == m_chunklist.end()){ 
-         priv_alloc_chunk();
-         --m_first_free_chunk;
-      }
-      //We take the first free node since m_first_free_chunk can't be end()
-      chunk_info_t &chunk_info = *m_first_free_chunk;
-      assert(!chunk_info.free_nodes.empty());
-      node_t *first_node = &chunk_info.free_nodes.front();
-      if(chunk_info.free_nodes.size() == 1){
-         ++m_first_free_chunk;
-      }
-      else if(chunk_info.free_nodes.size() == m_real_num_node){
-         --m_free_chunks;
-      }
-      chunk_info.free_nodes.pop_front();
-      ++m_allocated;
-      priv_invariants();
-      return detail::get_pointer(first_node);
-   }
-
-   //!Deallocates one node, using the adaptive pool algorithm.
-   //!Never throws
-   void priv_dealloc_node(void *pElem)
-   {
-      typedef typename chunk_list_t::iterator chunk_iterator;
-      priv_invariants();
-      chunk_info_t *chunk_info = priv_chunk_from_node(pElem);
-      assert(chunk_info->free_nodes.size() < m_real_num_node);
-      //We put the node at the beginning of the free node list
-      node_t * to_deallocate = static_cast<node_t*>(pElem);
-      chunk_info->free_nodes.push_front(*to_deallocate);
-      chunk_iterator this_chunk(chunk_list_t::s_iterator_to(*chunk_info));
-      chunk_iterator next_chunk(this_chunk);
-      ++next_chunk;
-
-      //If this chunk has more free nodes than the next ones,
-      //we have to move the chunk in the list to maintain it ordered.
-      //Check if we have to move it
-      while(next_chunk != m_chunklist.end() &&
-            this_chunk->free_nodes.size() > next_chunk->free_nodes.size()){
-         ++next_chunk;
-      }
-      //Check if the chunk must be moved
-      if(++chunk_iterator(this_chunk) != next_chunk){
-         //Update m_first_free_chunk iterator if it was pointing to this_chunk
-         if(m_first_free_chunk == this_chunk){
-            ++m_first_free_chunk;
-         }
-         //Update m_first_free_chunk if the moved chunk crosses the empty boundary
-         else if(this_chunk->free_nodes.size() == 1){
-            m_first_free_chunk = chunk_list_t::s_iterator_to(*chunk_info);
-         }
-         //Now move the chunk to the new position
-         m_chunklist.erase(this_chunk);
-         m_chunklist.insert(next_chunk, *chunk_info);
-      }
-      //Update m_first_free_chunk if the chunk crosses the empty boundary
-      else if(this_chunk->free_nodes.size() == 1){
-         --m_first_free_chunk;
-      }
-   
-      if(this_chunk->free_nodes.size() == m_real_num_node){
-         ++m_free_chunks;
+      #ifndef NDEBUG
+      chunk_iterator it    = m_chunk_multiset.begin();
+      chunk_iterator itend = m_chunk_multiset.end();
+      std::size_t num_free_chunks = 0;
+      for(; it != itend; ++it){
+         //Check for memory leak
+         assert(it->free_nodes.size() == m_real_num_node);
+         ++num_free_chunks;
       }
-
-      assert(m_allocated>0);
-      --m_allocated;
-      priv_invariants();
-      priv_deallocate_free_chunks(m_max_free_chunks);
+      assert(num_free_chunks == m_totally_free_chunks);
+      #endif
       priv_invariants();
+      m_chunk_multiset.clear_and_dispose
+         (chunk_destroyer(this));
+      m_totally_free_chunks = 0;
+   }
+
+   chunk_info_t *priv_chunk_from_node(void *node) const
+   {
+      hdr_offset_holder *hdr_off_holder =
+         (hdr_offset_holder*)((std::size_t)node & std::size_t(~(m_real_chunk_alignment - 1)));
+      assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1)));
+      assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1)));
+      chunk_info_t *chunk = (chunk_info_t *)(((char*)hdr_off_holder) + hdr_off_holder->hdr_offset);
+      assert(chunk->hdr_offset == 0);
+      return chunk;
+   }
+
+   hdr_offset_holder *priv_first_subchunk_from_chunk(chunk_info_t *chunk) const
+   {
+      hdr_offset_holder *hdr_off_holder = (hdr_offset_holder*)
+            (((char*)chunk) - (m_num_subchunks-1)*m_real_chunk_alignment);
+      assert(hdr_off_holder->hdr_offset == std::size_t((char*)chunk - (char*)hdr_off_holder));
+      assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1)));
+      assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1)));
+      return hdr_off_holder;
    }
 
    void priv_deallocate_free_chunks(std::size_t max_free_chunks)
    {
-      typedef typename chunk_list_t::iterator chunk_iterator;
+      priv_invariants();
       //Now check if we've reached the free nodes limit
       //and check if we have free chunks. If so, deallocate as much
       //as we can to stay below the limit
-      while(m_free_chunks > max_free_chunks &&
-            m_chunklist.back().free_nodes.size() == m_real_num_node){
-         chunk_iterator it(--m_chunklist.end());
-         if(it == m_first_free_chunk)
-            ++m_first_free_chunk; //m_first_free_chunk is now equal to end()
-         m_chunklist.erase_and_dispose(it, chunk_destroyer(detail::get_pointer(mp_segment_mngr_base),m_real_num_node));
-         --m_free_chunks;
+      for( chunk_iterator itend = m_chunk_multiset.end()
+         ; m_totally_free_chunks > max_free_chunks
+         ; --m_totally_free_chunks
+         ){
+         assert(!m_chunk_multiset.empty());
+         chunk_iterator it = itend;
+         --it;
+         std::size_t num_nodes = it->free_nodes.size();
+         assert(num_nodes == m_real_num_node);
+         (void)num_nodes;
+         m_chunk_multiset.erase_and_dispose
+            (it, chunk_destroyer(this));
       }
    }
 
-   //!Allocates a chunk of nodes. Can throw boost::interprocess::bad_alloc
-   void priv_alloc_chunk()
-   {
-      //We allocate a new NodeBlock and put it as first
-      //element in the free Node list
-      std::size_t real_chunk_size = m_real_chunk_alignment - SegmentManagerBase::PayloadPerAllocation;
-      char *pNode = detail::char_ptr_cast
-         (mp_segment_mngr_base->allocate_aligned(real_chunk_size, m_real_chunk_alignment));
-      if(!pNode)   throw bad_alloc();
-      chunk_info_t *c_info = new(pNode)chunk_info_t;
-      m_chunklist.push_back(*c_info);
-      
-      pNode += m_header_size;
-      //We initialize all Nodes in Node Block to insert 
-      //them in the free Node list
-      for(std::size_t i = 0; i < m_real_num_node; ++i){
-         c_info->free_nodes.push_front(*new (pNode) node_t);
-         pNode   += m_real_node_size;
+   //!Allocates n chunks of nodes. Can throw boost::interprocess::bad_alloc
+   void priv_alloc_chunk(std::size_t n)
+   {
+      std::size_t real_chunk_size = m_real_chunk_alignment*m_num_subchunks - SegmentManagerBase::PayloadPerAllocation;
+      std::size_t elements_per_subchunk = (m_real_chunk_alignment - HdrOffsetSize)/m_real_node_size;
+      std::size_t hdr_subchunk_elements   = (m_real_chunk_alignment - HdrSize - SegmentManagerBase::PayloadPerAllocation)/m_real_node_size;
+
+      for(std::size_t i = 0; i != n; ++i){
+         //We allocate a new NodeBlock and insert it as the last
+         //element of the tree
+         char *mem_address = detail::char_ptr_cast
+            (mp_segment_mngr_base->allocate_aligned(real_chunk_size, m_real_chunk_alignment));
+         if(!mem_address)   throw bad_alloc();
+         ++m_totally_free_chunks;
+
+         //First initialize header information on the last subchunk
+         char *hdr_addr = mem_address + m_real_chunk_alignment*(m_num_subchunks-1);
+         chunk_info_t *c_info = new(hdr_addr)chunk_info_t;
+         //Some structural checks
+         assert(static_cast<void*>(&static_cast<hdr_offset_holder*>(c_info)->hdr_offset) ==
+                static_cast<void*>(c_info));
+         typename free_nodes_t::iterator prev_insert_pos = c_info->free_nodes.before_begin();
+         for( std::size_t subchunk = 0, maxsubchunk = m_num_subchunks - 1
+            ; subchunk < maxsubchunk
+            ; ++subchunk, mem_address += m_real_chunk_alignment){
+            //Initialize header offset mark
+            new(mem_address) hdr_offset_holder(std::size_t(hdr_addr - mem_address));
+            char *pNode = mem_address + HdrOffsetSize;
+            for(std::size_t i = 0; i < elements_per_subchunk; ++i){
+               prev_insert_pos = c_info->free_nodes.insert_after(prev_insert_pos, *new (pNode) node_t);
+               pNode   += m_real_node_size;
+            }
+         }
+         {
+            char *pNode = hdr_addr + HdrSize;
+            //We initialize all Nodes in Node Block to insert 
+            //them in the free Node list
+            for(std::size_t i = 0; i < hdr_subchunk_elements; ++i){
+               prev_insert_pos = c_info->free_nodes.insert_after(prev_insert_pos, *new (pNode) node_t);
+               pNode   += m_real_node_size;
+            }
+         }
+         //Insert the chunk once its free node list has been fully built
+         m_chunk_multiset.insert(m_chunk_multiset.end(), *c_info);
       }
-      ++m_free_chunks;
    }
 
    private:
    typedef typename pointer_to_other
       <void_pointer, segment_manager_base_type>::type   segment_mngr_base_ptr_t;
 
-   const std::size_t m_node_size;
    const std::size_t m_max_free_chunks;
    const std::size_t m_real_node_size;
-   const std::size_t m_header_size;
    //Round the size to a power of two value.
    //This is the total memory size (including payload) that we want to
    //allocate from the general-purpose allocator
    const std::size_t m_real_chunk_alignment;
+   std::size_t m_num_subchunks;
    //This is the real number of nodes per chunk
-   const std::size_t m_real_num_node;
+   //const
+   std::size_t m_real_num_node;
    segment_mngr_base_ptr_t                mp_segment_mngr_base;//Segment manager
-   chunk_list_t                           m_chunklist;         //Intrusive chunk list
-   typename chunk_list_t::iterator        m_first_free_chunk;  //Iterator to the active chunk
-   std::size_t                            m_allocated;         //Used nodes for debugging
-   std::size_t                            m_free_chunks;       //Free chunks
+   chunk_multiset_t                       m_chunk_multiset;    //Intrusive multiset of chunks ordered by free node count
+   std::size_t                            m_totally_free_chunks;       //Number of completely free chunks
 };
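// A minimal sketch (not from the patch) of the header-offset arithmetic used by
// priv_chunk_from_node() and priv_first_subchunk_from_chunk() above. Each chunk is
// split into subchunks aligned to m_real_chunk_alignment; every subchunk starts with
// a hdr_offset_holder whose hdr_offset points to the chunk header stored in the last
// subchunk. All names below are illustrative.

#include <cassert>
#include <cstddef>

struct hdr_offset_holder_sketch { std::size_t hdr_offset; };

inline void *chunk_header_from_node_sketch(void *node, std::size_t chunk_alignment)
{
   //Mask the node address down to the start of its aligned subchunk
   char *subchunk = (char *)((std::size_t)node & ~(chunk_alignment - 1));
   hdr_offset_holder_sketch *holder = (hdr_offset_holder_sketch *)subchunk;
   //The stored offset is always a multiple of the alignment
   assert((holder->hdr_offset & (chunk_alignment - 1)) == 0);
   //Jump forward to the chunk header placed in the last subchunk
   return subchunk + holder->hdr_offset;
}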
 
-//!Pooled shared memory allocator using an smart adaptive pool. Includes
-//!a reference count but the class does not delete itself, this is  
-//!responsibility of user classes. Node size (NodeSize) and the number of
-//!nodes allocated per chunk (NodesPerChunk) are known at compile time.
 template< class SegmentManager
         , std::size_t NodeSize
         , std::size_t NodesPerChunk
         , std::size_t MaxFreeChunks
+        , unsigned char OverheadPercent
         >
 class private_adaptive_node_pool
    :  public private_adaptive_node_pool_impl
@@ -448,8 +561,8 @@
    static const std::size_t nodes_per_chunk = NodesPerChunk;
 
    //!Constructor from a segment manager. Never throws
-   private_adaptive_node_pool(segment_manager *segmeng_mngr)
-      :  base_t(segmeng_mngr, NodeSize, NodesPerChunk, MaxFreeChunks)
+   private_adaptive_node_pool(segment_manager *segment_mngr)
+      :  base_t(segment_mngr, NodeSize, NodesPerChunk, MaxFreeChunks, OverheadPercent)
    {}
 
    //!Returns the segment manager. Never throws
@@ -462,117 +575,25 @@
 //!responsibility of user classes. Node size (NodeSize) and the number of
 //!nodes allocated per chunk (NodesPerChunk) are known at compile time
 template< class SegmentManager
-        , class Mutex
         , std::size_t NodeSize
         , std::size_t NodesPerChunk
         , std::size_t MaxFreeChunks
+        , unsigned char OverheadPercent
         >
 class shared_adaptive_node_pool 
-   : public private_adaptive_node_pool 
-      <SegmentManager, NodeSize, NodesPerChunk, MaxFreeChunks> 
+   :  public detail::shared_pool_impl
+      < private_adaptive_node_pool
+         <SegmentManager, NodeSize, NodesPerChunk, MaxFreeChunks, OverheadPercent>
+      >
 {
- private:
-   typedef typename SegmentManager::void_pointer void_pointer;
-   typedef private_adaptive_node_pool
-      <SegmentManager, 
-       NodeSize, NodesPerChunk, MaxFreeChunks> private_node_allocator_t;
- public:
-   //!Segment manager typedef
-   typedef SegmentManager   segment_manager;
-   typedef typename private_node_allocator_t::free_nodes_t  free_nodes_t;
-
-   //!Constructor from a segment manager. Never throws
-   shared_adaptive_node_pool(segment_manager *segment_mgnr)
-   : private_node_allocator_t(segment_mgnr){}
-
-   //!Destructor. Deallocates all allocated chunks. Never throws
-   ~shared_adaptive_node_pool()
+   typedef detail::shared_pool_impl
+      < private_adaptive_node_pool
+         <SegmentManager, NodeSize, NodesPerChunk, MaxFreeChunks, OverheadPercent>
+      > base_t;
+   public:
+   shared_adaptive_node_pool(SegmentManager *segment_mgnr)
+      : base_t(segment_mgnr)
    {}
-
-   //!Allocates array of count elements. Can throw boost::interprocess::bad_alloc
-   void *allocate(std::size_t count)
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      return private_node_allocator_t::allocate(count);
-   }
-   
-   //!Deallocates an array pointed by ptr. Never throws
-   void deallocate(void *ptr, std::size_t count)
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      private_node_allocator_t::deallocate(ptr, count);
-   }
-
-   //!Allocates a singly linked list of n nodes ending in null pointer.
-   //!can throw boost::interprocess::bad_alloc
-   void allocate_nodes(std::size_t n, free_nodes_t &nodes)
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      return private_node_allocator_t::allocate_nodes(n, nodes);
-   }
-
-   //!Deallocates a linked list of nodes ending in null pointer. Never throws
-   void deallocate_nodes(free_nodes_t &nodes, std::size_t num)
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      private_node_allocator_t::deallocate_nodes(nodes, num);
-   }
-
-   //!Deallocates a linked list of nodes ending in null pointer. Never throws
-   void deallocate_nodes(free_nodes_t &nodes)
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      private_node_allocator_t::deallocate_nodes(nodes);
-   }
-
-   //!Deallocates all the free chunks of memory. Never throws
-   void deallocate_free_chunks()
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      private_node_allocator_t::deallocate_free_chunks();
-   }
-
-   //!Increments internal reference count and returns new count. Never throws
-   std::size_t inc_ref_count()
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      return ++m_header.m_usecount;
-   }
-
-   //!Decrements internal reference count and returns new count. Never throws
-   std::size_t dec_ref_count()
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      assert(m_header.m_usecount > 0);
-      return --m_header.m_usecount;
-   }
-
-   private:
-   //!This struct includes needed data and derives from
-   //!interprocess_mutex to allow EBO when using null_mutex
-   struct header_t : Mutex
-   {
-      std::size_t m_usecount;    //Number of attached allocators
-
-      header_t()
-      :  m_usecount(0) {}
-   } m_header;
 };
 
 }  //namespace detail {
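For orientation, a minimal usage sketch of the reworked pool interface above; the
segment manager typedef and the NodeSize/NodesPerChunk/MaxFreeChunks/OverheadPercent
values are illustrative assumptions, not taken from this patch:

   #include <boost/interprocess/managed_shared_memory.hpp>
   #include <boost/interprocess/allocators/detail/adaptive_node_pool.hpp>

   typedef boost::interprocess::managed_shared_memory::segment_manager segment_manager_t;
   //Template arguments: SegmentManager, NodeSize, NodesPerChunk, MaxFreeChunks, OverheadPercent
   typedef boost::interprocess::detail::private_adaptive_node_pool
      <segment_manager_t, 32, 64, 2, 5> pool_t;

   void pool_usage_sketch(segment_manager_t *mngr)
   {
      pool_t pool(mngr);
      void *node = pool.allocate_node();   //one node from the per-size pool
      pool.deallocate_node(node);
      pool.deallocate_free_chunks();       //return completely free chunks to the segment manager
   }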
Added: trunk/boost/interprocess/allocators/detail/allocator_common.hpp
==============================================================================
--- (empty file)
+++ trunk/boost/interprocess/allocators/detail/allocator_common.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -0,0 +1,760 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2008. Distributed under the Boost
+// Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/interprocess for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#ifndef BOOST_INTERPROCESS_DETAIL_NODE_ALLOCATOR_COMMON_HPP
+#define BOOST_INTERPROCESS_DETAIL_NODE_ALLOCATOR_COMMON_HPP
+
+#include <boost/interprocess/detail/config_begin.hpp>
+#include <boost/interprocess/detail/workaround.hpp>
+#include <boost/interprocess/segment_manager.hpp>
+#include <boost/interprocess/interprocess_fwd.hpp>
+#include <boost/interprocess/detail/utilities.hpp> //pointer_to_other, get_pointer
+#include <utility>   //std::pair
+#include <boost/utility/addressof.hpp> //boost::addressof
+#include <boost/assert.hpp>   //BOOST_ASSERT
+#include <boost/interprocess/exceptions.hpp> //bad_alloc
+#include <boost/interprocess/sync/scoped_lock.hpp> //scoped_lock
+#include <boost/interprocess/allocators/allocation_type.hpp> //allocation_type
+#include <algorithm> //std::swap
+
+
+namespace boost {
+namespace interprocess {
+namespace detail {
+
+//!Function object that finds or creates the unique node pool instance and
+//!increments its reference count
+template<class NodePool>
+struct get_or_create_node_pool_func
+{
+   
+   //!This connects or constructs the unique instance of node_pool_t
+   //!Can throw boost::interprocess::bad_alloc
+   void operator()()
+   {
+      //Find or create the node_pool_t
+      mp_node_pool =    mp_segment_manager->template find_or_construct
+                        <NodePool>(unique_instance)(mp_segment_manager);
+      //If valid, increment link count
+      if(mp_node_pool != 0)
+         mp_node_pool->inc_ref_count();
+   }
+
+   //!Constructor. Initializes function
+   //!object parameters
+   get_or_create_node_pool_func(typename NodePool::segment_manager *mngr)
+      : mp_segment_manager(mngr){}
+   
+   NodePool                            *mp_node_pool;
+   typename NodePool::segment_manager  *mp_segment_manager;
+};
+
+template<class NodePool>
+inline NodePool *get_or_create_node_pool(typename NodePool::segment_manager *mgnr)
+{
+   detail::get_or_create_node_pool_func<NodePool> func(mgnr);
+   mgnr->atomic_func(func);
+   return func.mp_node_pool;
+}
+
+//!Function object that decrements the reference count. If the count
+//!reaches zero, it destroys the node pool.
+//!Never throws
+template<class NodePool>
+struct destroy_if_last_link_func
+{
+   //!Decrements the reference count and destroys the object if there are no
+   //!more attached allocators. Never throws
+   void operator()()
+   {
+      //If not the last link return
+      if(mp_node_pool->dec_ref_count() != 0) return;
+
+      //Last link, destroy the node pool
+      mp_node_pool->get_segment_manager()->template destroy<NodePool>(unique_instance); 
+   }  
+
+   //!Constructor. Initializes function
+   //!object parameters
+   destroy_if_last_link_func(NodePool *pool) 
+      : mp_node_pool(pool)
+   {}
+
+   NodePool                           *mp_node_pool;
+};
+
+//!Destruction function, initializes and executes destruction function 
+//!object. Never throws
+template<class NodePool>
+inline void destroy_node_pool_if_last_link(NodePool *pool)
+{
+   //Get segment manager
+   typename NodePool::segment_manager *mngr = pool->get_segment_manager();
+   //Execute destruction functor atomically
+   destroy_if_last_link_func<NodePool> func(pool);
+   mngr->atomic_func(func);
+}
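// A minimal sketch (not from the patch) of the lifetime protocol the two function
// objects above implement, written out linearly. In the real code both steps run
// inside segment_manager::atomic_func() so the lookup/construction and the reference
// count update happen atomically. 'Pool' stands for any NodePool-like type.

template<class Pool>
Pool *attach_shared_pool_sketch(typename Pool::segment_manager *mngr)
{
   //Every allocator in the segment attaches to the same unique pool instance
   Pool *pool = mngr->template find_or_construct<Pool>(unique_instance)(mngr);
   pool->inc_ref_count();   //one reference per attached allocator
   return pool;
}

template<class Pool>
void detach_shared_pool_sketch(Pool *pool)
{
   //The last detached allocator destroys the shared pool
   if(pool->dec_ref_count() == 0)
      pool->get_segment_manager()->template destroy<Pool>(unique_instance);
}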
+
+template<class NodePool>
+class cache_impl
+{
+   typedef typename NodePool::segment_manager::
+      void_pointer                                    void_pointer;
+   typedef typename pointer_to_other
+      <void_pointer, NodePool>::type                  node_pool_ptr;
+   typedef typename NodePool::multiallocation_chain   multiallocation_chain;
+   node_pool_ptr           mp_node_pool;
+   multiallocation_chain   m_cached_nodes;
+   std::size_t             m_max_cached_nodes;
+
+   public:
+   typedef typename NodePool::multiallocation_iterator   multiallocation_iterator;
+   typedef typename NodePool::segment_manager            segment_manager;
+
+   cache_impl(segment_manager *segment_mngr, std::size_t max_cached_nodes)
+      : mp_node_pool(get_or_create_node_pool<NodePool>(segment_mngr))
+      , m_max_cached_nodes(max_cached_nodes)
+   {}
+
+   cache_impl(const cache_impl &other)
+      : mp_node_pool(other.get_node_pool())
+      , m_max_cached_nodes(other.get_max_cached_nodes())
+   {
+      mp_node_pool->inc_ref_count();
+   }
+
+   ~cache_impl()
+   {
+      this->deallocate_all_cached_nodes();
+      detail::destroy_node_pool_if_last_link(detail::get_pointer(mp_node_pool));   
+   }
+
+   NodePool *get_node_pool() const
+   {  return detail::get_pointer(mp_node_pool); }
+
+   segment_manager *get_segment_manager() const
+   {  return mp_node_pool->get_segment_manager(); }
+
+   std::size_t get_max_cached_nodes() const
+   {  return m_max_cached_nodes; }
+
+   void *cached_allocation()
+   {
+      //If we don't have any cached nodes, get a new list of free nodes from the pool
+      if(m_cached_nodes.empty()){
+         mp_node_pool->allocate_nodes(m_cached_nodes, m_max_cached_nodes/2);
+      }
+      return m_cached_nodes.pop_front();
+   }
+
+   multiallocation_iterator cached_allocation(std::size_t n)
+   {
+      multiallocation_chain chain;
+      std::size_t count = n;
+      BOOST_TRY{
+         //First take as many nodes as possible from the cache
+         while(!m_cached_nodes.empty() && count--){
+            void *ret = m_cached_nodes.pop_front();
+            chain.push_back(ret);
+         }
+
+         if(chain.size() != n){
+            mp_node_pool->allocate_nodes(chain, n - chain.size());
+         }
+         assert(chain.size() == n);
+         chain.splice_back(m_cached_nodes);
+         return multiallocation_iterator(chain.get_it());
+      }
+      BOOST_CATCH(...){
+         this->cached_deallocation(multiallocation_iterator(chain.get_it()));
+         throw;
+      }
+      BOOST_CATCH_END
+   }
+
+   void cached_deallocation(void *ptr)
+   {
+      //Check if cache is full
+      if(m_cached_nodes.size() >= m_max_cached_nodes){
+         //This only occurs if this allocator deallocates memory allocated
+         //with another equal allocator. Since the cache is full and more
+         //deallocations are probably coming, we make some room in the cache
+         //with a single, efficient multi-node deallocation.
+         this->priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
+      }
+      m_cached_nodes.push_front(ptr);
+   }
+
+   void cached_deallocation(multiallocation_iterator it)
+   {
+      multiallocation_iterator itend;
+
+      while(it != itend){
+         void *addr = &*it;
+         ++it;
+         m_cached_nodes.push_front(addr);
+      }
+
+      //Check if cache is full
+      if(m_cached_nodes.size() >= m_max_cached_nodes){
+         //This only occurs if this allocator deallocates memory allocated
+         //with another equal allocator. Since the cache is full and more
+         //deallocations are probably coming, we make some room in the cache
+         //with a single, efficient multi-node deallocation.
+         this->priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
+      }
+   }
+
+   //!Sets the new max cached nodes value. This can provoke deallocations
+   //!if "newmax" is less than current cached nodes. Never throws
+   void set_max_cached_nodes(std::size_t newmax)
+   {
+      m_max_cached_nodes = newmax;
+      this->priv_deallocate_remaining_nodes();
+   }
+
+   //!Frees all cached nodes.
+   //!Never throws
+   void deallocate_all_cached_nodes()
+   {
+      if(m_cached_nodes.empty()) return;
+      mp_node_pool->deallocate_nodes(m_cached_nodes);
+   }
+
+   private:
+   //!Frees all cached nodes at once.
+   //!Never throws
+   void priv_deallocate_remaining_nodes()
+   {
+      if(m_cached_nodes.size() > m_max_cached_nodes){
+         priv_deallocate_n_nodes(m_cached_nodes.size()-m_max_cached_nodes);
+      }
+   }
+
+   //!Frees n cached nodes at once. Never throws
+   void priv_deallocate_n_nodes(std::size_t n)
+   {
+      //Deallocate all new linked list at once
+      mp_node_pool->deallocate_nodes(m_cached_nodes, n);
+   }
+};
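// A minimal sketch (not from the patch) of the trimming policy cached_deallocation()
// follows above: when the cache reaches its maximum it is cut back to roughly half its
// capacity in one pass, so bursts of frees do not hit the shared pool once per node.
// Modeled with std::list instead of the intrusive multiallocation_chain.

#include <cstddef>
#include <list>

inline void trim_cache_sketch(std::list<void*> &cached, std::size_t max_cached, void *freed)
{
   if(cached.size() >= max_cached){
      //Make room down to half the capacity; the real code hands these
      //nodes back to the node pool in a single multi-node deallocation
      std::size_t to_return = cached.size() - max_cached/2;
      while(to_return--)
         cached.pop_back();
   }
   cached.push_front(freed);
}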
+
+template<class Derived, class T, class SegmentManager>
+class array_allocation_impl
+{
+   const Derived *derived() const
+   {  return static_cast<const Derived*>(this); }
+   Derived *derived()
+   {  return static_cast<Derived*>(this); }
+
+   typedef typename SegmentManager::void_pointer         void_pointer;
+
+   public:
+   typedef typename detail::
+      pointer_to_other<void_pointer, T>::type            pointer;
+   typedef typename detail::
+      pointer_to_other<void_pointer, const T>::type      const_pointer;
+   typedef T                                             value_type;
+   typedef typename detail::add_reference
+                     <value_type>::type                  reference;
+   typedef typename detail::add_reference
+                     <const value_type>::type            const_reference;
+   typedef std::size_t                                   size_type;
+   typedef std::ptrdiff_t                                difference_type;
+   typedef transform_iterator
+      < typename SegmentManager::
+         multiallocation_iterator
+      , detail::cast_functor <T> >          multiallocation_iterator;
+   typedef typename SegmentManager::
+      multiallocation_chain                 multiallocation_chain;
+
+   public:
+   //!Returns the maximum number of objects the previously allocated memory
+   //!pointed by p can hold. This size only works for memory allocated with
+   //!allocate, allocation_command and allocate_many.
+   size_type size(const pointer &p) const
+   {  
+      return (size_type)this->derived()->get_segment_manager()->size(detail::get_pointer(p))/sizeof(T);
+   }
+
+   std::pair<pointer, bool>
+      allocation_command(allocation_type command,
+                         size_type limit_size, 
+                         size_type preferred_size,
+                         size_type &received_size, const pointer &reuse = 0)
+   {
+      return this->derived()->get_segment_manager()->allocation_command
+         (command, limit_size, preferred_size, received_size, detail::get_pointer(reuse));
+   }
+
+   //!Allocates num_elements elements, each one of size elem_size, in a
+   //!contiguous chunk of memory. The elements must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements)
+   {
+      return multiallocation_iterator
+         (this->derived()->get_segment_manager()->allocate_many(sizeof(T)*elem_size, num_elements));
+   }
+
+   //!Allocates n_elements elements, each one of size elem_sizes[i], in a
+   //!contiguous chunk of memory. The elements must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements)
+   {
+      return multiallocation_iterator
+         (this->derived()->get_segment_manager()->allocate_many(elem_sizes, n_elements, sizeof(T)));
+   }
+
+   //!Deallocates the elements pointed by the multiallocation iterator,
+   //!which must have been obtained from a previous call
+   //!to allocate_many(...)
+   void deallocate_many(multiallocation_iterator it)
+   {  return this->derived()->get_segment_manager()->deallocate_many(it.base()); }
+
+   //!Returns the number of elements that could be
+   //!allocated. Never throws
+   size_type max_size() const
+   {  return this->derived()->get_segment_manager()->get_size()/sizeof(T);  }
+
+   //!Returns address of mutable object.
+   //!Never throws
+   pointer address(reference value) const
+   {  return pointer(boost::addressof(value));  }
+
+   //!Returns address of non mutable object.
+   //!Never throws
+   const_pointer address(const_reference value) const
+   {  return const_pointer(boost::addressof(value));  }
+
+   //!Default construct an object. 
+   //!Throws if T's default constructor throws
+   void construct(const pointer &ptr)
+   {  new(detail::get_pointer(ptr)) value_type;  }
+
+   //!Destroys object. Throws if object's
+   //!destructor throws
+   void destroy(const pointer &ptr)
+   {  BOOST_ASSERT(ptr != 0); (*ptr).~value_type();  }
+};
+
+
+template<class Derived, unsigned int Version, class T, class SegmentManager>
+class node_pool_allocation_impl
+   :  public array_allocation_impl
+      < Derived
+      , T
+      , SegmentManager>
+{
+   const Derived *derived() const
+   {  return static_cast<const Derived*>(this); }
+   Derived *derived()
+   {  return static_cast<Derived*>(this); }
+
+   typedef typename SegmentManager::void_pointer         void_pointer;
+   typedef typename detail::
+      pointer_to_other<void_pointer, const void>::type   cvoid_pointer;
+
+   public:
+   typedef typename detail::
+      pointer_to_other<void_pointer, T>::type            pointer;
+   typedef typename detail::
+      pointer_to_other<void_pointer, const T>::type      const_pointer;
+   typedef T                                             value_type;
+   typedef typename detail::add_reference
+                     <value_type>::type                  reference;
+   typedef typename detail::add_reference
+                     <const value_type>::type            const_reference;
+   typedef std::size_t                                   size_type;
+   typedef std::ptrdiff_t                                difference_type;
+   typedef transform_iterator
+      < typename SegmentManager::
+         multiallocation_iterator
+      , detail::cast_functor <T> >          multiallocation_iterator;
+   typedef typename SegmentManager::
+      multiallocation_chain                 multiallocation_chain;
+
+   public:
+   //!Allocate memory for an array of count elements. 
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate(size_type count, cvoid_pointer hint = 0)
+   {
+      (void)hint;
+      if(count > this->max_size())
+         throw bad_alloc();
+      else if(Version == 1 && count == 1)
+         return pointer(static_cast<value_type*>(this->derived()->get_node_pool()->allocate_node()));
+      else
+         return pointer(static_cast<value_type*>
+            (this->derived()->get_node_pool()->get_segment_manager()->allocate(sizeof(T)*count)));
+   }
+
+   //!Deallocate allocated memory. Never throws
+   void deallocate(const pointer &ptr, size_type count)
+   {
+      (void)count;
+      if(Version == 1 && count == 1)
+         this->derived()->get_node_pool()->deallocate_node(detail::get_pointer(ptr));
+      else
+         this->derived()->get_node_pool()->get_segment_manager()->deallocate(detail::get_pointer(ptr));
+   }
+
+   //!Allocates just one object. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate_one()
+   {  return pointer(static_cast<value_type*>(this->derived()->get_node_pool()->allocate_node()));   }
+
+   //!Allocates num_elements elements of size == 1 and returns a
+   //!multiallocation iterator to them. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   multiallocation_iterator allocate_individual(std::size_t num_elements)
+   {  return multiallocation_iterator(this->derived()->get_node_pool()->allocate_nodes(num_elements));   }
+
+   //!Deallocates memory previously allocated with allocate_one().
+   //!You should never use deallocate_one to deallocate memory allocated
+   //!with other functions different from allocate_one(). Never throws
+   void deallocate_one(const pointer &p)
+   {  this->derived()->get_node_pool()->deallocate_node(detail::get_pointer(p)); }
+
+   //!Deallocates the nodes pointed by the multiallocation iterator,
+   //!which must have been obtained from allocate_individual().
+   //!Never throws
+   void deallocate_individual(multiallocation_iterator it)
+   {  this->derived()->get_node_pool()->deallocate_nodes(it.base());   }
+
+   //!Deallocates all free chunks of the pool
+   void deallocate_free_chunks()
+   {  this->derived()->get_node_pool()->deallocate_free_chunks();  }
+};
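// A minimal sketch (not from the patch) of how an allocator deriving from the mixin
// above is meant to be used: array requests go through allocate()/deallocate() and end
// up in the segment manager, while single objects use allocate_one()/deallocate_one(),
// which always take the node pool fast path. 'allocator_type' is a placeholder.

template<class allocator_type>
void version2_usage_sketch(allocator_type &a)
{
   typedef typename allocator_type::pointer pointer;
   pointer arr = a.allocate(10);      //array allocation: segment manager
   a.deallocate(arr, 10);
   pointer one = a.allocate_one();    //single object: node pool
   a.deallocate_one(one);
}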
+
+template<class T, class NodePool, unsigned int Version>
+class cached_allocator_impl
+   :  public array_allocation_impl
+         <cached_allocator_impl<T, NodePool, Version>, T, typename NodePool::segment_manager>
+{
+   cached_allocator_impl & operator=(const cached_allocator_impl& other);
+   typedef array_allocation_impl
+         < cached_allocator_impl
+            <T, NodePool, Version>
+         , T
+         , typename NodePool::segment_manager> base_t;
+
+   public:
+   typedef NodePool                                      node_pool_t;
+   typedef typename NodePool::segment_manager            segment_manager;
+   typedef typename segment_manager::void_pointer        void_pointer;
+   typedef typename detail::
+      pointer_to_other<void_pointer, const void>::type   cvoid_pointer;
+   typedef typename base_t::pointer                      pointer;
+   typedef typename base_t::size_type                    size_type;
+   typedef typename base_t::multiallocation_iterator     multiallocation_iterator;
+   typedef typename base_t::multiallocation_chain        multiallocation_chain;
+   typedef typename base_t::value_type                   value_type;
+
+   public:
+   enum { DEFAULT_MAX_CACHED_NODES = 64 };
+
+   cached_allocator_impl(segment_manager *segment_mngr, std::size_t max_cached_nodes)
+      : m_cache(segment_mngr, max_cached_nodes)
+   {}
+
+   cached_allocator_impl(const cached_allocator_impl &other)
+      : m_cache(other.m_cache)
+   {}
+
+   //!Copy constructor from a related cached_allocator_impl. If not present, constructs
+   //!a node pool. Increments the reference count of the associated node pool.
+   //!Can throw boost::interprocess::bad_alloc
+   template<class T2, class NodePool2>
+   cached_allocator_impl
+      (const cached_allocator_impl
+         <T2, NodePool2, Version> &other)
+      : m_cache(other.get_segment_manager(), other.get_max_cached_nodes())
+   {}
+
+   //!Returns a pointer to the node pool.
+   //!Never throws
+   node_pool_t* get_node_pool() const
+   {  return m_cache.get_node_pool();   }
+
+   //!Returns the segment manager.
+   //!Never throws
+   segment_manager* get_segment_manager()const
+   {  return m_cache.get_segment_manager();   }
+
+   //!Sets the new max cached nodes value. This can provoke deallocations
+   //!if "newmax" is less than current cached nodes. Never throws
+   void set_max_cached_nodes(std::size_t newmax)
+   {  m_cache.set_max_cached_nodes(newmax);   }
+
+   //!Returns the max cached nodes parameter.
+   //!Never throws
+   std::size_t get_max_cached_nodes() const
+   {  return m_cache.get_max_cached_nodes();   }
+
+   //!Allocate memory for an array of count elements. 
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate(size_type count, cvoid_pointer hint = 0)
+   {
+      (void)hint;
+      void * ret;
+      if(count > this->max_size())
+         throw bad_alloc();
+      else if(Version == 1 && count == 1){
+         ret = m_cache.cached_allocation();
+      }
+      else{
+         ret = this->get_segment_manager()->allocate(sizeof(T)*count);
+      }   
+      return pointer(static_cast<T*>(ret));
+   }
+
+   //!Deallocate allocated memory. Never throws
+   void deallocate(const pointer &ptr, size_type count)
+   {
+      (void)count;
+      if(Version == 1 && count == 1){
+         m_cache.cached_deallocation(detail::get_pointer(ptr));
+      }
+      else{
+         this->get_segment_manager()->deallocate(detail::get_pointer(ptr));
+      }
+   }
+
+   //!Allocates just one object. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate_one()
+   {  return pointer(static_cast<value_type*>(this->m_cache.cached_allocation()));   }
+
+   //!Allocates num_elements elements of size == 1 and returns a
+   //!multiallocation iterator to them. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   multiallocation_iterator allocate_individual(std::size_t num_elements)
+   {  return multiallocation_iterator(this->m_cache.cached_allocation(num_elements));   }
+
+   //!Deallocates memory previously allocated with allocate_one().
+   //!You should never use deallocate_one to deallocate memory allocated
+   //!with other functions different from allocate_one(). Never throws
+   void deallocate_one(const pointer &p)
+   {  this->m_cache.cached_deallocation(detail::get_pointer(p)); }
+
+   //!Deallocates the nodes pointed by the multiallocation iterator,
+   //!which must have been obtained from allocate_individual().
+   //!Never throws
+   void deallocate_individual(multiallocation_iterator it)
+   {  m_cache.cached_deallocation(it.base());   }
+
+   //!Deallocates all free chunks of the pool
+   void deallocate_free_chunks()
+   {  m_cache.get_node_pool()->deallocate_free_chunks();   }
+
+   //!Swaps allocators. Does not throw. If the allocators are placed in
+   //!different shared memory segments, the result is undefined.
+   friend void swap(cached_allocator_impl &alloc1, cached_allocator_impl &alloc2)
+   {
+      detail::do_swap(alloc1.mp_node_pool,       alloc2.mp_node_pool);
+      alloc1.m_cached_nodes.swap(alloc2.m_cached_nodes);
+      detail::do_swap(alloc1.m_max_cached_nodes, alloc2.m_max_cached_nodes);
+   }
+
+   void deallocate_cache()
+   {  m_cache.deallocate_all_cached_nodes(); }
+
+   /// @cond
+   private:
+   cache_impl<node_pool_t> m_cache;
+};
+
+//!Equality test for same type of
+//!cached_allocator_impl
+template<class T, class N, unsigned int V> inline
+bool operator==(const cached_allocator_impl<T, N, V> &alloc1, 
+                const cached_allocator_impl<T, N, V> &alloc2)
+   {  return alloc1.get_node_pool() == alloc2.get_node_pool(); }
+
+//!Inequality test for same type of
+//!cached_allocator_impl
+template<class T, class N, unsigned int V> inline
+bool operator!=(const cached_allocator_impl<T, N, V> &alloc1, 
+                const cached_allocator_impl<T, N, V> &alloc2)
+   {  return alloc1.get_node_pool() != alloc2.get_node_pool(); }
+
+
+//!Wraps a private node pool adding a mutex to serialize every operation and a
+//!reference count. The class does not delete itself; that is the
+//!responsibility of the user classes.
+template<class private_node_allocator_t>
+class shared_pool_impl
+   : public private_node_allocator_t
+{
+ public:
+   //!Segment manager typedef
+   typedef typename private_node_allocator_t::segment_manager segment_manager;
+   typedef typename private_node_allocator_t::
+      multiallocation_iterator                  multiallocation_iterator;
+   typedef typename private_node_allocator_t::
+      multiallocation_chain                     multiallocation_chain;
+
+ private:
+   typedef typename segment_manager::mutex_family::mutex_type mutex_type;
+
+ public:
+   //!Constructor from a segment manager. Never throws
+   shared_pool_impl(segment_manager *segment_mngr)
+      : private_node_allocator_t(segment_mngr)
+   {}
+
+   //!Destructor. Deallocates all allocated chunks. Never throws
+   ~shared_pool_impl()
+   {}
+
+   //!Allocates one node. Can throw boost::interprocess::bad_alloc
+   void *allocate_node()
+   {
+      //-----------------------
+      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+      //-----------------------
+      return private_node_allocator_t::allocate_node();
+   }
+   
+   //!Deallocates the node pointed by ptr. Never throws
+   void deallocate_node(void *ptr)
+   {
+      //-----------------------
+      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+      //-----------------------
+      private_node_allocator_t::deallocate_node(ptr);
+   }
+
+   //!Allocates a singly linked list of n nodes and pushes them in the chain.
+   //!Can throw boost::interprocess::bad_alloc
+   void allocate_nodes(multiallocation_chain &nodes, std::size_t n)
+   {
+      //-----------------------
+      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+      //-----------------------
+      return private_node_allocator_t::allocate_nodes(nodes, n);
+   }
+
+   //!Allocates n nodes, pointed by the multiallocation_iterator. 
+   //!Can throw boost::interprocess::bad_alloc
+   multiallocation_iterator allocate_nodes(const std::size_t n)
+   {
+      //-----------------------
+      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+      //-----------------------
+      return private_node_allocator_t::allocate_nodes(n);
+   }
+
+   //!Deallocates the first num nodes of the chain. Never throws
+   void deallocate_nodes(multiallocation_chain &nodes, std::size_t num)
+   {
+      //-----------------------
+      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+      //-----------------------
+      private_node_allocator_t::deallocate_nodes(nodes, num);
+   }
+
+   //!Deallocates a linked list of nodes ending in null pointer. Never throws
+   void deallocate_nodes(multiallocation_chain &nodes)
+   {
+      //-----------------------
+      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+      //-----------------------
+      private_node_allocator_t::deallocate_nodes(nodes);
+   }
+
+   //!Deallocates the nodes pointed by the multiallocation iterator. Never throws
+   void deallocate_nodes(multiallocation_iterator it)
+   {
+      //-----------------------
+      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+      //-----------------------
+      private_node_allocator_t::deallocate_nodes(it);
+   }
+
+   //!Deallocates all the free chunks of memory. Never throws
+   void deallocate_free_chunks()
+   {
+      //-----------------------
+      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+      //-----------------------
+      private_node_allocator_t::deallocate_free_chunks();
+   }
+
+   //!Deallocates all used memory from the common pool.
+   //!Precondition: all nodes allocated from this pool should
+   //!already be deallocated. Otherwise, undefined behavior. Never throws
+   void purge_chunks()
+   {
+      //-----------------------
+      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+      //-----------------------
+      private_node_allocator_t::purge_chunks();
+   }
+
+   //!Increments internal reference count and returns new count. Never throws
+   std::size_t inc_ref_count()
+   {
+      //-----------------------
+      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+      //-----------------------
+      return ++m_header.m_usecount;
+   }
+
+   //!Decrements internal reference count and returns new count. Never throws
+   std::size_t dec_ref_count()
+   {
+      //-----------------------
+      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+      //-----------------------
+      assert(m_header.m_usecount > 0);
+      return --m_header.m_usecount;
+   }
+
+   private:
+   //!This struct includes needed data and derives from
+   //!the mutex type to allow EBO when using null_mutex
+   struct header_t : mutex_type
+   {
+      std::size_t m_usecount;    //Number of attached allocators
+
+      header_t()
+      :  m_usecount(0) {}
+   } m_header;
+};
+
+}  //namespace detail {
+}  //namespace interprocess {
+}  //namespace boost {
+
+#include <boost/interprocess/detail/config_end.hpp>
+
+#endif   //#ifndef BOOST_INTERPROCESS_DETAIL_NODE_ALLOCATOR_COMMON_HPP
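To see the locking layout shared_pool_impl relies on in isolation (a header_t deriving
from the mutex type so that null_mutex adds no size thanks to EBO, plus a scoped_lock
around every forwarded call), here is a hedged sketch with illustrative names, not code
from the patch:

   #include <boost/interprocess/sync/scoped_lock.hpp>
   #include <boost/interprocess/sync/interprocess_mutex.hpp>
   #include <cstddef>

   template<class mutex_t>
   class locked_wrapper_sketch
   {
      //Deriving from the mutex lets an empty null_mutex add no size (EBO)
      struct header_t : mutex_t
      {
         std::size_t m_usecount;
         header_t() : m_usecount(0) {}
      } m_header;

      public:
      std::size_t inc_ref_count()
      {
         //Every public operation takes the same lock before forwarding
         boost::interprocess::scoped_lock<mutex_t> guard(m_header);
         return ++m_header.m_usecount;
      }
   };

   //With a real interprocess mutex the wrapper is usable across processes
   typedef locked_wrapper_sketch<boost::interprocess::interprocess_mutex> shared_wrapper_sketch;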
Modified: trunk/boost/interprocess/allocators/detail/node_pool.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/detail/node_pool.hpp	(original)
+++ trunk/boost/interprocess/allocators/detail/node_pool.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -24,7 +24,9 @@
 #include <boost/intrusive/slist.hpp>
 #include <boost/math/common_factor_ct.hpp>
 #include <boost/interprocess/detail/math_functions.hpp>
+#include <boost/interprocess/detail/type_traits.hpp>
 #include <boost/interprocess/allocators/detail/node_tools.hpp>
+#include <boost/interprocess/allocators/detail/allocator_common.hpp>
 #include <cstddef>
 #include <functional>
 #include <algorithm>
@@ -51,10 +53,14 @@
    typedef typename node_slist<void_pointer>::slist_hook_t        slist_hook_t;
    typedef typename node_slist<void_pointer>::node_t              node_t;
    typedef typename node_slist<void_pointer>::node_slist_t        free_nodes_t;
+   typedef typename SegmentManagerBase::multiallocation_iterator  multiallocation_iterator;
+   typedef typename SegmentManagerBase::multiallocation_chain     multiallocation_chain;
 
    private:
-   typedef typename bi::make_slist < node_t, bi::base_hook<slist_hook_t>
-                     , bi::constant_time_size<false> >::type      chunkslist_t;
+   typedef typename bi::make_slist
+      < node_t, bi::base_hook<slist_hook_t>
+      , bi::linear<true>
+      , bi::constant_time_size<false> >::type      chunkslist_t;
    public:
 
    //!Segment manager typedef
@@ -62,10 +68,8 @@
 
    //!Constructor from a segment manager. Never throws
    private_node_pool_impl(segment_manager_base_type *segment_mngr_base, std::size_t node_size, std::size_t nodes_per_chunk)
-   :  m_node_size(node_size)
-   ,  m_nodes_per_chunk(nodes_per_chunk)
-   ,  m_real_node_size(detail::lcm(node_size, sizeof(node_t)))
-   ,  m_block_size(detail::get_rounded_size(m_real_node_size*m_nodes_per_chunk, sizeof(node_t)))
+   :  m_nodes_per_chunk(nodes_per_chunk)
+   ,  m_real_node_size(detail::lcm(node_size, std::size_t(alignment_of<node_t>::value)))
       //General purpose allocator
    ,  mp_segment_mngr_base(segment_mngr_base)
    ,  m_chunklist()
@@ -76,7 +80,7 @@
 
    //!Destructor. Deallocates all allocated chunks. Never throws
    ~private_node_pool_impl()
-   {  priv_clear();  }
+   {  this->purge_chunks();  }
 
    std::size_t get_real_num_node() const
    {  return m_nodes_per_chunk; }
@@ -86,116 +90,73 @@
    {  return detail::get_pointer(mp_segment_mngr_base);  }
 
    //!Allocates array of count elements. Can throw boost::interprocess::bad_alloc
-   void *allocate(std::size_t count)
-   {
-      std::size_t bytes = count*m_node_size;
-      if(bytes > m_real_node_size){//Normal allocation, no pooling used
-         void *addr = mp_segment_mngr_base->allocate(bytes);
-         if(!addr)   throw bad_alloc();
-         return addr;
-      }
-      else                    //Node allocation, pooling used
-         return priv_alloc_node();
-   }
+   void *allocate_node()
+   {  return priv_alloc_node();  }
    
    //!Deallocates an array pointed by ptr. Never throws
-   void deallocate(void *ptr, std::size_t count)
+   void deallocate_node(void *ptr)
+   {  priv_dealloc_node(ptr); }
+
+   //!Allocates a singly linked list of n nodes ending in null pointer and pushes them in the chain. 
+   //!Can throw boost::interprocess::bad_alloc
+   void allocate_nodes(multiallocation_chain &nodes, const std::size_t n)
    {
-      std::size_t bytes = count*m_node_size;
-      if(bytes > m_real_node_size)//Normal allocation was used
-         mp_segment_mngr_base->deallocate(ptr);
-      else                    //Node allocation was used
-         priv_dealloc_node(ptr);
+      std::size_t i = 0;
+      try{
+         for(; i < n; ++i){
+            nodes.push_front(priv_alloc_node());
+         }
+      }
+      catch(...){
+         this->deallocate_nodes(nodes, i);
+         throw;
+      }
    }
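// A minimal sketch (not from the patch) of the rollback idiom used above: if the i-th
// node allocation throws, the i nodes already collected are handed back before
// re-throwing, so a failed bulk request leaks nothing. alloc_one/free_one are
// placeholder functors.

#include <cstddef>
#include <vector>

template<class AllocOne, class FreeOne>
void allocate_n_or_nothing_sketch(std::size_t n, std::vector<void*> &out,
                                  AllocOne alloc_one, FreeOne free_one)
{
   out.reserve(out.size() + n);   //avoid push_back throwing mid-way
   std::size_t i = 0;
   try{
      for(; i < n; ++i)
         out.push_back(alloc_one());
   }
   catch(...){
      while(i--){ free_one(out.back()); out.pop_back(); }   //undo partial work
      throw;
   }
}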
 
-   //!Allocates a singly linked list of n nodes ending in null pointer. 
+   //!Allocates a singly linked list of n nodes ending in null pointer 
    //!can throw boost::interprocess::bad_alloc
-   void allocate_nodes(const std::size_t n, free_nodes_t &nodes)
+   multiallocation_iterator allocate_nodes(const std::size_t n)
    {
+      multiallocation_chain nodes;
       std::size_t i = 0;
       try{
          for(; i < n; ++i){
-            nodes.push_front(*priv_alloc_node());
+            nodes.push_front(priv_alloc_node());
          }
       }
       catch(...){
-         priv_deallocate_nodes(nodes, i);
+         this->deallocate_nodes(nodes, i);
          throw;
       }
+      return nodes.get_it();
    }
 
    //!Deallocates a linked list of nodes. Never throws
-   void deallocate_nodes(free_nodes_t &nodes)
-   {  priv_deallocate_nodes(nodes, nodes.size());  }
+   void deallocate_nodes(multiallocation_chain &nodes)
+   {  this->deallocate_nodes(nodes.get_it());  }
 
    //!Deallocates the first n nodes of a linked list of nodes. Never throws
-   void deallocate_nodes(free_nodes_t &nodes, std::size_t n)
-   {  priv_deallocate_nodes(nodes, n);  }
-
-   //!Deallocates all the free chunks of memory. Never throws
-   void deallocate_free_chunks()
-   {  priv_deallocate_free_chunks();   }
-
-   std::size_t num_free_nodes()
-   {  return m_freelist.size();  }
-
-   void swap(private_node_pool_impl &other)
-   {
-      std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base);
-      m_chunklist.swap(other.m_chunklist);
-      m_freelist.swap(other.m_freelist);
-      std::swap(m_allocated, other.m_allocated);
-   }
-
-   private:
-
-   void priv_deallocate_nodes(free_nodes_t &nodes, const std::size_t num)
+   void deallocate_nodes(multiallocation_chain &nodes, std::size_t num)
    {
       assert(nodes.size() >= num);
       for(std::size_t i = 0; i < num; ++i){
-         node_t *to_deallocate = &nodes.front();
-         nodes.pop_front();
-         deallocate(to_deallocate, 1);
+         deallocate_node(nodes.pop_front());
       }
    }
 
-   struct push_in_list
-   {
-      push_in_list(free_nodes_t &l, typename free_nodes_t::iterator &it)
-         :  slist_(l), last_it_(it)
-      {}
-      
-      void operator()(typename free_nodes_t::pointer p) const
-      {
-         slist_.push_front(*p);
-         if(slist_.size() == 1){ //Cache last element
-            ++last_it_ = slist_.begin();
-         }
-      }
-
-      private:
-      free_nodes_t &slist_;
-      typename free_nodes_t::iterator &last_it_;
-   };
-
-   struct is_between
-      :  std::unary_function<typename free_nodes_t::value_type, bool>
+   //!Deallocates the nodes pointed by the multiallocation iterator. Never throws
+   void deallocate_nodes(multiallocation_iterator it)
    {
-      is_between(const void *addr, std::size_t size)
-         :  beg_((const char *)addr), end_(beg_+size)
-      {}
-      
-      bool operator()(typename free_nodes_t::const_reference v) const
-      {
-         return (beg_ <= (const char *)&v && 
-                 end_ >  (const char *)&v);
+      multiallocation_iterator itend;
+      while(it != itend){
+         void *addr = &*it;
+         ++it;
+         deallocate_node(addr);
       }
-      private:
-      const char *      beg_;
-      const char *      end_;
-   };
+   }
 
-   void priv_deallocate_free_chunks()
+   //!Deallocates all the free chunks of memory. Never throws
+   void deallocate_free_chunks()
    {
       typedef typename free_nodes_t::iterator nodelist_iterator;
       typename chunkslist_t::iterator bit(m_chunklist.before_begin()),
@@ -204,16 +165,19 @@
       free_nodes_t backup_list;
       nodelist_iterator backup_list_last = backup_list.before_begin();
 
+      //Execute the algorithm and get an iterator to the last value
+      std::size_t blocksize = detail::get_rounded_size
+         (m_real_node_size*m_nodes_per_chunk, alignment_of<node_t>::value);
+
       while(it != itend){
          //Collect all the nodes from the chunk pointed by it
          //and push them in the list
          free_nodes_t free_nodes;
          nodelist_iterator last_it = free_nodes.before_begin();
-         const void *addr = get_chunk_from_hook(&*it);
+         const void *addr = get_chunk_from_hook(&*it, blocksize);
 
-         //Execute the algorithm and get an iterator to the last value
          m_freelist.remove_and_dispose_if
-            (is_between(addr, m_block_size), push_in_list(free_nodes, last_it));
+            (is_between(addr, blocksize), push_in_list(free_nodes, last_it));
 
          //If the number of nodes is equal to m_nodes_per_chunk
          //this means that the block can be deallocated
@@ -253,23 +217,76 @@
          , backup_list.size());
    }
 
-   //!Deallocates all used memory. Never throws
-   void priv_clear()
+   std::size_t num_free_nodes()
+   {  return m_freelist.size();  }
+
+   //!Deallocates all used memory. Precondition: all nodes allocated from this pool should
+   //!already be deallocated. Otherwise, undefined behaviour. Never throws
+   void purge_chunks()
    {
       //check for memory leaks
       assert(m_allocated==0);
-
+      std::size_t blocksize = detail::get_rounded_size
+         (m_real_node_size*m_nodes_per_chunk, alignment_of<node_t>::value);
       typename chunkslist_t::iterator
          it(m_chunklist.begin()), itend(m_chunklist.end()), aux;
 
       //We iterate though the NodeBlock list to free the memory
       while(!m_chunklist.empty()){
-         void *addr = get_chunk_from_hook(&m_chunklist.front());
+         void *addr = get_chunk_from_hook(&m_chunklist.front(), blocksize);
          m_chunklist.pop_front();
          mp_segment_mngr_base->deallocate(addr);
-      }  
+      }
+      //Just clear free node list
+      m_freelist.clear();
    }
 
+   void swap(private_node_pool_impl &other)
+   {
+      std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base);
+      m_chunklist.swap(other.m_chunklist);
+      m_freelist.swap(other.m_freelist);
+      std::swap(m_allocated, other.m_allocated);
+   }
+
+   private:
+
+   struct push_in_list
+   {
+      push_in_list(free_nodes_t &l, typename free_nodes_t::iterator &it)
+         :  slist_(l), last_it_(it)
+      {}
+      
+      void operator()(typename free_nodes_t::pointer p) const
+      {
+         slist_.push_front(*p);
+         if(slist_.size() == 1){ //Cache last element
+            ++last_it_ = slist_.begin();
+         }
+      }
+
+      private:
+      free_nodes_t &slist_;
+      typename free_nodes_t::iterator &last_it_;
+   };
+
+   struct is_between
+      :  std::unary_function<typename free_nodes_t::value_type, bool>
+   {
+      is_between(const void *addr, std::size_t size)
+         :  beg_((const char *)addr), end_(beg_+size)
+      {}
+      
+      bool operator()(typename free_nodes_t::const_reference v) const
+      {
+         return (beg_ <= (const char *)&v && 
+                 end_ >  (const char *)&v);
+      }
+      private:
+      const char *      beg_;
+      const char *      end_;
+   };
+
    //!Allocates one node, using single segregated storage algorithm.
    //!Never throws
    node_t *priv_alloc_node()
@@ -300,10 +317,13 @@
    {
       //We allocate a new NodeBlock and put it as first
       //element in the free Node list
-      char *pNode = detail::char_ptr_cast(mp_segment_mngr_base->allocate(m_block_size + sizeof(node_t)));
+      std::size_t blocksize = 
+         detail::get_rounded_size(m_real_node_size*m_nodes_per_chunk, alignment_of<node_t>::value);
+      char *pNode = detail::char_ptr_cast
+         (mp_segment_mngr_base->allocate(blocksize + sizeof(node_t)));
       if(!pNode)  throw bad_alloc();
       char *pBlock = pNode;
-      m_chunklist.push_front(get_chunk_hook(pBlock));
+      m_chunklist.push_front(get_chunk_hook(pBlock, blocksize));
 
       //We initialize all Nodes in Node Block to insert 
       //them in the free Node list
@@ -314,26 +334,24 @@
 
    private:
    //!Returns a reference to the chunk hook placed in the end of the chunk
-   inline node_t & get_chunk_hook (void *chunk)
+   static inline node_t & get_chunk_hook (void *chunk, std::size_t blocksize)
    {  
       return *static_cast<node_t*>(
-               static_cast<void*>((detail::char_ptr_cast(chunk)+m_block_size)));  
+               static_cast<void*>((detail::char_ptr_cast(chunk) + blocksize)));  
    }
 
    //!Returns the starting address of the chunk reference to the chunk hook placed in the end of the chunk
-   inline void *get_chunk_from_hook (node_t *hook)
+   inline void *get_chunk_from_hook (node_t *hook, std::size_t blocksize)
    {  
-      return static_cast<void*>((detail::char_ptr_cast(hook) - m_block_size));  
+      return static_cast<void*>((detail::char_ptr_cast(hook) - blocksize));  
    }
 
    private:
    typedef typename pointer_to_other
       <void_pointer, segment_manager_base_type>::type   segment_mngr_base_ptr_t;
 
-   const std::size_t m_node_size;
    const std::size_t m_nodes_per_chunk;
    const std::size_t m_real_node_size;
-   const std::size_t m_block_size;
    segment_mngr_base_ptr_t mp_segment_mngr_base;   //Segment manager
    chunkslist_t      m_chunklist;      //Intrusive container of chunks
   free_nodes_t      m_freelist;       //Intrusive container of free nodes
@@ -376,114 +394,28 @@
 //!a reference count but the class does not delete itself, this is  
 //!responsibility of user classes. Node size (NodeSize) and the number of
 //!nodes allocated per chunk (NodesPerChunk) are known at compile time
 template< class SegmentManager
-        , class Mutex
         , std::size_t NodeSize
         , std::size_t NodesPerChunk
         >
 class shared_node_pool 
-   : public private_node_pool<SegmentManager, NodeSize, NodesPerChunk> 
+   :  public detail::shared_pool_impl
+      < private_node_pool
+         <SegmentManager, NodeSize, NodesPerChunk>
+      >
 {
-   private:
-   typedef typename SegmentManager::void_pointer void_pointer;
-   typedef private_node_pool
-      <SegmentManager, NodeSize, NodesPerChunk> private_node_allocator_t;
-
+   typedef detail::shared_pool_impl
+      < private_node_pool
+         <SegmentManager, NodeSize, NodesPerChunk>
+      > base_t;
    public:
-   typedef SegmentManager   segment_manager;
-   typedef typename private_node_allocator_t::free_nodes_t  free_nodes_t;
-
-   //!Constructor from a segment manager. Never throws
-   shared_node_pool(segment_manager *segment_mngr)
-   : private_node_allocator_t(segment_mngr){}
-
-   //!Destructor. Deallocates all allocated chunks. Never throws
-   ~shared_node_pool()
+   shared_node_pool(SegmentManager *segment_mgnr)
+      : base_t(segment_mgnr)
    {}
-
-   //!Allocates array of count elements. Can throw boost::interprocess::bad_alloc
-   void *allocate(std::size_t count)
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      return private_node_allocator_t::allocate(count);
-   }
-   
-   //!Deallocates an array pointed by ptr. Never throws
-   void deallocate(void *ptr, std::size_t count)
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      private_node_allocator_t::deallocate(ptr, count);
-   }
-
-   //!Allocates a singly linked list of n nodes ending in null pointer. 
-   //!can throw boost::interprocess::bad_alloc
-   void allocate_nodes(const std::size_t n, free_nodes_t &nodes)
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      private_node_allocator_t::allocate_nodes(n, nodes);
-   }
-
-   //!Deallocates a linked list of nodes ending in null pointer. Never throws
-   void deallocate_nodes(free_nodes_t &nodes, std::size_t n)
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      private_node_allocator_t::deallocate_nodes(nodes, n);
-   }
-
-   void deallocate_nodes(free_nodes_t &nodes)
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      private_node_allocator_t::deallocate_nodes(nodes);
-   }
-
-   //!Deallocates all the free chunks of memory. Never throws
-   void deallocate_free_chunks()
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      private_node_allocator_t::deallocate_free_chunks();
-   }
-
-   //!Increments internal reference count and returns new count. Never throws
-   std::size_t inc_ref_count()
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      return ++m_header.m_usecount;
-   }
-
-   //!Decrements internal reference count and returns new count. Never throws
-   std::size_t dec_ref_count()
-   {
-      //-----------------------
-      boost::interprocess::scoped_lock<Mutex> guard(m_header);
-      //-----------------------
-      assert(m_header.m_usecount > 0);
-      return --m_header.m_usecount;
-   }
-
-   private:
-   //!This struct includes needed data and derives from
-   //!interprocess_mutex to allow EBO when using null_mutex
-   struct header_t : Mutex
-   {
-      std::size_t m_usecount;    //Number of attached allocators
-
-      header_t()
-      :  m_usecount(0) {}
-   } m_header;
 };
 
 }  //namespace detail {
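
For context, a minimal sketch (not part of the patch) of how the reworked pool interface is meant to be driven: single nodes go through allocate_node()/deallocate_node(), bulk requests go through a null-terminated multiallocation chain, and deallocate_free_chunks() returns completely free chunks to the segment manager (the destructor now does the final cleanup via purge_chunks()). The pool type, its construction and the nested multiallocation_chain typedef are assumptions here, mirroring the names used above.

template<class NodePool>
void exercise_pool(NodePool &pool, std::size_t n)
{
   //Single-node path
   void *node = pool.allocate_node();
   pool.deallocate_node(node);

   //Bulk path: n nodes are pushed into a linear (null-terminated) chain
   typename NodePool::multiallocation_chain chain;
   pool.allocate_nodes(chain, n);
   //... hand the nodes to a container ...
   pool.deallocate_nodes(chain);

   //Return completely free chunks to the segment manager
   pool.deallocate_free_chunks();
}
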
Modified: trunk/boost/interprocess/allocators/detail/node_tools.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/detail/node_tools.hpp	(original)
+++ trunk/boost/interprocess/allocators/detail/node_tools.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -37,7 +37,8 @@
       :  public slist_hook_t
    {};
 
-   typedef typename bi::make_slist<node_t, bi::base_hook<slist_hook_t> >::type node_slist_t;
+   typedef typename bi::make_slist
+      <node_t, bi::linear<true>, bi::base_hook<slist_hook_t> >::type node_slist_t;
 };
 
 }  //namespace detail {
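
The only functional change here is the bi::linear<true> option: the free-node slist becomes a linear (null-terminated) list instead of a circular one, which is what lets the pools pass chains of nodes around as plain pointers. Below is a stand-alone sketch of an equivalent configuration, using a raw-pointer hook for brevity (the real node_slist_t parameterizes the hook on the segment manager's void_pointer):

#include <boost/intrusive/slist.hpp>
#include <cassert>

namespace bi = boost::intrusive;

struct node : public bi::slist_base_hook<> {};

//linear<true>: the last node points to null rather than back to the first one
typedef bi::make_slist
   < node
   , bi::linear<true>
   , bi::base_hook< bi::slist_base_hook<> >
   >::type linear_node_slist;

int main()
{
   node n1, n2;
   linear_node_slist l;
   l.push_front(n1);
   l.push_front(n2);
   assert(l.size() == 2);
   l.clear();   //intrusive containers must be emptied before the nodes are destroyed
   return 0;
}
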
Modified: trunk/boost/interprocess/allocators/node_allocator.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/node_allocator.hpp	(original)
+++ trunk/boost/interprocess/allocators/node_allocator.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -8,8 +8,8 @@
 //
 //////////////////////////////////////////////////////////////////////////////
 
-#ifndef BOOST_INTERPROCESS_POOLED_NODE_ALLOCATOR_HPP
-#define BOOST_INTERPROCESS_POOLED_NODE_ALLOCATOR_HPP
+#ifndef BOOST_INTERPROCESS_NODE_ALLOCATOR_HPP
+#define BOOST_INTERPROCESS_NODE_ALLOCATOR_HPP
 
 #if (defined _MSC_VER) && (_MSC_VER >= 1200)
 #  pragma once
@@ -22,9 +22,10 @@
 #include <boost/assert.hpp>
 #include <boost/utility/addressof.hpp>
 #include <boost/interprocess/detail/utilities.hpp>
-#include <boost/interprocess/detail/workaround.hpp>
+#include <boost/interprocess/detail/type_traits.hpp>
 #include <boost/interprocess/allocators/detail/node_pool.hpp>
 #include <boost/interprocess/exceptions.hpp>
+#include <boost/interprocess/allocators/detail/allocator_common.hpp>
 #include <memory>
 #include <algorithm>
 #include <cstddef>
@@ -35,6 +36,177 @@
 namespace boost {
 namespace interprocess {
 
+/// @cond
+
+namespace detail{
+
+template < unsigned int Version
+         , class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk
+         >
+class node_allocator_base
+   : public node_pool_allocation_impl
+   < node_allocator_base
+      < Version, T, SegmentManager, NodesPerChunk>
+   , Version
+   , T
+   , SegmentManager
+   >
+{
+   public:
+   typedef typename SegmentManager::void_pointer         void_pointer;
+   typedef SegmentManager                                segment_manager;
+   typedef node_allocator_base
+      <Version, T, SegmentManager, NodesPerChunk>   self_t;
+   typedef detail::shared_node_pool
+      < SegmentManager, sizeof(T), NodesPerChunk>   node_pool_t;
+   typedef typename detail::
+      pointer_to_other<void_pointer, node_pool_t>::type  node_pool_ptr;
+
+   BOOST_STATIC_ASSERT((Version <=2));
+
+   public:
+   //-------
+   typedef typename detail::
+      pointer_to_other<void_pointer, T>::type            pointer;
+   typedef typename detail::
+      pointer_to_other<void_pointer, const T>::type      const_pointer;
+   typedef T                                             value_type;
+   typedef typename detail::add_reference
+                     <value_type>::type                  reference;
+   typedef typename detail::add_reference
+                     <const value_type>::type            const_reference;
+   typedef std::size_t                                   size_type;
+   typedef std::ptrdiff_t                                difference_type;
+
+   typedef detail::version_type<node_allocator_base, Version>   version;
+   typedef transform_iterator
+      < typename SegmentManager::
+         multiallocation_iterator
+      , detail::cast_functor <T> >              multiallocation_iterator;
+   typedef typename SegmentManager::
+      multiallocation_chain                     multiallocation_chain;
+
+   //!Obtains node_allocator_base from 
+   //!node_allocator_base
+   template<class T2>
+   struct rebind
+   {  
+      typedef node_allocator_base<Version, T2, SegmentManager, NodesPerChunk>       other;
+   };
+
+   /// @cond
+   private:
+   //!Not assignable from related node_allocator_base
+   template<unsigned int Version2, class T2, class SegmentManager2, std::size_t N2>
+   node_allocator_base& operator=
+      (const node_allocator_base<Version2, T2, SegmentManager2, N2>&);
+
+   //!Not assignable from other node_allocator_base
+   node_allocator_base& operator=(const node_allocator_base&);
+   /// @endcond
+
+   public:
+   //!Constructor from a segment manager. If not present, constructs a node
+   //!pool. Increments the reference count of the associated node pool.
+   //!Can throw boost::interprocess::bad_alloc
+   node_allocator_base(segment_manager *segment_mngr) 
+      : mp_node_pool(detail::get_or_create_node_pool<node_pool_t>(segment_mngr)) { }
+
+   //!Copy constructor from other node_allocator_base. Increments the reference 
+   //!count of the associated node pool. Never throws
+   node_allocator_base(const node_allocator_base &other) 
+      : mp_node_pool(other.get_node_pool()) 
+   {  
+      mp_node_pool->inc_ref_count();   
+   }
+
+   //!Copy constructor from related node_allocator_base. If not present, constructs
+   //!a node pool. Increments the reference count of the associated node pool.
+   //!Can throw boost::interprocess::bad_alloc
+   template<class T2>
+   node_allocator_base
+      (const node_allocator_base<Version, T2, SegmentManager, NodesPerChunk> &other)
+      : mp_node_pool(detail::get_or_create_node_pool<node_pool_t>(other.get_segment_manager())) { }
+
+   //!Destructor, removes node_pool_t from memory
+   //!if its reference count reaches zero. Never throws
+   ~node_allocator_base() 
+   {  detail::destroy_node_pool_if_last_link(detail::get_pointer(mp_node_pool));   }
+
+   //!Returns a pointer to the node pool.
+   //!Never throws
+   node_pool_t* get_node_pool() const
+   {  return detail::get_pointer(mp_node_pool);   }
+
+   //!Returns the segment manager.
+   //!Never throws
+   segment_manager* get_segment_manager()const
+   {  return mp_node_pool->get_segment_manager();  }
+
+   //!Swaps allocators. Does not throw. If each allocator is placed in a
+   //!different memory segment, the result is undefined.
+   friend void swap(self_t &alloc1, self_t &alloc2)
+   {  detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool);  }
+
+   /// @cond
+   private:
+   node_pool_ptr   mp_node_pool;
+   /// @endcond
+};
+
+//!Equality test for same type
+//!of node_allocator_base
+template<unsigned int V, class T, class S, std::size_t NodesPerChunk> inline
+bool operator==(const node_allocator_base<V, T, S, NodesPerChunk> &alloc1, 
+                const node_allocator_base<V, T, S, NodesPerChunk> &alloc2)
+   {  return alloc1.get_node_pool() == alloc2.get_node_pool(); }
+
+//!Inequality test for same type
+//!of node_allocator_base
+template<unsigned int V, class T, class S, std::size_t NodesPerChunk> inline
+bool operator!=(const node_allocator_base<V, T, S, NodesPerChunk> &alloc1, 
+                const node_allocator_base<V, T, S, NodesPerChunk> &alloc2)
+   {  return alloc1.get_node_pool() != alloc2.get_node_pool(); }
+
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk = 64
+         >
+class node_allocator_v1
+   :  public node_allocator_base
+         < 1
+         , T
+         , SegmentManager
+         , NodesPerChunk
+         >
+{
+   public:
+   typedef detail::node_allocator_base
+         < 1, T, SegmentManager, NodesPerChunk> base_t;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef node_allocator_v1<T2, SegmentManager, NodesPerChunk>  other;
+   };
+
+   node_allocator_v1(SegmentManager *segment_mngr) 
+      : base_t(segment_mngr)
+   {}
+
+   template<class T2>
+   node_allocator_v1
+      (const node_allocator_v1<T2, SegmentManager, NodesPerChunk> &other)
+      : base_t(other)
+   {}
+};
+
+}  //namespace detail{
+
+/// @endcond
+
 //!An STL node allocator that uses a segment manager as memory 
 //!source. The internal pointer type will be of the same type (raw, smart) as
 //!"typename SegmentManager::void_pointer" type. This allows
@@ -43,25 +215,49 @@
 //!of node_allocator with equal sizeof(T) placed in the same segment 
 //!group. NodesPerChunk is the number of nodes allocated at once when the allocator
 //!runs out of nodes
-template<class T, class SegmentManager, std::size_t NodesPerChunk>
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk
+         >
 class node_allocator
+   /// @cond
+   :  public detail::node_allocator_base
+         < 2
+         , T
+         , SegmentManager
+         , NodesPerChunk
+         >
+   /// @endcond
 {
+
+   #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
+   typedef detail::node_allocator_base
+         < 2, T, SegmentManager, NodesPerChunk> base_t;
    public:
-   typedef typename SegmentManager::void_pointer         void_pointer;
-   typedef typename detail::
-      pointer_to_other<void_pointer, const void>::type   cvoid_pointer;
-   typedef SegmentManager                                segment_manager;
-   typedef typename SegmentManager::
-      mutex_family::mutex_type                           mutex_type;
-   typedef node_allocator
-      <T, SegmentManager, NodesPerChunk>                 self_t;
+   typedef detail::version_type<node_allocator, 2>   version;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef node_allocator<T2, SegmentManager, NodesPerChunk>  other;
+   };
+
+   node_allocator(SegmentManager *segment_mngr) 
+      : base_t(segment_mngr)
+   {}
 
+   template<class T2>
+   node_allocator
+      (const node_allocator<T2, SegmentManager, NodesPerChunk> &other)
+      : base_t(other)
+   {}
+
+   #else //BOOST_INTERPROCESS_DOXYGEN_INVOKED
    public:
-   //-------
-   typedef typename detail::
-      pointer_to_other<void_pointer, T>::type            pointer;
-   typedef typename detail::
-      pointer_to_other<void_pointer, const T>::type      const_pointer;
+   typedef implementation_defined::segment_manager       segment_manager;
+   typedef segment_manager::void_pointer                 void_pointer;
+   typedef implementation_defined::pointer               pointer;
+   typedef implementation_defined::const_pointer         const_pointer;
    typedef T                                             value_type;
    typedef typename detail::add_reference
                      <value_type>::type                  reference;
@@ -69,225 +265,170 @@
                      <const value_type>::type            const_reference;
    typedef std::size_t                                   size_type;
    typedef std::ptrdiff_t                                difference_type;
-   typedef detail::shared_node_pool
-      < SegmentManager, mutex_type
-      , sizeof(T), NodesPerChunk>                        node_pool_t;
-   typedef typename detail::
-      pointer_to_other<void_pointer, node_pool_t>::type  node_pool_ptr;
 
-   //!Obtains node_allocator from other
+   //!Obtains node_allocator from 
    //!node_allocator
    template<class T2>
    struct rebind
    {  
-      typedef node_allocator<T2, SegmentManager, NodesPerChunk>   other;
+      typedef node_allocator<T2, SegmentManager, NodesPerChunk> other;
    };
 
-   /// @cond
    private:
-   //!Not assignable from related
-   //!node_allocator
+   //!Not assignable from
+   //!related node_allocator
    template<class T2, class SegmentManager2, std::size_t N2>
    node_allocator& operator=
       (const node_allocator<T2, SegmentManager2, N2>&);
 
-   //!Not assignable from other
-   //!node_allocator
+   //!Not assignable from 
+   //!other node_allocator
    node_allocator& operator=(const node_allocator&);
-   /// @endcond
 
    public:
-
    //!Constructor from a segment manager. If not present, constructs a node
    //!pool. Increments the reference count of the associated node pool.
    //!Can throw boost::interprocess::bad_alloc
-   node_allocator(segment_manager *segment_mngr) 
-      : mp_node_pool(priv_get_or_create(segment_mngr))
-   {}
+   node_allocator(segment_manager *segment_mngr);
 
    //!Copy constructor from other node_allocator. Increments the reference 
    //!count of the associated node pool. Never throws
-   node_allocator(const node_allocator &other) 
-      : mp_node_pool(other.get_node_pool()) 
-   {  mp_node_pool->inc_ref_count();   }
+   node_allocator(const node_allocator &other);
 
    //!Copy constructor from related node_allocator. If not present, constructs
    //!a node pool. Increments the reference count of the associated node pool.
    //!Can throw boost::interprocess::bad_alloc
    template<class T2>
    node_allocator
-      (const node_allocator<T2, SegmentManager, NodesPerChunk> &other)
-      : mp_node_pool(priv_get_or_create(other.get_segment_manager()))
-   {}
+      (const node_allocator<T2, SegmentManager, NodesPerChunk> &other);
 
    //!Destructor, removes node_pool_t from memory
    //!if its reference count reaches to zero. Never throws
-   ~node_allocator() 
-   {  priv_destroy_if_last_link();  }
+   ~node_allocator();
 
    //!Returns a pointer to the node pool.
    //!Never throws
-   node_pool_t* get_node_pool() const
-   {  return detail::get_pointer(mp_node_pool);   }
+   node_pool_t* get_node_pool() const;
 
    //!Returns the segment manager.
    //!Never throws
-   segment_manager* get_segment_manager()const
-   {  return mp_node_pool->get_segment_manager();  }
+   segment_manager* get_segment_manager()const;
 
-   //!Returns the number of elements that could be allocated. Never throws
-   size_type max_size() const
-   {  return this->get_segment_manager()->get_size()/sizeof(value_type);  }
+   //!Returns the number of elements that could be allocated.
+   //!Never throws
+   size_type max_size() const;
 
    //!Allocate memory for an array of count elements. 
    //!Throws boost::interprocess::bad_alloc if there is no enough memory
-   pointer allocate(size_type count, cvoid_pointer = 0)
-   {  
-      if(count > ((size_type)-1)/sizeof(value_type))
-         throw bad_alloc();
-      return pointer(static_cast<T*>(mp_node_pool->allocate(count)));
-   }
+   pointer allocate(size_type count, cvoid_pointer hint = 0);
 
    //!Deallocate allocated memory.
    //!Never throws
-   void deallocate(const pointer &ptr, size_type count)
-   {  mp_node_pool->deallocate(detail::get_pointer(ptr), count);  }
+   void deallocate(const pointer &ptr, size_type count);
 
-   //!Deallocates all free chunks of the pool
-   void deallocate_free_chunks()
-   {  mp_node_pool->deallocate_free_chunks();   }
+   //!Deallocates all free chunks
+   //!of the pool
+   void deallocate_free_chunks();
 
    //!Swaps allocators. Does not throw. If each allocator is placed in a
    //!different memory segment, the result is undefined.
-   friend void swap(self_t &alloc1, self_t &alloc2)
-   {  detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool);  }
-
-   //These functions are obsolete. These are here to conserve
-   //backwards compatibility with containers using them...
+   friend void swap(self_t &alloc1, self_t &alloc2);
 
    //!Returns address of mutable object.
    //!Never throws
-   pointer address(reference value) const
-   {  return pointer(boost::addressof(value));  }
+   pointer address(reference value) const;
 
    //!Returns address of non mutable object.
    //!Never throws
-   const_pointer address(const_reference value) const
-   {  return const_pointer(boost::addressof(value));  }
+   const_pointer address(const_reference value) const;
 
    //!Default construct an object. 
-   //!Throws if T's default constructor throws*/
-   void construct(const pointer &ptr)
-   {  new(detail::get_pointer(ptr)) value_type;  }
+   //!Throws if T's default constructor throws
+   void construct(const pointer &ptr);
 
    //!Destroys object. Throws if object's
    //!destructor throws
-   void destroy(const pointer &ptr)
-   {  BOOST_ASSERT(ptr != 0); (*ptr).~value_type();  }
-
-   /// @cond
-   private:
-   //!Object function that creates the node allocator if it is not created and
-   //!increments reference count if it is already created
-   struct get_or_create_func
-   {
-      typedef detail::shared_node_pool
-               <SegmentManager, mutex_type, sizeof(T), NodesPerChunk>   node_pool_t;
-
-      //!This connects or constructs the unique instance of node_pool_t
-      //!Can throw boost::interprocess::bad_alloc
-      void operator()()
-      {
-         //Find or create the node_pool_t
-         mp_node_pool =    mp_named_alloc->template find_or_construct
-                           <node_pool_t>(unique_instance)(mp_named_alloc);
-         //If valid, increment link count
-         if(mp_node_pool != 0)
-            mp_node_pool->inc_ref_count();
-      }
-
-      //!Constructor. Initializes function
-      //!object parameters
-      get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){}
-      
-      node_pool_t       *mp_node_pool;
-      segment_manager   *mp_named_alloc;
-   };
-
-   //!Initialization function, creates an executes atomically the 
-   //!initialization object functions. Can throw boost::interprocess::bad_alloc
-   node_pool_t *priv_get_or_create(segment_manager *named_alloc)
-   {
-      get_or_create_func func(named_alloc);
-      named_alloc->atomic_func(func);
-      return func.mp_node_pool;
-   }
-
-   //!Object function that decrements the reference count. If the count 
-   //!reaches to zero destroys the node allocator from memory. 
-   //!Never throws
-   struct destroy_if_last_link_func
-   {
-      typedef detail::shared_node_pool
-               <SegmentManager, mutex_type,sizeof(T), NodesPerChunk>   node_pool_t;
-
-      //!Decrements reference count and destroys the object if there is no 
-      //!more attached allocators. Never throws
-      void operator()()
-      {
-         //If not the last link return
-         if(mp_node_pool->dec_ref_count() != 0) return;
-
-         //Last link, let's destroy the segment_manager
-         mp_named_alloc->template destroy<node_pool_t>(unique_instance); 
-      }  
-
-      //!Constructor. Initializes function
-      //!object parameters
-      destroy_if_last_link_func(segment_manager    *nhdr,
-                                node_pool_t *phdr) 
-         : mp_named_alloc(nhdr), mp_node_pool(phdr)
-      {}
+   void destroy(const pointer &ptr);
 
-      segment_manager     *mp_named_alloc;     
-      node_pool_t      *mp_node_pool;
-   };
+   //!Returns the maximum number of objects the previously allocated memory
+   //!pointed to by p can hold. This size only works for memory allocated with
+   //!allocate, allocation_command and allocate_many.
+   size_type size(const pointer &p) const;
+
+   std::pair<pointer, bool>
+      allocation_command(allocation_type command,
+                         size_type limit_size, 
+                         size_type preferred_size,
+                         size_type &received_size, const pointer &reuse = 0);
+
+   //!Allocates num_elements elements, each one of size elem_size, in a
+   //!contiguous chunk of memory. The elements must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);
+
+   //!Allocates n_elements elements, each one of size elem_sizes[i], in a
+   //!contiguous chunk of memory. The elements must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);
+
+   //!Deallocates the elements pointed by the multiallocation iterator.
+   //!Never throws
+   void deallocate_many(multiallocation_iterator it);
 
-   //!Destruction function, initializes and executes destruction function 
-   //!object. Never throws
-   void priv_destroy_if_last_link()
-   {
-      typedef detail::shared_node_pool
-               <SegmentManager, mutex_type,sizeof(T), NodesPerChunk>   node_pool_t;
-      //Get segment manager
-      segment_manager *named_segment_mngr = this->get_segment_manager();
-      //Execute destruction functor atomically
-      destroy_if_last_link_func func(named_segment_mngr, detail::get_pointer(mp_node_pool));
-      named_segment_mngr->atomic_func(func);
-   }
+   //!Allocates just one object. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate_one();
 
-   private:
-   node_pool_ptr   mp_node_pool;
-   /// @endcond
+   //!Allocates num_elements individual elements, each one of size
+   //!sizeof(value_type). Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   multiallocation_iterator allocate_individual(std::size_t num_elements);
+
+   //!Deallocates memory previously allocated with allocate_one().
+   //!You should never use deallocate_one to deallocate memory allocated
+   //!with functions other than allocate_one(). Never throws
+   void deallocate_one(const pointer &p);
+
+   //!Deallocates the individual elements pointed by the multiallocation
+   //!iterator. Memory must have been allocated with allocate_individual().
+   //!Never throws
+   void deallocate_individual(multiallocation_iterator it);
+   #endif
 };
 
-//!Equality test for same type of
-//!node_allocator
-template<class T, class S, std::size_t NodesPerChunk> inline
-bool operator==(const node_allocator<T, S, NodesPerChunk> &alloc1, 
-                const node_allocator<T, S, NodesPerChunk> &alloc2)
-   {  return alloc1.get_node_pool() == alloc2.get_node_pool(); }
+#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
 
-//!Inequality test for same type of
-//!node_allocator
-template<class T, class S, std::size_t NodesPerChunk> inline
-bool operator!=(const node_allocator<T, S, NodesPerChunk> &alloc1, 
-                const node_allocator<T, S, NodesPerChunk> &alloc2)
-   {  return alloc1.get_node_pool() != alloc2.get_node_pool(); }
+//!Equality test for same type
+//!of node_allocator
+template<class T, class S, std::size_t NodesPerChunk> inline
+bool operator==(const node_allocator<T, S, NodesPerChunk> &alloc1, 
+                const node_allocator<T, S, NodesPerChunk> &alloc2);
+
+//!Inequality test for same type
+//!of node_allocator
+template<class T, class S, std::size_t NodesPerChunk> inline
+bool operator!=(const node_allocator<T, S, NodesPerChunk> &alloc1, 
+                const node_allocator<T, S, NodesPerChunk> &alloc2);
+
+#endif
 
 }  //namespace interprocess {
 }  //namespace boost {
 
 #include <boost/interprocess/detail/config_end.hpp>
 
-#endif   //#ifndef BOOST_INTERPROCESS_POOLED_NODE_ALLOCATOR_HPP
+#endif   //#ifndef BOOST_INTERPROCESS_NODE_ALLOCATOR_HPP
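
From user code nothing changes: the allocator is still constructed from a segment manager and handed to a container; containers that detect the new version-2 interface can additionally use allocate_one()/allocate_individual() internally. A usage sketch following the usual Boost.Interprocess pattern (names, sizes and the explicit NodesPerChunk value of 64 are illustrative):

#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/shared_memory_object.hpp>
#include <boost/interprocess/allocators/node_allocator.hpp>
#include <boost/interprocess/containers/list.hpp>

using namespace boost::interprocess;

int main()
{
   shared_memory_object::remove("MySharedMemory");
   managed_shared_memory segment(create_only, "MySharedMemory", 65536);

   //T, SegmentManager, NodesPerChunk
   typedef node_allocator
      <int, managed_shared_memory::segment_manager, 64> int_node_alloc;
   typedef list<int, int_node_alloc> int_list;

   //The allocator attaches to (or creates) the shared node pool
   int_node_alloc alloc(segment.get_segment_manager());
   int_list *l = segment.construct<int_list>("MyList")(alloc);
   for(int i = 0; i < 100; ++i)
      l->push_back(i);

   segment.destroy<int_list>("MyList");
   shared_memory_object::remove("MySharedMemory");
   return 0;
}
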
Modified: trunk/boost/interprocess/allocators/private_adaptive_pool.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/private_adaptive_pool.hpp	(original)
+++ trunk/boost/interprocess/allocators/private_adaptive_pool.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -30,38 +30,47 @@
 #include <cstddef>
 
 //!\file
-//!Describes private_adaptive_pool pooled shared memory STL compatible allocator 
+//!Describes private_adaptive_pool_base pooled shared memory STL compatible allocator 
 
 namespace boost {
 namespace interprocess {
 
-//!An STL node allocator that uses a segment manager as memory 
-//!source. The internal pointer type will of the same type (raw, smart) as
-//!"typename SegmentManager::void_pointer" type. This allows
-//!placing the allocator in shared memory, memory mapped-files, etc...
-//!This allocator has its own node pool. NodesPerChunk is the minimum number of nodes
-//!allocated at once when the allocator needs runs out of nodes.
-template<class T, class SegmentManager, std::size_t NodesPerChunk, std::size_t MaxFreeChunks>
-class private_adaptive_pool
+/// @cond
+
+namespace detail {
+
+template < unsigned int Version
+         , class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk
+         , std::size_t MaxFreeChunks
+         , unsigned char OverheadPercent
+         >
+class private_adaptive_pool_base
+   : public node_pool_allocation_impl
+   < private_adaptive_pool_base < Version, T, SegmentManager, NodesPerChunk
+                                , MaxFreeChunks, OverheadPercent>
+   , Version
+   , T
+   , SegmentManager
+   >
 {
    /// @cond
    private:
    typedef typename SegmentManager::void_pointer         void_pointer;
-   typedef typename detail::
-      pointer_to_other<void_pointer, const void>::type   cvoid_pointer;
    typedef SegmentManager                                segment_manager;
-   typedef typename detail::
-      pointer_to_other<void_pointer, char>::type         char_pointer;
-   typedef typename detail::pointer_to_other
-      <void_pointer, segment_manager>::type              segment_mngr_ptr_t;
-   typedef typename SegmentManager::
-      mutex_family::mutex_type                           mutex_type;
-   typedef private_adaptive_pool
-      <T, SegmentManager, NodesPerChunk, MaxFreeChunks>  self_t;
+   typedef private_adaptive_pool_base
+      < Version, T, SegmentManager, NodesPerChunk
+      , MaxFreeChunks, OverheadPercent>                  self_t;
    typedef detail::private_adaptive_node_pool
-      <SegmentManager, sizeof(T)
-      , NodesPerChunk, MaxFreeChunks>                    priv_node_pool_t;
+      <SegmentManager
+      , sizeof(T)
+      , NodesPerChunk
+      , MaxFreeChunks
+      , OverheadPercent
+      > node_pool_t;
 
+   BOOST_STATIC_ASSERT((Version <=2));
 
    /// @endcond
 
@@ -77,120 +86,358 @@
                      <const value_type>::type            const_reference;
    typedef std::size_t                                   size_type;
    typedef std::ptrdiff_t                                difference_type;
+   typedef detail::version_type
+      <private_adaptive_pool_base, Version>              version;
+   typedef transform_iterator
+      < typename SegmentManager::
+         multiallocation_iterator
+      , detail::cast_functor <T> >              multiallocation_iterator;
+   typedef typename SegmentManager::
+      multiallocation_chain                     multiallocation_chain;
 
    //!Obtains node_allocator from other node_allocator
    template<class T2>
    struct rebind
    {  
-      typedef private_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks>   other;
+      typedef private_adaptive_pool_base
+         <Version, T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent>   other;
    };
 
    /// @cond
    private:
-   //!Not assignable from related private_adaptive_pool
-   template<class T2, class MemoryAlgorithm2, std::size_t N2, std::size_t F2>
-   private_adaptive_pool& operator=
-      (const private_adaptive_pool<T2, MemoryAlgorithm2, N2, F2>&);
+   //!Not assignable from related private_adaptive_pool_base
+   template<unsigned int Version2, class T2, class MemoryAlgorithm2, std::size_t N2, std::size_t F2, unsigned char OP2>
+   private_adaptive_pool_base& operator=
+      (const private_adaptive_pool_base<Version2, T2, MemoryAlgorithm2, N2, F2, OP2>&);
 
-   //!Not assignable from other private_adaptive_pool
-   private_adaptive_pool& operator=(const private_adaptive_pool&);
+   //!Not assignable from other private_adaptive_pool_base
+   private_adaptive_pool_base& operator=(const private_adaptive_pool_base&);
    /// @endcond
 
    public:
    //!Constructor from a segment manager
-   private_adaptive_pool(segment_manager *segment_mngr)
+   private_adaptive_pool_base(segment_manager *segment_mngr)
       : m_node_pool(segment_mngr)
    {}
 
-   //!Copy constructor from other private_adaptive_pool. Never throws
-   private_adaptive_pool(const private_adaptive_pool &other)
+   //!Copy constructor from other private_adaptive_pool_base. Never throws
+   private_adaptive_pool_base(const private_adaptive_pool_base &other)
       : m_node_pool(other.get_segment_manager())
    {}
 
-   //!Copy constructor from related private_adaptive_pool. Never throws.
+   //!Copy constructor from related private_adaptive_pool_base. Never throws.
    template<class T2>
-   private_adaptive_pool
-      (const private_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks> &other)
+   private_adaptive_pool_base
+      (const private_adaptive_pool_base
+         <Version, T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
       : m_node_pool(other.get_segment_manager())
    {}
 
    //!Destructor, frees all used memory. Never throws
-   ~private_adaptive_pool() 
+   ~private_adaptive_pool_base() 
    {}
 
    //!Returns the segment manager. Never throws
    segment_manager* get_segment_manager()const
    {  return m_node_pool.get_segment_manager(); }
 
-   //!Returns the number of elements that could be allocated. Never throws
-   size_type max_size() const
-   {  return this->get_segment_manager()/sizeof(value_type);  }
-
-   //!Allocate memory for an array of count elements. 
-   //!Throws boost::interprocess::bad_alloc if there is no enough memory
-   pointer allocate(size_type count, cvoid_pointer hint = 0)
-   {
-      (void)hint;
-      if(count > ((size_type)-1)/sizeof(value_type))
-         throw bad_alloc();
-      return pointer(static_cast<value_type*>(m_node_pool.allocate(count)));
-   }
-
-   //!Deallocate allocated memory. Never throws
-   void deallocate(const pointer &ptr, size_type count)
-   {  m_node_pool.deallocate(detail::get_pointer(ptr), count); }
-
-   //!Deallocates all free chunks of the pool
-   void deallocate_free_chunks()
-   {  m_node_pool.deallocate_free_chunks();  }
+   //!Returns the internal node pool. Never throws
+   node_pool_t* get_node_pool() const
+   {  return const_cast<node_pool_t*>(&m_node_pool); }
 
    //!Swaps allocators. Does not throw. If each allocator is placed in a
-   //!different shared memory segments, the result is undefined.*/
+   //!different shared memory segments, the result is undefined.
    friend void swap(self_t &alloc1,self_t &alloc2)
    {  alloc1.m_node_pool.swap(alloc2.m_node_pool);  }
 
-   //These functions are obsolete. These are here to conserve
-   //backwards compatibility with containers using them...
+   /// @cond
+   private:
+   node_pool_t m_node_pool;
+   /// @endcond
+};
+
+//!Equality test for same type of private_adaptive_pool_base
+template<unsigned int V, class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
+bool operator==(const private_adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc1, 
+                const private_adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc2)
+{  return &alloc1 == &alloc2; }
+
+//!Inequality test for same type of private_adaptive_pool_base
+template<unsigned int V, class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
+bool operator!=(const private_adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc1, 
+                const private_adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc2)
+{  return &alloc1 != &alloc2; }
+
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk = 64
+         , std::size_t MaxFreeChunks = 2
+         , unsigned char OverheadPercent = 5
+         >
+class private_adaptive_pool_v1
+   :  public private_adaptive_pool_base
+         < 1
+         , T
+         , SegmentManager
+         , NodesPerChunk
+         , MaxFreeChunks
+         , OverheadPercent
+         >
+{
+   public:
+   typedef detail::private_adaptive_pool_base
+         < 1, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef private_adaptive_pool_v1<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent>  other;
+   };
+
+   private_adaptive_pool_v1(SegmentManager *segment_mngr) 
+      : base_t(segment_mngr)
+   {}
+
+   template<class T2>
+   private_adaptive_pool_v1
+      (const private_adaptive_pool_v1<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
+      : base_t(other)
+   {}
+};
+
+}  //namespace detail {
+
+/// @endcond
+
+//!An STL node allocator that uses a segment manager as memory 
+//!source. The internal pointer type will be of the same type (raw, smart) as
+//!"typename SegmentManager::void_pointer" type. This allows
+//!placing the allocator in shared memory, memory mapped-files, etc...
+//!This allocator has its own node pool.
+//!
+//!NodesPerChunk is the minimum number of nodes allocated at once when
+//!the allocator runs out of nodes. MaxFreeChunks is the maximum number of totally free chunks
+//!that the adaptive node pool will hold. The rest of the totally free chunks will be
+//!deallocated with the segment manager.
+//!
+//!OverheadPercent is the (approximated) maximum size overhead (1-20%) of the allocator:
+//!(memory usable for nodes / total memory allocated from the segment manager)
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk
+         , std::size_t MaxFreeChunks
+         , unsigned char OverheadPercent
+         >
+class private_adaptive_pool
+   /// @cond
+   :  public detail::private_adaptive_pool_base
+         < 2
+         , T
+         , SegmentManager
+         , NodesPerChunk
+         , MaxFreeChunks
+         , OverheadPercent
+         >
+   /// @endcond
+{
+
+   #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
+   typedef detail::private_adaptive_pool_base
+         < 2, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t;
+   public:
+   typedef detail::version_type<private_adaptive_pool, 2>   version;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef private_adaptive_pool
+         <T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent>  other;
+   };
+
+   private_adaptive_pool(SegmentManager *segment_mngr) 
+      : base_t(segment_mngr)
+   {}
+
+   template<class T2>
+   private_adaptive_pool
+      (const private_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
+      : base_t(other)
+   {}
+
+   #else
+   public:
+   typedef implementation_defined::segment_manager       segment_manager;
+   typedef segment_manager::void_pointer                 void_pointer;
+   typedef implementation_defined::pointer               pointer;
+   typedef implementation_defined::const_pointer         const_pointer;
+   typedef T                                             value_type;
+   typedef typename detail::add_reference
+                     <value_type>::type                  reference;
+   typedef typename detail::add_reference
+                     <const value_type>::type            const_reference;
+   typedef std::size_t                                   size_type;
+   typedef std::ptrdiff_t                                difference_type;
+
+   //!Obtains private_adaptive_pool from 
+   //!private_adaptive_pool
+   template<class T2>
+   struct rebind
+   {  
+      typedef private_adaptive_pool
+         <T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
+   };
+
+   private:
+   //!Not assignable from
+   //!related private_adaptive_pool
+   template<class T2, class SegmentManager2, std::size_t N2, std::size_t F2, unsigned char OP2>
+   private_adaptive_pool& operator=
+      (const private_adaptive_pool<T2, SegmentManager2, N2, F2, OP2>&);
+
+   //!Not assignable from 
+   //!other private_adaptive_pool
+   private_adaptive_pool& operator=(const private_adaptive_pool&);
+
+   public:
+   //!Constructor from a segment manager. If not present, constructs a node
+   //!pool. Increments the reference count of the associated node pool.
+   //!Can throw boost::interprocess::bad_alloc
+   private_adaptive_pool(segment_manager *segment_mngr);
+
+   //!Copy constructor from other private_adaptive_pool. Increments the reference 
+   //!count of the associated node pool. Never throws
+   private_adaptive_pool(const private_adaptive_pool &other);
+
+   //!Copy constructor from related private_adaptive_pool. If not present, constructs
+   //!a node pool. Increments the reference count of the associated node pool.
+   //!Can throw boost::interprocess::bad_alloc
+   template<class T2>
+   private_adaptive_pool
+      (const private_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other);
+
+   //!Destructor, removes node_pool_t from memory
+   //!if its reference count reaches zero. Never throws
+   ~private_adaptive_pool();
+
+   //!Returns a pointer to the node pool.
+   //!Never throws
+   node_pool_t* get_node_pool() const;
+
+   //!Returns the segment manager.
+   //!Never throws
+   segment_manager* get_segment_manager()const;
+
+   //!Returns the number of elements that could be allocated.
+   //!Never throws
+   size_type max_size() const;
+
+   //!Allocate memory for an array of count elements. 
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate(size_type count, cvoid_pointer hint = 0);
+
+   //!Deallocate allocated memory.
+   //!Never throws
+   void deallocate(const pointer &ptr, size_type count);
+
+   //!Deallocates all free chunks
+   //!of the pool
+   void deallocate_free_chunks();
+
+   //!Swaps allocators. Does not throw. If each allocator is placed in a
+   //!different memory segment, the result is undefined.
+   friend void swap(self_t &alloc1, self_t &alloc2);
 
    //!Returns address of mutable object.
    //!Never throws
-   pointer address(reference value) const
-   {  return pointer(boost::addressof(value));  }
+   pointer address(reference value) const;
 
    //!Returns address of non mutable object.
    //!Never throws
-   const_pointer address(const_reference value) const
-   {  return const_pointer(boost::addressof(value));  }
+   const_pointer address(const_reference value) const;
 
    //!Default construct an object. 
-   //!Throws if T's default constructor throws*/
-   void construct(const pointer &ptr)
-   {  new(detail::get_pointer(ptr)) value_type;  }
+   //!Throws if T's default constructor throws
+   void construct(const pointer &ptr);
 
    //!Destroys object. Throws if object's
    //!destructor throws
-   void destroy(const pointer &ptr)
-   {  BOOST_ASSERT(ptr != 0); (*ptr).~value_type();  }
+   void destroy(const pointer &ptr);
 
-   /// @cond
-   private:
-   priv_node_pool_t m_node_pool;
-   /// @endcond
+   //!Returns the maximum number of objects the previously allocated memory
+   //!pointed to by p can hold. This size only works for memory allocated with
+   //!allocate, allocation_command and allocate_many.
+   size_type size(const pointer &p) const;
+
+   std::pair<pointer, bool>
+      allocation_command(allocation_type command,
+                         size_type limit_size, 
+                         size_type preferred_size,
+                         size_type &received_size, const pointer &reuse = 0);
+
+   //!Allocates num_elements elements, each one of size elem_size, in a
+   //!contiguous chunk of memory. The elements must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);
+
+   //!Allocates n_elements elements, each one of size elem_sizes[i], in a
+   //!contiguous chunk of memory. The elements must be deallocated
+   //!with deallocate(...)
+   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);
+
+   //!Deallocates the elements pointed by the multiallocation iterator.
+   //!Never throws
+   void deallocate_many(multiallocation_iterator it);
+
+   //!Allocates just one object. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate_one();
+
+   //!Allocates num_elements individual elements, each one of size
+   //!sizeof(value_type). Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   multiallocation_iterator allocate_individual(std::size_t num_elements);
+
+   //!Deallocates memory previously allocated with allocate_one().
+   //!You should never use deallocate_one to deallocate memory allocated
+   //!with functions other than allocate_one(). Never throws
+   void deallocate_one(const pointer &p);
+
+   //!Deallocates the individual elements pointed by the multiallocation
+   //!iterator. Memory must have been allocated with allocate_individual().
+   //!Never throws
+   void deallocate_individual(multiallocation_iterator it);
+   #endif
 };
 
-//!Equality test for same type of private_adaptive_pool
-template<class T, class S, std::size_t NodesPerChunk, std::size_t F> inline
-bool operator==(const private_adaptive_pool<T, S, NodesPerChunk, F> &alloc1, 
-                const private_adaptive_pool<T, S, NodesPerChunk, F> &alloc2)
-{  return &alloc1 == &alloc2; }
+#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
+
+//!Equality test for same type
+//!of private_adaptive_pool
+template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
+bool operator==(const private_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1, 
+                const private_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
+
+//!Inequality test for same type
+//!of private_adaptive_pool
+template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
+bool operator!=(const private_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1, 
+                const private_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
 
-//!Inequality test for same type of private_adaptive_pool
-template<class T, class S, std::size_t NodesPerChunk, std::size_t F> inline
-bool operator!=(const private_adaptive_pool<T, S, NodesPerChunk, F> &alloc1, 
-                const private_adaptive_pool<T, S, NodesPerChunk, F> &alloc2)
-{  
-   return &alloc1 != &alloc2;
-}
+#endif
 
 }  //namespace interprocess {
 }  //namespace boost {
Modified: trunk/boost/interprocess/allocators/private_node_allocator.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/private_node_allocator.hpp	(original)
+++ trunk/boost/interprocess/allocators/private_node_allocator.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,13 +1,13 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
 // See http://www.boost.org/libs/interprocess for documentation.
 //
 //////////////////////////////////////////////////////////////////////////////
-
+/*
 #ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP
 #define BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP
 
@@ -50,12 +50,8 @@
    typedef typename detail::
       pointer_to_other<void_pointer, const void>::type   cvoid_pointer;
    typedef SegmentManager                                segment_manager;
-   typedef typename detail::
-      pointer_to_other<void_pointer, char>::type         char_pointer;
    typedef typename detail::pointer_to_other
       <void_pointer, segment_manager>::type              segment_mngr_ptr_t;
-   typedef typename SegmentManager::
-      mutex_family::mutex_type                           mutex_type;
    typedef private_node_allocator
       <T, SegmentManager, NodesPerChunk>                 self_t;
    typedef detail::private_node_pool
@@ -128,21 +124,30 @@
    pointer allocate(size_type count, cvoid_pointer hint = 0)
    {
       (void)hint;
-      if(count > ((size_type)-1)/sizeof(value_type))
+      if(count > this->max_size())
          throw bad_alloc();
-      return pointer(static_cast<value_type*>(m_node_pool.allocate(count)));
+      else if(count == 1)
+         return pointer(static_cast<value_type*>(m_node_pool.allocate_node()));
+      else
+         return pointer(static_cast<value_type*>
+            (m_node_pool.get_segment_manager()->allocate(sizeof(T)*count)));
    }
 
    //!Deallocate allocated memory. Never throws
    void deallocate(const pointer &ptr, size_type count)
-   {  m_node_pool.deallocate(detail::get_pointer(ptr), count);   }
+   {
+      if(count == 1)
+         m_node_pool.deallocate_node(detail::get_pointer(ptr));
+      else
+         m_node_pool.get_segment_manager()->deallocate(detail::get_pointer(ptr));
+   }
 
    //!Deallocates all free chunks of the pool
    void deallocate_free_chunks()
    {  m_node_pool.deallocate_free_chunks(); }
 
    //!Swaps allocators. Does not throw. If each allocator is placed in a
-   //!different shared memory segments, the result is undefined.*/
+   //!different shared memory segments, the result is undefined.
    friend void swap(self_t &alloc1,self_t &alloc2)
    {  alloc1.m_node_pool.swap(alloc2.m_node_pool);  }
 
@@ -160,7 +165,7 @@
    {  return const_pointer(boost::addressof(value));  }
 
    //!Default construct an object. 
-   //!Throws if T's default constructor throws*/
+   //!Throws if T's default constructor throws
    void construct(const pointer &ptr)
    {  new(detail::get_pointer(ptr)) value_type;  }
 
@@ -196,3 +201,432 @@
 
 #endif   //#ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP
 
+*/
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
+// Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/interprocess for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP
+#define BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP
+
+#if (defined _MSC_VER) && (_MSC_VER >= 1200)
+#  pragma once
+#endif
+
+#include <boost/interprocess/detail/config_begin.hpp>
+#include <boost/interprocess/detail/workaround.hpp>
+
+#include <boost/interprocess/interprocess_fwd.hpp>
+#include <boost/assert.hpp>
+#include <boost/utility/addressof.hpp>
+#include <boost/interprocess/allocators/detail/node_pool.hpp>
+#include <boost/interprocess/exceptions.hpp>
+#include <boost/interprocess/detail/utilities.hpp>
+#include <boost/interprocess/detail/workaround.hpp>
+#include <memory>
+#include <algorithm>
+#include <cstddef>
+
+//!\file
+//!Describes private_node_allocator_base, a pooled shared memory STL-compatible allocator
+
+namespace boost {
+namespace interprocess {
+
+/// @cond
+
+namespace detail {
+
+template < unsigned int Version
+         , class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk
+         >
+class private_node_allocator_base
+   : public node_pool_allocation_impl
+   < private_node_allocator_base < Version, T, SegmentManager, NodesPerChunk>
+   , Version
+   , T
+   , SegmentManager
+   >
+{
+   /// @cond
+   private:
+   typedef typename SegmentManager::void_pointer         void_pointer;
+   typedef SegmentManager                                segment_manager;
+   typedef private_node_allocator_base
+      < Version, T, SegmentManager, NodesPerChunk>       self_t;
+   typedef detail::private_node_pool
+      <SegmentManager
+      , sizeof(T)
+      , NodesPerChunk
+      > node_pool_t;
+
+   BOOST_STATIC_ASSERT((Version <= 2));
+
+   /// @endcond
+
+   public:
+   typedef typename detail::
+      pointer_to_other<void_pointer, T>::type            pointer;
+   typedef typename detail::
+      pointer_to_other<void_pointer, const T>::type      const_pointer;
+   typedef T                                             value_type;
+   typedef typename detail::add_reference
+                     <value_type>::type                  reference;
+   typedef typename detail::add_reference
+                     <const value_type>::type            const_reference;
+   typedef std::size_t                                   size_type;
+   typedef std::ptrdiff_t                                difference_type;
+   typedef detail::version_type
+      <private_node_allocator_base, Version>              version;
+   typedef transform_iterator
+      < typename SegmentManager::
+         multiallocation_iterator
+      , detail::cast_functor <T> >              multiallocation_iterator;
+   typedef typename SegmentManager::
+      multiallocation_chain                     multiallocation_chain;
+
+   //!Obtains private_node_allocator_base from other private_node_allocator_base
+   template<class T2>
+   struct rebind
+   {  
+      typedef private_node_allocator_base
+         <Version, T2, SegmentManager, NodesPerChunk>   other;
+   };
+
+   /// @cond
+   private:
+   //!Not assignable from related private_node_allocator_base
+   template<unsigned int Version2, class T2, class MemoryAlgorithm2, std::size_t N2>
+   private_node_allocator_base& operator=
+      (const private_node_allocator_base<Version2, T2, MemoryAlgorithm2, N2>&);
+
+   //!Not assignable from other private_node_allocator_base
+   private_node_allocator_base& operator=(const private_node_allocator_base&);
+   /// @endcond
+
+   public:
+   //!Constructor from a segment manager
+   private_node_allocator_base(segment_manager *segment_mngr)
+      : m_node_pool(segment_mngr)
+   {}
+
+   //!Copy constructor from other private_node_allocator_base. Never throws
+   private_node_allocator_base(const private_node_allocator_base &other)
+      : m_node_pool(other.get_segment_manager())
+   {}
+
+   //!Copy constructor from related private_node_allocator_base. Never throws.
+   template<class T2>
+   private_node_allocator_base
+      (const private_node_allocator_base
+         <Version, T2, SegmentManager, NodesPerChunk> &other)
+      : m_node_pool(other.get_segment_manager())
+   {}
+
+   //!Destructor, frees all used memory. Never throws
+   ~private_node_allocator_base() 
+   {}
+
+   //!Returns the segment manager. Never throws
+   segment_manager* get_segment_manager()const
+   {  return m_node_pool.get_segment_manager(); }
+
+   //!Returns the internal node pool. Never throws
+   node_pool_t* get_node_pool() const
+   {  return const_cast<node_pool_t*>(&m_node_pool); }
+
+   //!Swaps allocators. Does not throw. If each allocator is placed in a
+   //!different shared memory segment, the result is undefined.
+   friend void swap(self_t &alloc1,self_t &alloc2)
+   {  alloc1.m_node_pool.swap(alloc2.m_node_pool);  }
+
+   /// @cond
+   private:
+   node_pool_t m_node_pool;
+   /// @endcond
+};
+
+//!Equality test for same type of private_node_allocator_base
+template<unsigned int V, class T, class S, std::size_t NodesPerChunk> inline
+bool operator==(const private_node_allocator_base<V, T, S, NodesPerChunk> &alloc1, 
+                const private_node_allocator_base<V, T, S, NodesPerChunk> &alloc2)
+{  return &alloc1 == &alloc2; }
+
+//!Inequality test for same type of private_node_allocator_base
+template<unsigned int V, class T, class S, std::size_t NodesPerChunk> inline
+bool operator!=(const private_node_allocator_base<V, T, S, NodesPerChunk> &alloc1, 
+                const private_node_allocator_base<V, T, S, NodesPerChunk> &alloc2)
+{  return &alloc1 != &alloc2; }
+
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk = 64
+         >
+class private_node_allocator_v1
+   :  public private_node_allocator_base
+         < 1
+         , T
+         , SegmentManager
+         , NodesPerChunk
+         >
+{
+   public:
+   typedef detail::private_node_allocator_base
+         < 1, T, SegmentManager, NodesPerChunk> base_t;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef private_node_allocator_v1<T2, SegmentManager, NodesPerChunk>  other;
+   };
+
+   private_node_allocator_v1(SegmentManager *segment_mngr) 
+      : base_t(segment_mngr)
+   {}
+
+   template<class T2>
+   private_node_allocator_v1
+      (const private_node_allocator_v1<T2, SegmentManager, NodesPerChunk> &other)
+      : base_t(other)
+   {}
+};
+
+}  //namespace detail {
+
+/// @endcond
+
+//!An STL node allocator that uses a segment manager as memory
+//!source. The internal pointer type will be of the same type (raw, smart) as
+//!"typename SegmentManager::void_pointer" type. This allows
+//!placing the allocator in shared memory, memory mapped files, etc...
+//!This allocator has its own node pool. NodesPerChunk is the number of nodes
+//!allocated at once when the allocator runs out of nodes
+template < class T
+         , class SegmentManager
+         , std::size_t NodesPerChunk
+         >
+class private_node_allocator
+   /// @cond
+   :  public detail::private_node_allocator_base
+         < 2
+         , T
+         , SegmentManager
+         , NodesPerChunk
+         >
+   /// @endcond
+{
+
+   #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
+   typedef detail::private_node_allocator_base
+         < 2, T, SegmentManager, NodesPerChunk> base_t;
+   public:
+   typedef detail::version_type<private_node_allocator, 2>   version;
+
+   template<class T2>
+   struct rebind
+   {  
+      typedef private_node_allocator
+         <T2, SegmentManager, NodesPerChunk>  other;
+   };
+
+   private_node_allocator(SegmentManager *segment_mngr) 
+      : base_t(segment_mngr)
+   {}
+
+   template<class T2>
+   private_node_allocator
+      (const private_node_allocator<T2, SegmentManager, NodesPerChunk> &other)
+      : base_t(other)
+   {}
+
+   #else
+   public:
+   typedef implementation_defined::segment_manager       segment_manager;
+   typedef segment_manager::void_pointer                 void_pointer;
+   typedef implementation_defined::pointer               pointer;
+   typedef implementation_defined::const_pointer         const_pointer;
+   typedef T                                             value_type;
+   typedef typename detail::add_reference
+                     <value_type>::type                  reference;
+   typedef typename detail::add_reference
+                     <const value_type>::type            const_reference;
+   typedef std::size_t                                   size_type;
+   typedef std::ptrdiff_t                                difference_type;
+
+   //!Obtains private_node_allocator from 
+   //!private_node_allocator
+   template<class T2>
+   struct rebind
+   {  
+      typedef private_node_allocator
+         <T2, SegmentManager, NodesPerChunk> other;
+   };
+
+   private:
+   //!Not assignable from
+   //!related private_node_allocator
+   template<class T2, class SegmentManager2, std::size_t N2>
+   private_node_allocator& operator=
+      (const private_node_allocator<T2, SegmentManager2, N2>&);
+
+   //!Not assignable from 
+   //!other private_node_allocator
+   private_node_allocator& operator=(const private_node_allocator&);
+
+   public:
+   //!Constructor from a segment manager. Constructs the internal
+   //!private node pool.
+   //!Can throw boost::interprocess::bad_alloc
+   private_node_allocator(segment_manager *segment_mngr);
+
+   //!Copy constructor from other private_node_allocator. Constructs a new
+   //!private node pool from the same segment manager. Never throws
+   private_node_allocator(const private_node_allocator &other);
+
+   //!Copy constructor from related private_node_allocator. Constructs a new
+   //!private node pool from the same segment manager.
+   //!Can throw boost::interprocess::bad_alloc
+   template<class T2>
+   private_node_allocator
+      (const private_node_allocator<T2, SegmentManager, NodesPerChunk> &other);
+
+   //!Destructor, frees all memory held by the
+   //!internal node pool. Never throws
+   ~private_node_allocator();
+
+   //!Returns a pointer to the node pool.
+   //!Never throws
+   node_pool_t* get_node_pool() const;
+
+   //!Returns the segment manager.
+   //!Never throws
+   segment_manager* get_segment_manager()const;
+
+   //!Returns the number of elements that could be allocated.
+   //!Never throws
+   size_type max_size() const;
+
+   //!Allocate memory for an array of count elements. 
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate(size_type count, cvoid_pointer hint = 0);
+
+   //!Deallocate allocated memory.
+   //!Never throws
+   void deallocate(const pointer &ptr, size_type count);
+
+   //!Deallocates all free chunks
+   //!of the pool
+   void deallocate_free_chunks();
+
+   //!Swaps allocators. Does not throw. If each allocator is placed in a
+   //!different memory segment, the result is undefined.
+   friend void swap(self_t &alloc1, self_t &alloc2);
+
+   //!Returns address of mutable object.
+   //!Never throws
+   pointer address(reference value) const;
+
+   //!Returns address of non mutable object.
+   //!Never throws
+   const_pointer address(const_reference value) const;
+
+   //!Default construct an object. 
+   //!Throws if T's default constructor throws
+   void construct(const pointer &ptr);
+
+   //!Destroys object. Throws if object's
+   //!destructor throws
+   void destroy(const pointer &ptr);
+
+   //!Returns the maximum number of objects that the memory previously
+   //!allocated and pointed to by p can hold. This size only works for memory
+   //!allocated with allocate, allocation_command and allocate_many.
+   size_type size(const pointer &p) const;
+
+   std::pair<pointer, bool>
+      allocation_command(allocation_type command,
+                         size_type limit_size, 
+                         size_type preferred_size,
+                         size_type &received_size, const pointer &reuse = 0);
+
+   //!Allocates num_elements elements, each one of size elem_size,
+   //!in a contiguous chunk of memory. The elements must be
+   //!deallocated with deallocate(...)
+   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);
+
+   //!Allocates n_elements elements, each one of size elem_sizes[i], in a
+   //!contiguous chunk of memory. The elements must be
+   //!deallocated with deallocate(...)
+   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);
+
+   //!Deallocates elements previously allocated with
+   //!allocate_many(...)
+   void deallocate_many(multiallocation_iterator it);
+
+   //!Allocates just one object. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   //!Throws boost::interprocess::bad_alloc if there is not enough memory
+   pointer allocate_one();
+
+   //!Allocates num_elements elements of size == 1 in a contiguous chunk
+   //!of memory. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   multiallocation_iterator allocate_individual(std::size_t num_elements);
+
+   //!Deallocates memory previously allocated with allocate_one().
+   //!You should never use deallocate_one to deallocate memory allocated
+   //!with functions other than allocate_one(). Never throws
+   void deallocate_one(const pointer &p);
+
+   //!Deallocates the elements pointed to by the multiallocation iterator,
+   //!which must have been allocated with allocate_individual().
+   void deallocate_individual(multiallocation_iterator it);
+   #endif
+};
+
+#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
+
+//!Equality test for same type
+//!of private_node_allocator
+template<class T, class S, std::size_t NodesPerChunk> inline
+bool operator==(const private_node_allocator<T, S, NodesPerChunk> &alloc1, 
+                const private_node_allocator<T, S, NodesPerChunk> &alloc2);
+
+//!Inequality test for same type
+//!of private_node_allocator
+template<class T, class S, std::size_t NodesPerChunk> inline
+bool operator!=(const private_node_allocator<T, S, NodesPerChunk> &alloc1, 
+                const private_node_allocator<T, S, NodesPerChunk> &alloc2);
+
+#endif
+
+}  //namespace interprocess {
+}  //namespace boost {
+
+#include <boost/interprocess/detail/config_end.hpp>
+
+#endif   //#ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP
+
Modified: trunk/boost/interprocess/containers/detail/flat_tree.hpp
==============================================================================
--- trunk/boost/interprocess/containers/detail/flat_tree.hpp	(original)
+++ trunk/boost/interprocess/containers/detail/flat_tree.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 ////////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/containers/detail/node_alloc_holder.hpp
==============================================================================
--- trunk/boost/interprocess/containers/detail/node_alloc_holder.hpp	(original)
+++ trunk/boost/interprocess/containers/detail/node_alloc_holder.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -74,6 +74,9 @@
    typedef detail::integral_constant<unsigned,
       boost::interprocess::detail::
          version<NodeAlloc>::value>                   alloc_version;
+   typedef typename ICont::iterator                   icont_iterator;
+   typedef typename ICont::const_iterator             icont_citerator;
+   typedef allocator_destroyer<NodeAlloc>             Destroyer;
 
    node_alloc_holder(const ValAlloc &a) 
       : members_(a)
@@ -292,18 +295,41 @@
          if(constructed){
             this->destroy(p);
          }
-         this->deallocate_one(p);
-         multiallocation_iterator itend;
-         while(itbeg != itend){
-            Node *n = &*itbeg;
-            ++itbeg;
-            this->deallocate_one(n);
-         }
+         this->node_alloc().deallocate_many(itbeg);
       }
       BOOST_CATCH_END
       return beg;
    }
 
+   void clear(allocator_v1)
+   {  this->icont().clear_and_dispose(Destroyer(this->node_alloc()));   }
+
+   void clear(allocator_v2)
+   {
+      allocator_multialloc_chain_node_deallocator<NodeAlloc> chain_holder(this->node_alloc());
+      this->icont().clear_and_dispose(chain_holder.get_chain_builder());
+   }
+
+   icont_iterator erase_range(icont_iterator first, icont_iterator last, allocator_v1)
+   {  return this->icont().erase_and_dispose(first, last, Destroyer(this->node_alloc())); }
+
+   icont_iterator erase_range(icont_iterator first, icont_iterator last, allocator_v2)
+   {
+      allocator_multialloc_chain_node_deallocator<NodeAlloc> chain_holder(this->node_alloc());
+      return this->icont().erase_and_dispose(first, last, chain_holder.get_chain_builder());
+   }
+
+   template<class Key, class Comparator>
+   size_type erase_key(const Key& k, const Comparator &comp, allocator_v1)
+   {  return this->icont().erase_and_dispose(k, comp, Destroyer(this->node_alloc())); }
+
+   template<class Key, class Comparator>
+   size_type erase_key(const Key& k, const Comparator &comp, allocator_v2)
+   {
+      allocator_multialloc_chain_node_deallocator<NodeAlloc> chain_holder(this->node_alloc());
+      return this->icont().erase_and_dispose(k, comp, chain_holder.get_chain_builder());
+   }
+
    protected:
    struct cloner
    {
@@ -359,10 +385,10 @@
    {  return this->members_.m_icont;   }
 
    NodeAlloc &node_alloc()
-   {  return this->members_;   }
+   {  return static_cast<NodeAlloc &>(this->members_);   }
 
    const NodeAlloc &node_alloc() const
-   {  return this->members_;   }
+   {  return static_cast<const NodeAlloc &>(this->members_);   }
 };
 
 }  //namespace detail {
Modified: trunk/boost/interprocess/containers/detail/tree.hpp
==============================================================================
--- trunk/boost/interprocess/containers/detail/tree.hpp	(original)
+++ trunk/boost/interprocess/containers/detail/tree.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -732,13 +732,13 @@
    {  return iterator(this->icont().erase_and_dispose(position.get(), Destroyer(this->node_alloc()))); }
 
    size_type erase(const key_type& k)
-   {  return this->icont().erase_and_dispose(k, KeyNodeCompare(value_comp()), Destroyer(this->node_alloc())); }
+   {  return AllocHolder::erase_key(k, KeyNodeCompare(value_comp()), alloc_version()); }
 
    iterator erase(const_iterator first, const_iterator last)
-   {  return iterator(this->icont().erase_and_dispose(first.get(), last.get(), Destroyer(this->node_alloc()))); }
+   {  return iterator(AllocHolder::erase_range(first.get(), last.get(), alloc_version())); }
 
    void clear() 
-   {  this->icont().clear_and_dispose(Destroyer(this->node_alloc())); }
+   {  AllocHolder::clear(alloc_version());  }
 
    // set operations:
    iterator find(const key_type& k)
Modified: trunk/boost/interprocess/containers/flat_map.hpp
==============================================================================
--- trunk/boost/interprocess/containers/flat_map.hpp	(original)
+++ trunk/boost/interprocess/containers/flat_map.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/containers/flat_set.hpp
==============================================================================
--- trunk/boost/interprocess/containers/flat_set.hpp	(original)
+++ trunk/boost/interprocess/containers/flat_set.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/containers/list.hpp
==============================================================================
--- trunk/boost/interprocess/containers/list.hpp	(original)
+++ trunk/boost/interprocess/containers/list.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -404,7 +404,7 @@
    //!
    //! <b>Complexity</b>: Linear to the number of elements in the list.
    void clear()
-   {  this->icont().clear_and_dispose(Destroyer(this->node_alloc()));  }
+   {  AllocHolder::clear(alloc_version());  }
 
    //! <b>Effects</b>: Returns an iterator to the first element contained in the list.
    //! 
@@ -786,7 +786,7 @@
    //!
    //! <b>Complexity</b>: Linear to the distance between first and last.
    iterator erase(iterator first, iterator last)
-   {  return iterator(this->icont().erase_and_dispose(first.get(), last.get(), Destroyer(this->node_alloc()))); }
+   {  return iterator(AllocHolder::erase_range(first.get(), last.get(), alloc_version())); }
 
    //! <b>Effects</b>: Assigns the n copies of val to *this.
    //!
@@ -1085,6 +1085,7 @@
 
    /// @cond
    private:
+
    //Iterator range version
    template<class InpIterator>
    void priv_create_and_insert_nodes
@@ -1160,7 +1161,7 @@
 
    template<class Integer>
    void priv_insert_dispatch(iterator p, Integer n, Integer x, detail::true_) 
-   {  this->priv_create_and_insert_nodes(p, n, x);  }
+   {  this->insert(p, (size_type)n, x);  }
 
    void priv_fill_assign(size_type n, const T& val) 
    {
Modified: trunk/boost/interprocess/containers/map.hpp
==============================================================================
--- trunk/boost/interprocess/containers/map.hpp	(original)
+++ trunk/boost/interprocess/containers/map.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/containers/set.hpp
==============================================================================
--- trunk/boost/interprocess/containers/set.hpp	(original)
+++ trunk/boost/interprocess/containers/set.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/containers/slist.hpp
==============================================================================
--- trunk/boost/interprocess/containers/slist.hpp	(original)
+++ trunk/boost/interprocess/containers/slist.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2004-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2004-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -8,7 +8,7 @@
 //
 //////////////////////////////////////////////////////////////////////////////
 //
-// This file comes from SGI's stl_slist.h file. Modified by Ion Gaztanaga 2004-2007
+// This file comes from SGI's stl_slist.h file. Modified by Ion Gaztanaga 2004-2008
 // Renaming, isolating and porting to generic algorithms. Pointer typedef 
 // set to allocator::pointer to allow placing it in shared memory.
 //
Modified: trunk/boost/interprocess/containers/string.hpp
==============================================================================
--- trunk/boost/interprocess/containers/string.hpp	(original)
+++ trunk/boost/interprocess/containers/string.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -8,7 +8,7 @@
 //
 //////////////////////////////////////////////////////////////////////////////
 //
-// This file comes from SGI's string file. Modified by Ion Gaztanaga 2004-2007
+// This file comes from SGI's string file. Modified by Ion Gaztanaga 2004-2008
 // Renaming, isolating and porting to generic algorithms. Pointer typedef 
 // set to allocator::pointer to allow placing it in shared memory.
 //
Modified: trunk/boost/interprocess/containers/vector.hpp
==============================================================================
--- trunk/boost/interprocess/containers/vector.hpp	(original)
+++ trunk/boost/interprocess/containers/vector.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -740,6 +740,9 @@
          //Check for forward expansion
          same_buffer_start = ret.second && this->members_.m_start == ret.first;
          if(same_buffer_start){
+            #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
+            ++this->num_expand_fwd;
+            #endif
             this->members_.m_capacity  = real_cap;
          }
          //If there is no forward expansion, move objects
@@ -748,6 +751,9 @@
             copy_move_it dummy_it(detail::get_pointer(this->members_.m_start));
             //Backwards (and possibly forward) expansion
             if(ret.second){
+               #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
+               ++this->num_expand_bwd;
+               #endif
                this->priv_range_insert_expand_backwards
                   ( detail::get_pointer(ret.first)
                   , real_cap
@@ -758,6 +764,9 @@
             }
             //New buffer
             else{
+               #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
+               ++this->num_alloc;
+               #endif
                this->priv_range_insert_new_allocation
                   ( detail::get_pointer(ret.first)
                   , real_cap
@@ -1184,11 +1193,17 @@
          
          //If we had room or we have expanded forward
          if (same_buffer_start){
+            #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
+            ++this->num_expand_fwd;
+            #endif
             this->priv_range_insert_expand_forward
                (detail::get_pointer(pos), first, last, n);
          }
          //Backwards (and possibly forward) expansion
          else if(ret.second){
+            #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
+            ++this->num_expand_bwd;
+            #endif
             this->priv_range_insert_expand_backwards
                ( detail::get_pointer(ret.first)
                , real_cap
@@ -1199,6 +1214,9 @@
          }
          //New buffer
          else{
+            #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
+            ++this->num_alloc;
+            #endif
             this->priv_range_insert_new_allocation
                ( detail::get_pointer(ret.first)
                , real_cap
@@ -1778,6 +1796,15 @@
       if (n >= size())
          throw std::out_of_range("vector::at");
    }
+
+   #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
+   public:
+   unsigned int num_expand_fwd;
+   unsigned int num_expand_bwd;
+   unsigned int num_alloc;
+   void reset_alloc_stats()
+   {  num_expand_fwd = num_expand_bwd = num_alloc = 0;   }                 
+   #endif
    /// @endcond
 };
 
Modified: trunk/boost/interprocess/creation_tags.hpp
==============================================================================
--- trunk/boost/interprocess/creation_tags.hpp	(original)
+++ trunk/boost/interprocess/creation_tags.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/algorithms.hpp
==============================================================================
--- trunk/boost/interprocess/detail/algorithms.hpp	(original)
+++ trunk/boost/interprocess/detail/algorithms.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007.
+// (C) Copyright Ion Gaztanaga 2005-2008.
 //
 // Distributed under the Boost Software License, Version 1.0.
 // (See accompanying file LICENSE_1_0.txt or copy at
Modified: trunk/boost/interprocess/detail/atomic.hpp
==============================================================================
--- trunk/boost/interprocess/detail/atomic.hpp	(original)
+++ trunk/boost/interprocess/detail/atomic.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2006-2007
+// (C) Copyright Ion Gaztanaga 2006-2008
 // (C) Copyright Markus Schoepflin 2007
 //
 // Distributed under the Boost Software License, Version 1.0. (See
Modified: trunk/boost/interprocess/detail/cast_tags.hpp
==============================================================================
--- trunk/boost/interprocess/detail/cast_tags.hpp	(original)
+++ trunk/boost/interprocess/detail/cast_tags.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/config_begin.hpp
==============================================================================
--- trunk/boost/interprocess/detail/config_begin.hpp	(original)
+++ trunk/boost/interprocess/detail/config_begin.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -9,6 +9,7 @@
    #define _CRT_SECURE_NO_DEPRECATE
    #endif
    #pragma warning (push)
+   #pragma warning (disable : 4702) // unreachable code
    #pragma warning (disable : 4706) // assignment within conditional expression
    #pragma warning (disable : 4127) // conditional expression is constant
    #pragma warning (disable : 4146) // unary minus operator applied to unsigned type, result still unsigned
Modified: trunk/boost/interprocess/detail/in_place_interface.hpp
==============================================================================
--- trunk/boost/interprocess/detail/in_place_interface.hpp	(original)
+++ trunk/boost/interprocess/detail/in_place_interface.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/interprocess_tester.hpp
==============================================================================
--- trunk/boost/interprocess/detail/interprocess_tester.hpp	(original)
+++ trunk/boost/interprocess/detail/interprocess_tester.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/iterators.hpp
==============================================================================
--- trunk/boost/interprocess/detail/iterators.hpp	(original)
+++ trunk/boost/interprocess/detail/iterators.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007.
+// (C) Copyright Ion Gaztanaga 2005-2008.
 // (C) Copyright Gennaro Prota 2003 - 2004.
 //
 // Distributed under the Boost Software License, Version 1.0.
@@ -428,6 +428,12 @@
       operator->() const
    { return operator_arrow_proxy<typename UnaryFunction::result_type>(dereference());  }
 
+   Iterator & base()
+   {  return m_it;   }
+
+   const Iterator & base() const
+   {  return m_it;   }
+
    private:
    Iterator m_it;
 
Modified: trunk/boost/interprocess/detail/managed_memory_impl.hpp
==============================================================================
--- trunk/boost/interprocess/detail/managed_memory_impl.hpp	(original)
+++ trunk/boost/interprocess/detail/managed_memory_impl.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/math_functions.hpp
==============================================================================
--- trunk/boost/interprocess/detail/math_functions.hpp	(original)
+++ trunk/boost/interprocess/detail/math_functions.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,7 +1,7 @@
 //////////////////////////////////////////////////////////////////////////////
 //
 // (C) Copyright Stephen Cleary 2000.
-// (C) Copyright Ion Gaztanaga 2007.
+// (C) Copyright Ion Gaztanaga 2007-2008.
 //
 // Distributed under the Boost Software License, Version 1.0.
 //    (See accompanying file LICENSE_1_0.txt or copy at 
Modified: trunk/boost/interprocess/detail/min_max.hpp
==============================================================================
--- trunk/boost/interprocess/detail/min_max.hpp	(original)
+++ trunk/boost/interprocess/detail/min_max.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007.
+// (C) Copyright Ion Gaztanaga 2005-2008.
 //
 // Distributed under the Boost Software License, Version 1.0.
 // (See accompanying file LICENSE_1_0.txt or copy at
Modified: trunk/boost/interprocess/detail/mpl.hpp
==============================================================================
--- trunk/boost/interprocess/detail/mpl.hpp	(original)
+++ trunk/boost/interprocess/detail/mpl.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007.
+// (C) Copyright Ion Gaztanaga 2005-2008.
 //
 // Distributed under the Boost Software License, Version 1.0.
 // (See accompanying file LICENSE_1_0.txt or copy at
Modified: trunk/boost/interprocess/detail/named_proxy.hpp
==============================================================================
--- trunk/boost/interprocess/detail/named_proxy.hpp	(original)
+++ trunk/boost/interprocess/detail/named_proxy.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/os_file_functions.hpp
==============================================================================
--- trunk/boost/interprocess/detail/os_file_functions.hpp	(original)
+++ trunk/boost/interprocess/detail/os_file_functions.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/os_thread_functions.hpp
==============================================================================
--- trunk/boost/interprocess/detail/os_thread_functions.hpp	(original)
+++ trunk/boost/interprocess/detail/os_thread_functions.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/pointer_type.hpp
==============================================================================
--- trunk/boost/interprocess/detail/pointer_type.hpp	(original)
+++ trunk/boost/interprocess/detail/pointer_type.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007.
+// (C) Copyright Ion Gaztanaga 2005-2008.
 // (C) Copyright Gennaro Prota 2003 - 2004.
 //
 // Distributed under the Boost Software License, Version 1.0.
Modified: trunk/boost/interprocess/detail/posix_time_types_wrk.hpp
==============================================================================
--- trunk/boost/interprocess/detail/posix_time_types_wrk.hpp	(original)
+++ trunk/boost/interprocess/detail/posix_time_types_wrk.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/segment_manager_helper.hpp
==============================================================================
--- trunk/boost/interprocess/detail/segment_manager_helper.hpp	(original)
+++ trunk/boost/interprocess/detail/segment_manager_helper.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/tmp_dir_helpers.hpp
==============================================================================
--- trunk/boost/interprocess/detail/tmp_dir_helpers.hpp	(original)
+++ trunk/boost/interprocess/detail/tmp_dir_helpers.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/type_traits.hpp
==============================================================================
--- trunk/boost/interprocess/detail/type_traits.hpp	(original)
+++ trunk/boost/interprocess/detail/type_traits.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 // (C) Copyright John Maddock 2000.
-// (C) Copyright Ion Gaztanaga 2005-2007.
+// (C) Copyright Ion Gaztanaga 2005-2008.
 //
 // Distributed under the Boost Software License, Version 1.0.
 // (See accompanying file LICENSE_1_0.txt or copy at
Modified: trunk/boost/interprocess/detail/utilities.hpp
==============================================================================
--- trunk/boost/interprocess/detail/utilities.hpp	(original)
+++ trunk/boost/interprocess/detail/utilities.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007.
+// (C) Copyright Ion Gaztanaga 2005-2008.
 // (C) Copyright Gennaro Prota 2003 - 2004.
 //
 // Distributed under the Boost Software License, Version 1.0.
@@ -25,6 +25,9 @@
 #include <boost/interprocess/detail/move.hpp>
 #include <boost/type_traits/has_trivial_destructor.hpp>
 #include <boost/interprocess/detail/min_max.hpp>
+#include <boost/interprocess/detail/type_traits.hpp>
+#include <boost/interprocess/detail/version_type.hpp>
 #include <utility>
 #include <algorithm>
 
@@ -70,14 +73,27 @@
 struct scoped_ptr_dealloc_functor
 {
    typedef typename Allocator::pointer pointer;
+   typedef detail::integral_constant<unsigned,
+      boost::interprocess::detail::
+         version<Allocator>::value>                   alloc_version;
+   typedef detail::integral_constant<unsigned, 1>     allocator_v1;
+   typedef detail::integral_constant<unsigned, 2>     allocator_v2;
 
+   private:
+   void priv_deallocate(const typename Allocator::pointer &p, allocator_v1)
+   {  m_alloc.deallocate(p, 1); }
+
+   void priv_deallocate(const typename Allocator::pointer &p, allocator_v2)
+   {  m_alloc.deallocate_one(p); }
+
+   public:
    Allocator& m_alloc;
 
    scoped_ptr_dealloc_functor(Allocator& a)
-         : m_alloc(a) {}
+      : m_alloc(a) {}
 
    void operator()(pointer ptr)
-      {  if (ptr) m_alloc.deallocate(ptr, 1);  }
+   {  if (ptr) priv_deallocate(ptr, alloc_version());  }
 };
 
 //!A deleter for scoped_ptr that deallocates the memory
@@ -86,7 +102,20 @@
 struct scoped_deallocator
 {
    typedef typename Allocator::pointer pointer;
+   typedef detail::integral_constant<unsigned,
+      boost::interprocess::detail::
+         version<Allocator>::value>                   alloc_version;
+   typedef detail::integral_constant<unsigned, 1>     allocator_v1;
+   typedef detail::integral_constant<unsigned, 2>     allocator_v2;
 
+   private:
+   void priv_deallocate(allocator_v1)
+   {  m_alloc.deallocate(m_ptr, 1); }
+
+   void priv_deallocate(allocator_v2)
+   {  m_alloc.deallocate_one(m_ptr); }
+
+   public:
    pointer     m_ptr;
    Allocator&  m_alloc;
 
@@ -94,7 +123,7 @@
       : m_ptr(p), m_alloc(a) {}
 
    ~scoped_deallocator()
-   {  if (m_ptr) m_alloc.deallocate(m_ptr, 1);  }
+   {  if (m_ptr) priv_deallocate(alloc_version());  }
 
    void release()
    {  m_ptr = 0; }
@@ -189,9 +218,22 @@
 class allocator_destroyer
 {
    typedef typename A::value_type value_type;
+   typedef detail::integral_constant<unsigned,
+      boost::interprocess::detail::
+         version<A>::value>                           alloc_version;
+   typedef detail::integral_constant<unsigned, 1>     allocator_v1;
+   typedef detail::integral_constant<unsigned, 2>     allocator_v2;
+
    private:
    A & a_;
 
+   private:
+   void priv_deallocate(const typename A::pointer &p, allocator_v1)
+   {  a_.deallocate(p, 1); }
+
+   void priv_deallocate(const typename A::pointer &p, allocator_v2)
+   {  a_.deallocate_one(p); }
+
    public:
    allocator_destroyer(A &a)
       :  a_(a)
@@ -200,35 +242,86 @@
    void operator()(const typename A::pointer &p)
    {  
       detail::get_pointer(p)->~value_type();
-      a_.deallocate(p, 1);
+      priv_deallocate(p, alloc_version());
    }
 };
 
-//!A class used for exception-safe multi-allocation + construction.
-template <class Allocator>
-struct multiallocation_deallocator
+template <class A>
+class allocator_destroyer_and_chain_builder
 {
-   typedef typename Allocator::multiallocation_iterator multiallocation_iterator;
+   typedef typename A::value_type value_type;
+   typedef typename A::multiallocation_iterator multiallocation_iterator;
+   typedef typename A::multiallocation_chain    multiallocation_chain;
 
-   multiallocation_iterator m_itbeg;
-   Allocator&  m_alloc;
+   A & a_;
+   multiallocation_chain &c_;
 
-   multiallocation_deallocator(multiallocation_iterator itbeg, Allocator& a)
-      : m_itbeg(itbeg), m_alloc(a) {}
+   public:
+   allocator_destroyer_and_chain_builder(A &a, multiallocation_chain &c)
+      :  a_(a), c_(c)
+   {}
 
-   ~multiallocation_deallocator()
+   void operator()(const typename A::pointer &p)
+   {  
+      value_type *vp = detail::get_pointer(p);
+      vp->~value_type();
+      c_.push_back(vp);
+   }
+};
+
+template <class A>
+class allocator_multialloc_chain_node_deallocator
+{
+   typedef typename A::value_type value_type;
+   typedef typename A::multiallocation_iterator multiallocation_iterator;
+   typedef typename A::multiallocation_chain    multiallocation_chain;
+   typedef allocator_destroyer_and_chain_builder<A> chain_builder;
+
+   A & a_;
+   multiallocation_chain c_;
+
+   public:
+   allocator_multialloc_chain_node_deallocator(A &a)
+      :  a_(a), c_()
+   {}
+
+   chain_builder get_chain_builder()
+   {  return chain_builder(a_, c_);  }
+
+   ~allocator_multialloc_chain_node_deallocator()
    {
-      multiallocation_iterator endit;
-      while(m_itbeg != endit){
-         m_alloc.deallocate(&*m_itbeg, 1);
-         ++m_itbeg;
-      }
+      multiallocation_iterator it(c_.get_it());
+      if(it != multiallocation_iterator())
+         a_.deallocate_individual(it);
    }
-   
-   void increment()
-   {  ++m_itbeg;  }
 };
 
+template <class A>
+class allocator_multialloc_chain_array_deallocator
+{
+   typedef typename A::value_type value_type;
+   typedef typename A::multiallocation_iterator multiallocation_iterator;
+   typedef typename A::multiallocation_chain    multiallocation_chain;
+   typedef allocator_destroyer_and_chain_builder<A> chain_builder;
+
+   A & a_;
+   multiallocation_chain c_;
+
+   public:
+   allocator_multialloc_chain_array_deallocator(A &a)
+      :  a_(a), c_()
+   {}
+
+   chain_builder get_chain_builder()
+   {  return chain_builder(a_, c_);  }
+
+   ~allocator_multialloc_chain_array_deallocator()
+   {
+      multiallocation_iterator it(c_.get_it());
+      if(it != multiallocation_iterator())
+         a_.deallocate_many(it);
+   }
+};
 
 //!A class used for exception-safe multi-allocation + construction.
 template <class Allocator>
@@ -577,6 +670,14 @@
 }
 #endif
 
+template<class T>
+struct cast_functor
+{
+   typedef typename detail::add_reference<T>::type result_type;
+   result_type operator()(char &ptr) const
+   {  return *static_cast<T*>(static_cast<void*>(&ptr));  }
+};
+
 }  //namespace detail {
 
 //!The pair is movable if any of its members is movable
Modified: trunk/boost/interprocess/detail/version_type.hpp
==============================================================================
--- trunk/boost/interprocess/detail/version_type.hpp	(original)
+++ trunk/boost/interprocess/detail/version_type.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/win32_api.hpp
==============================================================================
--- trunk/boost/interprocess/detail/win32_api.hpp	(original)
+++ trunk/boost/interprocess/detail/win32_api.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/detail/workaround.hpp
==============================================================================
--- trunk/boost/interprocess/detail/workaround.hpp	(original)
+++ trunk/boost/interprocess/detail/workaround.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -19,8 +19,8 @@
 
    #if defined(_POSIX_THREAD_PROCESS_SHARED)
    # if !((_XOPEN_VERSION >= 600) && (_POSIX_THREAD_PROCESS_SHARED - 0 <= 0))
-   // Cygwin defines _POSIX_THREAD_PROCESS_SHARED but does not support it.
-   // Mac Os X >= Leopard defines _POSIX_THREAD_PROCESS_SHARED but it does not seem to work
+   //Cygwin defines _POSIX_THREAD_PROCESS_SHARED but does not implement it.
+   //Mac OS X >= Leopard defines _POSIX_THREAD_PROCESS_SHARED but it does not seem to work.
    #  if !defined(__CYGWIN__) && !defined(__APPLE__)
    #  define BOOST_INTERPROCESS_POSIX_PROCESS_SHARED
    #  endif
Modified: trunk/boost/interprocess/errors.hpp
==============================================================================
--- trunk/boost/interprocess/errors.hpp	(original)
+++ trunk/boost/interprocess/errors.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/exceptions.hpp
==============================================================================
--- trunk/boost/interprocess/exceptions.hpp	(original)
+++ trunk/boost/interprocess/exceptions.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/file_mapping.hpp
==============================================================================
--- trunk/boost/interprocess/file_mapping.hpp	(original)
+++ trunk/boost/interprocess/file_mapping.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/indexes/flat_map_index.hpp
==============================================================================
--- trunk/boost/interprocess/indexes/flat_map_index.hpp	(original)
+++ trunk/boost/interprocess/indexes/flat_map_index.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/indexes/iset_index.hpp
==============================================================================
--- trunk/boost/interprocess/indexes/iset_index.hpp	(original)
+++ trunk/boost/interprocess/indexes/iset_index.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/indexes/iunordered_set_index.hpp
==============================================================================
--- trunk/boost/interprocess/indexes/iunordered_set_index.hpp	(original)
+++ trunk/boost/interprocess/indexes/iunordered_set_index.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/indexes/map_index.hpp
==============================================================================
--- trunk/boost/interprocess/indexes/map_index.hpp	(original)
+++ trunk/boost/interprocess/indexes/map_index.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/indexes/null_index.hpp
==============================================================================
--- trunk/boost/interprocess/indexes/null_index.hpp	(original)
+++ trunk/boost/interprocess/indexes/null_index.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/indexes/unordered_map_index.hpp
==============================================================================
--- trunk/boost/interprocess/indexes/unordered_map_index.hpp	(original)
+++ trunk/boost/interprocess/indexes/unordered_map_index.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/interprocess_fwd.hpp
==============================================================================
--- trunk/boost/interprocess/interprocess_fwd.hpp	(original)
+++ trunk/boost/interprocess/interprocess_fwd.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -129,13 +129,19 @@
 template<class T, class SegmentManager, std::size_t NodesPerChunk = 64>
 class cached_node_allocator;
 
-template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2>
+template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2
+         , unsigned char OverheadPercent = 5
+>
 class adaptive_pool;
 
-template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2>
+template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2
+         , unsigned char OverheadPercent = 5
+>
 class private_adaptive_pool;
 
-template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2>
+template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2
+         , unsigned char OverheadPercent = 5
+>
 class cached_adaptive_pool;
 
 
@@ -151,10 +157,10 @@
 //////////////////////////////////////////////////////////////////////////////
 
 //Single segment memory allocation algorithms
-template<class MutexFamily, class VoidMutex = void*>//offset_ptr<void> >
+template<class MutexFamily, class VoidMutex = offset_ptr<void> >
 class simple_seq_fit;
 
-template<class MutexFamily, class VoidMutex = offset_ptr<void> >
+template<class MutexFamily, class VoidMutex = offset_ptr<void>, std::size_t MemAlignment = 0>
 class rbtree_best_fit;
 
 //////////////////////////////////////////////////////////////////////////////
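These forward declarations carry the two user-visible interface changes: the adaptive pool allocators gain an OverheadPercent parameter and rbtree_best_fit gains a MemAlignment parameter. A hedged usage sketch of the new adaptive_pool signature follows; the segment name and sizes are placeholders and the surrounding setup is the usual managed_shared_memory pattern:

#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/allocators/adaptive_pool.hpp>
#include <boost/interprocess/shared_memory_object.hpp>

int main()
{
   using namespace boost::interprocess;
   shared_memory_object::remove("AdaptivePoolDemo");
   managed_shared_memory segment(create_only, "AdaptivePoolDemo", 65536);

   //NodesPerChunk = 64, MaxFreeChunks = 2, OverheadPercent = 5 (the new parameter)
   typedef adaptive_pool<int, managed_shared_memory::segment_manager, 64, 2, 5>
      int_allocator;

   int_allocator alloc(segment.get_segment_manager());
   int_allocator::pointer p = alloc.allocate(1);
   alloc.deallocate(p, 1);

   shared_memory_object::remove("AdaptivePoolDemo");
   return 0;
}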
Modified: trunk/boost/interprocess/ipc/message_queue.hpp
==============================================================================
--- trunk/boost/interprocess/ipc/message_queue.hpp	(original)
+++ trunk/boost/interprocess/ipc/message_queue.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/managed_external_buffer.hpp
==============================================================================
--- trunk/boost/interprocess/managed_external_buffer.hpp	(original)
+++ trunk/boost/interprocess/managed_external_buffer.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -20,6 +20,7 @@
 #include <boost/interprocess/creation_tags.hpp>
 #include <boost/interprocess/detail/managed_memory_impl.hpp>
 #include <boost/interprocess/detail/move.hpp>
+#include <cassert>
 
 //!\file
 //!Describes a named user memory allocation user class. 
@@ -49,6 +50,8 @@
    basic_managed_external_buffer
       (create_only_t, void *addr, std::size_t size)
    {
+      //Check if alignment is correct
+      assert((0 == (((std::size_t)addr) & (AllocationAlgorithm::Alignment - std::size_t(1u)))));
       if(!base_t::create_impl(addr, size)){
          throw interprocess_exception();
       }
@@ -58,6 +61,8 @@
    basic_managed_external_buffer
       (open_only_t, void *addr, std::size_t size)
    {
+      //Check if alignment is correct
+      assert((0 == (((std::size_t)addr) & (AllocationAlgorithm::Alignment - std::size_t(1u)))));
       if(!base_t::open_impl(addr, size)){
          throw interprocess_exception();
       }
Modified: trunk/boost/interprocess/managed_heap_memory.hpp
==============================================================================
--- trunk/boost/interprocess/managed_heap_memory.hpp	(original)
+++ trunk/boost/interprocess/managed_heap_memory.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/managed_mapped_file.hpp
==============================================================================
--- trunk/boost/interprocess/managed_mapped_file.hpp	(original)
+++ trunk/boost/interprocess/managed_mapped_file.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/managed_shared_memory.hpp
==============================================================================
--- trunk/boost/interprocess/managed_shared_memory.hpp	(original)
+++ trunk/boost/interprocess/managed_shared_memory.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/managed_windows_shared_memory.hpp
==============================================================================
--- trunk/boost/interprocess/managed_windows_shared_memory.hpp	(original)
+++ trunk/boost/interprocess/managed_windows_shared_memory.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/mapped_region.hpp
==============================================================================
--- trunk/boost/interprocess/mapped_region.hpp	(original)
+++ trunk/boost/interprocess/mapped_region.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/mem_algo/detail/mem_algo_common.hpp
==============================================================================
--- trunk/boost/interprocess/mem_algo/detail/mem_algo_common.hpp	(original)
+++ trunk/boost/interprocess/mem_algo/detail/mem_algo_common.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -104,6 +104,167 @@
    multi_allocation_next<VoidPointer> next_alloc_;
 };
 
+template<class VoidPointer>
+class basic_multiallocation_chain
+{
+   private:
+   basic_multiallocation_iterator<VoidPointer> it_;
+   VoidPointer last_mem_;
+   std::size_t num_mem_;
+
+   basic_multiallocation_chain(const basic_multiallocation_chain &);
+   basic_multiallocation_chain &operator=(const basic_multiallocation_chain &);
+
+   public:
+   typedef basic_multiallocation_iterator<VoidPointer> multiallocation_iterator;
+
+   basic_multiallocation_chain()
+      :  it_(0), last_mem_(0), num_mem_(0)
+   {}
+
+   void push_back(void *mem)
+   {
+      typedef multi_allocation_next<VoidPointer> next_impl_t;
+      next_impl_t * tmp_mem = static_cast<next_impl_t*>(mem);
+      
+      if(!this->last_mem_){
+         this->it_ = basic_multiallocation_iterator<VoidPointer>(tmp_mem);
+      }
+      else{
+         static_cast<next_impl_t*>(detail::get_pointer(this->last_mem_))->next_ = tmp_mem;
+      }
+      tmp_mem->next_ = 0;
+      this->last_mem_ = tmp_mem;
+      ++num_mem_;
+   }
+
+   void push_back(multiallocation_iterator it, std::size_t n)
+   {
+      typedef multi_allocation_next<VoidPointer> next_impl_t;
+      next_impl_t * tmp_mem = (next_impl_t*)(&*it);
+      
+      if(!this->last_mem_){
+         this->it_ = it;
+      }
+      else{
+         static_cast<next_impl_t*>(detail::get_pointer(this->last_mem_))->next_ = tmp_mem;
+      }
+      tmp_mem->next_ = 0;
+      this->last_mem_ = tmp_mem;
+      ++num_mem_;
+   }
+
+   void push_front(void *mem)
+   {
+      typedef multi_allocation_next<VoidPointer> next_impl_t;
+      
+      if(!this->last_mem_){
+         push_back(mem);
+      }
+      else{
+         next_impl_t * tmp_mem   = static_cast<next_impl_t*>(mem);
+         next_impl_t * old_first = (next_impl_t*)(&*this->it_);
+         static_cast<next_impl_t*>(mem)->next_ = old_first;
+         this->it_ = basic_multiallocation_iterator<VoidPointer>(tmp_mem);
+         ++num_mem_;
+      }
+   }
+
+   void swap(basic_multiallocation_chain &other_chain)
+   {
+      std::swap(this->it_, other_chain.it_);
+      std::swap(this->last_mem_, other_chain.last_mem_);
+      std::swap(this->num_mem_, other_chain.num_mem_);
+   }
+
+   void splice_back(basic_multiallocation_chain &other_chain)
+   {
+      typedef multi_allocation_next<VoidPointer> next_impl_t;
+      multiallocation_iterator end_it;
+      multiallocation_iterator other_it = other_chain.get_it();
+      multiallocation_iterator this_it  = this->get_it();
+      if(end_it == other_it){
+         return;
+      }
+      else if(end_it == this_it){
+         this->swap(other_chain);
+         return;
+      }
+      
+      static_cast<next_impl_t*>(detail::get_pointer(this->last_mem_))->next_
+         = (next_impl_t*)&*other_it;
+      this->last_mem_ = other_chain.last_mem_;
+      this->num_mem_ += other_chain.num_mem_;
+   }
+
+   void *pop_front()
+   {
+      multiallocation_iterator itend;
+      if(this->it_ == itend){
+         this->last_mem_= 0;
+         this->num_mem_ = 0;
+         return 0;
+      }
+      else{
+         void *addr = &*it_;
+         ++it_;
+         --num_mem_;
+         if(!num_mem_){
+            this->last_mem_ = 0;
+            this->it_ = multiallocation_iterator();
+         }
+         return addr;
+      }
+   }
+
+   bool empty() const
+   {  return !num_mem_; }
+
+   multiallocation_iterator get_it() const
+   {  return it_;  }
+
+   std::size_t size() const
+   {  return num_mem_;  }
+};
+
+template<class Allocator>
+class allocator_multiallocation_chain
+{
+   typedef typename detail::
+      pointer_to_other<typename Allocator::pointer, void>::type
+         void_ptr;
+
+   typedef typename Allocator::multiallocation_iterator  multiallocation_iterator;
+   basic_multiallocation_chain<void_ptr> chain_;
+
+   public:
+
+   allocator_multiallocation_chain()
+      :  chain_()
+   {}
+
+   void push_back(void *mem)
+   {  chain_.push_back(mem);  }
+
+   multiallocation_iterator get_it() const
+   {  return multiallocation_iterator(chain_.get_it());  }
+};
+
+
+#define BOOST_MULTIALLOC_IT_CHAIN_INIT(IT_CHAIN) ((IT_CHAIN).it.next = 0, (IT_CHAIN).last_mem = 0)
+#define BOOST_MULTIALLOC_IT_CHAIN_ADD(IT_CHAIN, MEM)\
+   do{\
+      multialloc_it_t *____tmp_mem____ = (multialloc_it_t*)(MEM);\
+      if(!IT_CHAIN.last_mem){\
+         (IT_CHAIN).it.next = ____tmp_mem____;\
+      }else{\
+         ((multialloc_it_t*)(IT_CHAIN.last_mem))->next = ____tmp_mem____;\
+      }\
+      ____tmp_mem____->next = 0;\
+      IT_CHAIN.last_mem = ____tmp_mem____;\
+   }while(0)
+
+#define BOOST_MULTIALLOC_IT_CHAIN_IT(IT_CHAIN) ((IT_CHAIN).it)
+
 
 //!This class implements several allocation functions shared by different algorithms
 //!(aligned allocation, multiple allocation...).
@@ -125,6 +286,7 @@
    static const std::size_t AllocatedCtrlUnits  = MemoryAlgorithm::AllocatedCtrlUnits;
    static const std::size_t BlockCtrlBytes      = MemoryAlgorithm::BlockCtrlBytes;
    static const std::size_t BlockCtrlUnits      = MemoryAlgorithm::BlockCtrlUnits;
+   static const std::size_t UsableByPreviousChunk   = MemoryAlgorithm::UsableByPreviousChunk;
 
    static void assert_alignment(const void *ptr)
    {  assert_alignment((std::size_t)ptr); }
@@ -165,10 +327,11 @@
    static void* allocate_aligned
       (MemoryAlgorithm *memory_algo, std::size_t nbytes, std::size_t alignment)
    {
+      
       //Ensure power of 2
       if ((alignment & (alignment - std::size_t(1u))) != 0){
          //Alignment is not power of two
-         BOOST_ASSERT((alignment & (alignment - std::size_t(1u))) != 0);
+         BOOST_ASSERT((alignment & (alignment - std::size_t(1u))) == 0);
          return 0;
       }
 
@@ -176,6 +339,9 @@
       if(alignment <= Alignment){
          return memory_algo->priv_allocate(allocate_new, nbytes, nbytes, real_size).first;
       }
+
+      if(nbytes > UsableByPreviousChunk)
+         nbytes -= UsableByPreviousChunk;
       
       //We can find a aligned portion if we allocate a chunk that has alignment
       //nbytes + alignment bytes or more.
@@ -191,7 +357,9 @@
       // | MBU | 
       //  -----------------------------------------------------
       std::size_t request = 
-         minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes);
+         minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes
+         //prevsize - UsableByPreviousChunk
+         );
 
       //Now allocate the buffer
       void *buffer = memory_algo->priv_allocate(allocate_new, request, request, real_size).first;
@@ -207,7 +375,8 @@
             max_value(ceil_units(nbytes) + AllocatedCtrlUnits, std::size_t(MinBlockUnits));
          //We can create a new block in the end of the segment
          if(old_size >= (first_min_units + MinBlockUnits)){
-            block_ctrl *second =  new((char*)first + Alignment*first_min_units) block_ctrl;
+            //block_ctrl *second =  new((char*)first + Alignment*first_min_units) block_ctrl;
+            block_ctrl *second =  (block_ctrl *)((char*)first + Alignment*first_min_units);
             first->m_size  = first_min_units;
             second->m_size = old_size - first->m_size;
             BOOST_ASSERT(second->m_size >= MinBlockUnits);
@@ -285,6 +454,7 @@
       ,const std::size_t max_size,   const std::size_t preferred_size
       ,std::size_t &received_size)
    {
+      (void)memory_algo;
       //Obtain the real block
       block_ctrl *block = memory_algo->priv_get_block(ptr);
       std::size_t old_block_units = block->m_size;
@@ -296,11 +466,11 @@
       assert_alignment(ptr);
 
       //Put this to a safe value
-      received_size = (old_block_units - AllocatedCtrlUnits)*Alignment;
+      received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
 
       //Now translate it to Alignment units
-      const std::size_t max_user_units       = floor_units(max_size);
-      const std::size_t preferred_user_units = ceil_units(preferred_size);
+      const std::size_t max_user_units       = floor_units(max_size - UsableByPreviousChunk);
+      const std::size_t preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);
 
       //Check if rounded max and preferred are possible correct
       if(max_user_units < preferred_user_units)
@@ -331,7 +501,7 @@
       }
 
       //Update new size
-      received_size = shrunk_user_units*Alignment;
+      received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
       return true;
    }
 
@@ -350,22 +520,23 @@
       }
 
       //Check if the old size was just the shrunk size (no splitting)
-      if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size))
+      if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
          return true;
 
       //Now we can just rewrite the size of the old buffer
-      block->m_size = received_size/Alignment + AllocatedCtrlUnits;
+      block->m_size = (received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits;
       BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
-      memory_algo->priv_mark_new_allocated_block(block);
 
       //We create the new block
-      block_ctrl *new_block = new(reinterpret_cast<block_ctrl*>
-                  (detail::char_ptr_cast(block) + block->m_size*Alignment)) block_ctrl;
-
+//      block_ctrl *new_block = new(reinterpret_cast<block_ctrl*>
+//                  (detail::char_ptr_cast(block) + block->m_size*Alignment)) block_ctrl;
+      block_ctrl *new_block = reinterpret_cast<block_ctrl*>
+                  (detail::char_ptr_cast(block) + block->m_size*Alignment);
       //Write control data to simulate this new block was previously allocated
       //and deallocate it
       new_block->m_size = old_block_units - block->m_size;
       BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
+      memory_algo->priv_mark_new_allocated_block(block);
       memory_algo->priv_mark_new_allocated_block(new_block);
       memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
       return true;
@@ -401,11 +572,11 @@
       multi_allocation_next_ptr first = 0, previous = 0;
       std::size_t low_idx = 0;
       while(low_idx < n_elements){
-         std::size_t total_bytes = total_request_units*Alignment - AllocatedCtrlBytes;
+         std::size_t total_bytes = total_request_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
          std::size_t min_allocation = (!sizeof_element)
             ?  elem_units
             :  memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
-         min_allocation = min_allocation*Alignment - AllocatedCtrlBytes;
+         min_allocation = min_allocation*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
 
          std::size_t received_size;
          std::pair<void *, bool> ret = memory_algo->priv_allocate
@@ -419,6 +590,7 @@
          char *block_address = (char*)block;
 
          std::size_t total_used_units = 0;
+//         block_ctrl *prev_block = 0;
          while(total_used_units < received_units){
             if(sizeof_element){
                elem_units = memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
@@ -428,7 +600,10 @@
                break;
             total_request_units -= elem_units;
             //This is the position where the new block must be created
-            block_ctrl *new_block = new(block_address)block_ctrl;
+//            if(prev_block)
+//               memory_algo->priv_mark_new_allocated_block(prev_block);
+            block_ctrl *new_block = (block_ctrl *)(block_address);
+//             block_ctrl *new_block = new(block_address)block_ctrl;
             assert_alignment(new_block);
 
             //The last block should take all the remaining space
@@ -446,7 +621,7 @@
                //split it obtaining a new free memory block do it.
                if((received_units - total_used_units) >= (elem_units + MemoryAlgorithm::BlockCtrlUnits)){
                   std::size_t shrunk_received;
-                  std::size_t shrunk_request = elem_units*Alignment - AllocatedCtrlBytes;
+                  std::size_t shrunk_request = elem_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
                   bool ret = shrink
                         (memory_algo
                         ,memory_algo->priv_get_user_buffer(new_block)
@@ -457,7 +632,7 @@
                   BOOST_ASSERT(ret);
                   //Some sanity checks
                   BOOST_ASSERT(shrunk_request == shrunk_received);
-                  BOOST_ASSERT(elem_units == (shrunk_request/Alignment + AllocatedCtrlUnits));
+                  BOOST_ASSERT(elem_units == ((shrunk_request-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits));
                   //"new_block->m_size" must have been reduced to elem_units by "shrink"
                   BOOST_ASSERT(new_block->m_size == elem_units);
                   //Now update the total received units with the reduction
@@ -483,6 +658,7 @@
             }
             previous = p;
             ++low_idx;
+            //prev_block = new_block;
          }
          //Sanity check
          BOOST_ASSERT(total_used_units == received_units);
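basic_multiallocation_chain introduced above is the building block of the version-2 multi-allocation interface: each raw buffer is reused to hold the link to the next buffer, so the chain itself needs no extra memory. A minimal standalone model of that intrusive linking (simplified, fixed node type, no VoidPointer abstraction):

#include <cstddef>

struct chain_node
{  chain_node *next;  };

//Each pushed buffer must be at least sizeof(chain_node) bytes; the link is
//stored inside the buffer itself, as the real chain does.
class simple_chain
{
   chain_node *first_;
   chain_node *last_;
   std::size_t num_;

   public:
   simple_chain() : first_(0), last_(0), num_(0) {}

   void push_back(void *mem)
   {
      chain_node *n = static_cast<chain_node*>(mem);
      n->next = 0;
      if(last_)   last_->next = n;
      else        first_ = n;
      last_ = n;
      ++num_;
   }

   void *pop_front()
   {
      if(!first_) return 0;
      chain_node *n = first_;
      first_ = n->next;
      if(!first_) last_ = 0;
      --num_;
      return n;
   }

   bool empty() const       {  return !num_;  }
   std::size_t size() const {  return num_;   }
};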
Modified: trunk/boost/interprocess/mem_algo/detail/simple_seq_fit_impl.hpp
==============================================================================
--- trunk/boost/interprocess/mem_algo/detail/simple_seq_fit_impl.hpp	(original)
+++ trunk/boost/interprocess/mem_algo/detail/simple_seq_fit_impl.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -64,6 +64,8 @@
 
    typedef detail::basic_multiallocation_iterator
       <void_pointer> multiallocation_iterator;
+   typedef detail::basic_multiallocation_chain
+      <void_pointer> multiallocation_chain;
 
    private:
    class block_ctrl;
@@ -137,6 +139,9 @@
    //!Multiple element allocation, different size
    multiallocation_iterator allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element);
 
+   //!Multiple element deallocation
+   void deallocate_many(multiallocation_iterator it);
+
    /// @endcond
 
    //!Deallocates previously allocated bytes
@@ -170,8 +175,13 @@
                            std::size_t preferred_size,std::size_t &received_size, 
                            T *reuse_ptr = 0);
 
+   std::pair<void *, bool>
+      raw_allocation_command  (allocation_type command,   std::size_t limit_size,
+                               std::size_t preferred_size,std::size_t &received_size, 
+                               void *reuse_ptr = 0, std::size_t sizeof_object = 1);
+
    //!Returns the size of the buffer previously allocated pointed by ptr
-   std::size_t size(void *ptr) const;
+   std::size_t size(const void *ptr) const;
 
    //!Allocates aligned bytes, returns 0 if there is not more memory.
    //!Alignment must be power of 2
@@ -247,13 +257,16 @@
 
    void priv_mark_new_allocated_block(block_ctrl *block);
 
+   public:
    static const std::size_t Alignment      = detail::alignment_of<detail::max_align>::value;
+   private:
    static const std::size_t BlockCtrlBytes = detail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
    static const std::size_t BlockCtrlUnits = BlockCtrlBytes/Alignment;
    static const std::size_t MinBlockUnits  = BlockCtrlUnits;
    static const std::size_t MinBlockSize   = MinBlockUnits*Alignment;
    static const std::size_t AllocatedCtrlBytes = BlockCtrlBytes;
    static const std::size_t AllocatedCtrlUnits = BlockCtrlUnits;
+   static const std::size_t UsableByPreviousChunk = 0;
 
    public:
    static const std::size_t PayloadPerAllocation = BlockCtrlBytes;
@@ -549,19 +562,34 @@
                         std::size_t preferred_size,std::size_t &received_size, 
                         T *reuse_ptr)
 {
-   if(command & try_shrink_in_place){
-      bool success = 
-         algo_impl_t::try_shrink(this, reuse_ptr, limit_size, preferred_size, received_size);
-      return std::pair<T *, bool> ((success ? reuse_ptr : 0), true);
-   }
    std::pair<void*, bool> ret = priv_allocation_command
       (command, limit_size, preferred_size, received_size, reuse_ptr, sizeof(T));
+
    BOOST_ASSERT(0 == ((std::size_t)ret.first % detail::alignment_of<T>::value));
    return std::pair<T *, bool>(static_cast<T*>(ret.first), ret.second);
 }
 
 template<class MutexFamily, class VoidPointer>
 inline std::pair<void*, bool> simple_seq_fit_impl<MutexFamily, VoidPointer>::
+   raw_allocation_command  (allocation_type command,   std::size_t limit_objects,
+                        std::size_t preferred_objects,std::size_t &received_objects, 
+                        void *reuse_ptr, std::size_t sizeof_object)
+{
+   if(!sizeof_object)
+      return std::pair<void *, bool>(0, 0);
+   if(command & try_shrink_in_place){
+      bool success = algo_impl_t::try_shrink
+         ( this, reuse_ptr, limit_objects*sizeof_object
+         , preferred_objects*sizeof_object, received_objects);
+      received_objects /= sizeof_object;
+      return std::pair<void *, bool> ((success ? reuse_ptr : 0), true);
+   }
+   return priv_allocation_command
+      (command, limit_objects, preferred_objects, received_objects, reuse_ptr, sizeof_object);
+}
+
+template<class MutexFamily, class VoidPointer>
+inline std::pair<void*, bool> simple_seq_fit_impl<MutexFamily, VoidPointer>::
    priv_allocation_command (allocation_type command,   std::size_t limit_size,
                        std::size_t preferred_size, std::size_t &received_size, 
                        void *reuse_ptr, std::size_t sizeof_object)
@@ -589,13 +617,13 @@
 
 template<class MutexFamily, class VoidPointer>
 inline std::size_t simple_seq_fit_impl<MutexFamily, VoidPointer>::
-   size(void *ptr) const
+   size(const void *ptr) const
 {
    //We need no synchronization since this block is not going
    //to be modified
    //Obtain the real size of the block
    block_ctrl *block = reinterpret_cast<block_ctrl*>
-                        (priv_get_block(detail::char_ptr_cast(ptr)));
+                        (priv_get_block(detail::char_ptr_cast(const_cast<void*>(ptr))));
    return block->get_user_bytes();
 }
 
@@ -690,6 +718,20 @@
 }
 
 template<class MutexFamily, class VoidPointer>
+inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
+   deallocate_many(typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_iterator it)
+{
+   //-----------------------
+   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
+   //-----------------------
+   while(it){
+      void *addr = &*it;
+      ++it;
+      this->priv_deallocate(addr);
+   }
+}
+
+template<class MutexFamily, class VoidPointer>
 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_iterator
    simple_seq_fit_impl<MutexFamily, VoidPointer>::
    allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element)
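The rbtree_best_fit changes below rework the per-block size header: the end-marker bit disappears, the allocation state of the previous block is packed next to m_size, and the trailing std::size_t of each block becomes usable by the previous chunk (UsableByPreviousChunk), which trims PayloadPerAllocation. A small standalone sketch of that bit packing (layout only, not the real SizeHolder):

#include <climits>
#include <cstddef>
#include <cassert>

//Size in Alignment units plus two status bits share a single std::size_t word.
struct packed_size_holder
{
   std::size_t m_prev_size;
   std::size_t m_size           : sizeof(std::size_t)*CHAR_BIT - 2;
   std::size_t m_prev_allocated : 1;
   std::size_t m_allocated      : 1;
};

int main()
{
   packed_size_holder h;
   h.m_prev_size      = 0;
   h.m_size           = 42;   //block size in Alignment units
   h.m_allocated      = 1;    //this block is in use
   h.m_prev_allocated = 0;    //previous block is free
   assert(h.m_size == 42 && h.m_allocated == 1 && h.m_prev_allocated == 0);
   return 0;
}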
Modified: trunk/boost/interprocess/mem_algo/rbtree_best_fit.hpp
==============================================================================
--- trunk/boost/interprocess/mem_algo/rbtree_best_fit.hpp	(original)
+++ trunk/boost/interprocess/mem_algo/rbtree_best_fit.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -60,7 +60,7 @@
 
 //!This class implements an algorithm that stores the free nodes in a red-black tree
 //!to have logarithmic search/insert times.
-template<class MutexFamily, class VoidPointer>
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
 class rbtree_best_fit
 {
    /// @cond
@@ -77,6 +77,8 @@
    typedef VoidPointer        void_pointer;
    typedef detail::basic_multiallocation_iterator
       <void_pointer> multiallocation_iterator;
+   typedef detail::basic_multiallocation_chain
+      <void_pointer> multiallocation_chain;
 
    /// @cond
 
@@ -106,9 +108,9 @@
    {
       //!This block's memory size (including block_ctrl 
       //!header) in Alignment units
-      std::size_t m_prev_size :  sizeof(std::size_t)*CHAR_BIT - 1;
-      std::size_t m_end       :  1;
-      std::size_t m_size      :  sizeof(std::size_t)*CHAR_BIT - 1;
+      std::size_t m_prev_size :  sizeof(std::size_t)*CHAR_BIT;
+      std::size_t m_size      :  sizeof(std::size_t)*CHAR_BIT - 2;
+      std::size_t m_prev_allocated :  1;
       std::size_t m_allocated :  1;
    };
 
@@ -117,7 +119,7 @@
       :  public SizeHolder, public TreeHook
    {
       block_ctrl()
-      {  this->m_end = 0;  this->m_size = 0; this->m_allocated = 0;  }
+      {  this->m_size = 0; this->m_allocated = 0; this->m_prev_allocated = 0;  }
 
       friend bool operator<(const block_ctrl &a, const block_ctrl &b)
       {  return a.m_size < b.m_size;  }
@@ -195,6 +197,9 @@
    //!Multiple element allocation, different size
    multiallocation_iterator allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element);
 
+   //!Multiple element allocation, different size
+   void deallocate_many(multiallocation_iterator it);
+
    /// @endcond
 
    //!Deallocates previously allocated bytes
@@ -230,6 +235,11 @@
                            std::size_t preferred_size,std::size_t &received_size, 
                            T *reuse_ptr = 0);
 
+   std::pair<void *, bool>
+     raw_allocation_command  (allocation_type command,   std::size_t limit_object,
+                              std::size_t preferred_object,std::size_t &received_object, 
+                              void *reuse_ptr = 0, std::size_t sizeof_object = 1);
+
    //!Returns the size of the buffer previously allocated pointed by ptr
    std::size_t size(const void *ptr) const;
 
@@ -279,17 +289,14 @@
                                ,bool only_preferred_backwards
                                ,std::size_t backwards_multiple);
 
-   //!Set the size in the tail of the block
-   void priv_tail_size(block_ctrl *ptr, std::size_t size);
+   //!Get a pointer to the previous block (previous block must be free)
+   block_ctrl * priv_prev_block(block_ctrl *ptr);
 
-   //!Real private aligned allocation function
-   //void* priv_allocate_aligned     (std::size_t nbytes, std::size_t alignment);
+   //!Returns true if the previous block is allocated
+   bool priv_is_prev_allocated(block_ctrl *ptr);
 
-   //!Get the size in the tail of the block
-   std::size_t priv_tail_size(block_ctrl *ptr);
-
-   //!Get the size in the tail of the previous block
-   block_ctrl * priv_prev_block(block_ctrl *ptr);
+   //!Get a pointer to the "end" block from the first block of the segment
+   block_ctrl * priv_end_block(block_ctrl *first_segment_block);
 
    //!Get the size in the tail of the previous block
    block_ctrl * priv_next_block(block_ctrl *ptr);
@@ -316,44 +323,50 @@
 
    void priv_mark_new_allocated_block(block_ctrl *block);
 
-   static const std::size_t Alignment = detail::alignment_of<detail::max_align>::value;
+   public:
+   
+   static const std::size_t Alignment = !MemAlignment
+      ? detail::alignment_of<detail::max_align>::value
+      : MemAlignment
+      ;
+
+   private:
-   //Due to embedded bits in size, Alignment must be at least 2
+   //Due to embedded bits in size, Alignment must be at least 4
-   BOOST_STATIC_ASSERT((Alignment >= 2));
+   BOOST_STATIC_ASSERT((Alignment >= 4));
    //Due to rbtree size optimizations, Alignment must have at least pointer alignment
    BOOST_STATIC_ASSERT((Alignment >= detail::alignment_of<void_pointer>::value));
    static const std::size_t AlignmentMask = (Alignment - 1);
    static const std::size_t BlockCtrlBytes = detail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
    static const std::size_t BlockCtrlUnits = BlockCtrlBytes/Alignment;
-   static const std::size_t AllocatedCtrlBytes = detail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
-   static const std::size_t AllocatedCtrlUnits   = AllocatedCtrlBytes/Alignment;
+   static const std::size_t AllocatedCtrlBytes  = detail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
+   static const std::size_t AllocatedCtrlUnits  = AllocatedCtrlBytes/Alignment;
    static const std::size_t EndCtrlBlockBytes   = detail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
-   static const std::size_t EndCtrlBlockUnits    = EndCtrlBlockBytes/Alignment;
-   static const std::size_t MinBlockUnits        = BlockCtrlUnits;
+   static const std::size_t EndCtrlBlockUnits   = EndCtrlBlockBytes/Alignment;
+   static const std::size_t MinBlockUnits       = BlockCtrlUnits;
+   static const std::size_t UsableByPreviousChunk   = sizeof(std::size_t);
 
    //Make sure the maximum alignment is power of two
    BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - std::size_t(1u)))));
    /// @endcond
    public:
-   static const std::size_t PayloadPerAllocation = AllocatedCtrlBytes;
+   static const std::size_t PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
 };
 
-template<class MutexFamily, class VoidPointer>
-inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer>
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
    ::priv_first_block_offset(const void *this_ptr, std::size_t extra_hdr_bytes)
 {
-   //First align "this" pointer
-   std::size_t uint_this         = (std::size_t)this_ptr;
-   std::size_t uint_aligned_this = uint_this/Alignment*Alignment;
-   std::size_t this_disalignment = (uint_this - uint_aligned_this);
-   std::size_t block1_off = 
-      detail::get_rounded_size(sizeof(rbtree_best_fit) + extra_hdr_bytes + this_disalignment, Alignment)
-      - this_disalignment;
-   algo_impl_t::assert_alignment(this_disalignment + block1_off);
+   std::size_t uint_this      = (std::size_t)this_ptr;
+   std::size_t main_hdr_end   = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
+   std::size_t aligned_main_hdr_end = detail::get_rounded_size(main_hdr_end, Alignment);
+   std::size_t block1_off = aligned_main_hdr_end -  uint_this;
+   algo_impl_t::assert_alignment(aligned_main_hdr_end);
+   algo_impl_t::assert_alignment(uint_this + block1_off);
    return block1_off;
 }
 
-template<class MutexFamily, class VoidPointer>
-inline rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    rbtree_best_fit(std::size_t size, std::size_t extra_hdr_bytes)
 {
    //Initialize the header
@@ -368,26 +381,25 @@
    priv_add_segment(detail::char_ptr_cast(this) + block1_off, size - block1_off);
 }
 
-template<class MutexFamily, class VoidPointer>
-inline rbtree_best_fit<MutexFamily, VoidPointer>::~rbtree_best_fit()
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
 {
    //There is a memory leak!
 //   assert(m_header.m_allocated == 0);
 //   assert(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
 }
 
-template<class MutexFamily, class VoidPointer>
-void rbtree_best_fit<MutexFamily, VoidPointer>::grow(std::size_t extra_size)
-{  
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(std::size_t extra_size)
+{
    //Get the address of the first block
    std::size_t block1_off =
       priv_first_block_offset(this, m_header.m_extra_hdr_bytes);
 
    block_ctrl *first_block = reinterpret_cast<block_ctrl *>
                                  (detail::char_ptr_cast(this) + block1_off);
-   block_ctrl *old_end_block   = priv_prev_block(first_block);
+   block_ctrl *old_end_block   = priv_end_block(first_block);
    assert(priv_is_allocated_block(old_end_block));
-   assert(old_end_block->m_end);
    std::size_t old_border_offset = (detail::char_ptr_cast(old_end_block) - 
                                     detail::char_ptr_cast(this)) + EndCtrlBlockBytes;
 
@@ -406,18 +418,16 @@
       (detail::char_ptr_cast(old_end_block) + align_offset*Alignment);
    new_end_block->m_size      = (detail::char_ptr_cast(first_block) - 
                                  detail::char_ptr_cast(new_end_block))/Alignment;
+   first_block->m_prev_size = new_end_block->m_size;
    assert(first_block == priv_next_block(new_end_block));
-   new_end_block->m_end       = 1;
    priv_mark_new_allocated_block(new_end_block);
-
-   assert(new_end_block == priv_prev_block(first_block));
+   
+   assert(new_end_block == priv_end_block(first_block));
 
    //The old end block is the new block
-   std::size_t old_end_prev = old_end_block->m_prev_size;
-   block_ctrl *new_block = new(old_end_block)block_ctrl;
+   block_ctrl *new_block = old_end_block;
    new_block->m_size = (detail::char_ptr_cast(new_end_block) - 
                         detail::char_ptr_cast(new_block))/Alignment;
-   new_block->m_prev_size = old_end_prev;
    assert(new_block->m_size >= BlockCtrlUnits);
    priv_mark_new_allocated_block(new_block);
    assert(priv_next_block(new_block) == new_end_block);
@@ -428,8 +438,8 @@
    this->priv_deallocate(priv_get_user_buffer(new_block));
 }
 
-template<class MutexFamily, class VoidPointer>
-void rbtree_best_fit<MutexFamily, VoidPointer>::shrink_to_fit()
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
 {  
    //Get the address of the first block
    std::size_t block1_off =
@@ -439,32 +449,36 @@
                                  (detail::char_ptr_cast(this) + block1_off);
    algo_impl_t::assert_alignment(first_block);
 
-   block_ctrl *old_end_block   = priv_prev_block(first_block);
+   block_ctrl *old_end_block = priv_end_block(first_block);
    algo_impl_t::assert_alignment(old_end_block);
    assert(priv_is_allocated_block(old_end_block));
-   assert(old_end_block->m_end);
 
-   block_ctrl *last_block = priv_prev_block(old_end_block);
    algo_impl_t::assert_alignment(old_end_block);
 
-   std::size_t old_end_block_size      = old_end_block->m_size;
+   std::size_t old_end_block_size = old_end_block->m_size;
 
-   void *unique_block = 0;
-   if(last_block == first_block){
+   void *unique_buffer = 0;
+   block_ctrl *last_block;
+   if(priv_next_block(first_block) == old_end_block){
       std::size_t ignore;
-      unique_block = priv_allocate(allocate_new, 0, 0, ignore).first;
-      if(!unique_block)
+      unique_buffer = priv_allocate(allocate_new, 0, 0, ignore).first;
+      if(!unique_buffer)
          return;
+      algo_impl_t::assert_alignment(unique_buffer);
+      block_ctrl *unique_block = priv_get_block(unique_buffer);
+      assert(priv_is_allocated_block(unique_block));
       algo_impl_t::assert_alignment(unique_block);
-      last_block = priv_prev_block(old_end_block);
+      last_block = priv_next_block(unique_block);
+      assert(!priv_is_allocated_block(last_block));
       algo_impl_t::assert_alignment(last_block);
    }
+   else{
+      if(priv_is_prev_allocated(old_end_block))
+         return;
+      last_block = priv_prev_block(old_end_block);
+   }
 
-   //The last block must be free to be able to shrink
-   if(priv_is_allocated_block(last_block))
-      return;
-
-   std::size_t last_block_size      = last_block->m_size;
+   std::size_t last_block_size = last_block->m_size;
 
    //Erase block from the free tree, since we will erase it
    m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));
@@ -474,20 +488,23 @@
    
    block_ctrl *new_end_block = last_block;
    algo_impl_t::assert_alignment(new_end_block);
-   priv_mark_as_allocated_block(new_end_block);
-   new_end_block->m_end  = 1;
    new_end_block->m_size = old_end_block_size + last_block_size;
-   priv_tail_size(new_end_block, new_end_block->m_size);
-   assert(priv_prev_block(first_block) == new_end_block);
+   priv_mark_as_allocated_block(new_end_block);
+
+   //Although the first block might be allocated, we'll
+   //store the offset to the end block in its m_prev_size field,
+   //since that field can't be overwritten by a previous block
+   first_block->m_prev_size = new_end_block->m_size;
+   assert(priv_end_block(first_block) == new_end_block);
 
    //Update managed buffer's size
    m_header.m_size = shrunk_border_offset;
-   if(unique_block)
-      priv_deallocate(unique_block);
+   if(unique_buffer)
+      priv_deallocate(unique_buffer);
 }
 
-template<class MutexFamily, class VoidPointer>
-void rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    priv_add_segment(void *addr, std::size_t size)
 {  
    //Check alignment
@@ -506,17 +523,15 @@
          (detail::char_ptr_cast(addr) + first_big_block->m_size*Alignment))SizeHolder);
 
    //This will overwrite the prev part of the "end" node
-   priv_tail_size(first_big_block, first_big_block->m_size);
    priv_mark_as_free_block (first_big_block);
    first_big_block->m_prev_size = end_block->m_size =
       (detail::char_ptr_cast(first_big_block) - detail::char_ptr_cast(end_block))/Alignment;
-   end_block->m_end        = 1;
-   end_block->m_allocated  = 1;
+   priv_mark_as_allocated_block(end_block);
 
    assert(priv_next_block(first_big_block) == end_block);
-   assert(priv_prev_block(end_block) == first_big_block);
    assert(priv_next_block(end_block) == first_big_block);
-   assert(priv_prev_block(first_big_block) == end_block);
+   assert(priv_end_block(first_big_block) == end_block);
+   assert(priv_prev_block(end_block) == first_big_block);
 
    //Some check to validate the algorithm, since it makes some assumptions
    //to optimize the space wasted in bookkeeping:
@@ -530,27 +545,24 @@
    m_header.m_imultiset.insert(*first_big_block);
 }
 
-template<class MutexFamily, class VoidPointer>
-inline void rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    priv_mark_new_allocated_block(block_ctrl *new_block)
-{
-   priv_tail_size(new_block, new_block->m_size);
-   priv_mark_as_allocated_block(new_block);
-}
+{  priv_mark_as_allocated_block(new_block);  }
 
-template<class MutexFamily, class VoidPointer>
-inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer>::get_size()  const
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size()  const
 {  return m_header.m_size;  }
 
-template<class MutexFamily, class VoidPointer>
-inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer>::get_free_memory()  const
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory()  const
 {
    return m_header.m_size - m_header.m_allocated - 
       priv_first_block_offset(this, m_header.m_extra_hdr_bytes);
 }
 
-template<class MutexFamily, class VoidPointer>
-inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    get_min_size (std::size_t extra_hdr_bytes)
 {
    return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
@@ -558,8 +570,8 @@
            MinBlockUnits + EndCtrlBlockUnits)*Alignment;
 }
 
-template<class MutexFamily, class VoidPointer>
-inline bool rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
     all_memory_deallocated()
 {
    //-----------------------
@@ -575,8 +587,8 @@
          (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
 }
 
-template<class MutexFamily, class VoidPointer>
-bool rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
     check_sanity()
 {
    //-----------------------
@@ -609,8 +621,8 @@
    return true;
 }
 
-template<class MutexFamily, class VoidPointer>
-inline void* rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    allocate(std::size_t nbytes)
 {  
    //-----------------------
@@ -621,8 +633,8 @@
    return ret;
 }
 
-template<class MutexFamily, class VoidPointer>
-inline void* rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    allocate_aligned(std::size_t nbytes, std::size_t alignment)
 { 
    //-----------------------
@@ -631,26 +643,42 @@
    return algo_impl_t::allocate_aligned(this, nbytes, alignment); 
 }
 
-template<class MutexFamily, class VoidPointer>
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
 template<class T>
-inline std::pair<T*, bool> rbtree_best_fit<MutexFamily, VoidPointer>::
+inline std::pair<T*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    allocation_command  (allocation_type command,   std::size_t limit_size,
                         std::size_t preferred_size,std::size_t &received_size, 
                         T *reuse_ptr)
 {
-   if(command & try_shrink_in_place){
-      bool success = 
-         algo_impl_t::try_shrink(this, reuse_ptr, limit_size, preferred_size, received_size);
-      return std::pair<T *, bool> ((success ? reuse_ptr : 0), true);
-   }
    std::pair<void*, bool> ret = priv_allocation_command
       (command, limit_size, preferred_size, received_size, reuse_ptr, sizeof(T));
+
    BOOST_ASSERT(0 == ((std::size_t)ret.first % detail::alignment_of<T>::value));
    return std::pair<T *, bool>(static_cast<T*>(ret.first), ret.second);
 }
 
-template<class MutexFamily, class VoidPointer>
-inline std::pair<void*, bool> rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline std::pair<void*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
+   raw_allocation_command  (allocation_type command,   std::size_t limit_objects,
+                        std::size_t preferred_objects,std::size_t &received_objects, 
+                        void *reuse_ptr, std::size_t sizeof_object)
+{
+   if(!sizeof_object)
+      return std::pair<void *, bool>(0, 0);
+   if(command & try_shrink_in_place){
+      bool success = algo_impl_t::try_shrink
+         ( this, reuse_ptr, limit_objects*sizeof_object
+         , preferred_objects*sizeof_object, received_objects);
+      received_objects /= sizeof_object;
+      return std::pair<void *, bool> ((success ? reuse_ptr : 0), true);
+   }
+   return priv_allocation_command
+      (command, limit_objects, preferred_objects, received_objects, reuse_ptr, sizeof_object);
+}
+
+
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline std::pair<void*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    priv_allocation_command (allocation_type command,   std::size_t limit_size,
                        std::size_t preferred_size,std::size_t &received_size, 
                        void *reuse_ptr, std::size_t sizeof_object)
@@ -673,18 +701,18 @@
    return ret;
 }
 
-template<class MutexFamily, class VoidPointer>
-inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    size(const void *ptr) const
 {
    //We need no synchronization since this block's size is not going
    //to be modified by anyone else
    //Obtain the real size of the block
-   return (priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment;
+   return (priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
 }
 
-template<class MutexFamily, class VoidPointer>
-inline void rbtree_best_fit<MutexFamily, VoidPointer>::zero_free_memory()
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
 {
    //-----------------------
    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
@@ -700,8 +728,8 @@
    }
 }
 
-template<class MutexFamily, class VoidPointer>
-void* rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    priv_expand_both_sides(allocation_type command
                          ,std::size_t min_size
                          ,std::size_t preferred_size
@@ -717,7 +745,7 @@
    }
    else{
       received_size = this->size(reuse_ptr);
-      if(received_size >= preferred_size)
+      if(received_size >= preferred_size || received_size >= min_size)
          return reuse_ptr;
    }
 
@@ -731,18 +759,21 @@
       block_ctrl *reuse = priv_get_block(reuse_ptr);
 
       //Sanity check 
-      assert(reuse->m_size == priv_tail_size(reuse));
+      //assert(reuse->m_size == priv_tail_size(reuse));
       algo_impl_t::assert_alignment(reuse);
 
       block_ctrl *prev_block;
 
       //If the previous block is not free, there is nothing to do
-      if(priv_is_allocated_block(prev_block = priv_prev_block(reuse))){
+      if(priv_is_prev_allocated(reuse)){
          return 0;
       }
 
+      prev_block = priv_prev_block(reuse);
+      assert(!priv_is_allocated_block(prev_block));
+
       //Some sanity checks
-      assert(prev_block->m_size == priv_tail_size(prev_block));
+      assert(prev_block->m_size == reuse->m_prev_size);
       algo_impl_t::assert_alignment(prev_block);
 
       //Let's calculate the number of extra bytes of data before the current
@@ -769,32 +800,41 @@
       if(std::size_t(prev_block->m_size*Alignment) >= needs_backwards_aligned){
          //Now take all next space. This will succeed
          if(command & expand_fwd){
-            if(!priv_expand(reuse_ptr, received_size, received_size, received_size)){
+            std::size_t received_size2;
+            if(!priv_expand(reuse_ptr, received_size, received_size, received_size2)){
                assert(0);
             }
+            assert(received_size == received_size2);
          }
          //We need a minimum size to split the previous one
          if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
             block_ctrl *new_block = reinterpret_cast<block_ctrl *>
                (detail::char_ptr_cast(reuse) - needs_backwards_aligned);
 
-            //Erase old previous block, since we will change it
-            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));
-
             //Free old previous buffer
             new_block->m_size = 
-               AllocatedCtrlUnits + (needs_backwards_aligned + received_size)/Alignment;
+               AllocatedCtrlUnits + (needs_backwards_aligned + (received_size - UsableByPreviousChunk))/Alignment;
             assert(new_block->m_size >= BlockCtrlUnits);
             priv_mark_new_allocated_block(new_block);
 
             prev_block->m_size = (detail::char_ptr_cast(new_block) - 
                                   detail::char_ptr_cast(prev_block))/Alignment;
             assert(prev_block->m_size >= BlockCtrlUnits);
-            priv_tail_size(prev_block, prev_block->m_size);
             priv_mark_as_free_block(prev_block);
 
-            //Insert the remaining previous block in the free tree
-            m_header.m_imultiset.insert( m_header.m_imultiset.begin(), *prev_block);
+            //Update the old previous block in the free chunks tree
+            //If the new size fulfills tree invariants do nothing,
+            //otherwise erase() + insert()
+            {
+               imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
+               imultiset_iterator was_smaller_it(prev_block_it);
+               if(prev_block_it != m_header.m_imultiset.begin() && 
+                  (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
+                  m_header.m_imultiset.erase(prev_block_it);
+                  m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
+               }
+            }
+
             received_size = needs_backwards_aligned + received_size;
             m_header.m_allocated += needs_backwards_aligned;
          
@@ -812,14 +852,15 @@
          //Check if there is no place to create a new block and
          //the whole new block is multiple of the backwards expansion multiple
          else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
-                 0 == (prev_block->m_size % lcm)) {
+                 0 == ((prev_block->m_size*Alignment) % lcm)) {
             //Erase old previous block, since we will change it
             m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));
 
             //Just merge the whole previous block
-            const std::size_t needs_backwards_aligned = prev_block->m_size*Alignment;
-            const std::size_t needs_backwards = detail::get_truncated_size(needs_backwards_aligned, backwards_multiple);
-            received_size = received_size/backwards_multiple*backwards_multiple + needs_backwards;
+            needs_backwards = detail::get_truncated_size
+               (prev_block->m_size*Alignment, backwards_multiple);
+            //received_size = received_size/backwards_multiple*backwards_multiple + needs_backwards;
+            received_size = received_size + needs_backwards;
 
             m_header.m_allocated += prev_block->m_size*Alignment;
             //Now update sizes
@@ -843,9 +884,9 @@
    return 0;
 }
 
-template<class MutexFamily, class VoidPointer>
-inline typename rbtree_best_fit<MutexFamily, VoidPointer>::multiallocation_iterator
-   rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_iterator
+   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    allocate_many(std::size_t elem_bytes, std::size_t num_elements)
 {
    //-----------------------
@@ -854,9 +895,23 @@
    return algo_impl_t::allocate_many(this, elem_bytes, num_elements);
 }
 
-template<class MutexFamily, class VoidPointer>
-inline typename rbtree_best_fit<MutexFamily, VoidPointer>::multiallocation_iterator
-   rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
+   deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_iterator it)
+{
+   //-----------------------
+   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
+   //-----------------------
+   while(it){
+      void *addr = &*it;
+      ++it;
+      this->priv_deallocate(addr);
+   }
+}
+
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_iterator
+   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element)
 {
    //-----------------------
@@ -865,8 +920,8 @@
    return algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element);
 }
 
-template<class MutexFamily, class VoidPointer>
-std::pair<void *, bool> rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+std::pair<void *, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    priv_allocate(allocation_type command
                 ,std::size_t limit_size
                 ,std::size_t preferred_size
@@ -929,32 +984,34 @@
    return return_type(0, false);
 }
 
-template<class MutexFamily, class VoidPointer>
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
 inline
-typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl *
-   rbtree_best_fit<MutexFamily, VoidPointer>::priv_get_block(const void *ptr)
+typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
+   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
 {
    return reinterpret_cast<block_ctrl*>(detail::char_ptr_cast(ptr) - AllocatedCtrlBytes);
 }
 
-template<class MutexFamily, class VoidPointer>
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
 inline
-void *rbtree_best_fit<MutexFamily, VoidPointer>::
-      priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl *block)
+void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
+      priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
 {  return detail::char_ptr_cast(block) + AllocatedCtrlBytes;   }
 
-template<class MutexFamily, class VoidPointer>
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
 inline
-std::size_t rbtree_best_fit<MutexFamily, VoidPointer>::
+std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    priv_get_total_units(std::size_t userbytes)
 {
-   std::size_t units = detail::get_rounded_size(userbytes, Alignment)/Alignment + AllocatedCtrlUnits;
+   if(userbytes < UsableByPreviousChunk)
+      userbytes = UsableByPreviousChunk;
+   std::size_t units = detail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
    if(units < BlockCtrlUnits) units = BlockCtrlUnits;
    return units;
 }
 
-template<class MutexFamily, class VoidPointer>
-bool rbtree_best_fit<MutexFamily, VoidPointer>::
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    priv_expand (void *ptr
                ,const std::size_t min_size
                ,const std::size_t preferred_size
@@ -966,16 +1023,16 @@
 
    //The block must be marked as allocated and the sizes must be equal
    assert(priv_is_allocated_block(block));
-   assert(old_block_units == priv_tail_size(block));
+   //assert(old_block_units == priv_tail_size(block));
    
    //Put this to a safe value
-   received_size = (old_block_units - AllocatedCtrlUnits)*Alignment;
-   if(received_size > preferred_size)
+   received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
+   if(received_size >= preferred_size || received_size >= min_size)
       return true;
 
    //Now translate it to Alignment units
-   const std::size_t min_user_units = algo_impl_t::ceil_units(min_size);
-   const std::size_t preferred_user_units = algo_impl_t::ceil_units(preferred_size);
+   const std::size_t min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
+   const std::size_t preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);
 
    //Some parameter checks
    assert(min_user_units <= preferred_user_units);
@@ -994,7 +1051,7 @@
    const std::size_t merged_user_units = merged_units - AllocatedCtrlUnits;
 
    if(merged_user_units < min_user_units){
-      received_size = merged_user_units*Alignment;
+      received_size = merged_units*Alignment - UsableByPreviousChunk;
       return false;
    }
 
@@ -1007,30 +1064,44 @@
 
    //Check if we can split the next one in two parts
    if((merged_units - intended_units) >=  BlockCtrlUnits){
-      //Now we have to update the data in the tree
-      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
-
       //This block is bigger than needed, split it in 
       //two blocks, the first one will be merged and
       //the second's size will be the remaining space
-      assert(next_block->m_size == priv_tail_size(next_block));
+      assert(next_block->m_size == priv_next_block(next_block)->m_prev_size);
+      const std::size_t rem_units = merged_units - intended_units;
 
+      //Check if we need to update the old next block in the free chunks tree
+      //If the new size fulfills tree invariants, we just need to replace the node
+      //(the block start has been displaced), otherwise erase() + insert().
+      //
+      //This fixup must be done in two parts, because the new next chunk might
+      //overwrite the tree hook of the old next chunk. So we first erase the
+      //old one if needed and insert the new one after creating the new next chunk
+      imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
+      const bool size_invariants_broken = 
+            (next_block->m_size - rem_units ) < BlockCtrlUnits ||
+            (old_next_block_it != m_header.m_imultiset.begin() && 
+            (--imultiset_iterator(old_next_block_it))->m_size > rem_units);
+      if(size_invariants_broken){
+         m_header.m_imultiset.erase(old_next_block_it);
+      }
       //This is the remaining block
-      block_ctrl *new_block = new(reinterpret_cast<block_ctrl*>
+      block_ctrl *rem_block = new(reinterpret_cast<block_ctrl*>
                      (detail::char_ptr_cast(block) + intended_units*Alignment))block_ctrl;
-      new_block->m_size  = merged_units - intended_units;
-      algo_impl_t::assert_alignment(new_block);
-      assert(new_block->m_size >= BlockCtrlUnits);
-      priv_tail_size(new_block, new_block->m_size);
-      priv_mark_as_free_block(new_block);
-
-      //Insert the new block in the container
-      m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *new_block);
+      rem_block->m_size  = rem_units;
+      algo_impl_t::assert_alignment(rem_block);
+      assert(rem_block->m_size >= BlockCtrlUnits);
+      priv_mark_as_free_block(rem_block);
+
+      //Now the second part of the fixup
+      if(size_invariants_broken)
+         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
+      else
+         m_header.m_imultiset.replace_node(old_next_block_it, *rem_block);
 
       //Write the new length
       block->m_size = intended_user_units + AllocatedCtrlUnits;
       assert(block->m_size >= BlockCtrlUnits);
-      priv_tail_size(block, block->m_size);
       m_header.m_allocated += (intended_units - old_block_units)*Alignment;
    }
    //There is no free space to create a new node: just merge both blocks
@@ -1041,61 +1112,95 @@
       //Write the new length
       block->m_size = merged_units;
       assert(block->m_size >= BlockCtrlUnits);
-      priv_tail_size(block, merged_units);
       m_header.m_allocated += (merged_units - old_block_units)*Alignment;
    }
-
-   received_size = (block->m_size - AllocatedCtrlUnits)*Alignment;
+   priv_mark_as_allocated_block(block);
+   received_size = (block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
    return true;
 }
 
-template<class MutexFamily, class VoidPointer> inline
-void rbtree_best_fit<MutexFamily, VoidPointer>::priv_tail_size
-   (typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl *ptr, std::size_t size)
-{  priv_next_block(ptr)->m_prev_size = size;    }
-
-template<class MutexFamily, class VoidPointer> inline
-std::size_t rbtree_best_fit<MutexFamily, VoidPointer>::priv_tail_size
-   (typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl *ptr)
-{  return priv_next_block(ptr)->m_prev_size; }
-
-template<class MutexFamily, class VoidPointer> inline
-typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl *
-   rbtree_best_fit<MutexFamily, VoidPointer>::priv_prev_block
-      (typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl *ptr)
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
+typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
+   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
+      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
 {
+   assert(!ptr->m_prev_allocated);
    return reinterpret_cast<block_ctrl *>
       (detail::char_ptr_cast(ptr) - ptr->m_prev_size*Alignment);
 }
 
-template<class MutexFamily, class VoidPointer> inline
-typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl *
-   rbtree_best_fit<MutexFamily, VoidPointer>::priv_next_block
-      (typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl *ptr)
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
+bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
+      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
+{
+   if(ptr->m_prev_allocated){
+      return true;
+   }
+   else{
+      block_ctrl *prev = priv_prev_block(ptr);
+      assert(!priv_is_allocated_block(prev));
+      return false;
+   }
+}
+
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
+typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
+   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
+      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
+{
+   assert(first_segment_block->m_prev_allocated);
+   block_ctrl *end_block = reinterpret_cast<block_ctrl *>
+      (detail::char_ptr_cast(first_segment_block) - first_segment_block->m_prev_size*Alignment);
+   assert(priv_is_allocated_block(end_block));
+   assert(end_block > first_segment_block);
+   return end_block;
+}
+
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
+typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
+   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
+      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
 {
    return reinterpret_cast<block_ctrl *>
       (detail::char_ptr_cast(ptr) + ptr->m_size*Alignment);
 }
 
-template<class MutexFamily, class VoidPointer> inline
-bool rbtree_best_fit<MutexFamily, VoidPointer>::priv_is_allocated_block
-      (typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl *block)
-{  return block->m_allocated != 0;  }
-
-template<class MutexFamily, class VoidPointer> inline
-void rbtree_best_fit<MutexFamily, VoidPointer>::priv_mark_as_allocated_block
-      (typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl *block)
-{  block->m_allocated = 1; }
-
-template<class MutexFamily, class VoidPointer> inline
-void rbtree_best_fit<MutexFamily, VoidPointer>::priv_mark_as_free_block
-      (typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl *block)
-{  block->m_allocated = 0; }
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
+bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
+      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
+{
+   bool allocated = block->m_allocated != 0;
+   block_ctrl *next_block = (block_ctrl *)
+      (detail::char_ptr_cast(block) + block->m_size*Alignment);
+   bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
+   (void)next_block_prev_allocated;
+   assert(allocated == next_block_prev_allocated);
+   return allocated;
+}
 
-template<class MutexFamily, class VoidPointer> inline
-void* rbtree_best_fit<MutexFamily, VoidPointer>::priv_check_and_allocate
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
+void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
+      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
+{
+   //assert(!priv_is_allocated_block(block));
+   block->m_allocated = 1;
+   ((block_ctrl *)(((char*)block) + block->m_size*Alignment))->m_prev_allocated = 1;
+}
+
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
+void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
+      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
+{
+   block->m_allocated = 0;
+   ((block_ctrl *)(((char*)block) + block->m_size*Alignment))->m_prev_allocated = 0;
+   //assert(!priv_is_allocated_block(block));
+   priv_next_block(block)->m_prev_size = block->m_size;
+}
+
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
+void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
    (std::size_t nunits
-   ,typename rbtree_best_fit<MutexFamily, VoidPointer>::block_ctrl* block
+   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
    ,std::size_t &received_size)
 {
    std::size_t upper_nunits = nunits + BlockCtrlUnits;
@@ -1109,32 +1214,30 @@
       std::size_t block_old_size = block->m_size;
       block->m_size = nunits;
       assert(block->m_size >= BlockCtrlUnits);
-      priv_tail_size(block, block->m_size);
 
       //This is the remaining block
-      block_ctrl *new_block = new(reinterpret_cast<block_ctrl*>
+      block_ctrl *rem_block = new(reinterpret_cast<block_ctrl*>
                      (detail::char_ptr_cast(block) + Alignment*nunits))block_ctrl;
-      algo_impl_t::assert_alignment(new_block);
-      new_block->m_size  = block_old_size - nunits;
-      assert(new_block->m_size >= BlockCtrlUnits);
-      priv_tail_size(new_block, new_block->m_size);
-      priv_mark_as_free_block(new_block);
+      algo_impl_t::assert_alignment(rem_block);
+      rem_block->m_size  = block_old_size - nunits;
+      assert(rem_block->m_size >= BlockCtrlUnits);
+      priv_mark_as_free_block(rem_block);
 
       imultiset_iterator it_hint;
       if(it_old == m_header.m_imultiset.begin()
-         || (--imultiset_iterator(it_old))->m_size < new_block->m_size){
+         || (--imultiset_iterator(it_old))->m_size < rem_block->m_size){
          //option a: slow but secure
-         //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *new_block);
+         //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
          //option b: Construct an empty node and swap
-         //Imultiset::init_node(*new_block);
-         //block->swap_nodes(*new_block);
+         //Imultiset::init_node(*rem_block);
+         //block->swap_nodes(*rem_block);
          //option c: replace the node directly
-         m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *new_block);
+         m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *rem_block);
       }
       else{
          //Now we have to update the data in the tree
          m_header.m_imultiset.erase(it_old);
-         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *new_block);
+         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
       }
          
    }
@@ -1148,7 +1251,7 @@
    //We need block_ctrl for deallocation stuff, so
    //return memory user can overwrite
    m_header.m_allocated += block->m_size*Alignment;
-   received_size =  (block->m_size - AllocatedCtrlUnits)*Alignment;
+   received_size =  (block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
 
    //Mark the block as allocated
    priv_mark_as_allocated_block(block);
@@ -1157,11 +1260,12 @@
    //cleared with zero_free_memory
    TreeHook *t = static_cast<TreeHook*>(block);
    std::memset(t, 0, sizeof(*t));
+   this->priv_next_block(block)->m_prev_size = 0;
    return priv_get_user_buffer(block);
 }
 
-template<class MutexFamily, class VoidPointer>
-void rbtree_best_fit<MutexFamily, VoidPointer>::deallocate(void* addr)
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
 {
    if(!addr)   return;
    //-----------------------
@@ -1170,8 +1274,8 @@
    return this->priv_deallocate(addr);
 }
 
-template<class MutexFamily, class VoidPointer>
-void rbtree_best_fit<MutexFamily, VoidPointer>::priv_deallocate(void* addr)
+template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
+void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
 {
    if(!addr)   return;
 
@@ -1179,7 +1283,7 @@
   
    //The blocks must be marked as allocated and the sizes must be equal
    assert(priv_is_allocated_block(block));
-   assert(block->m_size == priv_tail_size(block));
+//   assert(block->m_size == priv_tail_size(block));
 
    //Check if alignment and block size are right
    algo_impl_t::assert_alignment(addr);
@@ -1194,33 +1298,48 @@
    block_ctrl *block_to_insert = block;
 
    //Get the next block
-   block_ctrl *next_block = priv_next_block(block);
+   block_ctrl *next_block  = priv_next_block(block);
+   bool merge_with_prev    = !priv_is_prev_allocated(block);
+   bool merge_with_next    = !priv_is_allocated_block(next_block);
+
+   //Merge logic. First just update block sizes, then fix free chunks tree
+   if(merge_with_prev || merge_with_next){
+      //Merge if the previous is free
+      if(merge_with_prev){
+         //Get the previous block
+         block_ctrl *prev_block = priv_prev_block(block);
+         prev_block->m_size += block->m_size;
+         assert(prev_block->m_size >= BlockCtrlUnits);
+         block_to_insert = prev_block;
+      }
+      //Merge if the next is free
+      if(merge_with_next){
+         block_to_insert->m_size += next_block->m_size;
+         assert(block_to_insert->m_size >= BlockCtrlUnits);
+         if(merge_with_prev)
+            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
+      }
 
-   //Merge if the next is free
-   if(!priv_is_allocated_block(next_block)){
-      block->m_size += next_block->m_size;
-      assert(block->m_size >= BlockCtrlUnits);
-      priv_tail_size(block, block->m_size);
-      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
+      bool only_merge_next = !merge_with_prev && merge_with_next;
+      imultiset_iterator free_block_to_check_it
+         (Imultiset::s_iterator_to(only_merge_next ? *next_block : *block_to_insert));
+      imultiset_iterator was_bigger_it(free_block_to_check_it);
+
+      //Now try to shortcut erasure + insertion (O(log(N))) with
+         //an O(1) operation if merging does not alter tree positions
+      if(++was_bigger_it != m_header.m_imultiset.end()   &&
+         block_to_insert->m_size > was_bigger_it->m_size ){
+         m_header.m_imultiset.erase(free_block_to_check_it);
+         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
+      }
+      else if(only_merge_next){
+         m_header.m_imultiset.replace_node(free_block_to_check_it, *block_to_insert);
+      }
    }
-
-   //Get the previous block
-   block_ctrl *prev_block = priv_prev_block(block);
-
-   //Now check that tail size and control size are equal
-   assert(prev_block->m_size == priv_tail_size(prev_block));
-
-   //Merge if the previous is free
-   if(!priv_is_allocated_block(prev_block)){
-      prev_block->m_size += block->m_size;
-      assert(prev_block->m_size >= BlockCtrlUnits);
-      priv_tail_size(prev_block, prev_block->m_size);
-      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));
-      block_to_insert = prev_block;
+   else{
+      m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
    }
-
    priv_mark_as_free_block(block_to_insert);
-   m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
 }
 
 }  //namespace interprocess {
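The rbtree_best_fit changes above drop the duplicated tail size that every allocated block used to carry (the removed priv_tail_size helpers) and replace it with a single bit: each block records whether the block placed just before it is allocated, and only free blocks mirror their size into the next block's m_prev_size so that backwards merging still works. The m_prev_size slot of an allocated block can then be handed back to the previous chunk (the UsableByPreviousChunk term added throughout this diff). A minimal sketch of that bookkeeping, using only the field names visible here; the struct and helper below are illustrative, not the actual rbtree_best_fit internals:

   #include <cstddef>

   //Illustrative layout only: state bits travel with the block, the size copy
   //in the following block is maintained only while this block is free
   struct block_ctrl_sketch
   {
      std::size_t m_prev_size;           //valid only while the previous block is free
      std::size_t m_size           : sizeof(std::size_t)*8 - 2; //size in Alignment units
      std::size_t m_allocated      : 1;  //this block is in use
      std::size_t m_prev_allocated : 1;  //the block placed before this one is in use
   };

   //Mirrors what priv_mark_as_allocated_block/priv_mark_as_free_block do above:
   //flip the bit here and in the next block, and propagate the size to the next
   //block only when this block becomes free
   inline void mark_block(block_ctrl_sketch *block, block_ctrl_sketch *next, bool allocated)
   {
      block->m_allocated     = allocated;
      next->m_prev_allocated = allocated;
      if(!allocated)
         next->m_prev_size = block->m_size;
   }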
Modified: trunk/boost/interprocess/mem_algo/simple_seq_fit.hpp
==============================================================================
--- trunk/boost/interprocess/mem_algo/simple_seq_fit.hpp	(original)
+++ trunk/boost/interprocess/mem_algo/simple_seq_fit.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/offset_ptr.hpp
==============================================================================
--- trunk/boost/interprocess/offset_ptr.hpp	(original)
+++ trunk/boost/interprocess/offset_ptr.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -61,7 +61,7 @@
    typedef void (self_t::*unspecified_bool_type)() const;
 
    #if defined(_MSC_VER) && (_MSC_VER >= 1400)
-   __declspec(noinline)
+   __declspec(noinline) //this workaround is needed for msvc-8.0 and msvc-9.0
    #endif
    void set_offset(const volatile void *ptr)
    {
@@ -77,7 +77,7 @@
    }
 
    #if defined(_MSC_VER) && (_MSC_VER >= 1400)
-   __declspec(noinline)
+   __declspec(noinline) //this workaround is needed for msvc-8.0 and msvc-9.0
    #endif
    void* get_pointer() const
    {  return (m_offset == 1) ? 0 : (detail::char_ptr_cast(this) + m_offset); }
Modified: trunk/boost/interprocess/segment_manager.hpp
==============================================================================
--- trunk/boost/interprocess/segment_manager.hpp	(original)
+++ trunk/boost/interprocess/segment_manager.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -70,7 +70,8 @@
    /// @cond
    
    //Experimental. Don't use
-   typedef typename MemoryAlgorithm::multiallocation_iterator multiallocation_iterator;
+   typedef typename MemoryAlgorithm::multiallocation_iterator  multiallocation_iterator;
+   typedef typename MemoryAlgorithm::multiallocation_chain     multiallocation_chain;
 
    /// @endcond
 
@@ -148,6 +149,11 @@
    multiallocation_iterator allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element, std::nothrow_t)
    {  return MemoryAlgorithm::allocate_many(elem_sizes, n_elements, sizeof_element); }
 
+   //!Deallocates the elements pointed to by the
+   //!multiallocation iterator range.
+   void deallocate_many(multiallocation_iterator it)
+   {  MemoryAlgorithm::deallocate_many(it); }
+
    /// @endcond
 
    //!Allocates nbytes bytes. Throws boost::interprocess::bad_alloc
@@ -189,6 +195,19 @@
       return ret;
    }
 
+   std::pair<void *, bool>
+      raw_allocation_command  (allocation_type command,   std::size_t limit_objects,
+                           std::size_t preferred_objects,std::size_t &received_objects,
+                           void *reuse_ptr = 0, std::size_t sizeof_object = 1)
+   {
+      std::pair<void *, bool> ret = MemoryAlgorithm::raw_allocation_command
+         ( command | nothrow_allocation, limit_objects, preferred_objects, received_objects
+         , reuse_ptr, sizeof_object);
+      if(!(command & nothrow_allocation) && !ret.first)
+         throw bad_alloc();
+      return ret;
+   }
+
    //!Deallocates the bytes allocated with allocate/allocate_many()
    //!pointed by addr
    void   deallocate          (void *addr)
@@ -219,6 +238,10 @@
    void zero_free_memory()
    {   MemoryAlgorithm::zero_free_memory(); }
 
+   //!Returns the size of the previously allocated buffer pointed to by ptr
+   std::size_t size(const void *ptr) const
+   {   return MemoryAlgorithm::size(ptr); }
+
    /// @cond
    protected:
    void * prot_anonymous_construct
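The segment_manager additions above (deallocate_many, raw_allocation_command and size) are easiest to read from the caller's side. A minimal caller-side sketch follows; only the allocate_many/deallocate_many signatures are taken from this diff, while managed_shared_memory, get_segment_manager() and the helper name many_roundtrip are assumptions about the existing front end, and the interface is still marked experimental:

   #include <boost/interprocess/managed_shared_memory.hpp>
   #include <cstddef>
   #include <new>

   using namespace boost::interprocess;

   void many_roundtrip(managed_shared_memory &segment)
   {
      typedef managed_shared_memory::segment_manager       segment_manager_t;
      typedef segment_manager_t::multiallocation_iterator  multialloc_iterator_t;

      segment_manager_t *mgr = segment.get_segment_manager();

      //One locking pass allocates three buffers of different sizes
      const std::size_t sizes[3] = { 32, 64, 128 };
      multialloc_iterator_t it = mgr->allocate_many(sizes, 3, 1, std::nothrow);

      //... use the buffers by walking the iterator range ...

      //One call returns the whole range to the segment (new in this revision)
      mgr->deallocate_many(it);
   }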
Modified: trunk/boost/interprocess/shared_memory_object.hpp
==============================================================================
--- trunk/boost/interprocess/shared_memory_object.hpp	(original)
+++ trunk/boost/interprocess/shared_memory_object.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/smart_ptr/deleter.hpp
==============================================================================
--- trunk/boost/interprocess/smart_ptr/deleter.hpp	(original)
+++ trunk/boost/interprocess/smart_ptr/deleter.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2007.
+// (C) Copyright Ion Gaztanaga 2007-2008.
 //
 // Distributed under the Boost Software License, Version 1.0.
 // (See accompanying file LICENSE_1_0.txt or copy at
Modified: trunk/boost/interprocess/smart_ptr/detail/shared_count.hpp
==============================================================================
--- trunk/boost/interprocess/smart_ptr/detail/shared_count.hpp	(original)
+++ trunk/boost/interprocess/smart_ptr/detail/shared_count.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -4,7 +4,7 @@
 //
 // (C) Copyright Peter Dimov and Multi Media Ltd. 2001, 2002, 2003
 // (C) Copyright Peter Dimov 2004-2005
-// (C) Copyright Ion Gaztanaga 2006-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2006-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/smart_ptr/detail/sp_counted_base_atomic.hpp
==============================================================================
--- trunk/boost/interprocess/smart_ptr/detail/sp_counted_base_atomic.hpp	(original)
+++ trunk/boost/interprocess/smart_ptr/detail/sp_counted_base_atomic.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -9,7 +9,7 @@
 
 //  Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
 //  Copyright 2004-2005 Peter Dimov
-//  Copyright 2007 Ion Gaztanaga
+//  Copyright 2007-2008 Ion Gaztanaga
 //
 // Distributed under the Boost Software License, Version 1.0. (See
 // accompanying file LICENSE_1_0.txt or copy at
Modified: trunk/boost/interprocess/smart_ptr/shared_ptr.hpp
==============================================================================
--- trunk/boost/interprocess/smart_ptr/shared_ptr.hpp	(original)
+++ trunk/boost/interprocess/smart_ptr/shared_ptr.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -4,7 +4,7 @@
 //
 // (C) Copyright Greg Colvin and Beman Dawes 1998, 1999.
 // (C) Copyright Peter Dimov 2001, 2002, 2003
-// (C) Copyright Ion Gaztanaga 2006-2007.
+// (C) Copyright Ion Gaztanaga 2006-2008.
 // Distributed under the Boost Software License, Version 1.0.
 // (See accompanying file LICENSE_1_0.txt or copy at
 // http://www.boost.org/LICENSE_1_0.txt)
Modified: trunk/boost/interprocess/smart_ptr/weak_ptr.hpp
==============================================================================
--- trunk/boost/interprocess/smart_ptr/weak_ptr.hpp	(original)
+++ trunk/boost/interprocess/smart_ptr/weak_ptr.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -3,7 +3,7 @@
 // This file is the adaptation for Interprocess of boost/weak_ptr.hpp
 //
 // (C) Copyright Peter Dimov 2001, 2002, 2003
-// (C) Copyright Ion Gaztanaga 2006-2007.
+// (C) Copyright Ion Gaztanaga 2006-2008.
 // Distributed under the Boost Software License, Version 1.0.
 // (See accompanying file LICENSE_1_0.txt or copy at
 // http://www.boost.org/LICENSE_1_0.txt)
Modified: trunk/boost/interprocess/streams/bufferstream.hpp
==============================================================================
--- trunk/boost/interprocess/streams/bufferstream.hpp	(original)
+++ trunk/boost/interprocess/streams/bufferstream.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/streams/vectorstream.hpp
==============================================================================
--- trunk/boost/interprocess/streams/vectorstream.hpp	(original)
+++ trunk/boost/interprocess/streams/vectorstream.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/emulation/interprocess_condition.hpp
==============================================================================
--- trunk/boost/interprocess/sync/emulation/interprocess_condition.hpp	(original)
+++ trunk/boost/interprocess/sync/emulation/interprocess_condition.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/emulation/interprocess_mutex.hpp
==============================================================================
--- trunk/boost/interprocess/sync/emulation/interprocess_mutex.hpp	(original)
+++ trunk/boost/interprocess/sync/emulation/interprocess_mutex.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/emulation/interprocess_recursive_mutex.hpp
==============================================================================
--- trunk/boost/interprocess/sync/emulation/interprocess_recursive_mutex.hpp	(original)
+++ trunk/boost/interprocess/sync/emulation/interprocess_recursive_mutex.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/emulation/interprocess_semaphore.hpp
==============================================================================
--- trunk/boost/interprocess/sync/emulation/interprocess_semaphore.hpp	(original)
+++ trunk/boost/interprocess/sync/emulation/interprocess_semaphore.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/emulation/named_creation_functor.hpp
==============================================================================
--- trunk/boost/interprocess/sync/emulation/named_creation_functor.hpp	(original)
+++ trunk/boost/interprocess/sync/emulation/named_creation_functor.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/file_lock.hpp
==============================================================================
--- trunk/boost/interprocess/sync/file_lock.hpp	(original)
+++ trunk/boost/interprocess/sync/file_lock.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/interprocess_barrier.hpp
==============================================================================
--- trunk/boost/interprocess/sync/interprocess_barrier.hpp	(original)
+++ trunk/boost/interprocess/sync/interprocess_barrier.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/interprocess_condition.hpp
==============================================================================
--- trunk/boost/interprocess/sync/interprocess_condition.hpp	(original)
+++ trunk/boost/interprocess/sync/interprocess_condition.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/interprocess_mutex.hpp
==============================================================================
--- trunk/boost/interprocess/sync/interprocess_mutex.hpp	(original)
+++ trunk/boost/interprocess/sync/interprocess_mutex.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/interprocess_recursive_mutex.hpp
==============================================================================
--- trunk/boost/interprocess/sync/interprocess_recursive_mutex.hpp	(original)
+++ trunk/boost/interprocess/sync/interprocess_recursive_mutex.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/interprocess_semaphore.hpp
==============================================================================
--- trunk/boost/interprocess/sync/interprocess_semaphore.hpp	(original)
+++ trunk/boost/interprocess/sync/interprocess_semaphore.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/interprocess_upgradable_mutex.hpp
==============================================================================
--- trunk/boost/interprocess/sync/interprocess_upgradable_mutex.hpp	(original)
+++ trunk/boost/interprocess/sync/interprocess_upgradable_mutex.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/lock_options.hpp
==============================================================================
--- trunk/boost/interprocess/sync/lock_options.hpp	(original)
+++ trunk/boost/interprocess/sync/lock_options.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/mutex_family.hpp
==============================================================================
--- trunk/boost/interprocess/sync/mutex_family.hpp	(original)
+++ trunk/boost/interprocess/sync/mutex_family.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/named_condition.hpp
==============================================================================
--- trunk/boost/interprocess/sync/named_condition.hpp	(original)
+++ trunk/boost/interprocess/sync/named_condition.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/named_mutex.hpp
==============================================================================
--- trunk/boost/interprocess/sync/named_mutex.hpp	(original)
+++ trunk/boost/interprocess/sync/named_mutex.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/named_recursive_mutex.hpp
==============================================================================
--- trunk/boost/interprocess/sync/named_recursive_mutex.hpp	(original)
+++ trunk/boost/interprocess/sync/named_recursive_mutex.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/named_semaphore.hpp
==============================================================================
--- trunk/boost/interprocess/sync/named_semaphore.hpp	(original)
+++ trunk/boost/interprocess/sync/named_semaphore.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
  //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/named_upgradable_mutex.hpp
==============================================================================
--- trunk/boost/interprocess/sync/named_upgradable_mutex.hpp	(original)
+++ trunk/boost/interprocess/sync/named_upgradable_mutex.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/null_mutex.hpp
==============================================================================
--- trunk/boost/interprocess/sync/null_mutex.hpp	(original)
+++ trunk/boost/interprocess/sync/null_mutex.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/posix/interprocess_condition.hpp
==============================================================================
--- trunk/boost/interprocess/sync/posix/interprocess_condition.hpp	(original)
+++ trunk/boost/interprocess/sync/posix/interprocess_condition.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/posix/interprocess_mutex.hpp
==============================================================================
--- trunk/boost/interprocess/sync/posix/interprocess_mutex.hpp	(original)
+++ trunk/boost/interprocess/sync/posix/interprocess_mutex.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/posix/interprocess_recursive_mutex.hpp
==============================================================================
--- trunk/boost/interprocess/sync/posix/interprocess_recursive_mutex.hpp	(original)
+++ trunk/boost/interprocess/sync/posix/interprocess_recursive_mutex.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/posix/interprocess_semaphore.hpp
==============================================================================
--- trunk/boost/interprocess/sync/posix/interprocess_semaphore.hpp	(original)
+++ trunk/boost/interprocess/sync/posix/interprocess_semaphore.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/posix/pthread_helpers.hpp
==============================================================================
--- trunk/boost/interprocess/sync/posix/pthread_helpers.hpp	(original)
+++ trunk/boost/interprocess/sync/posix/pthread_helpers.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/posix/ptime_to_timespec.hpp
==============================================================================
--- trunk/boost/interprocess/sync/posix/ptime_to_timespec.hpp	(original)
+++ trunk/boost/interprocess/sync/posix/ptime_to_timespec.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/posix/semaphore_wrapper.hpp
==============================================================================
--- trunk/boost/interprocess/sync/posix/semaphore_wrapper.hpp	(original)
+++ trunk/boost/interprocess/sync/posix/semaphore_wrapper.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/scoped_lock.hpp
==============================================================================
--- trunk/boost/interprocess/sync/scoped_lock.hpp	(original)
+++ trunk/boost/interprocess/sync/scoped_lock.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/sharable_lock.hpp
==============================================================================
--- trunk/boost/interprocess/sync/sharable_lock.hpp	(original)
+++ trunk/boost/interprocess/sync/sharable_lock.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/sync/upgradable_lock.hpp
==============================================================================
--- trunk/boost/interprocess/sync/upgradable_lock.hpp	(original)
+++ trunk/boost/interprocess/sync/upgradable_lock.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/interprocess/windows_shared_memory.hpp
==============================================================================
--- trunk/boost/interprocess/windows_shared_memory.hpp	(original)
+++ trunk/boost/interprocess/windows_shared_memory.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
Modified: trunk/boost/intrusive/circular_list_algorithms.hpp
==============================================================================
--- trunk/boost/intrusive/circular_list_algorithms.hpp	(original)
+++ trunk/boost/intrusive/circular_list_algorithms.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -54,6 +54,27 @@
    typedef typename NodeTraits::const_node_ptr  const_node_ptr;
    typedef NodeTraits                           node_traits;
 
+   //! <b>Effects</b>: Constructs an unused list element, so that
+   //! inited(this_node) == true
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static void init(node_ptr this_node)
+   {
+      NodeTraits::set_next(this_node, 0);
+      NodeTraits::set_previous(this_node, 0);
+   }
+
+   //! <b>Effects</b>: Returns true if "this_node" is in an unused state
+   //! as if it was initialized by the "init" function.
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static bool inited(const_node_ptr this_node)  
+   {  return !NodeTraits::get_next(this_node); }
+
    //! <b>Effects</b>: Constructs an empty list, making this_node the only
    //!   node of the circular list:
    //!  <tt>NodeTraits::get_next(this_node) == NodeTraits::get_previous(this_node)
@@ -62,11 +83,12 @@
    //! <b>Complexity</b>: Constant 
    //! 
    //! <b>Throws</b>: Nothing.
-   static void init(node_ptr this_node)
+   static void init_header(node_ptr this_node)
    {
       NodeTraits::set_next(this_node, this_node);
       NodeTraits::set_previous(this_node, this_node);
-   }  
+   }
+
 
    //! <b>Requires</b>: this_node must be in a circular list or be an empty circular list.
    //! 
@@ -76,8 +98,11 @@
    //! <b>Complexity</b>: Constant 
    //! 
    //! <b>Throws</b>: Nothing.
-   static bool unique(const_node_ptr this_node)  
-   {  return NodeTraits::get_next(this_node) == this_node;  }
+   static bool unique(const_node_ptr this_node)
+   {
+      node_ptr next = NodeTraits::get_next(this_node);
+      return !next || next == this_node;
+   }
 
    //! <b>Requires</b>: this_node must be in a circular list or be an empty circular list.
    //! 
@@ -107,11 +132,16 @@
    //! <b>Throws</b>: Nothing.
    static node_ptr unlink(node_ptr this_node)
    {
-      node_ptr next(NodeTraits::get_next(this_node));
-      node_ptr prev(NodeTraits::get_previous(this_node));
-      NodeTraits::set_next(prev, next);
-      NodeTraits::set_previous(next, prev);
-      return next;
+      if(NodeTraits::get_next(this_node)){
+         node_ptr next(NodeTraits::get_next(this_node));
+         node_ptr prev(NodeTraits::get_previous(this_node));
+         NodeTraits::set_next(prev, next);
+         NodeTraits::set_previous(next, prev);
+         return next;
+      }
+      else{
+         return this_node;
+      }
    }
 
    //! <b>Requires</b>: b and e must be nodes of the same circular list or an empty range.
@@ -229,6 +259,17 @@
    public: 
    static void swap_nodes(node_ptr this_node, node_ptr other_node) 
    {
+      if (other_node == this_node)
+         return;
+      bool this_inited  = inited(this_node);
+      bool other_inited = inited(other_node);
+      if(this_inited){
+         init_header(this_node);
+      }
+      if(other_inited){
+         init_header(other_node);
+      }
+
       node_ptr next_this(NodeTraits::get_next(this_node)); 
       node_ptr prev_this(NodeTraits::get_previous(this_node)); 
       node_ptr next_other(NodeTraits::get_next(other_node)); 
@@ -238,6 +279,13 @@
       swap_next(prev_this, prev_other); 
       swap_next(this_node, other_node); 
       swap_prev(this_node, other_node); 
+
+      if(this_inited){
+         init(other_node);
+      }
+      if(other_inited){
+         init(this_node);
+      }
    }
 
    //! <b>Requires</b>: b and e must be nodes of the same circular list or an empty range.
@@ -254,8 +302,8 @@
    {
       if (b != e) {
          node_ptr prev_p(NodeTraits::get_previous(p));
-         node_ptr prev_e(NodeTraits::get_previous(e));
          node_ptr prev_b(NodeTraits::get_previous(b));
+         node_ptr prev_e(NodeTraits::get_previous(e));
          NodeTraits::set_next(prev_e, p);
          NodeTraits::set_previous(p, prev_e);
          NodeTraits::set_next(prev_b, e);
@@ -308,6 +356,47 @@
          f = n;
       }
    }
+
+   //! <b>Effects</b>: Moves the node p n positions towards the end of the list.
+   //! 
+   //! <b>Throws</b>: Nothing.
+   //! 
+   //! <b>Complexity</b>: Linear to the number of moved positions.
+   static void move_backwards(node_ptr p, std::size_t n)
+   {
+      //Null shift, nothing to do
+      if(!n) return;
+      node_ptr first  = NodeTraits::get_next(p);
+      //size() == 0 or 1, nothing to do
+      if(first == NodeTraits::get_previous(p)) return;
+      unlink(p);
+      //Now get the new first node
+      while(n--){
+         first = NodeTraits::get_next(first);
+      }
+      link_before(first, p);
+   }
+
+   //! <b>Effects</b>: Moves the node p n positions towards the beginning of the list.
+   //! 
+   //! <b>Throws</b>: Nothing.
+   //! 
+   //! <b>Complexity</b>: Linear to the number of moved positions.
+   static void move_forward(node_ptr p, std::size_t n)
+   {
+      //Null shift, nothing to do
+      if(!n)   return;
+      node_ptr last  = NodeTraits::get_previous(p);
+      //size() == 0 or 1, nothing to do
+      if(last == NodeTraits::get_next(p))   return;
+
+      unlink(p);
+      //Now get the new last node
+      while(n--){
+         last = NodeTraits::get_previous(last);
+      }
+      link_after(last, p);
+   }
 };
 
 } //namespace intrusive 
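
The new init()/inited()/init_header() split is easiest to see in isolation. A minimal sketch follows; the node type and traits are illustrative assumptions (not code from this changeset) that just fulfil the documented NodeTraits interface:

   #include <boost/intrusive/circular_list_algorithms.hpp>
   #include <cassert>

   struct my_node
   {  my_node *next_, *prev_;  };

   struct my_node_traits
   {
      typedef my_node         node;
      typedef my_node *       node_ptr;
      typedef const my_node * const_node_ptr;
      static node_ptr get_next(const_node_ptr n)               {  return n->next_;  }
      static void     set_next(node_ptr n, node_ptr next)      {  n->next_ = next;  }
      static node_ptr get_previous(const_node_ptr n)           {  return n->prev_;  }
      static void     set_previous(node_ptr n, node_ptr prev)  {  n->prev_ = prev;  }
   };

   int main()
   {
      typedef boost::intrusive::circular_list_algorithms<my_node_traits> algo;
      my_node header, one, two;
      algo::init_header(&header);       //empty circular list: the header points to itself
      algo::init(&one);                 //unlinked element: next and previous are null
      assert(algo::inited(&one));
      algo::link_before(&header, &one);
      algo::link_before(&header, &two);
      assert(!algo::inited(&one));      //linking overwrites the "inited" state
      algo::move_backwards(&header, 1); //rotate: "two" becomes the first element
      return 0;
   }
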
Modified: trunk/boost/intrusive/circular_slist_algorithms.hpp
==============================================================================
--- trunk/boost/intrusive/circular_slist_algorithms.hpp	(original)
+++ trunk/boost/intrusive/circular_slist_algorithms.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -16,6 +16,7 @@
 
 #include <boost/intrusive/detail/config_begin.hpp>
 #include <boost/intrusive/intrusive_fwd.hpp>
+#include <boost/intrusive/detail/assert.hpp>
 #include <cstddef>
 
 namespace boost {
@@ -25,7 +26,7 @@
 //! forming a circular singly linked list. An empty circular list is formed by a node
 //! whose pointer to the next node points to itself.
 //!
-//! circular_slist_algorithms is configured with a NodeTraits class, which capsulates the
+//! circular_slist_algorithms is configured with a NodeTraits class, which encapsulates the
 //! information about the node to be manipulated. NodeTraits must support the
 //! following interface:
 //!
@@ -72,11 +73,13 @@
    //! <b>Throws</b>: Nothing.
    static node_ptr get_previous_node(node_ptr prev_init_node, node_ptr this_node)
    {
-      node_ptr p      = prev_init_node;
+      node_ptr p = prev_init_node;
       for( node_ptr p_next
          ; this_node != (p_next = NodeTraits::get_next(p))
          ; p = p_next){
-         //empty
+         //Logic error: possible use of linear lists with
+         //operations only permitted with circular lists
+         BOOST_INTRUSIVE_INVARIANT_ASSERT(p);
       }
       return p;
    }
@@ -115,25 +118,46 @@
 
    //! <b>Effects</b>: Constructs an empty list, making this_node the only
    //!   node of the circular list:
-   //!  <tt>NodeTraits::get_next(this_node) == NodeTraits::get_previous(this_node)
-   //!  == this_node</tt>.
+   //!  <tt>NodeTraits::get_next(this_node) == this_node</tt>.
    //! 
    //! <b>Complexity</b>: Constant 
    //! 
    //! <b>Throws</b>: Nothing.
-   static void init(node_ptr this_node)  
+   static void init_header(node_ptr this_node)  
    {  NodeTraits::set_next(this_node, this_node);  }  
 
+   //! <b>Effects</b>: Constructs an unused list element, setting the next
+   //!   pointer to null:
+   //!  <tt>NodeTraits::get_next(this_node) == 0</tt>
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static void init(node_ptr this_node)  
+   {  NodeTraits::set_next(this_node, 0);  }  
+
    //! <b>Requires</b>: this_node must be in a circular list or be an empty circular list.
    //! 
    //! <b>Effects</b>: Returns true if "this_node" is the only node of a circular list:
-   //!  <tt>return NodeTraits::get_next(this_node) == this_node</tt>
+   //!  or it is a not-inserted (inited) node:
+   //!  <tt>return !NodeTraits::get_next(this_node) || NodeTraits::get_next(this_node) == this_node</tt>
    //! 
    //! <b>Complexity</b>: Constant 
    //! 
    //! <b>Throws</b>: Nothing.
-   static bool unique(const_node_ptr this_node)  
-   {  return NodeTraits::get_next(this_node) == this_node; }
+   static bool unique(const_node_ptr this_node)
+   {
+      node_ptr next = NodeTraits::get_next(this_node);
+      return !next || next == this_node;
+   }  
+
+   //! <b>Effects</b>: Returns true if "this_node" has the same state as if it was inited using "init(node_ptr)"
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static bool inited(const_node_ptr this_node)  
+   {  return !NodeTraits::get_next(this_node); }
 
    //! <b>Requires</b>: this_node must be in a circular list or be an empty circular list.
    //! 
@@ -165,7 +189,7 @@
    {
       node_ptr this_node(NodeTraits::get_next(prev_node));
       NodeTraits::set_next(prev_node, NodeTraits::get_next(this_node));
-      NodeTraits::set_next(this_node, this_node);
+      //NodeTraits::set_next(this_node, this_node);
    }
 
    //! <b>Requires</b>: nxt_node must be in a circular list or be an empty circular list.
@@ -181,7 +205,7 @@
       unlink_after(prev_to_erase);
    }
 
-   //! <b>Requires</b>: this_node must be in a circular list or be an empty circular list.
+   //! <b>Requires</b>: this_node must be in a circular list, be an empty circular list or be inited.
    //! 
    //! <b>Effects</b>: Unlinks the node from the circular list.
    //! 
@@ -189,7 +213,10 @@
    //! 
    //! <b>Throws</b>: Nothing.
    static void unlink(node_ptr this_node)
-   {  unlink_after(get_previous_node(this_node)); }
+   {
+      if(NodeTraits::get_next(this_node))
+         unlink_after(get_previous_node(this_node));
+   }
 
    //! <b>Requires</b>: prev_node must be a node of a circular list.
    //! 
@@ -200,8 +227,7 @@
    //! <b>Throws</b>: Nothing.
    static void link_after(node_ptr prev_node, node_ptr this_node)
    {
-      node_ptr this_nxt = NodeTraits::get_next(prev_node);
-      NodeTraits::set_next(this_node, this_nxt);
+      NodeTraits::set_next(this_node, NodeTraits::get_next(prev_node));
       NodeTraits::set_next(prev_node, this_node);
    }
 
@@ -229,6 +255,15 @@
    {
       if (other_node == this_node)
          return;
+      bool this_inited  = inited(this_node);
+      bool other_inited = inited(other_node);
+      if(this_inited){
+         init_header(this_node);
+      }
+      if(other_inited){
+         init_header(other_node);
+      }
+
       bool empty1 = unique(this_node);
       bool empty2 = unique(other_node);
       node_ptr prev_this (get_previous_node(this_node));
@@ -240,12 +275,19 @@
       NodeTraits::set_next(other_node, this_next);
       NodeTraits::set_next(empty1 ? other_node : prev_this, other_node);
       NodeTraits::set_next(empty2 ? this_node  : prev_other, this_node);
+
+      if(this_inited){
+         init(other_node);
+      }
+      if(other_inited){
+         init(this_node);
+      }
    }
 
    //! <b>Requires</b>: b and e must be nodes of the same circular list or an empty range.
    //!   and p must be a node of a different circular list.
    //! 
-   //! <b>Effects</b>: Removes the nodes from [b, e) range from their circular list and inserts
+   //! <b>Effects</b>: Removes the nodes from (b, e] range from their circular list and inserts
    //!   them after p in p's circular list.
    //! 
    //! <b>Complexity</b>: Constant 
@@ -278,6 +320,99 @@
          transfer_after(e, i, nxt);
       }
    }
+
+   //! <b>Effects</b>: Moves the node p n positions towards the end of the list.
+   //! 
+   //! <b>Throws</b>: Nothing.
+   //! 
+   //! <b>Complexity</b>: Linear to the number of elements plus the number of moved positions.
+   static void move_backwards(node_ptr p, std::size_t n)
+   {
+      //Null shift, nothing to do
+      if(!n) return;
+      node_ptr first  = NodeTraits::get_next(p);
+
+      //count() == 1 or 2, nothing to do
+      if(NodeTraits::get_next(first) == p)
+         return;
+
+      bool end_found = false;
+      node_ptr new_last(0);
+
+      //Now find the new last node according to the shift count.
+      //If we find p before finding the new last node
+      //unlink p, shortcut the search now that we know the size of the list
+      //and continue.
+      for(std::size_t i = 1; i <= n; ++i){
+         new_last = first;
+         first = NodeTraits::get_next(first);
+         if(first == p){
+            //Shortcut the shift with the modulo of the size of the list
+            n %= i;
+            if(!n)
+               return;
+            i = 0;
+            //Unlink p and continue the new first node search
+            first = NodeTraits::get_next(p);
+            unlink_after(new_last);
+            end_found = true;
+         }
+      }
+
+      //If p has not been found in the previous loop, find it
+      //starting in the new first node and unlink it
+      if(!end_found){
+         unlink_after(get_previous_node(first, p));
+      }
+
+      //Now link p after the new last node
+      link_after(new_last, p);
+   }
+
+   //! <b>Effects</b>: Moves the node p n positions towards the beginning of the list.
+   //! 
+   //! <b>Throws</b>: Nothing.
+   //! 
+   //! <b>Complexity</b>: Linear to the number of elements plus the number of moved positions.
+   static void move_forward(node_ptr p, std::size_t n)
+   {
+      //Null shift, nothing to do
+      if(!n) return;
+      node_ptr first  = node_traits::get_next(p);
+
+      //count() == 1 or 2, nothing to do
+      if(node_traits::get_next(first) == p) return;
+
+      //Iterate until p is found to know where the current last node is.
+      //If the shift count is less than the size of the list, we can also obtain
+      //the position of the new last node after the shift.
+      node_ptr old_last(first), next_to_it, new_last(p);
+      std::size_t distance = 1;
+      while(p != (next_to_it = node_traits::get_next(old_last))){
+         if(++distance > n)
+            new_last = node_traits::get_next(new_last);
+         old_last = next_to_it;
+      }
+      //If the shift was bigger or equal than the size, obtain the equivalent
+      //forward shifts and find the new last node.
+      if(distance <= n){
+         //Now find the equivalent forward shifts.
+         //Shortcut the shift with the modulo of the size of the list
+         std::size_t new_before_last_pos = (distance - (n % distance))% distance;
+         //If the shift is a multiple of the size there is nothing to do
+         if(!new_before_last_pos)   return;
+         
+         for( new_last = p
+            ; new_before_last_pos--
+            ; new_last = node_traits::get_next(new_last)){
+            //empty
+         }
+      }
+
+      //Now unlink p and link it after the new last node
+      unlink_after(old_last);
+      link_after(new_last, p);
+   }
 };
 
 } //namespace intrusive 
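
The relaxed unlink()/unique() semantics for singly linked nodes can also be exercised directly. Another minimal sketch with an illustrative node type and traits (assumptions, not part of the changeset):

   #include <boost/intrusive/circular_slist_algorithms.hpp>
   #include <cassert>

   struct my_node
   {  my_node *next_;  };

   struct my_node_traits
   {
      typedef my_node         node;
      typedef my_node *       node_ptr;
      typedef const my_node * const_node_ptr;
      static node_ptr get_next(const_node_ptr n)          {  return n->next_;  }
      static void     set_next(node_ptr n, node_ptr next) {  n->next_ = next;  }
   };

   int main()
   {
      typedef boost::intrusive::circular_slist_algorithms<my_node_traits> algo;
      my_node header, elem;
      algo::init_header(&header);   //empty circular list: the header points to itself
      algo::init(&elem);            //unlinked element: next is null
      assert(algo::inited(&elem) && algo::unique(&elem));
      algo::unlink(&elem);          //with this change, unlink is a no-op for an inited node
      algo::link_after(&header, &elem);
      assert(!algo::inited(&elem));
      return 0;
   }
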
Modified: trunk/boost/intrusive/detail/tree_algorithms.hpp
==============================================================================
--- trunk/boost/intrusive/detail/tree_algorithms.hpp	(original)
+++ trunk/boost/intrusive/detail/tree_algorithms.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -137,9 +137,9 @@
    {  return uncast(header);   }
 
    //! <b>Requires</b>: node is a node of the tree or an node initialized
-   //!   by init(...).
+   //!   by init(...) or init_node.
    //! 
-   //! <b>Effects</b>: Returns true if the node is initialized by init().
+   //! <b>Effects</b>: Returns true if the node is initialized by init() or init_node().
    //! 
    //! <b>Complexity</b>: Constant time.
    //! 
Modified: trunk/boost/intrusive/detail/utilities.hpp
==============================================================================
--- trunk/boost/intrusive/detail/utilities.hpp	(original)
+++ trunk/boost/intrusive/detail/utilities.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -498,8 +498,8 @@
 
 inline float fast_log2 (float val)
 {
-  boost::uint32_t * const exp_ptr = 
-     static_cast <boost::uint32_t * const>(static_cast<void * const >(&val));
+   boost::uint32_t * exp_ptr =
+      static_cast<boost::uint32_t *>(static_cast<void*>(&val));
    boost::uint32_t x = *exp_ptr;
    const int log_2 = (int)(((x >> 23) & 255) - 128);
    x &= ~(255 << 23);
Modified: trunk/boost/intrusive/intrusive_fwd.hpp
==============================================================================
--- trunk/boost/intrusive/intrusive_fwd.hpp	(original)
+++ trunk/boost/intrusive/intrusive_fwd.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -69,6 +69,7 @@
    , class O1  = none
    , class O2  = none
    , class O3  = none
+   , class O4  = none
    >
 class slist;
 
Added: trunk/boost/intrusive/linear_slist_algorithms.hpp
==============================================================================
--- (empty file)
+++ trunk/boost/intrusive/linear_slist_algorithms.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -0,0 +1,324 @@
+/////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Olaf Krzikalla 2004-2006.
+// (C) Copyright Ion Gaztanaga  2006-2007
+//
+// Distributed under the Boost Software License, Version 1.0.
+//    (See accompanying file LICENSE_1_0.txt or copy at
+//          http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/intrusive for documentation.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef BOOST_INTRUSIVE_LINEAR_SLIST_ALGORITHMS_HPP
+#define BOOST_INTRUSIVE_LINEAR_SLIST_ALGORITHMS_HPP
+
+#include <boost/intrusive/detail/config_begin.hpp>
+#include <boost/intrusive/intrusive_fwd.hpp>
+#include <cstddef>
+
+namespace boost {
+namespace intrusive {
+
+//! linear_slist_algorithms provides basic algorithms to manipulate nodes
+//! forming a linear singly linked list.
+//!
+//! linear_slist_algorithms is configured with a NodeTraits class, which encapsulates the
+//! information about the node to be manipulated. NodeTraits must support the
+//! following interface:
+//!
+//! <b>Typedefs</b>:
+//!
+//! <tt>node</tt>: The type of the node that forms the linear list
+//!
+//! <tt>node_ptr</tt>: A pointer to a node
+//!
+//! <tt>const_node_ptr</tt>: A pointer to a const node
+//!
+//! <b>Static functions</b>:
+//!
+//! <tt>static node_ptr get_next(const_node_ptr n);</tt>
+//! 
+//! <tt>static void set_next(node_ptr n, node_ptr next);</tt>
+template<class NodeTraits>
+class linear_slist_algorithms
+{
+   public:
+   typedef typename NodeTraits::node_ptr        node_ptr;
+   typedef typename NodeTraits::const_node_ptr  const_node_ptr;
+   typedef NodeTraits                           node_traits;
+
+   //! <b>Requires</b>: this_node and prev_init_node must be in the same linear list.
+   //! 
+   //! <b>Effects</b>: Returns the previous node of this_node in the linear list, starting
+   //!   the search from prev_init_node. The first node checked for equality
+   //!   is NodeTraits::get_next(prev_init_node).
+   //! 
+   //! <b>Complexity</b>: Linear to the number of elements between prev_init_node and this_node.
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static node_ptr get_previous_node(node_ptr prev_init_node, node_ptr this_node)
+   {
+      node_ptr p      = prev_init_node;
+      for( node_ptr p_next
+         ; this_node != (p_next = NodeTraits::get_next(p))
+         ; p = p_next){
+         //empty
+      }
+      return p;
+   }
+
+   //! <b>Effects</b>: Constructs an empty list, making this_node the only
+   //!   node of the linear list:
+   //!  <tt>NodeTraits::get_next(this_node) == 0</tt>.
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static void init_header(node_ptr this_node)  
+   {  NodeTraits::set_next(this_node, 0);  }  
+
+   //! <b>Effects</b>: Constructs an unused list element, setting the next
+   //!   pointer to null:
+   //!  <tt>NodeTraits::get_next(this_node) == 0</tt>
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static void init(node_ptr this_node)  
+   {  NodeTraits::set_next(this_node, 0);  }  
+
+   //! <b>Requires</b>: this_node must be in a linear list or be an empty linear list.
+   //! 
+   //! <b>Effects</b>: Returns true if "this_node" is the only node of a linear list
+   //!  or a not-inserted node:
+   //!  <tt>return !NodeTraits::get_next(this_node) || NodeTraits::get_next(this_node) == this_node</tt>
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static bool unique(const_node_ptr this_node)  
+   {
+      node_ptr next = NodeTraits::get_next(this_node);
+      return !next || next == this_node;
+   }
+
+   //! <b>Effects</b>: Returns true if "this_node" has the same state as if it was
+   //!  initialized by "init(node_ptr)":
+   //!  <tt>return !NodeTraits::get_next(this_node)</tt>
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static bool inited(const_node_ptr this_node)  
+   {  return !NodeTraits::get_next(this_node);  }
+
+   //! <b>Requires</b>: this_node must be in a linear list or be an empty linear list.
+   //! 
+   //! <b>Effects</b>: Returns the number of nodes in a linear list. If the linear list
+   //!  is empty, returns 1.
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static std::size_t count(const_node_ptr this_node) 
+   {
+      std::size_t result = 0;
+      const_node_ptr p = this_node;
+      do{
+         p = NodeTraits::get_next(p);
+         ++result;
+      } while (p);
+      return result;
+   }
+
+   //! <b>Requires</b>: prev_node must be in a linear list or be an empty linear list.
+   //! 
+   //! <b>Effects</b>: Unlinks the next node of prev_node from the linear list.
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static void unlink_after(node_ptr prev_node)
+   {
+      node_ptr this_node(NodeTraits::get_next(prev_node));
+      NodeTraits::set_next(prev_node, NodeTraits::get_next(this_node));
+   }
+
+   //! <b>Requires</b>: prev_node must be a node of a linear list.
+   //! 
+   //! <b>Effects</b>: Links this_node after prev_node in the linear list.
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static void link_after(node_ptr prev_node, node_ptr this_node)
+   {
+      NodeTraits::set_next(this_node, NodeTraits::get_next(prev_node));
+      NodeTraits::set_next(prev_node, this_node);
+   }
+
+   //! <b>Requires</b>: this_node and other_node must be nodes inserted
+   //!  in linear lists or be empty linear lists.
+   //! 
+   //! <b>Effects</b>: Moves all the nodes previously chained after this_node after other_node
+   //!   and vice-versa.
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static void swap_trailing_nodes(node_ptr this_node, node_ptr other_node)
+   {
+      node_ptr this_nxt    = NodeTraits::get_next(this_node);
+      node_ptr other_nxt   = NodeTraits::get_next(other_node);
+      NodeTraits::set_next(this_node, other_nxt);
+      NodeTraits::set_next(other_node, this_nxt);
+   }
+
+   //! <b>Requires</b>: b and e must be nodes of the same linear list or an empty range.
+   //!   and p must be a node of a different linear list.
+   //! 
+   //! <b>Effects</b>: Removes the nodes from (b, e] range from their linear list and inserts
+   //!   them after p in p's linear list.
+   //! 
+   //! <b>Complexity</b>: Constant 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   static void transfer_after(node_ptr p, node_ptr b, node_ptr e)
+   {
+      if (p != b && p != e) {
+         node_ptr next_b = NodeTraits::get_next(b);
+         node_ptr next_e = NodeTraits::get_next(e);
+         node_ptr next_p = NodeTraits::get_next(p);
+         NodeTraits::set_next(b, next_e);
+         NodeTraits::set_next(e, next_p);
+         NodeTraits::set_next(p, next_b);
+      }
+   }
+
+   //! <b>Effects</b>: Reverses the order of elements in the list. 
+   //! 
+   //! <b>Returns</b>: The new first node of the list. 
+   //! 
+   //! <b>Throws</b>: Nothing.
+   //! 
+   //! <b>Complexity</b>: This function is linear to the contained elements.
+   static node_ptr reverse(node_ptr p)
+   {
+      if(!p) return 0;
+      node_ptr i = NodeTraits::get_next(p); 
+      node_ptr first(p);
+      while(i){
+         node_ptr nxti(NodeTraits::get_next(i));
+         unlink_after(p);
+         NodeTraits::set_next(i, first);
+         first = i;
+         i = nxti;
+      }
+      return first;
+   }
+
+   //! <b>Effects</b>: Moves the node p n positions towards the end of the list.
+   //! 
+   //! <b>Throws</b>: Nothing.
+   //! 
+   //! <b>Complexity</b>: Linear to the number of elements plus the number of moved positions.
+   static node_ptr move_backwards(node_ptr p, std::size_t n)
+   {
+      //Null shift, or count() == 0 or 1, nothing to do
+      if(!n || !p || !NodeTraits::get_next(p))
+         return p;
+
+      node_ptr first = p;
+      bool end_found = false;
+      node_ptr new_last(0);
+      node_ptr old_last(0);
+
+      //Now find the new last node according to the shift count.
+      //If we find 0 before finding the new last node
+      //unlink p, shortcut the search now that we know the size of the list
+      //and continue.
+      for(std::size_t i = 1; i <= n; ++i){
+         new_last = first;
+         first = NodeTraits::get_next(first);
+         if(first == 0){
+            //Shortcut the shift with the modulo of the size of the list
+            n %= i;
+            if(!n)   return p;
+            old_last = new_last;
+            i = 0;
+            //Unlink p and continue the new first node search
+            first = p;
+            //unlink_after(new_last);
+            end_found = true;
+         }
+      }
+
+      //If p has not been found in the previous loop, find it
+      //starting in the new first node and unlink it
+      if(!end_found){
+         old_last = get_previous_node(first, 0);
+      }
+      
+      //Now link p after the new last node
+      NodeTraits::set_next(old_last, p);
+      NodeTraits::set_next(new_last, 0);
+      return first;
+   }
+
+   //! <b>Effects</b>: Moves the node p n positions towards the beginning of the list.
+   //! 
+   //! <b>Throws</b>: Nothing.
+   //! 
+   //! <b>Complexity</b>: Linear to the number of elements plus the number of moved positions.
+   static node_ptr move_forward(node_ptr p, std::size_t n)
+   {
+      //Null shift, or count() == 0 or 1, nothing to do
+      if(!n || !p || !NodeTraits::get_next(p))
+         return p;
+
+      node_ptr first  = p;
+
+      //Iterate until p is found to know where the current last node is.
+      //If the shift count is less than the size of the list, we can also obtain
+      //the position of the new last node after the shift.
+      node_ptr old_last(first), next_to_it, new_last(p);
+      std::size_t distance = 1;
+      while(!!(next_to_it = node_traits::get_next(old_last))){
+         if(distance++ > n)
+            new_last = node_traits::get_next(new_last);
+         old_last = next_to_it;
+      }
+      //If the shift was bigger or equal than the size, obtain the equivalent
+      //forward shifts and find the new last node.
+      if(distance <= n){
+         //Now find the equivalent forward shifts.
+         //Shortcut the shift with the modulo of the size of the list
+         std::size_t new_before_last_pos = (distance - (n % distance))% distance;
+         //If the shift is a multiple of the size there is nothing to do
+         if(!new_before_last_pos)
+            return p;
+         
+         for( new_last = p
+            ; --new_before_last_pos
+            ; new_last = node_traits::get_next(new_last)){
+            //empty
+         }
+      }
+
+      //Get the first new node
+      node_ptr new_first = node_traits::get_next(new_last);
+      //Now put the old beginning after the old end
+      NodeTraits::set_next(old_last, p);
+      NodeTraits::set_next(new_last, 0);
+      return new_first;
+   }
+};
+
+} //namespace intrusive 
+} //namespace boost 
+
+#include <boost/intrusive/detail/config_end.hpp>
+
+#endif //BOOST_INTRUSIVE_LINEAR_SLIST_ALGORITHMS_HPP
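
A minimal sketch of the new linear_slist_algorithms used standalone; the node type and traits below are illustrative assumptions that just fulfil the NodeTraits interface documented above:

   #include <boost/intrusive/linear_slist_algorithms.hpp>
   #include <cassert>

   struct my_node
   {  my_node *next_;  };

   struct my_node_traits
   {
      typedef my_node         node;
      typedef my_node *       node_ptr;
      typedef const my_node * const_node_ptr;
      static node_ptr get_next(const_node_ptr n)          {  return n->next_;  }
      static void     set_next(node_ptr n, node_ptr next) {  n->next_ = next;  }
   };

   int main()
   {
      typedef boost::intrusive::linear_slist_algorithms<my_node_traits> algo;
      my_node header, one, two;
      algo::init_header(&header);        //empty linear list: next is null, not the header itself
      algo::link_after(&header, &two);
      algo::link_after(&header, &one);   //list: header -> one -> two -> null
      assert(algo::count(&header) == 3); //the header and the two elements are counted
      algo::unlink_after(&header);       //removes "one"
      assert(algo::count(&header) == 2);
      return 0;
   }
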
Modified: trunk/boost/intrusive/list.hpp
==============================================================================
--- trunk/boost/intrusive/list.hpp	(original)
+++ trunk/boost/intrusive/list.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -208,7 +208,7 @@
       :  data_(v_traits)
    {  
       this->priv_size_traits().set_size(size_type(0));
-      node_algorithms::init(this->get_root_node());  
+      node_algorithms::init_header(this->get_root_node());  
    }
 
    //! <b>Requires</b>: Dereferencing iterator must yield an lvalue of type value_type.
@@ -224,7 +224,7 @@
       :  data_(v_traits)
    {
       this->priv_size_traits().set_size(size_type(0));
-      node_algorithms::init(this->get_root_node());
+      node_algorithms::init_header(this->get_root_node());
       this->insert(this->end(), b, e);
    }
 
@@ -258,7 +258,7 @@
    {
       node_ptr to_insert = get_real_value_traits().to_node_ptr(value);
       if(safemode_or_autounlink)
-         BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(node_algorithms::unique(to_insert));
+         BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(node_algorithms::inited(to_insert));
       node_algorithms::link_before(this->get_root_node(), to_insert);
       this->priv_size_traits().increment();
    }
@@ -277,7 +277,7 @@
    {
       node_ptr to_insert = get_real_value_traits().to_node_ptr(value);
       if(safemode_or_autounlink)
-         BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(node_algorithms::unique(to_insert));
+         BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(node_algorithms::inited(to_insert));
       node_algorithms::link_before(node_traits::get_next(this->get_root_node()), to_insert); 
       this->priv_size_traits().increment();
    }
@@ -569,21 +569,7 @@
    //! 
    //! <b>Note</b>: Does not affect the validity of iterators and references.
    void shift_backwards(size_type n = 1)
-   {
-      //Null shift, nothing to do
-      if(!n)   return;
-      node_ptr root  = this->get_root_node();
-      node_ptr last  = node_traits::get_previous(root);
-      //size() == 0 or 1, nothing to do
-      if(last == node_traits::get_next(root))   return;
-
-      node_algorithms::unlink(root);
-      //Now get the new last node
-      while(n--){
-         last = node_traits::get_previous(last);
-      }
-      node_algorithms::link_after(last, root);
-   }
+   {  node_algorithms::move_forward(this->get_root_node(), n);  }
 
    //! <b>Effects</b>: Moves forward all the elements, so that the second
    //!   element becomes the first, the third becomes the second...
@@ -595,20 +581,7 @@
    //! 
    //! <b>Note</b>: Does not affect the validity of iterators and references.
    void shift_forward(size_type n = 1)
-   {
-      //Null shift, nothing to do
-      if(!n) return;
-      node_ptr root  = this->get_root_node();
-      node_ptr first  = node_traits::get_next(root);
-      //size() == 0 or 1, nothing to do
-      if(first == node_traits::get_previous(root)) return;
-      node_algorithms::unlink(root);
-      //Now get the new first node
-      while(n--){
-         first = node_traits::get_next(first);
-      }
-      node_algorithms::link_before(first, root);
-   }
+   {  node_algorithms::move_backwards(this->get_root_node(), n);  }
 
    //! <b>Effects</b>: Erases the element pointed by i of the list.
    //!   No destructors are called.
@@ -729,7 +702,7 @@
          this->erase(this->begin(), this->end()); 
       }
       else{
-         node_algorithms::init(this->get_root_node());
+         node_algorithms::init_header(this->get_root_node());
          this->priv_size_traits().set_size(size_type(0));
       }
    }
@@ -794,7 +767,7 @@
    {
       node_ptr to_insert = get_real_value_traits().to_node_ptr(value);
       if(safemode_or_autounlink)
-         BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(node_algorithms::unique(to_insert));
+         BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(node_algorithms::inited(to_insert));
       node_algorithms::link_before(p.pointed_node(), to_insert);
       this->priv_size_traits().increment();
       return iterator(to_insert, this);
@@ -1234,7 +1207,7 @@
    static iterator s_iterator_to(reference value)
    {
       BOOST_STATIC_ASSERT((!stateful_value_traits));
-      BOOST_INTRUSIVE_INVARIANT_ASSERT(!node_algorithms::unique(real_value_traits::to_node_ptr(value)));
+      BOOST_INTRUSIVE_INVARIANT_ASSERT(!node_algorithms::inited(real_value_traits::to_node_ptr(value)));
       return iterator(real_value_traits::to_node_ptr(value), 0);
    }
 
@@ -1252,7 +1225,7 @@
    static const_iterator s_iterator_to(const_reference value) 
    {
       BOOST_STATIC_ASSERT((!stateful_value_traits));
-      BOOST_INTRUSIVE_INVARIANT_ASSERT(!node_algorithms::unique(real_value_traits::to_node_ptr(const_cast<reference> (value))));
+      BOOST_INTRUSIVE_INVARIANT_ASSERT(!node_algorithms::inited(real_value_traits::to_node_ptr(const_cast<reference> (value))));
       return const_iterator(real_value_traits::to_node_ptr(const_cast<reference> (value)), 0);
    }
 
@@ -1267,7 +1240,7 @@
    //! <b>Note</b>: Iterators and references are not invalidated.
    iterator iterator_to(reference value)
    { 
-      BOOST_INTRUSIVE_INVARIANT_ASSERT(!node_algorithms::unique(real_value_traits::to_node_ptr(value)));
+      BOOST_INTRUSIVE_INVARIANT_ASSERT(!node_algorithms::inited(real_value_traits::to_node_ptr(value)));
       return iterator(real_value_traits::to_node_ptr(value), this);
    }
 
@@ -1282,7 +1255,7 @@
    //! <b>Note</b>: Iterators and references are not invalidated.
    const_iterator iterator_to(const_reference value) const
    { 
-      BOOST_INTRUSIVE_INVARIANT_ASSERT(!node_algorithms::unique(real_value_traits::to_node_ptr(const_cast<reference> (value))));
+      BOOST_INTRUSIVE_INVARIANT_ASSERT(!node_algorithms::inited(real_value_traits::to_node_ptr(const_cast<reference> (value))));
       return const_iterator(real_value_traits::to_node_ptr(const_cast<reference> (value)), this);
    }
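
With the shift members of list now delegating to the algorithm layer, a short usage sketch (the value type is an illustrative assumption, using the default base hook):

   #include <boost/intrusive/list.hpp>

   struct my_value : public boost::intrusive::list_base_hook<>
   {};

   int main()
   {
      typedef boost::intrusive::list<my_value> list_t;
      my_value a, b, c;
      list_t l;
      l.push_back(a);  l.push_back(b);  l.push_back(c);
      l.shift_forward();    //now forwards to circular_list_algorithms::move_backwards
      l.shift_backwards();  //now forwards to circular_list_algorithms::move_forward
      l.clear();            //unlink the elements before they are destroyed (safe_link default)
      return 0;
   }
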
 
Modified: trunk/boost/intrusive/options.hpp
==============================================================================
--- trunk/boost/intrusive/options.hpp	(original)
+++ trunk/boost/intrusive/options.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -318,10 +318,8 @@
 /// @endcond
 };
 
-//!This option setter specifies the type of
-//!a void pointer. This will instruct the hook
-//!to use this type of pointer instead of the
-//!default one
+//!This option setter specifies the link mode
+//!(normal_link, safe_link or auto_unlink).
 template<link_mode_type LinkType>
 struct link_mode
 {
@@ -334,10 +332,8 @@
 /// @endcond
 };
 
-//!This option setter specifies the type of
-//!a void pointer. This will instruct the hook
-//!to use this type of pointer instead of the
-//!default one
+//!This option setter specifies if the hook
+//!should be optimized for size instead of for speed.
 template<bool Enabled>
 struct optimize_size
 {
@@ -350,6 +346,20 @@
 /// @endcond
 };
 
+//!This option setter specifies if the list container should
+//!use a linear implementation instead of a circular one.
+template<bool Enabled>
+struct linear
+{
+/// @cond
+   template<class Base>
+   struct pack : Base
+   {
+      static const bool linear = Enabled;
+   };
+/// @endcond
+};
+
 //!This option setter specifies the bucket traits
 //!class for unordered associative containers. When this option is specified,
 //!instead of using the default bucket traits, a user defined holder will be defined
@@ -475,6 +485,7 @@
       , tag<default_tag>
       , optimize_size<false>
       , store_hash<false>
+      , linear<false>
       >::type
 {};
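
A hedged sketch of how the new linear<> setter is meant to be combined with other options when declaring an slist; the value type is an illustrative assumption:

   #include <boost/intrusive/slist.hpp>

   struct my_value : public boost::intrusive::slist_base_hook<>
   {};

   //Options can be combined in any order; here the new linear<> setter selects the
   //null-terminated implementation and constant-time size bookkeeping is disabled.
   typedef boost::intrusive::slist
      < my_value
      , boost::intrusive::linear<true>
      , boost::intrusive::constant_time_size<false>
      > fast_linear_slist_t;
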
 
Modified: trunk/boost/intrusive/slist.hpp
==============================================================================
--- trunk/boost/intrusive/slist.hpp	(original)
+++ trunk/boost/intrusive/slist.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -21,6 +21,7 @@
 #include <boost/intrusive/intrusive_fwd.hpp>
 #include <boost/intrusive/slist_hook.hpp>
 #include <boost/intrusive/circular_slist_algorithms.hpp>
+#include <boost/intrusive/linear_slist_algorithms.hpp>
 #include <boost/intrusive/detail/pointer_to_other.hpp>
 #include <boost/intrusive/link_mode.hpp>
 #include <boost/intrusive/options.hpp>
@@ -46,12 +47,13 @@
 struct get_default_slist_hook
 {  typedef typename T::default_slist_hook type; };
 
-template <class ValueTraits, class SizeType, bool ConstantTimeSize>
+template <class ValueTraits, class SizeType, bool ConstantTimeSize, bool Linear>
 struct slistopt
 {
    typedef ValueTraits  value_traits;
    typedef SizeType     size_type;
    static const bool constant_time_size = ConstantTimeSize;
+   static const bool linear = Linear;
 };
 
 template <class T>
@@ -66,6 +68,7 @@
                >::type
          >
       , constant_time_size<true>
+      , linear<false>
       , size_type<std::size_t>
       >::type
 {};
@@ -129,10 +132,15 @@
       <pointer, node>::type                                          node_ptr;
    typedef typename boost::pointer_to_other
       <pointer, const node>::type                                    const_node_ptr;
-   typedef circular_slist_algorithms<node_traits>                    node_algorithms;
+   typedef typename detail::if_c
+      < Config::linear
+      , linear_slist_algorithms<node_traits>
+      , circular_slist_algorithms<node_traits>
+      >::type                                                        node_algorithms;
 
    static const bool constant_time_size = Config::constant_time_size;
    static const bool stateful_value_traits = detail::store_cont_ptr_on_it<slist_impl>::value;
+   static const bool linear = Config::linear;
 
    /// @cond
    private:
@@ -152,6 +160,8 @@
 
    //Constant-time size is incompatible with auto-unlink hooks!
    BOOST_STATIC_ASSERT(!(constant_time_size && ((int)real_value_traits::link_mode == (int)auto_unlink)));
+   //Linear singly linked lists are incompatible with auto-unlink hooks!
+   BOOST_STATIC_ASSERT(!(linear && ((int)real_value_traits::link_mode == (int)auto_unlink)));
 
    node_ptr get_root_node()
    {  return node_ptr(&data_.root_plus_size_.root_);  }
@@ -220,7 +230,7 @@
       :  data_(v_traits)
    {
       this->priv_size_traits().set_size(size_type(0));
-      node_algorithms::init(this->get_root_node()); 
+      node_algorithms::init_header(this->get_root_node()); 
    }
 
    //! <b>Requires</b>: Dereferencing iterator must yield an lvalue of type value_type.
@@ -236,7 +246,7 @@
       :  data_(v_traits)
    {
       this->priv_size_traits().set_size(size_type(0));
-      node_algorithms::init(this->get_root_node());
+      node_algorithms::init_header(this->get_root_node());
       insert_after(before_begin(), b, e);
    }
 
@@ -266,7 +276,7 @@
          this->erase_after(this->before_begin(), this->end()); 
       }
       else{
-         node_algorithms::init(this->get_root_node());
+         node_algorithms::init_header(this->get_root_node());
          this->priv_size_traits().set_size(size_type(0));
       }
    }
@@ -299,7 +309,7 @@
    {
       node_ptr to_insert = get_real_value_traits().to_node_ptr(value);
       if(safemode_or_autounlink)
-         BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(node_algorithms::unique(to_insert));
+         BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(node_algorithms::inited(to_insert));
       node_algorithms::link_after(this->get_root_node(), to_insert); 
       this->priv_size_traits().increment();
    }
@@ -385,7 +395,7 @@
    //! 
    //! <b>Complexity</b>: Constant.
    iterator end() 
-   { return iterator (this->get_root_node(), this); }
+   { return iterator (linear ? 0 : this->get_root_node(), this); }
 
    //! <b>Effects</b>: Returns a const_iterator to the end of the list.
    //! 
@@ -393,7 +403,7 @@
    //! 
    //! <b>Complexity</b>: Constant.
    const_iterator end() const 
-   { return const_iterator (uncast(this->get_root_node()), this); }
+   { return const_iterator (linear ? 0 : uncast(this->get_root_node()), this); }
 
    //! <b>Effects</b>: Returns a const_iterator to the end of the list.
    //! 
@@ -401,7 +411,7 @@
    //! 
    //! <b>Complexity</b>: Constant.
    const_iterator cend() const 
-   { return const_iterator (uncast(this->get_root_node()), this); }
+   { return this->end(); }
 
    //! <b>Effects</b>: Returns an iterator that points to a position
    //!   before the first element. Equivalent to "end()"
@@ -410,7 +420,7 @@
    //! 
    //! <b>Complexity</b>: Constant.
    iterator before_begin() 
-   { return end(); }
+   { return iterator(this->get_root_node(), this); }
 
    //! <b>Effects</b>: Returns an iterator that points to a position
    //!   before the first element. Equivalent to "end()"
@@ -419,7 +429,7 @@
    //! 
    //! <b>Complexity</b>: Constant.
    const_iterator before_begin() const 
-   { return end(); }
+   { return const_iterator(uncast(this->get_root_node()), this); }
 
    //! <b>Effects</b>: Returns an iterator that points to a position
    //!   before the first element. Equivalent to "end()"
@@ -428,7 +438,7 @@
    //! 
    //! <b>Complexity</b>: Constant.
    const_iterator cbefore_begin() const 
-   { return end(); }
+   { return this->before_begin(); }
 
    //! <b>Precondition</b>: end_iterator must be a valid end iterator
    //!   of slist.
@@ -487,7 +497,7 @@
    //! <b>Note</b>: Does not affect the validity of iterators and references.
    void swap(slist_impl& other)
    {
-      node_algorithms::swap_nodes(this->get_root_node(), other.get_root_node());
+      priv_swap_lists(this->get_root_node(), other.get_root_node(), detail::bool_<linear>());
       if(constant_time_size){
          size_type backup = this->priv_size_traits().get_size();
          this->priv_size_traits().set_size(other.priv_size_traits().get_size());
@@ -506,43 +516,7 @@
    //! <b>Note</b>: Does not affect the validity of iterators and references.
    void shift_backwards(size_type n = 1)
    {
-      //Null shift, nothing to do
-      if(!n) return;
-      node_ptr root = this->get_root_node();
-      node_ptr first  = node_traits::get_next(root);
-
-      //size() == 0 or 1, nothing to do
-      if(node_traits::get_next(first) == root) return;
-
-      //Iterate until the root node is found to know where the current last node is.
-      //If the shift count is less than the size of the list, we can also obtain
-      //the position of the new last node after the shift.
-      node_ptr old_last(first), next_to_it, new_last(root);
-      size_type distance = 1;
-      while(root != (next_to_it = node_traits::get_next(old_last))){
-         if(++distance > n)
-            new_last = node_traits::get_next(new_last);
-         old_last = next_to_it;
-      }
-      //If the shift was bigger or equal than the size, obtain the equivalent
-      //forward shifts and find the new last node.
-      if(distance <= n){
-         //Now find the equivalent forward shifts.
-         //Shorcut the shift with the modulo of the size of the list
-         size_type new_before_last_pos = (distance - (n % distance))% distance;
-         //If the shift is a multiple of the size there is nothing to do
-         if(!new_before_last_pos)   return;
-         
-         for( new_last = root
-            ; new_before_last_pos--
-            ; new_last = node_traits::get_next(new_last)){
-            //empty
-         }
-      }
-
-      //Now unlink the root node and link it after the new last node
-      node_algorithms::unlink_after(old_last);
-      node_algorithms::link_after(new_last, root);
+      priv_shift_backwards(n, detail::bool_<linear>());
    }
 
    //! <b>Effects</b>: Moves forward all the elements, so that the second
@@ -556,43 +530,7 @@
    //! <b>Note</b>: Does not affect the validity of iterators and references.
    void shift_forward(size_type n = 1)
    {
-      //Null shift, nothing to do
-      if(!n) return;
-      node_ptr root = this->get_root_node();
-      node_ptr first  = node_traits::get_next(root);
-
-      //size() == 0 or 1, nothing to do
-      if(node_traits::get_next(first) == root) return;
-
-      bool end_found = false;
-      node_ptr new_last(0);
-
-      //Now find the new last node according to the shift count.
-      //If we find the root node before finding the new last node
-      //unlink the root, shortcut the search now that we know the size of the list
-      //and continue.
-      for(size_type i = 1; i <= n; ++i){
-         new_last = first;
-         first = node_traits::get_next(first);
-         if(first == root){
-            //Shorcut the shift with the modulo of the size of the list
-            n %= i;
-            i = 0;
-            //Unlink the root node and continue the new first node search
-            first = node_traits::get_next(first);
-            node_algorithms::unlink_after(new_last);
-            end_found = true;
-         }
-      }
-
-      //If the root node has not been found in the previous loop, find it
-      //starting in the new first node and unlink it
-      if(!end_found){
-         node_algorithms::unlink_after(node_algorithms::get_previous_node(first, root));
-      }
-
-      //Now link the root node after the new last node
-      node_algorithms::link_after(new_last, root);
+      priv_shift_forward(n, detail::bool_<linear>());
    }
 
    //! <b>Requires</b>: Disposer::operator()(pointer) shouldn't throw.
@@ -643,7 +581,7 @@
    {
       node_ptr n = get_real_value_traits().to_node_ptr(value);
       if(safemode_or_autounlink)
-         BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(node_algorithms::unique(n));
+         BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(node_algorithms::inited(n));
       node_algorithms::link_after(prev_p.pointed_node(), n);
       this->priv_size_traits().increment();
       return iterator (n, this);
@@ -712,16 +650,7 @@
    //! <b>Note</b>: Invalidates the iterators (but not the references) to the
    //!   erased element.
    iterator erase_after(iterator prev)
-   {
-      iterator it(prev); ++it;
-      node_ptr to_erase(it.pointed_node());
-      node_algorithms::unlink_after(prev.pointed_node());
-      this->priv_size_traits().decrement();
-      iterator ret(++prev);
-      if(safemode_or_autounlink)
-         node_algorithms::init(to_erase);
-      return ret;
-   }
+   {  return this->erase_after_and_dispose(prev, detail::null_disposer());  }
 
    //! <b>Effects</b>: Erases the range (before_first, last) from
    //!   the list. No destructors are called.
@@ -731,18 +660,12 @@
    //! 
    //! <b>Throws</b>: Nothing.
    //! 
-   //! <b>Complexity</b>: Lineal to the elements (last - before_first).
+   //! <b>Complexity</b>: Linear to the elements (last - before_first + 1).
    //! 
    //! <b>Note</b>: Invalidates the iterators (but not the references) to the
    //!   erased element.
    iterator erase_after(iterator before_first, iterator last)
-   {
-      iterator first;
-      while(++(first = before_first) != last){
-         this->erase_after(before_first);
-      }
-      return last;
-   }
+   {  return this->erase_after_and_dispose(before_first, last, detail::null_disposer()); }
 
    //! <b>Effects</b>: Erases the element pointed by i of the list. 
    //!   No destructors are called.
@@ -794,11 +717,16 @@
    template<class Disposer>
    iterator erase_after_and_dispose(iterator prev, Disposer disposer)
    {
-      iterator it(prev); ++it;
+      iterator it(prev);
+      ++it;
       node_ptr to_erase(it.pointed_node());
-      iterator ret(this->erase_after(prev));
+      ++it;
+      node_algorithms::unlink_after(prev.pointed_node());
+      this->priv_size_traits().decrement();
+      if(safemode_or_autounlink)
+         node_algorithms::init(to_erase);
       disposer(get_real_value_traits().to_value_ptr(to_erase));
-      return ret;
+      return it;
    }
 
    //! <b>Requires</b>: Disposer::operator()(pointer) shouldn't throw.
@@ -818,9 +746,10 @@
    template<class Disposer>
    iterator erase_after_and_dispose(iterator before_first, iterator last, Disposer disposer)
    {
-      iterator first;
-      while(++(first = before_first) != last){
-         this->erase_after_and_dispose(before_first, disposer);
+      iterator next(before_first);
+      ++next;
+      while(next != last){
+         next = this->erase_after_and_dispose(before_first, disposer);
       }
       return last;
    }
@@ -931,7 +860,7 @@
          iterator last_x(x.previous(x.end()));
          node_algorithms::transfer_after
             ( prev.pointed_node()
-            , x.end().pointed_node()
+            , x.before_begin().pointed_node()
             , last_x.pointed_node());
          this->priv_size_traits().set_size(this->priv_size_traits().get_size() + x.priv_size_traits().get_size());
          x.priv_size_traits().set_size(size_type(0));
@@ -1133,12 +1062,12 @@
                (last_inserted.pointed_node(), carry.end().pointed_node());
             iterator last_element(p, this);
             if(constant_time_size){
-               counter[i].splice_after( counter[i].end(), carry
+               counter[i].splice_after( counter[i].before_begin(), carry
                                       , carry.before_begin(), last_element
                                       , carry.size());
             }
             else{
-               counter[i].splice_after( counter[i].end(), carry
+               counter[i].splice_after( counter[i].before_begin(), carry
                                       , carry.before_begin(), last_element);
             }
             if(i == fill)
@@ -1153,11 +1082,11 @@
             (last_inserted.pointed_node(), counter[--fill].end().pointed_node());
          iterator last_element(p, this);
          if(constant_time_size){
-            this->splice_after( end(), counter[fill], counter[fill].before_begin()
+            this->splice_after( before_begin(), counter[fill], counter[fill].before_begin()
                               , last_element, counter[fill].size());
          }
          else{
-            this->splice_after( end(), counter[fill], counter[fill].before_begin()
+            this->splice_after( before_begin(), counter[fill], counter[fill].before_begin()
                               , last_element);
          }
       }
@@ -1201,14 +1130,14 @@
    template<class Predicate>
    iterator merge(slist_impl& x, Predicate p) 
    {
-      iterator a(before_begin()), e(end()), ax(x.before_begin());
+      iterator a(before_begin()), e(end()), ax(x.before_begin()), ex(x.end());
       iterator last_inserted(e);
       iterator a_next;
       while(++(a_next = a) != e && !x.empty()) {
          iterator ix(ax);
          iterator cx;
          size_type n(0);
-         while(++(cx = ix) != ax && p(*cx, *a_next)){
+         while(++(cx = ix) != ex && p(*cx, *a_next)){
             ++ix; ++n;
          }
          if(ax != ix){
@@ -1235,7 +1164,7 @@
    //! 
    //! <b>Note</b>: Iterators and references are not invalidated
    void merge(slist_impl& x)
-   { this->merge(x, std::less<value_type>()); }
+   {  this->merge(x, std::less<value_type>());  }
 
    //! <b>Effects</b>: Reverses the order of elements in the list. 
    //! 
@@ -1245,7 +1174,7 @@
    //! 
    //! <b>Note</b>: Iterators and references are not invalidated
    void reverse() 
-   {  node_algorithms::reverse(this->get_root_node());  }
+   {  priv_reverse(detail::bool_<linear>()); }
 
    //! <b>Effects</b>: Removes all the elements that compare equal to value.
    //!   No destructors are called.
@@ -1406,7 +1335,7 @@
    static iterator s_iterator_to(reference value) 
    {
       BOOST_STATIC_ASSERT((!stateful_value_traits));
-      BOOST_INTRUSIVE_INVARIANT_ASSERT (!node_algorithms::unique(value_traits::to_node_ptr(value)));
+      //BOOST_INTRUSIVE_INVARIANT_ASSERT (!node_algorithms::inited(value_traits::to_node_ptr(value)));
       return iterator (value_traits::to_node_ptr(value), 0);
    }
 
@@ -1424,7 +1353,7 @@
    static const_iterator s_iterator_to(const_reference value) 
    {
       BOOST_STATIC_ASSERT((!stateful_value_traits));
-      BOOST_INTRUSIVE_INVARIANT_ASSERT (!node_algorithms::unique(value_traits::to_node_ptr(const_cast<reference> (value))));
+      //BOOST_INTRUSIVE_INVARIANT_ASSERT (!node_algorithms::inited(value_traits::to_node_ptr(const_cast<reference> (value))));
       return const_iterator (value_traits::to_node_ptr(const_cast<reference> (value)), 0);
    }
 
@@ -1439,7 +1368,7 @@
    //! <b>Note</b>: Iterators and references are not invalidated.
    iterator iterator_to(reference value) 
    { 
-      BOOST_INTRUSIVE_INVARIANT_ASSERT (!node_algorithms::unique(value_traits::to_node_ptr(value)));
+      //BOOST_INTRUSIVE_INVARIANT_ASSERT (!node_algorithms::inited(value_traits::to_node_ptr(value)));
       return iterator (value_traits::to_node_ptr(value), this);
    }
 
@@ -1454,7 +1383,7 @@
    //! <b>Note</b>: Iterators and references are not invalidated.
    const_iterator iterator_to(const_reference value) const
    { 
-      BOOST_INTRUSIVE_INVARIANT_ASSERT (!node_algorithms::unique(value_traits::to_node_ptr(const_cast<reference> (value))));
+      //BOOST_INTRUSIVE_INVARIANT_ASSERT (!node_algorithms::inited(value_traits::to_node_ptr(const_cast<reference> (value))));
       return const_iterator (value_traits::to_node_ptr(const_cast<reference> (value)), this);
    }
 
@@ -1487,8 +1416,54 @@
    }
 
    private:
+
+   void priv_reverse(detail::bool_<false>)
+   {  node_algorithms::reverse(this->get_root_node());   }
+
+   void priv_reverse(detail::bool_<true>)
+   {
+      node_ptr new_first = node_algorithms::reverse
+         (node_traits::get_next(this->get_root_node()));
+      node_traits::set_next(this->get_root_node(), new_first);
+   }
+
+   void priv_shift_backwards(size_type n, detail::bool_<false>)
+   {
+      node_algorithms::move_forward(this->get_root_node(), (std::size_t)n);   
+   }
+
+   void priv_shift_backwards(size_type n, detail::bool_<true>)
+   {
+      node_ptr new_first = node_algorithms::move_forward
+         (node_traits::get_next(this->get_root_node()), (std::size_t)n);   
+      node_traits::set_next(this->get_root_node(), new_first);
+   }
+
+   void priv_shift_forward(size_type n, detail::bool_<false>)
+   {
+      node_algorithms::move_backwards(this->get_root_node(), (std::size_t)n);   
+   }
+
+   void priv_shift_forward(size_type n, detail::bool_<true>)
+   {
+      node_ptr new_first = node_algorithms::move_backwards
+         (node_traits::get_next(this->get_root_node()), (std::size_t)n);   
+      node_traits::set_next(this->get_root_node(), new_first);
+   }
+
+   //circular version
+   static void priv_swap_lists(node_ptr this_node, node_ptr other_node, detail::bool_<false>)
+   {  node_algorithms::swap_nodes(this_node, other_node); }
+
+   //linear version
+   static void priv_swap_lists(node_ptr this_node, node_ptr other_node, detail::bool_<true>)
+   {  node_algorithms::swap_trailing_nodes(this_node, other_node); }
+
    static slist_impl &priv_container_from_end_iterator(const const_iterator &end_iterator)
    {
+      //Obtaining the container from the end iterator is not possible with linear
+      //singly linked lists (because "end" is represented by the null pointer)
+      BOOST_STATIC_ASSERT(!linear);
       root_plus_size *r = detail::parent_from_member<root_plus_size, node>
          ( detail::get_pointer(end_iterator.pointed_node()), &root_plus_size::root_);
       data_t *d = detail::parent_from_member<data_t, root_plus_size>
@@ -1620,13 +1595,13 @@
 #ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
 template<class T, class ...Options>
 #else
-template<class T, class O1 = none, class O2 = none, class O3 = none>
+template<class T, class O1 = none, class O2 = none, class O3 = none, class O4 = none>
 #endif
 struct make_slist
 {
    /// @cond
    typedef typename pack_options
-      < slist_defaults<T>, O1, O2, O3>::type packed_options;
+      < slist_defaults<T>, O1, O2, O3, O4>::type packed_options;
    typedef typename detail::get_value_traits
       <T, typename packed_options::value_traits>::type value_traits;
    typedef slist_impl
@@ -1635,6 +1610,7 @@
          < value_traits
          , typename packed_options::size_type
          , packed_options::constant_time_size
+         , packed_options::linear
          >
       > implementation_defined;
    /// @endcond
@@ -1643,12 +1619,12 @@
 
 
 #ifndef BOOST_INTRUSIVE_DOXYGEN_INVOKED
-template<class T, class O1, class O2, class O3>
+template<class T, class O1, class O2, class O3, class O4>
 class slist
-   :  public make_slist<T, O1, O2, O3>::type
+   :  public make_slist<T, O1, O2, O3, O4>::type
 {
    typedef typename make_slist
-      <T, O1, O2, O3>::type   Base;
+      <T, O1, O2, O3, O4>::type   Base;
    typedef typename Base::real_value_traits  real_value_traits;
    //Assert if passed value traits are compatible with the type
    BOOST_STATIC_ASSERT((detail::is_same<typename real_value_traits::value_type, T>::value));
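
The hunks above wire the new linear<> option through slist: public members such as reverse() now dispatch on detail::bool_<linear>() so a single slist_impl covers both the circular and the null-terminated layout, and make_slist/slist grow a fourth option slot to carry the flag. A minimal, self-contained sketch of that tag-dispatch idiom; bool_<> and tiny_slist are illustrative stand-ins, not the real slist_impl internals:

   //Tag dispatch on a boolean compile-time option (illustrative names only)
   #include <iostream>

   template<bool B> struct bool_ {};

   template<bool Linear>
   class tiny_slist
   {
      public:
      void reverse()
      {  priv_reverse(bool_<Linear>());  }   //overload chosen at compile time

      private:
      void priv_reverse(bool_<false>)        //circular list: the header is part of the ring
      {  std::cout << "reverse circular list" << std::endl;  }

      void priv_reverse(bool_<true>)         //linear list: the last node points to null
      {  std::cout << "reverse linear list" << std::endl;  }
   };

   int main()
   {
      tiny_slist<false>().reverse();
      tiny_slist<true>().reverse();
      return 0;
   }
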
Modified: trunk/boost/intrusive/slist_hook.hpp
==============================================================================
--- trunk/boost/intrusive/slist_hook.hpp	(original)
+++ trunk/boost/intrusive/slist_hook.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -19,6 +19,7 @@
 #include <boost/intrusive/detail/utilities.hpp>
 #include <boost/intrusive/detail/slist_node.hpp>
 #include <boost/intrusive/circular_slist_algorithms.hpp>
+#include <boost/intrusive/link_mode.hpp>
 #include <boost/intrusive/options.hpp>
 #include <boost/intrusive/detail/generic_hook.hpp>
 
@@ -31,6 +32,7 @@
 {
    typedef circular_slist_algorithms<slist_node_traits<VoidPointer> > type;
 };
+
 /// @endcond
 
 //! Helper metafunction to define a \c slist_base_hook that yields to the same
Modified: trunk/libs/interprocess/doc/Jamfile.v2
==============================================================================
--- trunk/libs/interprocess/doc/Jamfile.v2	(original)
+++ trunk/libs/interprocess/doc/Jamfile.v2	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -26,6 +26,7 @@
         <doxygen:param>HIDE_UNDOC_MEMBERS=YES
         <doxygen:param>EXTRACT_PRIVATE=NO
         <doxygen:param>EXPAND_ONLY_PREDEF=YES
+        <doxygen:param>PREDEFINED=BOOST_INTERPROCESS_DOXYGEN_INVOKED
         <xsl:param>"boost.doxygen.reftitle=Boost.Interprocess Reference"
    ;
 
Modified: trunk/libs/interprocess/doc/interprocess.qbk
==============================================================================
--- trunk/libs/interprocess/doc/interprocess.qbk	(original)
+++ trunk/libs/interprocess/doc/interprocess.qbk	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -6418,7 +6418,7 @@
    [classref boost::interprocess::named_mutex named_mutex].
 
 *  Reduced template bloat for node and adaptive allocators extracting node
-   implementation to a class taht only depends on the memory algorithm, instead of
+   implementation to a class that only depends on the memory algorithm, instead of
    the segment manager + node size + node number...
 
 *  Fixed bug in `mapped_region` in UNIX when mapping address was provided but
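
The first bullet above (reduced template bloat) is the change visible throughout the allocator headers in this commit: the shared node pool is keyed on the memory algorithm alone, while node size and related parameters are supplied separately, so several allocators can share one pool instantiation. A rough sketch of that split with hypothetical names; the real code lives in allocators/detail/allocator_common.hpp and node_pool.hpp:

   //Illustrative only: a pool template that depends on the memory algorithm alone
   //and takes the node size at run time, so one instantiation can back allocators
   //for many value types.
   #include <cstddef>
   #include <new>

   struct dummy_algorithm {};   //stands in for e.g. rbtree_best_fit<mutex_family>

   template<class MemoryAlgorithm>
   class shared_node_pool
   {
      public:
      explicit shared_node_pool(std::size_t node_size)
         :  node_size_(node_size)
      {}

      void *allocate_node()          {  return ::operator new(node_size_);  }
      void  deallocate_node(void *p) {  ::operator delete(p);  }

      private:
      std::size_t node_size_;   //runtime value instead of another template parameter
   };

   int main()
   {
      shared_node_pool<dummy_algorithm> pool(64);
      void *n = pool.allocate_node();
      pool.deallocate_node(n);
      return 0;
   }
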
Modified: trunk/libs/interprocess/proj/vc7ide/interprocesslib.vcproj
==============================================================================
--- trunk/libs/interprocess/proj/vc7ide/interprocesslib.vcproj	(original)
+++ trunk/libs/interprocess/proj/vc7ide/interprocesslib.vcproj	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -173,6 +173,9 @@
                                         RelativePath="..\..\..\..\boost\interprocess\allocators\detail\adaptive_node_pool.hpp">
                                 </File>
                                 <File
+					RelativePath="..\..\..\..\boost\interprocess\allocators\detail\allocator_common.hpp">
+				</File>
+				<File
                                         RelativePath="..\..\..\..\boost\interprocess\allocators\detail\node_pool.hpp">
                                 </File>
                                 <File
@@ -571,6 +574,9 @@
                         <File
                                 RelativePath="..\..\test\util.hpp">
                         </File>
+			<File
+				RelativePath="..\..\test\vector_test.hpp">
+			</File>
                 </Filter>
                 <Filter
                         Name="Proj"
Modified: trunk/libs/interprocess/test/adaptive_node_pool_test.cpp
==============================================================================
--- trunk/libs/interprocess/test/adaptive_node_pool_test.cpp	(original)
+++ trunk/libs/interprocess/test/adaptive_node_pool_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -17,7 +17,7 @@
    typedef managed_shared_memory::segment_manager segment_manager;
 
    typedef detail::private_adaptive_node_pool
-      <segment_manager, 4, 64, 64> node_pool_t;
+      <segment_manager, 4, 64, 64, 5> node_pool_t;
 
    if(!test::test_all_node_pool<node_pool_t>())
       return 1;
Modified: trunk/libs/interprocess/test/adaptive_pool_test.cpp
==============================================================================
--- trunk/libs/interprocess/test/adaptive_pool_test.cpp	(original)
+++ trunk/libs/interprocess/test/adaptive_pool_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -11,11 +11,13 @@
 #include <boost/interprocess/detail/config_begin.hpp>
 #include <boost/interprocess/managed_shared_memory.hpp>
 #include <boost/interprocess/containers/list.hpp>
+#include <boost/interprocess/containers/vector.hpp>
 #include <boost/interprocess/allocators/adaptive_pool.hpp>
 #include "print_container.hpp"
 #include "dummy_test_allocator.hpp"
 #include "movable_int.hpp"
 #include "list_test.hpp"
+#include "vector_test.hpp"
 
 using namespace boost::interprocess;
 
@@ -24,14 +26,36 @@
 typedef adaptive_pool
    <int, managed_shared_memory::segment_manager> shmem_node_allocator_t;
 
+typedef detail::adaptive_pool_v1
+   <int, managed_shared_memory::segment_manager> shmem_node_allocator_v1_t;
+
+//Explicit instantiations to catch compilation errors
+template class adaptive_pool<int, managed_shared_memory::segment_manager>;
+template class detail::adaptive_pool_v1<int, managed_shared_memory::segment_manager>;
+
 //Alias list types
 typedef list<int, shmem_node_allocator_t>    MyShmList;
+typedef list<int, shmem_node_allocator_v1_t> MyShmListV1;
+
+//Alias vector types
+typedef vector<int, shmem_node_allocator_t>    MyShmVector;
+typedef vector<int, shmem_node_allocator_v1_t> MyShmVectorV1;
+
 
 int main ()
 {
    if(test::list_test<managed_shared_memory, MyShmList, true>())
       return 1;
 
+   if(test::list_test<managed_shared_memory, MyShmListV1, true>())
+      return 1;
+
+   if(test::vector_test<managed_shared_memory, MyShmVector>())
+      return 1;
+
+   if(test::vector_test<managed_shared_memory, MyShmVectorV1>())
+      return 1;
+
    return 0;
 }
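
This test and the other allocator tests in the commit also gain explicit instantiations ("template class ..."), which force every member of the allocator templates to be compiled even when no test happens to call it. The technique in isolation, with a throwaway template:

   //Explicit instantiation as a pure compile-time check (illustrative template)
   template<class T>
   struct box
   {
      T value;
      T get() const {  return value;  }   //only type-checked once instantiated
   };

   template struct box<int>;              //forces compilation of every member of box<int>

   int main()
   {  return 0;  }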
 
Modified: trunk/libs/interprocess/test/cached_adaptive_pool_test.cpp
==============================================================================
--- trunk/libs/interprocess/test/cached_adaptive_pool_test.cpp	(original)
+++ trunk/libs/interprocess/test/cached_adaptive_pool_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -16,6 +16,7 @@
 #include "dummy_test_allocator.hpp"
 #include "movable_int.hpp"
 #include "list_test.hpp"
+#include "vector_test.hpp"
 
 using namespace boost::interprocess;
 
@@ -25,14 +26,37 @@
    <int, managed_shared_memory::segment_manager>
    cached_node_allocator_t;
 
+typedef detail::cached_adaptive_pool_v1
+   <int, managed_shared_memory::segment_manager>
+   cached_node_allocator_v1_t;
+
+//Explicit instantiations to catch compilation errors
+template class cached_adaptive_pool<int, managed_shared_memory::segment_manager>;
+template class detail::cached_adaptive_pool_v1<int, managed_shared_memory::segment_manager>;
+
+
 //Alias list types
 typedef list<int, cached_node_allocator_t>    MyShmList;
+typedef list<int, cached_node_allocator_v1_t> MyShmListV1;
+
+//Alias vector types
+typedef vector<int, cached_node_allocator_t>    MyShmVector;
+typedef vector<int, cached_node_allocator_v1_t> MyShmVectorV1;
 
 int main ()
 {
    if(test::list_test<managed_shared_memory, MyShmList, true>())
       return 1;
 
+   if(test::list_test<managed_shared_memory, MyShmListV1, true>())
+      return 1;
+
+   if(test::vector_test<managed_shared_memory, MyShmVector>())
+      return 1;
+
+   if(test::vector_test<managed_shared_memory, MyShmVectorV1>())
+      return 1;
+
    return 0;
 }
 
Modified: trunk/libs/interprocess/test/cached_node_allocator_test.cpp
==============================================================================
--- trunk/libs/interprocess/test/cached_node_allocator_test.cpp	(original)
+++ trunk/libs/interprocess/test/cached_node_allocator_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -1,6 +1,6 @@
 //////////////////////////////////////////////////////////////////////////////
 //
-// (C) Copyright Ion Gaztanaga 2004-2007. Distributed under the Boost
+// (C) Copyright Ion Gaztanaga 2004-2008. Distributed under the Boost
 // Software License, Version 1.0. (See accompanying file
 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 //
@@ -16,23 +16,40 @@
 #include "dummy_test_allocator.hpp"
 #include "movable_int.hpp"
 #include "list_test.hpp"
+#include "vector_test.hpp"
 
 using namespace boost::interprocess;
 
-//We will work with wide characters for shared memory objects
 //Alias a integer node allocator type
 typedef cached_node_allocator
    <int, managed_shared_memory::segment_manager>
    cached_node_allocator_t;
+typedef detail::cached_node_allocator_v1
+   <int, managed_shared_memory::segment_manager>
+   cached_node_allocator_v1_t;
+
+//Explicit instantiations to catch compilation errors
+template class cached_node_allocator<int, managed_shared_memory::segment_manager>;
+template class detail::cached_node_allocator_v1<int, managed_shared_memory::segment_manager>;
 
 //Alias list types
 typedef list<int, cached_node_allocator_t>    MyShmList;
+typedef list<int, cached_node_allocator_v1_t> MyShmListV1;
+
+//Alias vector types
+typedef vector<int, cached_node_allocator_t>    MyShmVector;
+typedef vector<int, cached_node_allocator_v1_t> MyShmVectorV1;
 
 int main ()
 {
    if(test::list_test<managed_shared_memory, MyShmList, true>())
       return 1;
-
+   if(test::list_test<managed_shared_memory, MyShmListV1, true>())
+      return 1;
+   if(test::vector_test<managed_shared_memory, MyShmVector>())
+      return 1;
+   if(test::vector_test<managed_shared_memory, MyShmVectorV1>())
+      return 1;
    return 0;
 }
 
Modified: trunk/libs/interprocess/test/file_mapping_test.cpp
==============================================================================
--- trunk/libs/interprocess/test/file_mapping_test.cpp	(original)
+++ trunk/libs/interprocess/test/file_mapping_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -11,7 +11,6 @@
 #include <boost/interprocess/detail/config_begin.hpp>
 #include <fstream>
 #include <iostream>
-#include <ios>
 #include <boost/interprocess/file_mapping.hpp>
 #include <boost/interprocess/mapped_region.hpp>
 #include <memory>
Modified: trunk/libs/interprocess/test/map_test.hpp
==============================================================================
--- trunk/libs/interprocess/test/map_test.hpp	(original)
+++ trunk/libs/interprocess/test/map_test.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -296,6 +296,8 @@
       }
 
       //Now do count exercise
+      shmmap->erase(shmmap->begin(), shmmap->end());
+      shmmultimap->erase(shmmultimap->begin(), shmmultimap->end());
       shmmap->clear();
       shmmultimap->clear();
 
Modified: trunk/libs/interprocess/test/memory_algorithm_test.cpp
==============================================================================
--- trunk/libs/interprocess/test/memory_algorithm_test.cpp	(original)
+++ trunk/libs/interprocess/test/memory_algorithm_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -19,47 +19,67 @@
 #include <string>
 #include "get_process_id_name.hpp"
 
+using namespace boost::interprocess;
+
+const int memsize = 16384;
+const char *const shMemName = test::get_process_id_name();
+
+int test_simple_seq_fit()
+{
+   //A shared memory with simple sequential fit algorithm
+   typedef basic_managed_shared_memory
+      <char
+      ,simple_seq_fit<mutex_family>
+      ,null_index
+      > my_managed_shared_memory;
+
+   //Create shared memory
+   shared_memory_object::remove(shMemName);
+   my_managed_shared_memory segment(create_only, shMemName, memsize);
+
+   //Now take the segment manager and launch memory test
+   if(!test::test_all_allocation(*segment.get_segment_manager())){
+      return 1;
+   }
+   return 0;
+}
+
+template<std::size_t Alignment>
+int test_rbtree_best_fit()
+{
+   //A shared memory with red-black tree best fit algorithm
+   typedef basic_managed_shared_memory
+      <char
+      ,rbtree_best_fit<mutex_family, offset_ptr<void>, Alignment>
+      ,null_index
+      > my_managed_shared_memory;
+
+   //Create shared memory
+   shared_memory_object::remove(shMemName);
+   my_managed_shared_memory segment(create_only, shMemName, memsize);
+
+   //Now take the segment manager and launch memory test
+   if(!test::test_all_allocation(*segment.get_segment_manager())){
+      return 1;
+   }
+   return 0;
+}
+
 int main ()
 {
-   using namespace boost::interprocess;
-   const int memsize = 16384;
-   const char *const shMemName = test::get_process_id_name();
-
-   {
-      //A shared memory with simple sequential fit algorithm
-      typedef basic_managed_shared_memory
-         <char
-         ,simple_seq_fit<mutex_family>
-         ,null_index
-         > my_managed_shared_memory;
-
-      //Create shared memory
-      shared_memory_object::remove(shMemName);
-      my_managed_shared_memory segment(create_only, shMemName, memsize);
-
-      //Now take the segment manager and launch memory test
-      if(!test::test_all_allocation(*segment.get_segment_manager())){
-         return 1;
-      }
-   }
-
-   {
-      //A shared memory with red-black tree best fit algorithm
-      typedef basic_managed_shared_memory
-         <char
-         ,rbtree_best_fit<mutex_family>
-         ,null_index
-         > my_managed_shared_memory;
-
-      //Create shared memory
-      shared_memory_object::remove(shMemName);
-      my_managed_shared_memory segment(create_only, shMemName, memsize);
-
-      //Now take the segment manager and launch memory test
-      if(!test::test_all_allocation(*segment.get_segment_manager())){
-         return 1;
-      }
+   if(test_simple_seq_fit()){
+      return 1;
+   }
+   if(test_rbtree_best_fit<4>()){
+      return 1;
+   }
+   if(test_rbtree_best_fit<8>()){
+      return 1;
    }
+   if(test_rbtree_best_fit<16>()){
+      return 1;
+   }
+
    shared_memory_object::remove(shMemName);
    return 0;
 }
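
memory_algorithm_test.cpp is reorganized so the rbtree_best_fit case is a function template over the allocator's alignment and is run for 4, 8 and 16 bytes, matching the size-overhead work mentioned in the log message. A stripped-down model of parameterizing a test over a compile-time alignment (names are illustrative):

   //Illustrative only: run the same test body for several compile-time alignments
   #include <cstddef>
   #include <iostream>

   template<std::size_t Alignment>
   int run_alignment_case()
   {
      //The real test builds a managed segment over
      //rbtree_best_fit<mutex_family, offset_ptr<void>, Alignment>;
      //here we only check that the value is a power of two.
      if(Alignment == 0 || (Alignment & (Alignment - 1)) != 0)
         return 1;
      std::cout << "alignment " << Alignment << " ok" << std::endl;
      return 0;
   }

   int main()
   {
      if(run_alignment_case<4>())   return 1;
      if(run_alignment_case<8>())   return 1;
      if(run_alignment_case<16>())  return 1;
      return 0;
   }
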
Modified: trunk/libs/interprocess/test/memory_algorithm_test_template.hpp
==============================================================================
--- trunk/libs/interprocess/test/memory_algorithm_test_template.hpp	(original)
+++ trunk/libs/interprocess/test/memory_algorithm_test_template.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -16,7 +16,7 @@
 #include <iostream>
 #include <new>
 #include <utility>
-#include <cstring>
+#include <cstring>   //std::memset
 #include <cstdio>    //std::remove
 
 namespace boost { namespace interprocess { namespace test {
@@ -38,6 +38,8 @@
          void *ptr = a.allocate(i, std::nothrow);
          if(!ptr)
             break;
+         std::size_t size = a.size(ptr);
+         std::memset(ptr, 0, size);
          buffers.push_back(ptr);
       }
 
@@ -94,6 +96,8 @@
       void *ptr = a.allocate(i*2, std::nothrow);
       if(!ptr)
          break;
+      std::size_t size = a.size(ptr);
+      std::memset(ptr, 0, size);
       buffers.push_back(ptr);
    }
 
@@ -111,6 +115,7 @@
          if(received_size < std::size_t(i)){
             return false;
          }
+         std::memset(buffers[i], 0, a.size(buffers[i]));
       }
    }
    
@@ -139,6 +144,8 @@
       void *ptr = a.allocate(i, std::nothrow);
       if(!ptr)
          break;
+      std::size_t size = a.size(ptr);
+      std::memset(ptr, 0, size);
       buffers.push_back(ptr);
    }
 
@@ -203,7 +210,7 @@
 
    //Now shrink to half
    for(int i = 0, max = (int)buffers.size()
-      ;i < max
+      ; i < max
       ; ++i){
       std::size_t received_size;
       if(a.template allocation_command<char>
@@ -224,9 +231,10 @@
       ;i < max
       ;++i){
       std::size_t received_size;
+      std::size_t request_size = received_sizes[i];
       if(a.template allocation_command<char>
-         ( expand_fwd | nothrow_allocation, received_sizes[i]
-         , received_sizes[i], received_size, (char*)buffers[i]).first){
+         ( expand_fwd | nothrow_allocation, request_size
+         , request_size, received_size, (char*)buffers[i]).first){
          if(received_size != received_sizes[i]){
             return false;
          }
@@ -262,6 +270,8 @@
       void *ptr = a.allocate(i, std::nothrow);
       if(!ptr)
          break;
+      std::size_t size = a.size(ptr);
+      std::memset(ptr, 0, size);
       buffers.push_back(ptr);
    }
 
@@ -327,19 +337,21 @@
 bool test_allocation_with_reuse(Allocator &a)
 {
    //We will repeat this test for different sized elements
-   for(int size = 1; size < 20; ++size){
+   for(int sizeof_object = 1; sizeof_object < 20; ++sizeof_object){
       std::vector<void*> buffers;
 
       //Allocate buffers with extra memory
       for(int i = 0; true; ++i){
-         void *ptr = a.allocate(i*size, std::nothrow);
+         void *ptr = a.allocate(i*sizeof_object, std::nothrow);
          if(!ptr)
             break;
+         std::size_t size = a.size(ptr);
+         std::memset(ptr, 0, size);
          buffers.push_back(ptr);
       }
       
       //Now deallocate all except the latest
-      //Now try to expand to the double of the size
+      //Now try to expand to the double of the sizeof_object
       for(int i = 0, max = (int)buffers.size() - 1
          ;i < max
          ;++i){
@@ -353,14 +365,18 @@
       //Now allocate with reuse
       std::size_t received_size = 0;
       for(int i = 0; true; ++i){
-         std::pair<void*, bool> ret = a.template allocation_command<char>
-            ( expand_bwd | nothrow_allocation, received_size/size*size + size
-            , received_size/size*size+(i+1)*size*2, received_size, (char*)ptr);
+         std::size_t min_size = (received_size + 1);
+         std::size_t prf_size = (received_size + (i+1)*2);
+         std::pair<void*, bool> ret = a.raw_allocation_command
+            ( expand_bwd | nothrow_allocation, min_size
+            , prf_size, received_size, (char*)ptr, sizeof_object);
          if(!ret.first)
             break;
          //If we have memory, this must be a buffer reuse
          if(!ret.second)
             return 1;
+         if(received_size < min_size)
+            return 1;
          ptr = ret.first;
       }
       //There is only a single block so deallocate it
@@ -456,6 +472,8 @@
       void *ptr = a.allocate(i, std::nothrow);
       if(!ptr)
          break;
+      std::size_t size = a.size(ptr);
+      std::memset(ptr, 1, size);
       buffers.push_back(ptr);
    }
 
@@ -544,6 +562,8 @@
       void *ptr = a.allocate(i, std::nothrow);
       if(!ptr)
          break;
+      std::size_t size = a.size(ptr);
+      std::memset(ptr, 0, size);
       buffers.push_back(ptr);
    }
 
@@ -564,7 +584,11 @@
    for(int j = 0, max = (int)buffers.size()
       ;j < max
       ;++j){
-      int pos = (j%4)*((int)buffers.size())/4;
+      int pos = (j%5)*((int)buffers.size())/4;
+      if(pos == int(buffers.size()))
+         --pos;
+      a.deallocate(buffers[pos]);
+      buffers.erase(buffers.begin()+pos);
       std::size_t old_free = a.get_free_memory();
       a.shrink_to_fit();
       if(!a.check_sanity())   return false;
@@ -576,9 +600,6 @@
       if(!a.check_sanity())   return false;
       if(original_size != a.get_size())         return false;
       if(old_free      != a.get_free_memory())  return false;
-
-      a.deallocate(buffers[pos]);
-      buffers.erase(buffers.begin()+pos);
    }
 
    //Now shrink it to the maximum
@@ -623,6 +644,8 @@
          void *ptr = a.allocate(i, std::nothrow);
          if(!ptr)
             break;
+         std::size_t size = a.size(ptr);
+         std::memset(ptr, 0, size);
          if(!a.check_sanity())
             return false;
          buffers2.push_back(ptr);
@@ -736,6 +759,8 @@
          void *ptr = a.allocate(i, std::nothrow);
          if(!ptr)
             break;
+         std::size_t size = a.size(ptr);
+         std::memset(ptr, 0, size);
          buffers2.push_back(ptr);
       }
 
@@ -816,6 +841,57 @@
    return true;
 }
 
+//This test allocates multiple values until there is no more memory
+//and after that deallocates all in the inverse order
+template<class Allocator>
+bool test_many_deallocation(Allocator &a)
+{
+   typedef typename Allocator::multiallocation_iterator multiallocation_iterator;
+   const std::size_t ArraySize = 11;
+   std::vector<multiallocation_iterator> buffers;
+   std::size_t requested_sizes[ArraySize];
+   for(std::size_t i = 0; i < ArraySize; ++i){
+      requested_sizes[i] = 4*i;
+   }
+   std::size_t free_memory = a.get_free_memory();
+
+   {
+      for(int i = 0; true; ++i){
+         multiallocation_iterator it = a.allocate_many(requested_sizes, ArraySize, 1, std::nothrow);
+         if(!it)
+            break;
+         buffers.push_back(it);
+      }
+      for(int i = 0, max = (int)buffers.size(); i != max; ++i){
+         a.deallocate_many(buffers[i]);
+      }
+      buffers.clear();
+      bool ok = free_memory == a.get_free_memory() && 
+               a.all_memory_deallocated() && a.check_sanity();
+      if(!ok)  return ok;
+   }
+
+   {
+      for(int i = 0; true; ++i){
+         multiallocation_iterator it = a.allocate_many(i*4, ArraySize, std::nothrow);
+         if(!it)
+            break;
+         buffers.push_back(it);
+      }
+      for(int i = 0, max = (int)buffers.size(); i != max; ++i){
+         a.deallocate_many(buffers[i]);
+      }
+      buffers.clear();
+
+      bool ok = free_memory == a.get_free_memory() && 
+               a.all_memory_deallocated() && a.check_sanity();
+      if(!ok)  return ok;
+   }
+
+   return true;
+}
+
+
 //This function calls all tests
 template<class Allocator>
 bool test_all_allocation(Allocator &a)
@@ -847,6 +923,12 @@
       return false;
    }
 
+   if(!test_many_deallocation(a)){
+      std::cout << "test_many_deallocation failed. Class: "
+                << typeid(a).name() << std::endl;
+      return false;
+   }
+
    std::cout << "Starting test_allocation_shrink. Class: "
              << typeid(a).name() << std::endl;
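
A recurring change in this file is writing over each buffer right after allocation, using the size the allocator reports (a.size(ptr)) rather than the size requested; touching every usable byte helps catch blocks that overlap each other or the allocator's metadata, which matters after the size-overhead reduction mentioned in the log. The pattern in isolation, with std::malloc standing in for the shared-memory allocator:

   //Illustrative only: scribble over the full usable size of every allocation
   #include <cstddef>
   #include <cstdlib>
   #include <cstring>
   #include <vector>

   int main()
   {
      std::vector<void*> buffers;
      for(std::size_t i = 1; i <= 32; ++i){
         std::size_t usable = i;            //a.size(ptr) in the real test
         void *ptr = std::malloc(usable);
         if(!ptr)
            break;
         std::memset(ptr, 0, usable);       //touching every byte exposes overlaps
         buffers.push_back(ptr);
      }
      for(std::size_t i = 0; i < buffers.size(); ++i)
         std::free(buffers[i]);
      return 0;
   }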
 
Modified: trunk/libs/interprocess/test/node_allocator_test.cpp
==============================================================================
--- trunk/libs/interprocess/test/node_allocator_test.cpp	(original)
+++ trunk/libs/interprocess/test/node_allocator_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -16,6 +16,7 @@
 #include "dummy_test_allocator.hpp"
 #include "movable_int.hpp"
 #include "list_test.hpp"
+#include "vector_test.hpp"
 
 using namespace boost::interprocess;
 
@@ -23,15 +24,31 @@
 //Alias a integer node allocator type
 typedef node_allocator
    <int, managed_shared_memory::segment_manager> shmem_node_allocator_t;
+typedef detail::node_allocator_v1
+   <int, managed_shared_memory::segment_manager> shmem_node_allocator_v1_t;
+
+//Explicit instantiations to catch compilation errors
+template class node_allocator<int, managed_shared_memory::segment_manager>;
+template class detail::node_allocator_v1<int, managed_shared_memory::segment_manager>;
 
 //Alias list types
 typedef list<int, shmem_node_allocator_t>    MyShmList;
+typedef list<int, shmem_node_allocator_v1_t> MyShmListV1;
+
+//Alias vector types
+typedef vector<int, shmem_node_allocator_t>     MyShmVector;
+typedef vector<int, shmem_node_allocator_v1_t>  MyShmVectorV1;
 
 int main ()
 {
    if(test::list_test<managed_shared_memory, MyShmList, true>())
       return 1;
-
+   if(test::list_test<managed_shared_memory, MyShmListV1, true>())
+      return 1;
+   if(test::vector_test<managed_shared_memory, MyShmVector>())
+      return 1;
+   if(test::vector_test<managed_shared_memory, MyShmVectorV1>())
+      return 1;
    return 0;
 }
 
Modified: trunk/libs/interprocess/test/node_pool_test.hpp
==============================================================================
--- trunk/libs/interprocess/test/node_pool_test.hpp	(original)
+++ trunk/libs/interprocess/test/node_pool_test.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -42,7 +42,7 @@
 
    //First allocate nodes
    for(std::size_t i = 0; i < num_alloc; ++i){
-      nodes.push_back(pool.allocate(1));
+      nodes.push_back(pool.allocate_node());
    }
 
    //Check that the free count is correct
@@ -52,7 +52,7 @@
    
    //Now deallocate all and check again
    for(std::size_t i = 0; i < num_alloc; ++i){
-       pool.deallocate(nodes[i], 1);
+       pool.deallocate_node(nodes[i]);
    }
 
    //Check that the free count is correct
@@ -85,7 +85,7 @@
 
    //First allocate nodes
    for(std::size_t i = 0; i < max_nodes; ++i){
-      nodes.push_back(pool.allocate(1));
+      nodes.push_back(pool.allocate_node());
    }
 
    //Check that the free count is correct
@@ -97,7 +97,7 @@
    for(std::size_t node_i = 0; node_i < nodes_per_chunk; ++node_i){
       //Deallocate a node per chunk
       for(std::size_t i = 0; i < max_chunks; ++i){
-         pool.deallocate(nodes[i*nodes_per_chunk + node_i], 1);
+         pool.deallocate_node(nodes[i*nodes_per_chunk + node_i]);
       }
 
       //Check that the free count is correct
Modified: trunk/libs/interprocess/test/private_adaptive_pool_test.cpp
==============================================================================
--- trunk/libs/interprocess/test/private_adaptive_pool_test.cpp	(original)
+++ trunk/libs/interprocess/test/private_adaptive_pool_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -16,6 +16,7 @@
 #include "dummy_test_allocator.hpp"
 #include "movable_int.hpp"
 #include "list_test.hpp"
+#include "vector_test.hpp"
 
 using namespace boost::interprocess;
 
@@ -23,15 +24,31 @@
 //Alias a private adaptive pool that allocates ints
 typedef private_adaptive_pool
    <int, managed_shared_memory::segment_manager> priv_node_allocator_t;
+typedef detail::private_adaptive_pool_v1
+   <int, managed_shared_memory::segment_manager> priv_node_allocator_v1_t;
+
+//Explicit instantiations to catch compilation errors
+template class private_adaptive_pool<int, managed_shared_memory::segment_manager>;
+template class detail::private_adaptive_pool_v1<int, managed_shared_memory::segment_manager>;
 
 //Alias list types
 typedef list<int, priv_node_allocator_t>    MyShmList;
+typedef list<int, priv_node_allocator_v1_t>    MyShmListV1;
+
+//Alias vector types
+typedef vector<int, priv_node_allocator_t>     MyShmVector;
+typedef vector<int, priv_node_allocator_v1_t>  MyShmVectorV1;
 
 int main ()
 {
    if(test::list_test<managed_shared_memory, MyShmList, true>(false))
       return 1;
-
+   if(test::list_test<managed_shared_memory, MyShmListV1, true>(false))
+      return 1;
+   if(test::vector_test<managed_shared_memory, MyShmVector>())
+      return 1;
+   if(test::vector_test<managed_shared_memory, MyShmVectorV1>())
+      return 1;
    return 0;
 }
 
Modified: trunk/libs/interprocess/test/private_node_allocator_test.cpp
==============================================================================
--- trunk/libs/interprocess/test/private_node_allocator_test.cpp	(original)
+++ trunk/libs/interprocess/test/private_node_allocator_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -16,6 +16,7 @@
 #include "dummy_test_allocator.hpp"
 #include "movable_int.hpp"
 #include "list_test.hpp"
+#include "vector_test.hpp"
 
 using namespace boost::interprocess;
 
@@ -23,15 +24,31 @@
 //Alias a integer node allocator type
 typedef private_node_allocator
    <int, managed_shared_memory::segment_manager> priv_node_allocator_t;
+typedef detail::private_node_allocator_v1
+   <int, managed_shared_memory::segment_manager> priv_node_allocator_v1_t;
+
+//Explicit instantiations to catch compilation errors
+template class private_node_allocator<int, managed_shared_memory::segment_manager>;
+template class detail::private_node_allocator_v1<int, managed_shared_memory::segment_manager>;
 
 //Alias list types
-typedef list<int, priv_node_allocator_t>    MyShmList;
+typedef list<int, priv_node_allocator_t>     MyShmList;
+typedef list<int, priv_node_allocator_v1_t>  MyShmListV1;
+
+//Alias vector types
+typedef vector<int, priv_node_allocator_t>     MyShmVector;
+typedef vector<int, priv_node_allocator_v1_t>  MyShmVectorV1;
 
 int main ()
 {
    if(test::list_test<managed_shared_memory, MyShmList, true>(false))
       return 1;
-
+   if(test::list_test<managed_shared_memory, MyShmListV1, true>(false))
+      return 1;
+   if(test::vector_test<managed_shared_memory, MyShmVector>())
+      return 1;
+   if(test::vector_test<managed_shared_memory, MyShmVectorV1>())
+      return 1;
    return 0;
 }
 
Modified: trunk/libs/interprocess/test/set_test.hpp
==============================================================================
--- trunk/libs/interprocess/test/set_test.hpp	(original)
+++ trunk/libs/interprocess/test/set_test.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -384,6 +384,8 @@
       }
 
       //Now do count exercise
+      shmset->erase(shmset->begin(), shmset->end());
+      shmmultiset->erase(shmmultiset->begin(), shmmultiset->end());
       shmset->clear();
       shmmultiset->clear();
 
Modified: trunk/libs/interprocess/test/vector_test.cpp
==============================================================================
--- trunk/libs/interprocess/test/vector_test.cpp	(original)
+++ trunk/libs/interprocess/test/vector_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -17,227 +17,22 @@
 
 #include <boost/interprocess/managed_shared_memory.hpp>
 #include <boost/interprocess/containers/vector.hpp>
-#include <boost/interprocess/indexes/flat_map_index.hpp>
 #include <boost/interprocess/allocators/allocator.hpp>
 #include "allocator_v1.hpp"
-#include <boost/interprocess/exceptions.hpp>
-#include <boost/interprocess/detail/move_iterator.hpp>
-#include <boost/interprocess/detail/move.hpp>
-#include "print_container.hpp"
 #include "check_equal_containers.hpp"
 #include "movable_int.hpp"
 #include "expand_bwd_test_allocator.hpp"
 #include "expand_bwd_test_template.hpp"
 #include "dummy_test_allocator.hpp"
-#include <string>
-#include "get_process_id_name.hpp"
+#include "vector_test.hpp"
 
 using namespace boost::interprocess;
 
-typedef basic_managed_shared_memory
-   <char,
-   simple_seq_fit<mutex_family>,
-   flat_map_index
-   > managed_shared_memory_t;
-
 //Explicit instantiation to detect compilation errors
 template class boost::interprocess::vector<test::movable_and_copyable_int, 
    test::dummy_test_allocator<test::movable_and_copyable_int> >;
 
-template<class V1, class V2>
-bool copyable_only(V1 *, V2 *, detail::false_type)
-{
-   return true;
-}
-
-//Function to check if both sets are equal
-template<class V1, class V2>
-bool copyable_only(V1 *shmvector, V2 *stdvector, detail::true_type)
-{
-   typedef typename V1::value_type IntType;
-   std::size_t size = shmvector->size();
-   stdvector->insert(stdvector->end(), 50, 1);
-   shmvector->insert(shmvector->end(), 50, 1);
-   if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-
-   {
-   IntType move_me(1);
-   stdvector->insert(stdvector->begin()+size/2, 50, 1);
-   shmvector->insert(shmvector->begin()+size/2, 50, move(move_me));
-   if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-   }
-   {
-   IntType move_me(2);
-   shmvector->assign(shmvector->size()/2, move(move_me));
-   stdvector->assign(stdvector->size()/2, 2);
-   if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-   }
-   {
-   IntType move_me(3);
-   shmvector->assign(shmvector->size()*3-1, move(move_me));
-   stdvector->assign(stdvector->size()*3-1, 3);
-   if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-   }
-   return true;
-}
-
-template<class IntType, template<class T, class SegmentManager> class AllocatorType >
-bool do_test()
-{
-   //Customize managed_shared_memory class
-   typedef basic_managed_shared_memory
-      <char,
-      //simple_seq_fit<mutex_family>,
-      rbtree_best_fit<mutex_family>,
-      flat_map_index
-      > my_managed_shared_memory;
-
-   //Alias AllocatorType type
-   typedef AllocatorType<IntType, my_managed_shared_memory::segment_manager>
-      shmem_allocator_t;
-
-   //Alias vector types
-   typedef vector<IntType, shmem_allocator_t>   MyShmVector;
-   typedef std::vector<int>                     MyStdVector;
-
-   std::string process_name;
-   test::get_process_id_name(process_name);
-
-   const int Memsize = 65536;
-   const char *const shMemName = process_name.c_str();
-   const int max = 100;
-
-   {
-      //Compare several shared memory vector operations with std::vector
-      //Create shared memory
-      shared_memory_object::remove(shMemName);
-      try{
-         my_managed_shared_memory segment(create_only, shMemName, Memsize);
-
-         segment.reserve_named_objects(100);
-
-         //Shared memory allocator must be always be initialized
-         //since it has no default constructor
-         MyShmVector *shmvector = segment.template construct<MyShmVector>("MyShmVector")
-                                 (segment.get_segment_manager());
-         MyStdVector *stdvector = new MyStdVector;
-
-         shmvector->resize(100);
-         stdvector->resize(100);
-         if(!test::CheckEqualContainers(shmvector, stdvector)) return false;         
-
-         shmvector->resize(200);
-         stdvector->resize(200);
-         if(!test::CheckEqualContainers(shmvector, stdvector)) return false;         
-
-         shmvector->resize(0);
-         stdvector->resize(0);
-         if(!test::CheckEqualContainers(shmvector, stdvector)) return false;         
-
-         for(int i = 0; i < max; ++i){
-            IntType new_int(i);
-            shmvector->insert(shmvector->end(), move(new_int));
-            stdvector->insert(stdvector->end(), i);
-         }
-         if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-
-         typename MyShmVector::iterator shmit(shmvector->begin());
-         typename MyStdVector::iterator stdit(stdvector->begin());
-         typename MyShmVector::const_iterator cshmit = shmit;
-         ++shmit; ++stdit;
-         shmvector->erase(shmit);
-         stdvector->erase(stdit);
-         if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-
-         shmvector->erase(shmvector->begin());
-         stdvector->erase(stdvector->begin());
-         if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-
-         {
-            //Initialize values
-            IntType aux_vect[50];
-            for(int i = 0; i < 50; ++i){
-               IntType new_int(-1);
-               aux_vect[i] = move(new_int);
-            }
-            int aux_vect2[50];
-            for(int i = 0; i < 50; ++i){
-               aux_vect2[i] = -1;
-            }
-
-            shmvector->insert(shmvector->end()
-                              ,detail::make_move_iterator(&aux_vect[0])
-                              ,detail::make_move_iterator(aux_vect + 50));
-            stdvector->insert(stdvector->end(), aux_vect2, aux_vect2 + 50);
-            if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-
-            for(int i = 0, j = static_cast<int>(shmvector->size()); i < j; ++i){
-               shmvector->erase(shmvector->begin());
-               stdvector->erase(stdvector->begin());
-            }
-            if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-         }
-         {
-            IntType aux_vect[50];
-            for(int i = 0; i < 50; ++i){
-               IntType new_int(-1);
-               aux_vect[i] = move(new_int);
-            }
-            int aux_vect2[50];
-            for(int i = 0; i < 50; ++i){
-               aux_vect2[i] = -1;
-            }
-            shmvector->insert(shmvector->begin()
-                              ,detail::make_move_iterator(&aux_vect[0])
-                              ,detail::make_move_iterator(aux_vect + 50));
-            stdvector->insert(stdvector->begin(), aux_vect2, aux_vect2 + 50);
-            if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-         }
-
-         shmvector->reserve(shmvector->size()*2);
-         stdvector->reserve(stdvector->size()*2);
-         if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-
-         IntType push_back_this(1);
-         shmvector->push_back(move(push_back_this));
-         stdvector->push_back(int(1));
-         if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-
-         if(!copyable_only(shmvector, stdvector
-                        ,detail::bool_<!is_movable<IntType>::value>())){
-            return false;
-         }
-
-         shmvector->erase(shmvector->begin());
-         stdvector->erase(stdvector->begin());
-         if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-
-         for(int i = 0; i < max; ++i){
-            IntType insert_this(i);
-            shmvector->insert(shmvector->begin(), move(insert_this));
-            stdvector->insert(stdvector->begin(), i);
-         }
-         if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
-
-         delete stdvector;
-         segment.template destroy<MyShmVector>("MyShmVector");
-         segment.shrink_to_fit_indexes();
-
-         if(!segment.all_memory_deallocated())
-            return false;
-      }
-      catch(std::exception &ex){
-         shared_memory_object::remove(shMemName);
-         std::cout << ex.what() << std::endl;
-         return false;
-      }
-   }
-   shared_memory_object::remove(shMemName);
-   std::cout << std::endl << "Test OK!" << std::endl;
-   return true;
-}
-
-bool test_expand_bwd()
+int test_expand_bwd()
 {
    //Now test all back insertion possibilities
 
@@ -248,7 +43,7 @@
       int_vector;
 
    if(!test::test_all_expand_bwd<int_vector>())
-      return false;
+      return 1;
 
    //Now user defined wrapped int
    typedef test::expand_bwd_test_allocator<test::int_holder>
@@ -257,7 +52,7 @@
       int_holder_vector;
 
    if(!test::test_all_expand_bwd<int_holder_vector>())
-      return false;
+      return 1;
 
    //Now user defined bigger wrapped int
    typedef test::expand_bwd_test_allocator<test::triple_int_holder>
@@ -267,26 +62,32 @@
       triple_int_holder_vector;
 
    if(!test::test_all_expand_bwd<triple_int_holder_vector>())
-      return false;
+      return 1;
 
-   return true;
+   return 0;
 }
 
 int main()
 {
-   if(!do_test<int, allocator>())
-      return 1;
+   typedef allocator<int, managed_shared_memory::segment_manager> ShmemAllocator;
+   typedef vector<int, ShmemAllocator> MyVector;
+
+   typedef allocator<test::movable_int, managed_shared_memory::segment_manager> ShmemMoveAllocator;
+   typedef vector<test::movable_int, ShmemMoveAllocator> MyMoveVector;
+
+   typedef allocator<test::movable_and_copyable_int, managed_shared_memory::segment_manager> ShmemCopyMoveAllocator;
+   typedef vector<test::movable_and_copyable_int, ShmemCopyMoveAllocator> MyCopyMoveVector;
 
-   if(!do_test<test::movable_int, allocator>())
+   if(test::vector_test<managed_shared_memory, MyVector>())
       return 1;
 
-   if(!do_test<test::movable_and_copyable_int, allocator>())
+   if(test::vector_test<managed_shared_memory, MyMoveVector>())
       return 1;
 
-   if(!do_test<int, test::allocator_v1>())
+   if(test::vector_test<managed_shared_memory, MyCopyMoveVector>())
       return 1;
 
-   if(!test_expand_bwd())
+   if(test_expand_bwd())
       return 1;
 
    return 0;
Added: trunk/libs/interprocess/test/vector_test.hpp
==============================================================================
--- (empty file)
+++ trunk/libs/interprocess/test/vector_test.hpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -0,0 +1,219 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2004-2007. Distributed under the Boost
+// Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/interprocess for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#include <boost/interprocess/detail/config_begin.hpp>
+#include <algorithm>
+#include <memory>
+#include <vector>
+#include <iostream>
+#include <functional>
+
+#include <boost/interprocess/managed_shared_memory.hpp>
+#include <boost/interprocess/containers/vector.hpp>
+#include <boost/interprocess/indexes/flat_map_index.hpp>
+#include <boost/interprocess/exceptions.hpp>
+#include <boost/interprocess/detail/move_iterator.hpp>
+#include <boost/interprocess/detail/move.hpp>
+#include "print_container.hpp"
+#include "check_equal_containers.hpp"
+#include "movable_int.hpp"
+#include <string>
+#include "get_process_id_name.hpp"
+
+namespace boost{
+namespace interprocess{
+namespace test{
+
+template<class V1, class V2>
+bool copyable_only(V1 *, V2 *, detail::false_type)
+{
+   return true;
+}
+
+//Function to check if both sets are equal
+template<class V1, class V2>
+bool copyable_only(V1 *shmvector, V2 *stdvector, detail::true_type)
+{
+   typedef typename V1::value_type IntType;
+   std::size_t size = shmvector->size();
+   stdvector->insert(stdvector->end(), 50, 1);
+   shmvector->insert(shmvector->end(), 50, 1);
+   if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
+
+   {
+   IntType move_me(1);
+   stdvector->insert(stdvector->begin()+size/2, 50, 1);
+   shmvector->insert(shmvector->begin()+size/2, 50, move(move_me));
+   if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
+   }
+   {
+   IntType move_me(2);
+   shmvector->assign(shmvector->size()/2, move(move_me));
+   stdvector->assign(stdvector->size()/2, 2);
+   if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
+   }
+   {
+   IntType move_me(3);
+   shmvector->assign(shmvector->size()*3-1, move(move_me));
+   stdvector->assign(stdvector->size()*3-1, 3);
+   if(!test::CheckEqualContainers(shmvector, stdvector)) return false;
+   }
+   return true;
+}
+
+template<class ManagedSharedMemory
+        ,class MyShmVector>
+int vector_test()
+{
+   typedef std::vector<int>                     MyStdVector;
+   typedef typename MyShmVector::value_type     IntType;
+
+   std::string process_name;
+   test::get_process_id_name(process_name);
+
+   const int Memsize = 65536;
+   const char *const shMemName = process_name.c_str();
+   const int max = 100;
+
+   {
+      //Compare several shared memory vector operations with std::vector
+      //Create shared memory
+      shared_memory_object::remove(shMemName);
+      try{
+         ManagedSharedMemory segment(create_only, shMemName, Memsize);
+
+         segment.reserve_named_objects(100);
+
+         //Shared memory allocator must always be initialized
+         //since it has no default constructor
+         MyShmVector *shmvector = segment.template construct<MyShmVector>("MyShmVector")
+                                 (segment.get_segment_manager());
+         MyStdVector *stdvector = new MyStdVector;
+
+         shmvector->resize(100);
+         stdvector->resize(100);
+         if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;         
+
+         shmvector->resize(200);
+         stdvector->resize(200);
+         if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;         
+
+         shmvector->resize(0);
+         stdvector->resize(0);
+         if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;         
+
+         for(int i = 0; i < max; ++i){
+            IntType new_int(i);
+            shmvector->insert(shmvector->end(), move(new_int));
+            stdvector->insert(stdvector->end(), i);
+         }
+         if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;
+
+         typename MyShmVector::iterator shmit(shmvector->begin());
+         typename MyStdVector::iterator stdit(stdvector->begin());
+         typename MyShmVector::const_iterator cshmit = shmit;
+         ++shmit; ++stdit;
+         shmvector->erase(shmit);
+         stdvector->erase(stdit);
+         if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;
+
+         shmvector->erase(shmvector->begin());
+         stdvector->erase(stdvector->begin());
+         if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;
+
+         {
+            //Initialize values
+            IntType aux_vect[50];
+            for(int i = 0; i < 50; ++i){
+               IntType new_int(-1);
+               aux_vect[i] = move(new_int);
+            }
+            int aux_vect2[50];
+            for(int i = 0; i < 50; ++i){
+               aux_vect2[i] = -1;
+            }
+
+            shmvector->insert(shmvector->end()
+                              ,detail::make_move_iterator(&aux_vect[0])
+                              ,detail::make_move_iterator(aux_vect + 50));
+            stdvector->insert(stdvector->end(), aux_vect2, aux_vect2 + 50);
+            if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;
+
+            for(int i = 0, j = static_cast<int>(shmvector->size()); i < j; ++i){
+               shmvector->erase(shmvector->begin());
+               stdvector->erase(stdvector->begin());
+            }
+            if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;
+         }
+         {
+            IntType aux_vect[50];
+            for(int i = 0; i < 50; ++i){
+               IntType new_int(-1);
+               aux_vect[i] = move(new_int);
+            }
+            int aux_vect2[50];
+            for(int i = 0; i < 50; ++i){
+               aux_vect2[i] = -1;
+            }
+            shmvector->insert(shmvector->begin()
+                              ,detail::make_move_iterator(&aux_vect[0])
+                              ,detail::make_move_iterator(aux_vect + 50));
+            stdvector->insert(stdvector->begin(), aux_vect2, aux_vect2 + 50);
+            if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;
+         }
+
+         shmvector->reserve(shmvector->size()*2);
+         stdvector->reserve(stdvector->size()*2);
+         if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;
+
+         IntType push_back_this(1);
+         shmvector->push_back(move(push_back_this));
+         stdvector->push_back(int(1));
+         if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;
+
+         if(!copyable_only(shmvector, stdvector
+                        ,detail::bool_<!is_movable<IntType>::value>())){
+            return 1;
+         }
+
+         shmvector->erase(shmvector->begin());
+         stdvector->erase(stdvector->begin());
+         if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;
+
+         for(int i = 0; i < max; ++i){
+            IntType insert_this(i);
+            shmvector->insert(shmvector->begin(), move(insert_this));
+            stdvector->insert(stdvector->begin(), i);
+         }
+         if(!test::CheckEqualContainers(shmvector, stdvector)) return 1;
+
+         delete stdvector;
+         segment.template destroy<MyShmVector>("MyShmVector");
+         segment.shrink_to_fit_indexes();
+
+         if(!segment.all_memory_deallocated())
+            return 1;
+      }
+      catch(std::exception &ex){
+         shared_memory_object::remove(shMemName);
+         std::cout << ex.what() << std::endl;
+         return 1;
+      }
+   }
+   shared_memory_object::remove(shMemName);
+   std::cout << std::endl << "Test OK!" << std::endl;
+   return 0;
+}
+
+}  //namespace test{
+}  //namespace interprocess{
+}  //namespace boost{
+
+#include <boost/interprocess/detail/config_end.hpp>
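
With the shared test body extracted into this header, every allocator test in the commit follows the same recipe: alias a vector over the allocator under test and call test::vector_test with the managed memory type, as in the hunks for node_allocator_test.cpp and friends above. An outline of a minimal client (allocator<int, segment_manager> is the same choice vector_test.cpp makes; substitute the allocator being tested):

   //Outline of a vector_test.hpp client
   #include <boost/interprocess/managed_shared_memory.hpp>
   #include <boost/interprocess/containers/vector.hpp>
   #include <boost/interprocess/allocators/allocator.hpp>
   #include "vector_test.hpp"

   using namespace boost::interprocess;

   typedef allocator<int, managed_shared_memory::segment_manager>  ShmemAllocator;
   typedef vector<int, ShmemAllocator>                             MyShmVector;

   int main()
   {
      if(test::vector_test<managed_shared_memory, MyShmVector>())
         return 1;
      return 0;
   }
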
Modified: trunk/libs/intrusive/example/doc_list_algorithms.cpp
==============================================================================
--- trunk/libs/intrusive/example/doc_list_algorithms.cpp	(original)
+++ trunk/libs/intrusive/example/doc_list_algorithms.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -38,7 +38,7 @@
 
    //Create an empty doubly linked list container:
    //"one" will be the first node of the container
-   algo::init(&one);
+   algo::init_header(&one);
    assert(algo::count(&one) == 1);
 
    //Now add a new node before "one"
Modified: trunk/libs/intrusive/example/doc_slist_algorithms.cpp
==============================================================================
--- trunk/libs/intrusive/example/doc_slist_algorithms.cpp	(original)
+++ trunk/libs/intrusive/example/doc_slist_algorithms.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -36,7 +36,7 @@
 
    //Create an empty singly linked list container:
    //"one" will be the first node of the container
-   algo::init(&one);
+   algo::init_header(&one);
    assert(algo::count(&one) == 1);
 
    //Now add a new node
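
Both algorithm examples switch from algo::init to algo::init_header: init_header prepares a node to act as the header of an empty list (for the circular algorithms it points to itself), while init leaves a free-standing node in its unlinked state, which is also why the unique() assertions earlier in this patch were replaced by commented-out inited() checks. A tiny standalone model of the distinction, not the Boost.Intrusive implementation:

   //Illustrative model of init vs init_header for a circular singly linked node
   #include <cassert>
   #include <cstddef>

   struct node {  node *next;  };

   void init(node &n)        {  n.next = 0;   }   //unlinked: belongs to no list
   void init_header(node &n) {  n.next = &n;  }   //empty circular list: self-pointing

   int main()
   {
      node header, free_node;
      init_header(header);
      init(free_node);
      assert(header.next == &header);
      assert(free_node.next == 0);
      return 0;
   }
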
Modified: trunk/libs/intrusive/proj/vc7ide/_intrusivelib/_intrusivelib.vcproj
==============================================================================
--- trunk/libs/intrusive/proj/vc7ide/_intrusivelib/_intrusivelib.vcproj	(original)
+++ trunk/libs/intrusive/proj/vc7ide/_intrusivelib/_intrusivelib.vcproj	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -136,6 +136,9 @@
                                 RelativePath="..\..\..\..\..\boost\intrusive\intrusive_fwd.hpp">
                         </File>
                         <File
+				RelativePath="..\..\..\..\..\boost\intrusive\linear_slist_algorithms.hpp">
+			</File>
+			<File
                                 RelativePath="..\..\..\..\..\boost\intrusive\link_mode.hpp">
                         </File>
                         <File
Modified: trunk/libs/intrusive/test/list_test.cpp
==============================================================================
--- trunk/libs/intrusive/test/list_test.cpp	(original)
+++ trunk/libs/intrusive/test/list_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -63,14 +63,6 @@
    test_swap(values);
    test_clone(values);
    test_container_from_end(values);
-/*
-   const char *list_name = typeid(list_type).name();
-   std::cout << list_name << std::endl << strlen(list_name) << std::endl;
-   const char *value_t = typeid(typename list_type::value_traits).name();
-   std::cout << value_t << std::endl << strlen(value_t) << std::endl;
-   const char *list_it_name = typeid(typename list_type::iterator).name();
-   std::cout << list_it_name  << std::endl << strlen(list_it_name ) << std::endl;
-*/
 }
 
 //test: push_front, pop_front, push_back, pop_back, front, back, size, empty:
@@ -194,26 +186,29 @@
    const int num_values = (int)values.size();
    std::vector<int> expected_values(num_values);
 
-   //Shift forward all possible positions 3 times
-   for(int i = 0; i < num_values*3; ++i){
-      testlist.assign(values.begin(), values.end());
-      testlist.shift_forward(i);
-      for(int j = 0; j < num_values; ++j){
-         expected_values[(j + num_values - i%num_values) % num_values] = (j + 1);
+   for(int s = 1; s <= num_values; ++s){
+      expected_values.resize(s);
+      //Shift forward all possible positions 3 times
+      for(int i = 0; i < s*3; ++i){
+         testlist.insert(testlist.begin(), &values[0], &values[0] + s);
+         testlist.shift_forward(i);
+         for(int j = 0; j < s; ++j){
+            expected_values[(j + s - i%s) % s] = (j + 1);
+         }
+         TEST_INTRUSIVE_SEQUENCE_EXPECTED(expected_values, testlist.begin());
+         testlist.clear();
       }
-      TEST_INTRUSIVE_SEQUENCE_EXPECTED(expected_values, testlist.begin());
-      testlist.clear();
-   }
 
-   //Shift backwards all possible positions
-   for(int i = 0; i < num_values*3; ++i){
-      testlist.assign(values.begin(), values.end());
-      testlist.shift_backwards(i);
-      for(int j = 0; j < num_values; ++j){
-         expected_values[(j + i) % num_values] = (j + 1);
+      //Shift backwards all possible positions
+      for(int i = 0; i < s*3; ++i){
+         testlist.insert(testlist.begin(), &values[0], &values[0] + s);
+         testlist.shift_backwards(i);
+         for(int j = 0; j < s; ++j){
+            expected_values[(j + i) % s] = (j + 1);
+         }
+         TEST_INTRUSIVE_SEQUENCE_EXPECTED(expected_values, testlist.begin());
+         testlist.clear();
       }
-      TEST_INTRUSIVE_SEQUENCE_EXPECTED(expected_values, testlist.begin());
-      testlist.clear();
    }
 } 
 
@@ -278,6 +273,28 @@
       {  int init_values [] = { 4, 3 };
          TEST_INTRUSIVE_SEQUENCE( init_values, testlist2.begin() );  }
    }
+   {
+      list_type testlist1 (&values[0], &values[1]);
+
+      {  int init_values [] = { 1 };
+         TEST_INTRUSIVE_SEQUENCE( init_values, testlist1.begin() );  }
+
+      values[1].swap_nodes(values[2]);
+
+      {  int init_values [] = { 1 };
+         TEST_INTRUSIVE_SEQUENCE( init_values, testlist1.begin() );  }
+
+      values[0].swap_nodes(values[2]);
+
+      {  int init_values [] = { 3 };
+         TEST_INTRUSIVE_SEQUENCE( init_values, testlist1.begin() );  }
+
+      values[0].swap_nodes(values[2]);
+
+      {  int init_values [] = { 1 };
+         TEST_INTRUSIVE_SEQUENCE( init_values, testlist1.begin() );  }
+   }
+
 }
 
 template<class ValueTraits>
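
The reworked test_shift above covers every list length s from 1 to num_values: after shift_forward(i) it expects the value originally at position j to end up at (j + s - i%s) % s, and after shift_backwards(i) at (j + i) % s. A standalone check of that index arithmetic on a plain array, no intrusive list involved:

   //Illustrative only: verify the expected-position formula for one rotation
   #include <cassert>

   int main()
   {
      const int s = 5;
      const int i = 2;                     //models shift_forward(2)
      int expected[s];
      for(int j = 0; j < s; ++j)
         expected[(j + s - i % s) % s] = j + 1;
      //With this formula the sequence 1 2 3 4 5 becomes 3 4 5 1 2
      assert(expected[0] == 3 && expected[1] == 4 && expected[2] == 5);
      assert(expected[3] == 1 && expected[4] == 2);
      return 0;
   }
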
Modified: trunk/libs/intrusive/test/slist_test.cpp
==============================================================================
--- trunk/libs/intrusive/test/slist_test.cpp	(original)
+++ trunk/libs/intrusive/test/slist_test.cpp	2008-01-20 06:54:47 EST (Sun, 20 Jan 2008)
@@ -24,7 +24,7 @@
 
 using namespace boost::intrusive;
 
-template<class ValueTraits>
+template<class ValueTraits, bool Linear>
 struct test_slist 
 {
    typedef typename ValueTraits::value_type value_type;
@@ -37,11 +37,12 @@
    static void test_swap(std::vector<value_type>& values);
    static void test_slow_insert (std::vector<value_type>& values);
    static void test_clone (std::vector<value_type>& values);
-   static void test_container_from_end(std::vector<value_type> &values);
+   static void test_container_from_end(std::vector<value_type> &, detail::bool_<true>){}
+   static void test_container_from_end(std::vector<value_type> &values, detail::bool_<false>);
 };
 
-template<class ValueTraits>
-void test_slist<ValueTraits>
+template<class ValueTraits, bool Linear>
+void test_slist<ValueTraits, Linear>
    ::test_all (std::vector<typename ValueTraits::value_type>& values)
 {
    typedef typename ValueTraits::value_type value_type;
@@ -50,6 +51,7 @@
       , value_traits<ValueTraits>
       , size_type<std::size_t>
       , constant_time_size<value_type::constant_time_size>
+      , linear<Linear>
       > list_type;
    {
       list_type list(values.begin(), values.end());
@@ -66,12 +68,12 @@
    test_slow_insert (values);
    test_swap(values);
    test_clone(values);
-   test_container_from_end(values);
+   test_container_from_end(values, detail::bool_<Linear>());
 }
 
 //test: push_front, pop_front, front, size, empty:
-template<class ValueTraits>
-void test_slist<ValueTraits>
+template<class ValueTraits, bool Linear>
+void test_slist<ValueTraits, Linear>
    ::test_front_back (std::vector<typename ValueTraits::value_type>& values)
 {
    typedef typename ValueTraits::value_type value_type;
@@ -80,6 +82,7 @@
       , value_traits<ValueTraits>
       , size_type<std::size_t>
       , constant_time_size<value_type::constant_time_size>
+      , linear<Linear>
       > list_type;
    list_type testlist;
    BOOST_TEST (testlist.empty());
@@ -101,8 +104,8 @@
 }  
 
 //test: merge due to error in merge implementation:
-template<class ValueTraits>
-void test_slist<ValueTraits>
+template<class ValueTraits, bool Linear>
+void test_slist<ValueTraits, Linear>
    ::test_merge (std::vector<typename ValueTraits::value_type>& values)
 {
    typedef typename ValueTraits::value_type value_type;
@@ -111,6 +114,7 @@
       , value_traits<ValueTraits>
       , size_type<std::size_t>
       , constant_time_size<value_type::constant_time_size>
+      , linear<Linear>
       > list_type;
    list_type testlist1, testlist2;
    testlist1.push_front (values[0]);
@@ -124,8 +128,8 @@
 }
 
 //test: constructor, iterator, sort, reverse:
-template<class ValueTraits>
-void test_slist<ValueTraits>
+template<class ValueTraits, bool Linear>
+void test_slist<ValueTraits, Linear>
    ::test_sort(std::vector<typename ValueTraits::value_type>& values)
 {
    typedef typename ValueTraits::value_type value_type;
@@ -134,6 +138,7 @@
       , value_traits<ValueTraits>
       , size_type<std::size_t>
       , constant_time_size<value_type::constant_time_size>
+      , linear<Linear>
       > list_type;
    list_type testlist (values.begin(), values.end());
 
@@ -145,13 +150,13 @@
       TEST_INTRUSIVE_SEQUENCE( init_values, testlist.begin() );  }
 
    testlist.reverse();
-   {  int init_values [] = { 5, 3, 1, 4, 2, };
+   {  int init_values [] = { 5, 3, 1, 4, 2 };
       TEST_INTRUSIVE_SEQUENCE( init_values, testlist.begin() );  }
 }  
   
 //test: assign, insert_after, const_iterator, erase_after, s_iterator_to, previous:
-template<class ValueTraits>
-void test_slist<ValueTraits>
+template<class ValueTraits, bool Linear>
+void test_slist<ValueTraits, Linear>
    ::test_insert(std::vector<typename ValueTraits::value_type>& values)
 {
    typedef typename ValueTraits::value_type value_type;
@@ -160,6 +165,7 @@
       , value_traits<ValueTraits>
       , size_type<std::size_t>
       , constant_time_size<value_type::constant_time_size>
+      , linear<Linear>
       > list_type;
    list_type testlist;
    testlist.assign (&values[0] + 2, &values[0] + 5);
@@ -189,8 +195,8 @@
 }
 
 //test: insert, const_iterator, erase, siterator_to:
-template<class ValueTraits>
-void test_slist<ValueTraits>
+template<class ValueTraits, bool Linear>
+void test_slist<ValueTraits, Linear>
    ::test_slow_insert (std::vector<typename ValueTraits::value_type>& values)
 {
    typedef typename ValueTraits::value_type value_type;
@@ -199,6 +205,7 @@
       , value_traits<ValueTraits>
       , size_type<std::size_t>
       , constant_time_size<value_type::constant_time_size>
+      , linear<Linear>
       > list_type;
    list_type testlist;
    testlist.push_front (values[4]);
@@ -232,8 +239,8 @@
    BOOST_TEST (testlist.front().value_ == 3);
 }  
 
-template<class ValueTraits>
-void test_slist<ValueTraits>
+template<class ValueTraits, bool Linear>
+void test_slist<ValueTraits, Linear>
    ::test_shift(std::vector<typename ValueTraits::value_type>& values)
 {
    typedef typename ValueTraits::value_type value_type;
@@ -242,6 +249,7 @@
       , value_traits<ValueTraits>
       , size_type<std::size_t>
       , constant_time_size<value_type::constant_time_size>
+      , linear<Linear>
       > list_type;
    list_type testlist;
 
@@ -249,33 +257,36 @@
    std::vector<int> expected_values(num_values);
 
    //Shift forward all possible positions 3 times
-   for(int i = 0; i < num_values*3; ++i){
-      testlist.assign(values.begin(), values.end());
-      testlist.shift_forward(i);
-      for(int j = 0; j < num_values; ++j){
-         expected_values[(j + num_values - i%num_values) % num_values] = (j + 1);
+   for(int s = 1; s <= num_values; ++s){
+      expected_values.resize(s);
+      for(int i = 0; i < s*3; ++i){
+         testlist.insert_after(testlist.before_begin(), &values[0], &values[0] + s);
+         testlist.shift_forward(i);
+         for(int j = 0; j < s; ++j){
+            expected_values[(j + s - i%s) % s] = (j + 1);
+         }
+
+         TEST_INTRUSIVE_SEQUENCE_EXPECTED(expected_values, testlist.begin())
+         testlist.clear();
       }
 
-      TEST_INTRUSIVE_SEQUENCE_EXPECTED(expected_values, testlist.begin())
-      testlist.clear();
-   }
+      //Shift backwards all possible positions
+      for(int i = 0; i < s*3; ++i){
+         testlist.insert_after(testlist.before_begin(), &values[0], &values[0] + s);
+         testlist.shift_backwards(i);
+         for(int j = 0; j < s; ++j){
+            expected_values[(j + i) % s] = (j + 1);
+         }
 
-   //Shift backwards all possible positions
-   for(int i = 0; i < num_values*3; ++i){
-      testlist.assign(values.begin(), values.end());
-      testlist.shift_backwards(i);
-      for(int j = 0; j < num_values; ++j){
-         expected_values[(j + i) % num_values] = (j + 1);
+         TEST_INTRUSIVE_SEQUENCE_EXPECTED(expected_values, testlist.begin())
+         testlist.clear();
       }
-
-      TEST_INTRUSIVE_SEQUENCE_EXPECTED(expected_values, testlist.begin())
-      testlist.clear();
    }
 }  
 
 //test: insert_after (seq-version), swap, splice_after:
-template<class ValueTraits>
-void test_slist<ValueTraits>
+template<class ValueTraits, bool Linear>
+void test_slist<ValueTraits, Linear>
    ::test_swap(std::vector<typename ValueTraits::value_type>& values)
 {
    typedef typename ValueTraits::value_type value_type;
@@ -284,11 +295,12 @@
       , value_traits<ValueTraits>
       , size_type<std::size_t>
       , constant_time_size<value_type::constant_time_size>
+      , linear<Linear>
       > list_type;
    {
       list_type testlist1 (&values[0], &values[0] + 2);
       list_type testlist2;
-      testlist2.insert_after (testlist2.end(), &values[0] + 2, &values[0] + 5);
+      testlist2.insert_after (testlist2.before_begin(), &values[0] + 2, &values[0] + 5);
       testlist1.swap(testlist2);
       {  int init_values [] = { 3, 4, 5 };
          TEST_INTRUSIVE_SEQUENCE( init_values, testlist1.begin() );  }
@@ -299,19 +311,20 @@
          TEST_INTRUSIVE_SEQUENCE( init_values, testlist2.begin() );  }
       BOOST_TEST (testlist1.empty());
 
-      testlist1.splice_after (testlist1.end(), testlist2, ++testlist2.begin());
+      testlist1.splice_after (testlist1.before_begin(), testlist2, ++testlist2.begin());
       {  int init_values [] = { 4 };
          TEST_INTRUSIVE_SEQUENCE( init_values, testlist1.begin() );  }
       {  int init_values [] = { 1, 3, 5, 2 };
          TEST_INTRUSIVE_SEQUENCE( init_values, testlist2.begin() );  }
 
       testlist1.splice_after (testlist1.begin(), testlist2, 
-                              testlist2.end(), ++++testlist2.begin());
+                              testlist2.before_begin(), ++++testlist2.begin());
       {  int init_values [] = { 4, 1, 3, 5 };
          TEST_INTRUSIVE_SEQUENCE( init_values, testlist1.begin() );  }
       {  int init_values [] = { 2 };
          TEST_INTRUSIVE_SEQUENCE( init_values, testlist2.begin() );  }
    }
+   if(!list_type::linear)
    {
       list_type testlist1 (&values[0], &values[0] + 2);
       list_type testlist2 (&values[0] + 3, &values[0] + 5);
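Note that the insertion and splice anchors in this test changed from end() to before_begin(): with the new linear slists the end iterator no longer designates the header node, so before_begin() is the anchor that works for both configurations. A small sketch of that pattern, again using an illustrative value type:

   #include <boost/intrusive/slist.hpp>
   #include <cassert>

   using namespace boost::intrusive;

   struct MyValue : public slist_base_hook<>
   {
      int value_;
   };

   int main()
   {
      MyValue values[3];
      for(int i = 0; i < 3; ++i)
         values[i].value_ = i + 1;

      slist< MyValue, linear<true> > l;
      // Insert the whole range right after the position before the first
      // element; this anchor is valid for circular and linear slists alike.
      l.insert_after(l.before_begin(), &values[0], &values[0] + 3);
      assert(l.front().value_ == 1);

      l.clear();   // reset the hooks before the values are destroyed
      return 0;
   }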
@@ -326,10 +339,32 @@
       {  int init_values [] = { 4, 3 };
          TEST_INTRUSIVE_SEQUENCE( init_values, testlist2.begin() );  }
    }
+   if(!list_type::linear)
+   {
+      list_type testlist1 (&values[0], &values[1]);
+
+      {  int init_values [] = { 1 };
+         TEST_INTRUSIVE_SEQUENCE( init_values, testlist1.begin() );  }
+
+      values[1].swap_nodes(values[2]);
+
+      {  int init_values [] = { 1 };
+         TEST_INTRUSIVE_SEQUENCE( init_values, testlist1.begin() );  }
+
+      values[0].swap_nodes(values[2]);
+
+      {  int init_values [] = { 3 };
+         TEST_INTRUSIVE_SEQUENCE( init_values, testlist1.begin() );  }
+
+      values[0].swap_nodes(values[2]);
+
+      {  int init_values [] = { 1 };
+         TEST_INTRUSIVE_SEQUENCE( init_values, testlist1.begin() );  }
+   }
 }  
 
-template<class ValueTraits>
-void test_slist<ValueTraits>
+template<class ValueTraits, bool Linear>
+void test_slist<ValueTraits, Linear>
    ::test_clone(std::vector<typename ValueTraits::value_type>& values)
 {
    typedef typename ValueTraits::value_type value_type;
@@ -338,6 +373,7 @@
       , value_traits<ValueTraits>
       , size_type<std::size_t>
       , constant_time_size<value_type::constant_time_size>
+      , linear<Linear>
       > list_type;
 
       list_type testlist1 (&values[0], &values[0] + values.size());
@@ -349,9 +385,10 @@
       BOOST_TEST (testlist2.empty());
 }
 
-template<class ValueTraits>
-void test_slist<ValueTraits>
-   ::test_container_from_end(std::vector<typename ValueTraits::value_type>& values)
+template<class ValueTraits, bool Linear>
+void test_slist<ValueTraits, Linear>
+   ::test_container_from_end(std::vector<typename ValueTraits::value_type>& values
+                            ,detail::bool_<false>)
 {
    typedef typename ValueTraits::value_type value_type;
    typedef slist
@@ -359,6 +396,7 @@
       , value_traits<ValueTraits>
       , size_type<std::size_t>
       , constant_time_size<value_type::constant_time_size>
+      , linear<Linear>
       > list_type;
    list_type testlist1 (&values[0], &values[0] + values.size());
    BOOST_TEST (testlist1 == list_type::container_from_end_iterator(testlist1.end()));
@@ -380,7 +418,26 @@
                   < value_type
                   , typename value_type::slist_base_hook_t
                   >::type
+                 , false
+                >::test_all(data);
+      test_slist < typename detail::get_member_value_traits
+                  < value_type
+                  , member_hook< value_type
+                               , typename value_type::slist_member_hook_t
+                               , &value_type::slist_node_
+                               >
+                  >::type
+                 , false
+                >::test_all(data);
+
+      //Now linear slists
+      test_slist < typename detail::get_base_value_traits
+                  < value_type
+                  , typename value_type::slist_base_hook_t
+                  >::type
+                 , true
                 >::test_all(data);
+
       test_slist < typename detail::get_member_value_traits
                   < value_type
                   , member_hook< value_type
@@ -388,6 +445,7 @@
                                , &value_type::slist_node_
                                >
                   >::type
+                 , true
                 >::test_all(data);
 
       return 0;
@@ -409,6 +467,7 @@
                   < value_type
                   , typename value_type::slist_base_hook_t
                   >::type
+                 , false
                 >::test_all(data);
 
       test_slist < typename detail::get_member_value_traits
@@ -418,12 +477,14 @@
                                , &value_type::slist_node_
                                >
                   >::type
+                 , false
                 >::test_all(data);
 
       test_slist < typename detail::get_base_value_traits
                   < value_type
                   , typename value_type::slist_auto_base_hook_t
                   >::type
+                 , false
                 >::test_all(data);
 
       test_slist < typename detail::get_member_value_traits
@@ -433,6 +494,24 @@
                                , &value_type::slist_auto_node_
                                >
                   >::type
+                 , false
+                >::test_all(data);
+
+      test_slist < typename detail::get_base_value_traits
+                  < value_type
+                  , typename value_type::slist_base_hook_t
+                  >::type
+                 , true
+                >::test_all(data);
+
+      test_slist < typename detail::get_member_value_traits
+                  < value_type
+                  , member_hook< value_type
+                               , typename value_type::slist_member_hook_t
+                               , &value_type::slist_node_
+                               >
+                  >::type
+                 , true
                 >::test_all(data);
       return 0;
    }
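The false/true arguments appended to every test_slist instantiation above are the new Linear flag, which test_all forwards through detail::bool_<Linear>() so that test_container_from_end collapses to a no-op for linear slists. A standalone sketch of that tag-dispatch technique, written without Intrusive's internal detail::bool_<> (the names here are illustrative):

   #include <cstdio>

   template<bool B>
   struct bool_ {};   // stand-in for the library's internal detail::bool_<>

   // Linear slists have no container_from_end_iterator(), so this overload
   // is intentionally empty.
   void test_container_from_end(bool_<true>)
   {}

   void test_container_from_end(bool_<false>)
   {
      std::printf("checking container_from_end_iterator\n");
   }

   template<bool Linear>
   void test_all()
   {
      test_container_from_end(bool_<Linear>());
   }

   int main()
   {
      test_all<false>();   // circular slist: the check runs
      test_all<true>();    // linear slist: the check is skipped
      return 0;
   }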