a), _M_bucket_count(0), _M_element_count(0), _M_rehash_policy() { _M_bucket_count = _M_rehash_policy._M_next_bkt(__bucket_hint); _M_buckets = _M_allocate_buckets(_M_bucket_count); } template template _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _Hashtable(_InputIterator __f, _InputIterator __l, size_type __bucket_hint, const _H1& __h1, const _H2& __h2, const _Hash& __h, const _Equal& __eq, const _ExtractKey& __exk, const allocator_type& __a) : __detail::_Rehash_base<_RehashPolicy, _Hashtable>(), __detail::_Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2, _Hash, __chc>(__exk, __eq, __h1, __h2, __h), __detail::_Map_base<_Key, _Value, _ExtractKey, __uk, _Hashtable>(), _M_node_allocator(__a), _M_bucket_count(0), _M_element_count(0), _M_rehash_policy() { _M_bucket_count = std::max(_M_rehash_policy._M_next_bkt(__bucket_hint), _M_rehash_policy. _M_bkt_for_elements(__detail:: __distance_fw(__f, __l))); _M_buckets = _M_allocate_buckets(_M_bucket_count); try { for (; __f != __l; ++__f) this->insert(*__f); } catch(...) { clear(); _M_deallocate_buckets(_M_buckets, _M_bucket_count); __throw_exception_again; } } template _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _Hashtable(const _Hashtable& __ht) : __detail::_Rehash_base<_RehashPolicy, _Hashtable>(__ht), __detail::_Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2, _Hash, __chc>(__ht), __detail::_Map_base<_Key, _Value, _ExtractKey, __uk, _Hashtable>(__ht), _M_node_allocator(__ht._M_node_allocator), _M_bucket_count(__ht._M_bucket_count), _M_element_count(__ht._M_element_count), _M_rehash_policy(__ht._M_rehash_policy) { _M_buckets = _M_allocate_buckets(_M_bucket_count); try { for (size_type __i = 0; __i < __ht._M_bucket_count; ++__i) { _Node* __n = __ht._M_buckets[__i]; _Node** __tail = _M_buckets + __i; while (__n) { *__tail = _M_allocate_node(__n->_M_v); this->_M_copy_code(*__tail, __n); __tail = &((*__tail)->_M_next); __n = __n->_M_next; } } } catch(...) 
{ clear(); _M_deallocate_buckets(_M_buckets, _M_bucket_count); __throw_exception_again; } } #ifdef _GLIBCXX_INCLUDE_AS_CXX0X template _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _Hashtable(_Hashtable&& __ht) : __detail::_Rehash_base<_RehashPolicy, _Hashtable>(__ht), __detail::_Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2, _Hash, __chc>(__ht), __detail::_Map_base<_Key, _Value, _ExtractKey, __uk, _Hashtable>(__ht), _M_node_allocator(__ht._M_node_allocator), _M_bucket_count(__ht._M_bucket_count), _M_element_count(__ht._M_element_count), _M_rehash_policy(__ht._M_rehash_policy), _M_buckets(__ht._M_buckets) { size_type __n_bkt = __ht._M_rehash_policy._M_next_bkt(0); __ht._M_buckets = __ht._M_allocate_buckets(__n_bkt); __ht._M_bucket_count = __n_bkt; __ht._M_element_count = 0; __ht._M_rehash_policy = _RehashPolicy(); } #endif template _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>& _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: operator=(const _Hashtable& __ht) { _Hashtable __tmp(__ht); this->swap(__tmp); return *this; } template _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: ~_Hashtable() { clear(); _M_deallocate_buckets(_M_buckets, _M_bucket_count); } template void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: #ifdef _GLIBCXX_INCLUDE_AS_CXX0X swap(_Hashtable&& __x) #else swap(_Hashtable& __x) #endif { // The only base class with member variables is hash_code_base. We // define _Hash_code_base::_M_swap because different specializations // have different members. __detail::_Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2, _Hash, __chc>::_M_swap(__x); // _GLIBCXX_RESOLVE_LIB_DEFECTS // 431. Swapping containers with unequal allocators. std::__alloc_swap<_Node_allocator_type>::_S_do_it(_M_node_allocator, __x._M_node_allocator); std::swap(_M_rehash_policy, __x._M_rehash_policy); std::swap(_M_buckets, __x._M_buckets); std::swap(_M_bucket_count, __x._M_bucket_count); std::swap(_M_element_count, __x._M_element_count); } template void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: __rehash_policy(const _RehashPolicy& __pol) { _M_rehash_policy = __pol; size_type __n_bkt = __pol._M_bkt_for_elements(_M_element_count); if (__n_bkt > _M_bucket_count) _M_rehash(__n_bkt); } template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::iterator _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: find(const key_type& __k) { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); _Node* __p = _M_find_node(_M_buckets[__n], __k, __code); return __p ? 
iterator(__p, _M_buckets + __n) : this->end(); } template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::const_iterator _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: find(const key_type& __k) const { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); _Node* __p = _M_find_node(_M_buckets[__n], __k, __code); return __p ? const_iterator(__p, _M_buckets + __n) : this->end(); } template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::size_type _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: count(const key_type& __k) const { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); std::size_t __result = 0; for (_Node* __p = _M_buckets[__n]; __p; __p = __p->_M_next) if (this->_M_compare(__k, __code, __p)) ++__result; return __result; } template std::pair::iterator, typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::iterator> _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: equal_range(const key_type& __k) { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); _Node** __head = _M_buckets + __n; _Node* __p = _M_find_node(*__head, __k, __code); if (__p) { _Node* __p1 = __p->_M_next; for (; __p1; __p1 = __p1->_M_next) if (!this->_M_compare(__k, __code, __p1)) break; iterator __first(__p, __head); iterator __last(__p1, __head); if (!__p1) __last._M_incr_bucket(); return std::make_pair(__first, __last); } else return std::make_pair(this->end(), this->end()); } template std::pair::const_iterator, typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::const_iterator> _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: equal_range(const key_type& __k) const { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); _Node** __head = _M_buckets + __n; _Node* __p = _M_find_node(*__head, __k, __code); if (__p) { _Node* __p1 = __p->_M_next; for (; __p1; __p1 = __p1->_M_next) if (!this->_M_compare(__k, __code, __p1)) break; const_iterator __first(__p, __head); const_iterator __last(__p1, __head); if (!__p1) __last._M_incr_bucket(); return std::make_pair(__first, __last); } else return std::make_pair(this->end(), this->end()); } // Find the node whose key compares equal to k, beginning the search // at p (usually the head of a bucket). Return nil if no node is found. 
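// Illustrative sketch (not part of the library): each bucket is a singly
// linked chain of nodes, so the search below is a plain list walk that
// lets _M_compare check the cached hash code before the key.  A
// stand-alone equivalent, assuming a hypothetical node type with an
// _M_next member and a caller-supplied match predicate, could look like:
//
//   template<typename _Node, typename _Key, typename _Pred>
//     _Node*
//     __find_in_chain(_Node* __p, const _Key& __k, _Pred __match)
//     {
//       for (; __p; __p = __p->_M_next)
//         if (__match(__k, __p))
//           return __p;              // first node whose key compares equal
//       return 0;                    // "nil": no such node in this bucket
//     }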
template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::_Node* _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _M_find_node(_Node* __p, const key_type& __k, typename _Hashtable::_Hash_code_type __code) const { for (; __p; __p = __p->_M_next) if (this->_M_compare(__k, __code, __p)) return __p; return false; } // Insert v in bucket n (assumes no element with its key already present). template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::iterator _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _M_insert_bucket(const value_type& __v, size_type __n, typename _Hashtable::_Hash_code_type __code) { std::pair __do_rehash = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1); // Allocate the new node before doing the rehash so that we don't // do a rehash if the allocation throws. _Node* __new_node = _M_allocate_node(__v); try { if (__do_rehash.first) { const key_type& __k = this->_M_extract(__v); __n = this->_M_bucket_index(__k, __code, __do_rehash.second); _M_rehash(__do_rehash.second); } __new_node->_M_next = _M_buckets[__n]; this->_M_store_code(__new_node, __code); _M_buckets[__n] = __new_node; ++_M_element_count; return iterator(__new_node, _M_buckets + __n); } catch(...) { _M_deallocate_node(__new_node); __throw_exception_again; } } // Insert v if no element with its key is already present. template std::pair::iterator, bool> _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _M_insert(const value_type& __v, std::_GLIBCXX_TR1 true_type) { const key_type& __k = this->_M_extract(__v); typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); size_type __n = this->_M_bucket_index(__k, __code, _M_bucket_count); if (_Node* __p = _M_find_node(_M_buckets[__n], __k, __code)) return std::make_pair(iterator(__p, _M_buckets + __n), false); return std::make_pair(_M_insert_bucket(__v, __n, __code), true); } // Insert v unconditionally. template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::iterator _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _M_insert(const value_type& __v, std::_GLIBCXX_TR1 false_type) { std::pair __do_rehash = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1); if (__do_rehash.first) _M_rehash(__do_rehash.second); const key_type& __k = this->_M_extract(__v); typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); size_type __n = this->_M_bucket_index(__k, __code, _M_bucket_count); // First find the node, avoid leaking new_node if compare throws. _Node* __prev = _M_find_node(_M_buckets[__n], __k, __code); _Node* __new_node = _M_allocate_node(__v); if (__prev) { __new_node->_M_next = __prev->_M_next; __prev->_M_next = __new_node; } else { __new_node->_M_next = _M_buckets[__n]; _M_buckets[__n] = __new_node; } this->_M_store_code(__new_node, __code); ++_M_element_count; return iterator(__new_node, _M_buckets + __n); } // For erase(iterator) and erase(const_iterator). 
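// Illustrative usage (not part of the library): within this
// implementation erase(iterator) returns the iterator following the
// erased element, so elements can be erased while iterating without
// extra bookkeeping.  A hedged sketch at the container level (the map
// type, the predicate and the function name are only examples):
//
//   #include <tr1/unordered_map>
//
//   inline void
//   __drop_negative(std::tr1::unordered_map<int, int>& __m)
//   {
//     typedef std::tr1::unordered_map<int, int>::iterator _It;
//     for (_It __it = __m.begin(); __it != __m.end();)
//       {
//         if (__it->second < 0)
//           __it = __m.erase(__it);   // erase hands back the next position
//         else
//           ++__it;
//       }
//   }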
template void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _M_erase_node(_Node* __p, _Node** __b) { _Node* __cur = *__b; if (__cur == __p) *__b = __cur->_M_next; else { _Node* __next = __cur->_M_next; while (__next != __p) { __cur = __next; __next = __cur->_M_next; } __cur->_M_next = __next->_M_next; } _M_deallocate_node(__p); --_M_element_count; } template template void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: insert(_InputIterator __first, _InputIterator __last) { size_type __n_elt = __detail::__distance_fw(__first, __last); std::pair __do_rehash = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, __n_elt); if (__do_rehash.first) _M_rehash(__do_rehash.second); for (; __first != __last; ++__first) this->insert(*__first); } template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::iterator _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: erase(iterator __it) { iterator __result = __it; ++__result; _M_erase_node(__it._M_cur_node, __it._M_cur_bucket); return __result; } template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::const_iterator _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: erase(const_iterator __it) { const_iterator __result = __it; ++__result; _M_erase_node(__it._M_cur_node, __it._M_cur_bucket); return __result; } template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::size_type _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: erase(const key_type& __k) { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); size_type __result = 0; _Node** __slot = _M_buckets + __n; while (*__slot && !this->_M_compare(__k, __code, *__slot)) __slot = &((*__slot)->_M_next); _Node** __saved_slot = 0; while (*__slot && this->_M_compare(__k, __code, *__slot)) { // _GLIBCXX_RESOLVE_LIB_DEFECTS // 526. Is it undefined if a function in the standard changes // in parameters? if (&this->_M_extract((*__slot)->_M_v) != &__k) { _Node* __p = *__slot; *__slot = __p->_M_next; _M_deallocate_node(__p); --_M_element_count; ++__result; } else { __saved_slot = __slot; __slot = &((*__slot)->_M_next); } } if (__saved_slot) { _Node* __p = *__saved_slot; *__saved_slot = __p->_M_next; _M_deallocate_node(__p); --_M_element_count; ++__result; } return __result; } // ??? This could be optimized by taking advantage of the bucket // structure, but it's not clear that it's worth doing. It probably // wouldn't even be an optimization unless the load factor is large. 
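// Illustrative note (not part of the library): the __saved_slot dance in
// erase(const key_type&) above exists because the caller may pass a
// reference to a key stored inside the container itself (LWG 526); that
// node must be unlinked last, after __k is no longer needed.  A hedged
// example (the container type and function name are only examples):
//
//   #include <string>
//   #include <tr1/unordered_set>
//
//   inline void
//   __erase_aliased_key(std::tr1::unordered_multiset<std::string>& __s)
//   {
//     if (!__s.empty())
//       __s.erase(*__s.begin());   // the key argument aliases a stored
//                                  // key; its node is unlinked last so
//                                  // the reference stays valid while the
//                                  // remaining duplicates are compared
//   }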
template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::iterator _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: erase(iterator __first, iterator __last) { while (__first != __last) __first = this->erase(__first); return __last; } template typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::const_iterator _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: erase(const_iterator __first, const_iterator __last) { while (__first != __last) __first = this->erase(__first); return __last; } template void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: clear() { _M_deallocate_nodes(_M_buckets, _M_bucket_count); _M_element_count = 0; } template void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: rehash(size_type __n) { _M_rehash(std::max(_M_rehash_policy._M_next_bkt(__n), _M_rehash_policy._M_bkt_for_elements(_M_element_count + 1))); } template void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _M_rehash(size_type __n) { _Node** __new_array = _M_allocate_buckets(__n); try { for (size_type __i = 0; __i < _M_bucket_count; ++__i) while (_Node* __p = _M_buckets[__i]) { std::size_t __new_index = this->_M_bucket_index(__p, __n); _M_buckets[__i] = __p->_M_next; __p->_M_next = __new_array[__new_index]; __new_array[__new_index] = __p; } _M_deallocate_buckets(_M_buckets, _M_bucket_count); _M_bucket_count = __n; _M_buckets = __new_array; } catch(...) { // A failure here means that a hash function threw an exception. // We can't restore the previous state without calling the hash // function again, so the only sensible recovery is to delete // everything. _M_deallocate_nodes(__new_array, __n); _M_deallocate_buckets(__new_array, __n); _M_deallocate_nodes(_M_buckets, _M_bucket_count); _M_element_count = 0; __throw_exception_again; } } _GLIBCXX_END_NAMESPACE_TR1 } // TR1 utility -*- C++ -*- // Copyright (C) 2007 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the // terms of the GNU General Public License as published by the // Free Software Foundation; either version 2, or (at your option) // any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License along // with this library; see the file COPYING. If not, write to the Free // Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, // USA. // As a special exception, you may use this file as part of a free software // library without restriction. Specifically, if other files instantiate // templates or use macros or inline functions from this file, or you compile // this file and link it with other files to produce an executable, this // file does not by itself cause the resulting executable to be covered by // the GNU General Public License. 
This exception does not however // invalidate any other reasons why the executable file might be covered by // the GNU General Public License. /** @file tr1_impl/utility * This is an internal header file, included by other library headers. * You should not attempt to use it directly. */ namespace std { _GLIBCXX_BEGIN_NAMESPACE_TR1 template class tuple_size; template class tuple_element; // Various functions which give std::pair a tuple-like interface. template struct tuple_size > { static const int value = 2; }; template const int tuple_size >::value; template struct tuple_element<0, std::pair<_Tp1, _Tp2> > { typedef _Tp1 type; }; template struct tuple_element<1, std::pair<_Tp1, _Tp2> > { typedef _Tp2 type; }; template struct __pair_get; template<> struct __pair_get<0> { template static _Tp1& __get(std::pair<_Tp1, _Tp2>& __pair) { return __pair.first; } template static const _Tp1& __const_get(const std::pair<_Tp1, _Tp2>& __pair) { return __pair.first; } }; template<> struct __pair_get<1> { template static _Tp2& __get(std::pair<_Tp1, _Tp2>& __pair) { return __pair.second; } template static const _Tp2& __const_get(const std::pair<_Tp1, _Tp2>& __pair) { return __pair.second; } }; template inline typename tuple_element<_Int, std::pair<_Tp1, _Tp2> >::type& get(std::pair<_Tp1, _Tp2>& __in) { return __pair_get<_Int>::__get(__in); } template inline const typename tuple_element<_Int, std::pair<_Tp1, _Tp2> >::type& get(const std::pair<_Tp1, _Tp2>& __in) { return __pair_get<_Int>::__const_get(__in); } _GLIBCXX_END_NAMESPACE_TR1 } // TR1 cstdio -*- C++ -*- // Copyright (C) 2007 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the // terms of the GNU General Public License as published by the // Free Software Foundation; either version 2, or (at your option) // any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License along // with this library; see the file COPYING. If not, write to the Free // Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, // USA. // As a special exception, you may use this file as part of a free software // library without restriction. Specifically, if other files instantiate // templates or use macros or inline functions from this file, or you compile // this file and link it with other files to produce an executable, this // file does not by itself cause the resulting executable to be covered by // the GNU General Public License. This exception does not however // invalidate any other reasons why the executable file might be covered by // the GNU General Public License. /** @file tr1_impl/cstdio * This is an internal header file, included by other library headers. * You should not attempt to use it directly. */ namespace std { _GLIBCXX_BEGIN_NAMESPACE_TR1 #if _GLIBCXX_USE_C99 using std::snprintf; using std::vsnprintf; using std::vfscanf; using std::vscanf; using std::vsscanf; #endif _GLIBCXX_END_NAMESPACE_TR1 } // -*- C++ -*- // Copyright (C) 2007 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. 
This library is free // software; you can redistribute it and/or modify it under the // terms of the GNU General Public License as published by the // Free Software Foundation; either version 2, or (at your option) // any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License along // with this library; see the file COPYING. If not, write to the Free // Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, // USA. // As a special exception, you may use this file as part of a free software // library without restriction. Specifically, if other files instantiate // templates or use macros or inline functions from this file, or you compile // this file and link it with other files to produce an executable, this // file does not by itself cause the resulting executable to be covered by // the GNU General Public License. This exception does not however // invalidate any other reasons why the executable file might be covered by // the GNU General Public License. // shared_count.hpp // Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd. // shared_ptr.hpp // Copyright (C) 1998, 1999 Greg Colvin and Beman Dawes. // Copyright (C) 2001, 2002, 2003 Peter Dimov // weak_ptr.hpp // Copyright (C) 2001, 2002, 2003 Peter Dimov // enable_shared_from_this.hpp // Copyright (C) 2002 Peter Dimov // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // GCC Note: based on version 1.32.0 of the Boost library. /** @file tr1_impl/boost_shared_ptr.h * This is an internal header file, included by other library headers. * You should not attempt to use it directly. */ namespace std { _GLIBCXX_BEGIN_NAMESPACE_TR1 template<_Lock_policy _Lp> class __weak_count { public: __weak_count() : _M_pi(0) // nothrow { } __weak_count(const __shared_count<_Lp>& __r) : _M_pi(__r._M_pi) // nothrow { if (_M_pi != 0) _M_pi->_M_weak_add_ref(); } __weak_count(const __weak_count<_Lp>& __r) : _M_pi(__r._M_pi) // nothrow { if (_M_pi != 0) _M_pi->_M_weak_add_ref(); } ~__weak_count() // nothrow { if (_M_pi != 0) _M_pi->_M_weak_release(); } __weak_count<_Lp>& operator=(const __shared_count<_Lp>& __r) // nothrow { _Sp_counted_base<_Lp>* __tmp = __r._M_pi; if (__tmp != 0) __tmp->_M_weak_add_ref(); if (_M_pi != 0) _M_pi->_M_weak_release(); _M_pi = __tmp; return *this; } __weak_count<_Lp>& operator=(const __weak_count<_Lp>& __r) // nothrow { _Sp_counted_base<_Lp>* __tmp = __r._M_pi; if (__tmp != 0) __tmp->_M_weak_add_ref(); if (_M_pi != 0) _M_pi->_M_weak_release(); _M_pi = __tmp; return *this; } void _M_swap(__weak_count<_Lp>& __r) // nothrow { _Sp_counted_base<_Lp>* __tmp = __r._M_pi; __r._M_pi = _M_pi; _M_pi = __tmp; } long _M_get_use_count() const // nothrow { return _M_pi != 0 ? 
_M_pi->_M_get_use_count() : 0; } friend inline bool operator==(const __weak_count<_Lp>& __a, const __weak_count<_Lp>& __b) { return __a._M_pi == __b._M_pi; } friend inline bool operator<(const __weak_count<_Lp>& __a, const __weak_count<_Lp>& __b) { return std::less<_Sp_counted_base<_Lp>*>()(__a._M_pi, __b._M_pi); } private: friend class __shared_count<_Lp>; _Sp_counted_base<_Lp>* _M_pi; }; // now that __weak_count is defined we can define this constructor: template<_Lock_policy _Lp> inline __shared_count<_Lp>:: __shared_count(const __weak_count<_Lp>& __r) : _M_pi(__r._M_pi) { if (_M_pi != 0) _M_pi->_M_add_ref_lock(); else __throw_bad_weak_ptr(); } // Forward declarations. template class __shared_ptr; template class __weak_ptr; template class __enable_shared_from_this; template class shared_ptr; template class weak_ptr; template class enable_shared_from_this; // Support for enable_shared_from_this. // Friend of __enable_shared_from_this. template<_Lock_policy _Lp, typename _Tp1, typename _Tp2> void __enable_shared_from_this_helper(const __shared_count<_Lp>&, const __enable_shared_from_this<_Tp1, _Lp>*, const _Tp2*); // Friend of enable_shared_from_this. template void __enable_shared_from_this_helper(const __shared_count<>&, const enable_shared_from_this<_Tp1>*, const _Tp2*); template<_Lock_policy _Lp> inline void __enable_shared_from_this_helper(const __shared_count<_Lp>&, ...) { } #ifdef _GLIBCXX_INCLUDE_AS_TR1 struct __static_cast_tag { }; struct __const_cast_tag { }; struct __dynamic_cast_tag { }; #endif /** * @class __shared_ptr * * A smart pointer with reference-counted copy semantics. * The object pointed to is deleted when the last shared_ptr pointing to * it is destroyed or reset. */ template class __shared_ptr { public: typedef _Tp element_type; /** @brief Construct an empty %__shared_ptr. * @post use_count()==0 && get()==0 */ __shared_ptr() : _M_ptr(0), _M_refcount() // never throws { } /** @brief Construct a %__shared_ptr that owns the pointer @a __p. * @param __p A pointer that is convertible to element_type*. * @post use_count() == 1 && get() == __p * @throw std::bad_alloc, in which case @c delete @a __p is called. */ template explicit __shared_ptr(_Tp1* __p) : _M_ptr(__p), _M_refcount(__p) { __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>) // __glibcxx_function_requires(_CompleteConcept<_Tp1*>) __enable_shared_from_this_helper(_M_refcount, __p, __p); } // // Requirements: _Deleter's copy constructor and destructor must // not throw // // __shared_ptr will release __p by calling __d(__p) // /** @brief Construct a %__shared_ptr that owns the pointer @a __p * and the deleter @a __d. * @param __p A pointer. * @param __d A deleter. * @post use_count() == 1 && get() == __p * @throw std::bad_alloc, in which case @a __d(__p) is called. */ template __shared_ptr(_Tp1* __p, _Deleter __d) : _M_ptr(__p), _M_refcount(__p, __d) { __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>) // TODO requires _Deleter CopyConstructible and __d(__p) well-formed __enable_shared_from_this_helper(_M_refcount, __p, __p); } #ifdef _GLIBCXX_INCLUDE_AS_CXX0X // // Requirements: _Deleter's copy constructor and destructor must // not throw _Alloc's copy constructor and destructor must not // throw. // // __shared_ptr will release __p by calling __d(__p) // /** @brief Construct a %__shared_ptr that owns the pointer @a __p * and the deleter @a __d. * @param __p A pointer. * @param __d A deleter. * @param __a An allocator. 
* @post use_count() == 1 && get() == __p * @throw std::bad_alloc, in which case @a __d(__p) is called. */ template __shared_ptr(_Tp1* __p, _Deleter __d, const _Alloc& __a) : _M_ptr(__p), _M_refcount(__p, __d, __a) { __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>) // TODO requires _Deleter CopyConstructible and __d(__p) well-formed __enable_shared_from_this_helper(_M_refcount, __p, __p); } /** @brief Constructs a %__shared_ptr instance that stores @a __p * and shares ownership with @a __r. * @param __r A %__shared_ptr. * @param __p A pointer that will remain valid while @a *__r is valid. * @post get() == __p && use_count() == __r.use_count() * * This can be used to construct a @c shared_ptr to a sub-object * of an object managed by an existing @c shared_ptr. * * @code * shared_ptr< pair > pii(new pair()); * shared_ptr pi(pii, &pii->first); * assert(pii.use_count() == 2); * @endcode */ template __shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r, _Tp* __p) : _M_ptr(__p), _M_refcount(__r._M_refcount) // never throws { } #endif // generated copy constructor, assignment, destructor are fine. /** @brief If @a __r is empty, constructs an empty %__shared_ptr; * otherwise construct a %__shared_ptr that shares ownership * with @a __r. * @param __r A %__shared_ptr. * @post get() == __r.get() && use_count() == __r.use_count() */ template __shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r) : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount) // never throws { __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>) } #ifdef _GLIBCXX_INCLUDE_AS_CXX0X /** @brief Move-constructs a %__shared_ptr instance from @a __r. * @param __r A %__shared_ptr rvalue. * @post *this contains the old value of @a __r, @a __r is empty. */ __shared_ptr(__shared_ptr&& __r) : _M_ptr(__r._M_ptr), _M_refcount() // never throws { _M_refcount._M_swap(__r._M_refcount); __r._M_ptr = 0; } /** @brief Move-constructs a %__shared_ptr instance from @a __r. * @param __r A %__shared_ptr rvalue. * @post *this contains the old value of @a __r, @a __r is empty. */ template __shared_ptr(__shared_ptr<_Tp1, _Lp>&& __r) : _M_ptr(__r._M_ptr), _M_refcount() // never throws { __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>) _M_refcount._M_swap(__r._M_refcount); __r._M_ptr = 0; } #endif /** @brief Constructs a %__shared_ptr that shares ownership with @a __r * and stores a copy of the pointer stored in @a __r. * @param __r A weak_ptr. * @post use_count() == __r.use_count() * @throw bad_weak_ptr when __r.expired(), * in which case the constructor has no effect. */ template explicit __shared_ptr(const __weak_ptr<_Tp1, _Lp>& __r) : _M_refcount(__r._M_refcount) // may throw { __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>) // It is now safe to copy __r._M_ptr, as _M_refcount(__r._M_refcount) // did not throw. 
_M_ptr = __r._M_ptr; } /** * @post use_count() == 1 and __r.get() == 0 */ #if !defined(__GXX_EXPERIMENTAL_CXX0X__) || _GLIBCXX_DEPRECATED template explicit __shared_ptr(std::auto_ptr<_Tp1>& __r) : _M_ptr(__r.get()), _M_refcount() { __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>) // TODO requires _Tp1 is complete, delete __r.release() well-formed _Tp1* __tmp = __r.get(); _M_refcount = __shared_count<_Lp>(__r); __enable_shared_from_this_helper(_M_refcount, __tmp, __tmp); } #endif #ifdef _GLIBCXX_INCLUDE_AS_TR1 template __shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r, __static_cast_tag) : _M_ptr(static_cast<_Tp*>(__r._M_ptr)), _M_refcount(__r._M_refcount) { } template __shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r, __const_cast_tag) : _M_ptr(const_cast<_Tp*>(__r._M_ptr)), _M_refcount(__r._M_refcount) { } template __shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r, __dynamic_cast_tag) : _M_ptr(dynamic_cast<_Tp*>(__r._M_ptr)), _M_refcount(__r._M_refcount) { if (_M_ptr == 0) // need to allocate new counter -- the cast failed _M_refcount = __shared_count<_Lp>(); } #endif template __shared_ptr& operator=(const __shared_ptr<_Tp1, _Lp>& __r) // never throws { _M_ptr = __r._M_ptr; _M_refcount = __r._M_refcount; // __shared_count::op= doesn't throw return *this; } #if !defined(__GXX_EXPERIMENTAL_CXX0X__) || _GLIBCXX_DEPRECATED template __shared_ptr& operator=(std::auto_ptr<_Tp1>& __r) { __shared_ptr(__r).swap(*this); return *this; } #endif #ifdef _GLIBCXX_INCLUDE_AS_CXX0X __shared_ptr& operator=(__shared_ptr&& __r) { __shared_ptr(std::move(__r)).swap(*this); return *this; } template __shared_ptr& operator=(__shared_ptr<_Tp1, _Lp>&& __r) { __shared_ptr(std::move(__r)).swap(*this); return *this; } #endif void reset() // never throws { __shared_ptr().swap(*this); } template void reset(_Tp1* __p) // _Tp1 must be complete. { // Catch self-reset errors. _GLIBCXX_DEBUG_ASSERT(__p == 0 || __p != _M_ptr); __shared_ptr(__p).swap(*this); } template void reset(_Tp1* __p, _Deleter __d) { __shared_ptr(__p, __d).swap(*this); } #ifdef _GLIBCXX_INCLUDE_AS_CXX0X template void reset(_Tp1* __p, _Deleter __d, const _Alloc& __a) { __shared_ptr(__p, __d, __a).swap(*this); } // Allow class instantiation when _Tp is [cv-qual] void. typename std::add_lvalue_reference<_Tp>::type #else // Allow class instantiation when _Tp is [cv-qual] void. typename std::tr1::add_reference<_Tp>::type #endif operator*() const // never throws { _GLIBCXX_DEBUG_ASSERT(_M_ptr != 0); return *_M_ptr; } _Tp* operator->() const // never throws { _GLIBCXX_DEBUG_ASSERT(_M_ptr != 0); return _M_ptr; } _Tp* get() const // never throws { return _M_ptr; } // Implicit conversion to "bool" private: typedef _Tp* __shared_ptr::*__unspecified_bool_type; public: operator __unspecified_bool_type() const // never throws { return _M_ptr == 0 ? 0 : &__shared_ptr::_M_ptr; } bool unique() const // never throws { return _M_refcount._M_unique(); } long use_count() const // never throws { return _M_refcount._M_get_use_count(); } void swap(__shared_ptr<_Tp, _Lp>& __other) // never throws { std::swap(_M_ptr, __other._M_ptr); _M_refcount._M_swap(__other._M_refcount); } #ifdef _GLIBCXX_INCLUDE_AS_CXX0X protected: // This constructor is non-standard, it is used by allocate_shared. template __shared_ptr(_Sp_make_shared_tag __tag, _Alloc __a, _Args&&... __args) : _M_ptr() , _M_refcount(__tag, (_Tp*)0, __a, std::forward<_Args>(__args)...) { // _M_ptr needs to point to the newly constructed object.
// This relies on _Sp_counted_ptr_inplace::_M_get_deleter. void * __p = _M_refcount._M_get_deleter(typeid(__tag)); _M_ptr = static_cast<_Tp*>(__p); } template friend __shared_ptr<_Tp1, _Lp1> __allocate_shared(_Alloc __a, _Args&&... __args); #endif private: void* _M_get_deleter(const std::type_info& __ti) const { return _M_refcount._M_get_deleter(__ti); } template bool _M_less(const __shared_ptr<_Tp1, _Lp1>& __rhs) const { return _M_refcount < __rhs._M_refcount; } template friend class __shared_ptr; template friend class __weak_ptr; template friend _Del* get_deleter(const __shared_ptr<_Tp1, _Lp1>&); // Friends injected into enclosing namespace and found by ADL: template friend inline bool operator==(const __shared_ptr& __a, const __shared_ptr<_Tp1, _Lp>& __b) { return __a.get() == __b.get(); } template friend inline bool operator!=(const __shared_ptr& __a, const __shared_ptr<_Tp1, _Lp>& __b) { return __a.get() != __b.get(); } template friend inline bool operator<(const __shared_ptr& __a, const __shared_ptr<_Tp1, _Lp>& __b) { return __a._M_less(__b); } _Tp* _M_ptr; // Contained pointer. __shared_count<_Lp> _M_refcount; // Reference counter. }; // 2.2.3.8 shared_ptr specialized algorithms. template inline void swap(__shared_ptr<_Tp, _Lp>& __a, __shared_ptr<_Tp, _Lp>& __b) { __a.swap(__b); } // 2.2.3.9 shared_ptr casts /** @warning The seemingly equivalent * shared_ptr<_Tp, _Lp>(static_cast<_Tp*>(__r.get())) * will eventually result in undefined behaviour, * attempting to delete the same object twice. */ template inline __shared_ptr<_Tp, _Lp> static_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) { #ifdef _GLIBCXX_INCLUDE_AS_CXX0X return __shared_ptr<_Tp, _Lp>(__r, static_cast<_Tp*>(__r.get())); #else return __shared_ptr<_Tp, _Lp>(__r, __static_cast_tag()); #endif } /** @warning The seemingly equivalent * shared_ptr<_Tp, _Lp>(const_cast<_Tp*>(__r.get())) * will eventually result in undefined behaviour, * attempting to delete the same object twice. */ template inline __shared_ptr<_Tp, _Lp> const_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) { #ifdef _GLIBCXX_INCLUDE_AS_CXX0X return __shared_ptr<_Tp, _Lp>(__r, const_cast<_Tp*>(__r.get())); #else return __shared_ptr<_Tp, _Lp>(__r, __const_cast_tag()); #endif } /** @warning The seemingly equivalent * shared_ptr<_Tp, _Lp>(dynamic_cast<_Tp*>(__r.get())) * will eventually result in undefined behaviour, * attempting to delete the same object twice. */ template inline __shared_ptr<_Tp, _Lp> dynamic_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) { #ifdef _GLIBCXX_INCLUDE_AS_CXX0X if (_Tp* __p = dynamic_cast<_Tp*>(__r.get())) return __shared_ptr<_Tp, _Lp>(__r, __p); return __shared_ptr<_Tp, _Lp>(); #else return __shared_ptr<_Tp, _Lp>(__r, __dynamic_cast_tag()); #endif } // 2.2.3.7 shared_ptr I/O template std::basic_ostream<_Ch, _Tr>& operator<<(std::basic_ostream<_Ch, _Tr>& __os, const __shared_ptr<_Tp, _Lp>& __p) { __os << __p.get(); return __os; } // 2.2.3.10 shared_ptr get_deleter (experimental) template inline _Del* get_deleter(const __shared_ptr<_Tp, _Lp>& __p) { return static_cast<_Del*>(__p._M_get_deleter(typeid(_Del))); } template class __weak_ptr { public: typedef _Tp element_type; __weak_ptr() : _M_ptr(0), _M_refcount() // never throws { } // Generated copy constructor, assignment, destructor are fine. // The "obvious" converting constructor implementation: // // template // __weak_ptr(const __weak_ptr<_Tp1, _Lp>& __r) // : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount) // never throws // { } // // has a serious problem. 
// // __r._M_ptr may already have been invalidated. The _M_ptr(__r._M_ptr) // conversion may require access to *__r._M_ptr (virtual inheritance). // // It is not possible to avoid spurious access violations since // in multithreaded programs __r._M_ptr may be invalidated at any point. template __weak_ptr(const __weak_ptr<_Tp1, _Lp>& __r) : _M_refcount(__r._M_refcount) // never throws { __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>) _M_ptr = __r.lock().get(); } template __weak_ptr(const __shared_ptr<_Tp1, _Lp>& __r) : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount) // never throws { __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>) } template __weak_ptr& operator=(const __weak_ptr<_Tp1, _Lp>& __r) // never throws { _M_ptr = __r.lock().get(); _M_refcount = __r._M_refcount; return *this; } template __weak_ptr& operator=(const __shared_ptr<_Tp1, _Lp>& __r) // never throws { _M_ptr = __r._M_ptr; _M_refcount = __r._M_refcount; return *this; } __shared_ptr<_Tp, _Lp> lock() const // never throws { #ifdef __GTHREADS // Optimization: avoid throw overhead. if (expired()) return __shared_ptr(); try { return __shared_ptr(*this); } catch(const bad_weak_ptr&) { // Q: How can we get here? // A: Another thread may have invalidated r after the // use_count test above. return __shared_ptr(); } #else // Optimization: avoid try/catch overhead when single threaded. return expired() ? __shared_ptr() : __shared_ptr(*this); #endif } // XXX MT long use_count() const // never throws { return _M_refcount._M_get_use_count(); } bool expired() const // never throws { return _M_refcount._M_get_use_count() == 0; } void reset() // never throws { __weak_ptr().swap(*this); } void swap(__weak_ptr& __s) // never throws { std::swap(_M_ptr, __s._M_ptr); _M_refcount._M_swap(__s._M_refcount); } private: // Used by __enable_shared_from_this. void _M_assign(_Tp* __ptr, const __shared_count<_Lp>& __refcount) { _M_ptr = __ptr; _M_refcount = __refcount; } template bool _M_less(const __weak_ptr<_Tp1, _Lp>& __rhs) const { return _M_refcount < __rhs._M_refcount; } template friend class __shared_ptr; template friend class __weak_ptr; friend class __enable_shared_from_this<_Tp, _Lp>; friend class enable_shared_from_this<_Tp>; // Friend injected into namespace and found by ADL. template friend inline bool operator<(const __weak_ptr& __lhs, const __weak_ptr<_Tp1, _Lp>& __rhs) { return __lhs._M_less(__rhs); } _Tp* _M_ptr; // Contained pointer. __weak_count<_Lp> _M_refcount; // Reference counter. }; // 2.2.4.7 weak_ptr specialized algorithms. 
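// Illustrative usage of __weak_ptr::lock() defined above (not part of the
// library): lock() is the race-free way to recover owning access from a
// weak reference; the function name below is only an example.
//
//   #include <tr1/memory>
//
//   inline void
//   __bump_if_alive(const std::tr1::weak_ptr<int>& __wp)
//   {
//     if (std::tr1::shared_ptr<int> __sp = __wp.lock())
//       ++*__sp;                 // __sp keeps the object alive here
//     // else: the last owner already released the object; do nothing
//   }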
template inline void swap(__weak_ptr<_Tp, _Lp>& __a, __weak_ptr<_Tp, _Lp>& __b) { __a.swap(__b); } template class __enable_shared_from_this { protected: __enable_shared_from_this() { } __enable_shared_from_this(const __enable_shared_from_this&) { } __enable_shared_from_this& operator=(const __enable_shared_from_this&) { return *this; } ~__enable_shared_from_this() { } public: __shared_ptr<_Tp, _Lp> shared_from_this() { return __shared_ptr<_Tp, _Lp>(this->_M_weak_this); } __shared_ptr shared_from_this() const { return __shared_ptr(this->_M_weak_this); } private: template void _M_weak_assign(_Tp1* __p, const __shared_count<_Lp>& __n) const { _M_weak_this._M_assign(__p, __n); } template friend void __enable_shared_from_this_helper(const __shared_count<_Lp>& __pn, const __enable_shared_from_this* __pe, const _Tp1* __px) { if (__pe != 0) __pe->_M_weak_assign(const_cast<_Tp1*>(__px), __pn); } mutable __weak_ptr<_Tp, _Lp> _M_weak_this; }; /// shared_ptr // The actual shared_ptr, with forwarding constructors and // assignment operators. template class shared_ptr : public __shared_ptr<_Tp> { public: shared_ptr() : __shared_ptr<_Tp>() { } template explicit shared_ptr(_Tp1* __p) : __shared_ptr<_Tp>(__p) { } template shared_ptr(_Tp1* __p, _Deleter __d) : __shared_ptr<_Tp>(__p, __d) { } #ifdef _GLIBCXX_INCLUDE_AS_CXX0X template shared_ptr(_Tp1* __p, _Deleter __d, const _Alloc& __a) : __shared_ptr<_Tp>(__p, __d, __a) { } // Aliasing constructor template shared_ptr(const shared_ptr<_Tp1>& __r, _Tp* __p) : __shared_ptr<_Tp>(__r, __p) { } #endif template shared_ptr(const shared_ptr<_Tp1>& __r) : __shared_ptr<_Tp>(__r) { } #ifdef _GLIBCXX_INCLUDE_AS_CXX0X shared_ptr(shared_ptr&& __r) : __shared_ptr<_Tp>(std::move(__r)) { } template shared_ptr(shared_ptr<_Tp1>&& __r) : __shared_ptr<_Tp>(std::move(__r)) { } #endif template explicit shared_ptr(const weak_ptr<_Tp1>& __r) : __shared_ptr<_Tp>(__r) { } #if !defined(__GXX_EXPERIMENTAL_CXX0X__) || _GLIBCXX_DEPRECATED template explicit shared_ptr(std::auto_ptr<_Tp1>& __r) : __shared_ptr<_Tp>(__r) { } #endif #ifdef _GLIBCXX_INCLUDE_AS_TR1 template shared_ptr(const shared_ptr<_Tp1>& __r, __static_cast_tag) : __shared_ptr<_Tp>(__r, __static_cast_tag()) { } template shared_ptr(const shared_ptr<_Tp1>& __r, __const_cast_tag) : __shared_ptr<_Tp>(__r, __const_cast_tag()) { } template shared_ptr(const shared_ptr<_Tp1>& __r, __dynamic_cast_tag) : __shared_ptr<_Tp>(__r, __dynamic_cast_tag()) { } #endif template shared_ptr& operator=(const shared_ptr<_Tp1>& __r) // never throws { this->__shared_ptr<_Tp>::operator=(__r); return *this; } #if !defined(__GXX_EXPERIMENTAL_CXX0X__) || _GLIBCXX_DEPRECATED template shared_ptr& operator=(std::auto_ptr<_Tp1>& __r) { this->__shared_ptr<_Tp>::operator=(__r); return *this; } #endif #ifdef _GLIBCXX_INCLUDE_AS_CXX0X shared_ptr& operator=(shared_ptr&& __r) { this->__shared_ptr<_Tp>::operator=(std::move(__r)); return *this; } template shared_ptr& operator=(shared_ptr<_Tp1>&& __r) { this->__shared_ptr<_Tp>::operator=(std::move(__r)); return *this; } #endif #ifdef _GLIBCXX_INCLUDE_AS_CXX0X private: // This constructor is non-standard, it is used by allocate_shared. template shared_ptr(_Sp_make_shared_tag __tag, _Alloc __a, _Args&&... __args) : __shared_ptr<_Tp>(__tag, __a, std::forward<_Args>(__args)...) { } template friend shared_ptr<_Tp1> allocate_shared(_Alloc __a, _Args&&... 
__args); #endif }; template inline shared_ptr<_Tp> static_pointer_cast(const shared_ptr<_Tp1>& __r) { #ifdef _GLIBCXX_INCLUDE_AS_CXX0X return shared_ptr<_Tp>(__r, static_cast<_Tp*>(__r.get())); #else return shared_ptr<_Tp>(__r, __static_cast_tag()); #endif } template inline shared_ptr<_Tp> const_pointer_cast(const shared_ptr<_Tp1>& __r) { #ifdef _GLIBCXX_INCLUDE_AS_CXX0X return shared_ptr<_Tp>(__r, const_cast<_Tp*>(__r.get())); #else return shared_ptr<_Tp>(__r, __const_cast_tag()); #endif } template inline shared_ptr<_Tp> dynamic_pointer_cast(const shared_ptr<_Tp1>& __r) { #ifdef _GLIBCXX_INCLUDE_AS_CXX0X if (_Tp* __p = dynamic_cast<_Tp*>(__r.get())) return shared_ptr<_Tp>(__r, __p); return shared_ptr<_Tp>(); #else return shared_ptr<_Tp>(__r, __dynamic_cast_tag()); #endif } /// weak_ptr // The actual weak_ptr, with forwarding constructors and // assignment operators. template class weak_ptr : public __weak_ptr<_Tp> { public: weak_ptr() : __weak_ptr<_Tp>() { } template weak_ptr(const weak_ptr<_Tp1>& __r) : __weak_ptr<_Tp>(__r) { } template weak_ptr(const shared_ptr<_Tp1>& __r) : __weak_ptr<_Tp>(__r) { } template weak_ptr& operator=(const weak_ptr<_Tp1>& __r) // never throws { this->__weak_ptr<_Tp>::operator=(__r); return *this; } template weak_ptr& operator=(const shared_ptr<_Tp1>& __r) // never throws { this->__weak_ptr<_Tp>::operator=(__r); return *this; } shared_ptr<_Tp> lock() const // never throws { #ifdef __GTHREADS if (this->expired()) return shared_ptr<_Tp>(); try { return shared_ptr<_Tp>(*this); } catch(const bad_weak_ptr&) { return shared_ptr<_Tp>(); } #else return this->expired() ? shared_ptr<_Tp>() : shared_ptr<_Tp>(*this); #endif } }; /// enable_shared_from_this template class enable_shared_from_this { protected: enable_shared_from_this() { } enable_shared_from_this(const enable_shared_from_this&) { } enable_shared_from_this& operator=(const enable_shared_from_this&) { return *this; } ~enable_shared_from_this() { } public: shared_ptr<_Tp> shared_from_this() { return shared_ptr<_Tp>(this->_M_weak_this); } shared_ptr shared_from_this() const { return shared_ptr(this->_M_weak_this); } private: template void _M_weak_assign(_Tp1* __p, const __shared_count<>& __n) const { _M_weak_this._M_assign(__p, __n); } template friend void __enable_shared_from_this_helper(const __shared_count<>& __pn, const enable_shared_from_this* __pe, const _Tp1* __px) { if (__pe != 0) __pe->_M_weak_assign(const_cast<_Tp1*>(__px), __pn); } mutable weak_ptr<_Tp> _M_weak_this; }; #ifdef _GLIBCXX_INCLUDE_AS_CXX0X template inline __shared_ptr<_Tp, _Lp> __allocate_shared(_Alloc __a, _Args&&... __args) { return __shared_ptr<_Tp, _Lp>(_Sp_make_shared_tag(), std::forward<_Alloc>(__a), std::forward<_Args>(__args)...); } template inline __shared_ptr<_Tp, _Lp> __make_shared(_Args&&... __args) { typedef typename std::remove_const<_Tp>::type _Tp_nc; return __allocate_shared<_Tp, _Lp>(s