head	1.2;
access;
symbols
	RELENG_4_11_0_RELEASE:1.1.1.2
	RELENG_4_11:1.1.1.2.0.20
	RELENG_4_11_BP:1.1.1.2
	RELENG_4_10_0_RELEASE:1.1.1.2
	RELENG_4_10:1.1.1.2.0.18
	RELENG_4_10_BP:1.1.1.2
	RELENG_4_9_0_RELEASE:1.1.1.2
	RELENG_4_9:1.1.1.2.0.16
	RELENG_4_9_BP:1.1.1.2
	RELENG_4_8_0_RELEASE:1.1.1.2
	RELENG_4_8:1.1.1.2.0.14
	RELENG_4_8_BP:1.1.1.2
	RELENG_4_7_0_RELEASE:1.1.1.2
	RELENG_4_7:1.1.1.2.0.12
	RELENG_4_7_BP:1.1.1.2
	RELENG_4_6_2_RELEASE:1.1.1.2
	RELENG_4_6_1_RELEASE:1.1.1.2
	RELENG_4_6_0_RELEASE:1.1.1.2
	RELENG_4_6:1.1.1.2.0.10
	RELENG_4_6_BP:1.1.1.2
	BEFORE_3_1_0_snap:1.1.1.2
	RELENG_4_5_0_RELEASE:1.1.1.2
	RELENG_4_5:1.1.1.2.0.8
	RELENG_4_5_BP:1.1.1.2
	gcc_2_95_3:1.1.1.2
	REPOCOPY:1.1.1.2
	RELENG_4_4_0_RELEASE:1.1.1.2
	RELENG_4_4:1.1.1.2.0.6
	RELENG_4_4_BP:1.1.1.2
	RELENG_4_3_0_RELEASE:1.1.1.2
	RELENG_4_3:1.1.1.2.0.4
	RELENG_4_3_BP:1.1.1.2
	BEFORE_GCC_2_95_3:1.1.1.2
	RELENG_4_2_0_RELEASE:1.1.1.2
	RELENG_4_1_1_RELEASE:1.1.1.2
	RELENG_4_1_0_RELEASE:1.1.1.2
	RELENG_4_0_0_RELEASE:1.1.1.2
	RELENG_4:1.1.1.2.0.2
	RELENG_4_BP:1.1.1.2
	EGCS_11x:1.1.1.1.0.2
	gcc_2_95_2:1.1.1.2
	BEFORE_GCC_2_95_1:1.1.1.1
	gcc_2_95_1:1.1.1.2
	egcs_1_1_2:1.1.1.1
	FSF:1.1.1;
locks; strict;
comment	@# @;


1.2
date	2002.11.27.18.52.02;	author obrien;	state dead;
branches;
next	1.1;

1.1
date	99.10.04.08.12.35;	author obrien;	state Exp;
branches
	1.1.1.1;
next	;

1.1.1.1
date	99.10.04.08.12.35;	author obrien;	state Exp;
branches;
next	1.1.1.2;

1.1.1.2
date	99.10.16.03.52.38;	author obrien;	state Exp;
branches;
next	1.1.1.3;

1.1.1.3
date	2004.08.12.16.41.40;	author kan;	state dead;
branches;
next	;


desc
@@


1.2
log
@Remove files no longer part of the gcc_3_2_anoncvs_20021009 libstdc++.

Approved by:	re(jhb)
@
text
@/*
 * Copyright (c) 1996
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

#ifndef __SGI_STL_PTHREAD_ALLOC
#define __SGI_STL_PTHREAD_ALLOC

// Pthread-specific node allocator.
// This is similar to the default allocator, except that free-list
// information is kept separately for each thread, avoiding locking.
// This should be reasonably fast even in the presence of threads.
// The down side is that storage may not be well-utilized.
// It is not an error to allocate memory in thread A and deallocate
 * it in thread B.  But this effectively transfers ownership of the memory,
// so that it can only be reallocated by thread B.  Thus this can effectively
// result in a storage leak if it's done on a regular basis.
// It can also result in frequent sharing of
// cache lines among processors, with potentially serious performance
// consequences.
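//
// Usage sketch (assuming an SGI-STL build with pthreads linked; small
// requests come from the calling thread's free lists, large ones fall
// through to malloc/free):
//
//   void *p = pthread_alloc::allocate(64);    // <= MAX_BYTES: per-thread list
//   pthread_alloc::deallocate(p, 64);         // caller supplies the size
//   void *q = pthread_alloc::allocate(512);   // >  MAX_BYTES: plain malloc
//   pthread_alloc::deallocate(q, 512);        // >  MAX_BYTES: plain free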

#include <stl_config.h>
#include <stl_alloc.h>
#ifndef __RESTRICT
#  define __RESTRICT
#endif

__STL_BEGIN_NAMESPACE

// Note that this class has nonstatic members.  We instantiate it once
// per thread.
template <bool dummy>
class __pthread_alloc_template {

private:
  enum {ALIGN = 8};
  enum {MAX_BYTES = 128};  // power of 2
  enum {NFREELISTS = MAX_BYTES/ALIGN};

  union obj {
        union obj * free_list_link;
        char client_data[ALIGN];    /* The client sees this.        */
  };

  // Per instance state
  obj* volatile free_list[NFREELISTS]; 
  __pthread_alloc_template<dummy>* next; 	// Free list link

  static size_t ROUND_UP(size_t bytes) {
	return (((bytes) + ALIGN-1) & ~(ALIGN - 1));
  }
  static size_t FREELIST_INDEX(size_t bytes) {
	return (((bytes) + ALIGN-1)/ALIGN - 1);
  }
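  // Worked example: a 13-byte request has ROUND_UP(13) == 16 and
  // FREELIST_INDEX(13) == 1, so it is served from free_list[1], the list
  // of 16-byte objects (free_list[0] holds 8-byte objects, and
  // free_list[NFREELISTS - 1] holds MAX_BYTES-byte objects).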

  // Returns an object of size n, and optionally adds objects to the size-n free list.
  void *refill(size_t n);
  // Allocates a chunk for nobjs of size "size".  nobjs may be reduced
  // if it is inconvenient to allocate the requested number.
  static char *chunk_alloc(size_t size, int &nobjs);

  // Chunk allocation state. And other shared state.
  // Protected by chunk_allocator_lock.
  static pthread_mutex_t chunk_allocator_lock;
  static char *start_free;
  static char *end_free;
  static size_t heap_size;
  static __pthread_alloc_template<dummy>* free_allocators;
  static pthread_key_t key;
  static bool key_initialized;
	// key: pthread key under which this thread's allocator instance is stored.
	// free_allocators: allocator instances currently unclaimed by any thread.
  static void destructor(void *instance);
	// Function to be called on thread exit to reclaim allocator
	// instance.
  static __pthread_alloc_template<dummy> *new_allocator();
	// Return a recycled or new allocator instance.
  static __pthread_alloc_template<dummy> *get_allocator_instance();
	// Ensure that the current thread has an associated
	// allocator instance.
  class lock {
      public:
	lock () { pthread_mutex_lock(&chunk_allocator_lock); }
	~lock () { pthread_mutex_unlock(&chunk_allocator_lock); }
  };
  friend class lock;


public:

  __pthread_alloc_template() : next(0)
  {
    memset((void *)free_list, 0, NFREELISTS * sizeof(obj *));
  }

  /* n must be > 0	*/
  static void * allocate(size_t n)
  {
    obj * volatile * my_free_list;
    obj * __RESTRICT result;
    __pthread_alloc_template<dummy>* a;

    if (n > MAX_BYTES) {
	return(malloc(n));
    }
    if (!key_initialized ||
        !(a = (__pthread_alloc_template<dummy>*)
		pthread_getspecific(key))) {
	a = get_allocator_instance();
    }
    my_free_list = a -> free_list + FREELIST_INDEX(n);
    result = *my_free_list;
    if (result == 0) {
    	void *r = a -> refill(ROUND_UP(n));
	return r;
    }
    *my_free_list = result -> free_list_link;
    return (result);
  }

  /* p may not be 0 */
  static void deallocate(void *p, size_t n)
  {
    obj *q = (obj *)p;
    obj * volatile * my_free_list;
    __pthread_alloc_template<dummy>* a;

    if (n > MAX_BYTES) {
	free(p);
	return;
    }
    if (!key_initialized ||
        !(a = (__pthread_alloc_template<dummy>*)
		pthread_getspecific(key))) {
	a = get_allocator_instance();
    }
    my_free_list = a->free_list + FREELIST_INDEX(n);
    q -> free_list_link = *my_free_list;
    *my_free_list = q;
  }

  static void * reallocate(void *p, size_t old_sz, size_t new_sz);

};

typedef __pthread_alloc_template<false> pthread_alloc;
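
// Typical SGI-STL wiring (a sketch; simple_alloc is the size-to-type
// adaptor defined in <stl_alloc.h>):
//
//   typedef simple_alloc<int, pthread_alloc> int_alloc;
//   int *p = int_alloc::allocate(10);   // space for 10 ints
//   int_alloc::deallocate(p, 10);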


template <bool dummy>
void __pthread_alloc_template<dummy>::destructor(void * instance)
{
    __pthread_alloc_template<dummy>* a =
	(__pthread_alloc_template<dummy>*)instance;
    a -> next = free_allocators;
    free_allocators = a;
}

template <bool dummy>
__pthread_alloc_template<dummy>*
__pthread_alloc_template<dummy>::new_allocator()
{
    if (0 != free_allocators) {
	__pthread_alloc_template<dummy>* result = free_allocators;
	free_allocators = free_allocators -> next;
	return result;
    } else {
	return new __pthread_alloc_template<dummy>;
    }
}

template <bool dummy>
__pthread_alloc_template<dummy>*
__pthread_alloc_template<dummy>::get_allocator_instance()
{
    __pthread_alloc_template<dummy>* result;
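    // Note: this is the classic double-checked locking idiom.  It assumes
    // that a thread reading key_initialized == true without the lock also
    // sees the completed pthread_key_create(), which weakly ordered SMP
    // systems do not guarantee without a memory barrier.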
    if (!key_initialized) {
    	/*REFERENCED*/
	lock lock_instance;
	if (!key_initialized) {
	    if (pthread_key_create(&key, destructor)) {
		abort();  // failed
	    }
	    key_initialized = true;
	}
    }
    result = new_allocator();
    if (pthread_setspecific(key, result)) abort();
    return result;
}

/* We allocate memory in large chunks in order to avoid fragmenting	*/
/* the malloc heap too much.						*/
/* We assume that size is properly aligned.				*/
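// Worked example: the first refill of the 16-byte free list calls
// chunk_alloc(16, 128), so total_bytes == 2048 and, with heap_size still
// zero, bytes_to_get == 2 * 2048 + ROUND_UP(0) == 4096.  Later shortfalls
// add ROUND_UP(heap_size >> 4), so chunk sizes grow with the heap.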
template <bool dummy>
char *__pthread_alloc_template<dummy>
::chunk_alloc(size_t size, int &nobjs)
{
  {
    char * result;
    size_t total_bytes;
    size_t bytes_left;
    /*REFERENCED*/
    lock lock_instance;		// Acquire lock for this routine

    total_bytes = size * nobjs;
    bytes_left = end_free - start_free;
    if (bytes_left >= total_bytes) {
	result = start_free;
	start_free += total_bytes;
	return(result);
    } else if (bytes_left >= size) {
	nobjs = bytes_left/size;
	total_bytes = size * nobjs;
	result = start_free;
	start_free += total_bytes;
	return(result);
    } else {
	size_t bytes_to_get = 2 * total_bytes + ROUND_UP(heap_size >> 4);
	// Try to make use of the left-over piece.
	if (bytes_left > 0) {
	    __pthread_alloc_template<dummy>* a = 
		(__pthread_alloc_template<dummy>*)pthread_getspecific(key);
	    obj * volatile * my_free_list =
			a->free_list + FREELIST_INDEX(bytes_left);

            ((obj *)start_free) -> free_list_link = *my_free_list;
            *my_free_list = (obj *)start_free;
	}
#	ifdef _SGI_SOURCE
	  // Try to get memory that's aligned on something like a
	  // cache line boundary, so as to avoid parceling out
	  // parts of the same line to different threads and thus
	  // possibly different processors.
	  {
	    const int cache_line_size = 128;  // probable upper bound
	    bytes_to_get &= ~(cache_line_size-1);
	    start_free = (char *)memalign(cache_line_size, bytes_to_get); 
	    if (0 == start_free) {
	      start_free = (char *)malloc_alloc::allocate(bytes_to_get);
	    }
	  }
#	else  /* !_SGI_SOURCE */
	  start_free = (char *)malloc_alloc::allocate(bytes_to_get);
#       endif
	heap_size += bytes_to_get;
	end_free = start_free + bytes_to_get;
    }
  }
  // lock is released here
  return(chunk_alloc(size, nobjs));
}


/* Returns an object of size n; may add objects to the size-n free list.*/
/* We assume that n is properly aligned.				*/
/* We hold the allocation lock.						*/
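// Layout after a refill: the chunk holds nobjs objects of size n; object 0
// is returned to the caller, and objects 1 .. nobjs-1 are threaded onto
// free_list[FREELIST_INDEX(n)] with the final link set to 0:
//
//   chunk: [ returned | obj1 -> obj2 -> ... -> obj(nobjs-1) -> 0 ]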
template <bool dummy>
void *__pthread_alloc_template<dummy>
::refill(size_t n)
{
    int nobjs = 128;
    char * chunk = chunk_alloc(n, nobjs);
    obj * volatile * my_free_list;
    obj * result;
    obj * current_obj, * next_obj;
    int i;

    if (1 == nobjs)  {
	return(chunk);
    }
    my_free_list = free_list + FREELIST_INDEX(n);

    /* Build free list in chunk */
      result = (obj *)chunk;
      *my_free_list = next_obj = (obj *)(chunk + n);
      for (i = 1; ; i++) {
	current_obj = next_obj;
	next_obj = (obj *)((char *)next_obj + n);
	if (nobjs - 1 == i) {
	    current_obj -> free_list_link = 0;
	    break;
	} else {
	    current_obj -> free_list_link = next_obj;
	}
      }
    return(result);
}

template <bool dummy>
void *__pthread_alloc_template<dummy>
::reallocate(void *p, size_t old_sz, size_t new_sz)
{
    void * result;
    size_t copy_sz;

    if (old_sz > MAX_BYTES && new_sz > MAX_BYTES) {
	return(realloc(p, new_sz));
    }
    if (ROUND_UP(old_sz) == ROUND_UP(new_sz)) return(p);
    result = allocate(new_sz);
    copy_sz = new_sz > old_sz? old_sz : new_sz;
    memcpy(result, p, copy_sz);
    deallocate(p, old_sz);
    return(result);
}

template <bool dummy>
__pthread_alloc_template<dummy> *
__pthread_alloc_template<dummy>::free_allocators = 0;

template <bool dummy>
pthread_key_t __pthread_alloc_template<dummy>::key;

template <bool dummy>
bool __pthread_alloc_template<dummy>::key_initialized = false;

template <bool dummy>
pthread_mutex_t __pthread_alloc_template<dummy>::chunk_allocator_lock
= PTHREAD_MUTEX_INITIALIZER;

template <bool dummy>
char *__pthread_alloc_template<dummy>
::start_free = 0;

template <bool dummy>
char *__pthread_alloc_template<dummy>
::end_free = 0;

template <bool dummy>
size_t __pthread_alloc_template<dummy>
::heap_size = 0;

__STL_END_NAMESPACE

#endif /* __SGI_STL_PTHREAD_ALLOC */

// Local Variables:
// mode:C++
// End:
@


1.1
log
@Initial revision
@
text
@@


1.1.1.1
log
@Virgin import of EGCS 1.1.2's libstdc++
@
text
@@


1.1.1.2
log
@Virgin import of GCC 2.95.1's libstdc++
@
text
@d23 1
a23 1
// it in thread B.  But this effectively transfers ownership of the memory,
d38 4
a41 1
#define __STL_DATA_ALIGNMENT 8
d43 9
a51 35
union _Pthread_alloc_obj {
    union _Pthread_alloc_obj * __free_list_link;
    char __client_data[__STL_DATA_ALIGNMENT];    /* The client sees this.    */
};

// Pthread allocators don't appear to the client to have meaningful
// instances.  We do in fact need to associate some state with each
// thread.  That state is represented by
// _Pthread_alloc_per_thread_state<_Max_size>.

template<size_t _Max_size>
struct _Pthread_alloc_per_thread_state {
  typedef _Pthread_alloc_obj __obj;
  enum { _S_NFREELISTS = _Max_size/__STL_DATA_ALIGNMENT };
  _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS]; 
  _Pthread_alloc_per_thread_state<_Max_size> * __next; 
	// Free list link for list of available per thread structures.
  	// When one of these becomes available for reuse due to thread
	// termination, any objects in its free list remain associated
	// with it.  The whole structure may then be used by a newly
	// created thread.
  _Pthread_alloc_per_thread_state() : __next(0)
  {
    memset((void *)__free_list, 0, _S_NFREELISTS * sizeof(__obj *));
  }
  // Returns an object of size __n, and possibly adds objects to the size-__n free list.
  void *_M_refill(size_t __n);
};

// Pthread-specific allocator.
// The argument specifies the largest object size allocated from per-thread
// free lists.  Larger objects are allocated using malloc_alloc.
// Max_size must be a power of 2.
template <size_t _Max_size = 128>
class _Pthread_alloc_template {
d53 3
a55 1
public: // but only for internal use:
d57 6
a62 1
  typedef _Pthread_alloc_obj __obj;
d64 2
d68 1
a68 1
  static char *_S_chunk_alloc(size_t __size, int &__nobjs);
a69 10
  enum {_S_ALIGN = __STL_DATA_ALIGNMENT};

  static size_t _S_round_up(size_t __bytes) {
        return (((__bytes) + _S_ALIGN-1) & ~(_S_ALIGN - 1));
  }
  static size_t _S_freelist_index(size_t __bytes) {
        return (((__bytes) + _S_ALIGN-1)/_S_ALIGN - 1);
  }

private:
d71 19
a89 20
  // Protected by _S_chunk_allocator_lock.
  static pthread_mutex_t _S_chunk_allocator_lock;
  static char *_S_start_free;
  static char *_S_end_free;
  static size_t _S_heap_size;
  static _Pthread_alloc_per_thread_state<_Max_size>* _S_free_per_thread_states;
  static pthread_key_t _S_key;
  static bool _S_key_initialized;
        // _S_key: pthread key under which per thread state is stored.
        // _S_free_per_thread_states: states currently unclaimed by any thread.
  static void _S_destructor(void *instance);
        // Function to be called on thread exit to reclaim per thread
        // state.
  static _Pthread_alloc_per_thread_state<_Max_size> *_S_new_per_thread_state();
        // Return a recycled or new per thread state.
  static _Pthread_alloc_per_thread_state<_Max_size> *_S_get_per_thread_state();
        // Ensure that the current thread has an associated
        // per thread state.
  friend class _M_lock;
  class _M_lock {
d91 2
a92 2
        _M_lock () { pthread_mutex_lock(&_S_chunk_allocator_lock); }
        ~_M_lock () { pthread_mutex_unlock(&_S_chunk_allocator_lock); }
d94 2
d99 1
a99 2
  /* n must be > 0      */
  static void * allocate(size_t __n)
d101 9
a109 3
    __obj * volatile * __my_free_list;
    __obj * __RESTRICT __result;
    _Pthread_alloc_per_thread_state<_Max_size>* __a;
d111 2
a112 2
    if (__n > _Max_size) {
        return(malloc_alloc::allocate(__n));
d114 4
a117 4
    if (!_S_key_initialized ||
        !(__a = (_Pthread_alloc_per_thread_state<_Max_size>*)
                                 pthread_getspecific(_S_key))) {
        __a = _S_get_per_thread_state();
d119 5
a123 5
    __my_free_list = __a -> __free_list + _S_freelist_index(__n);
    __result = *__my_free_list;
    if (__result == 0) {
        void *__r = __a -> _M_refill(_S_round_up(__n));
        return __r;
d125 2
a126 2
    *__my_free_list = __result -> __free_list_link;
    return (__result);
d130 1
a130 1
  static void deallocate(void *__p, size_t __n)
d132 7
a138 7
    __obj *__q = (__obj *)__p;
    __obj * volatile * __my_free_list;
    _Pthread_alloc_per_thread_state<_Max_size>* __a;

    if (__n > _Max_size) {
        malloc_alloc::deallocate(__p, __n);
        return;
d140 4
a143 4
    if (!_S_key_initialized ||
        !(__a = (_Pthread_alloc_per_thread_state<_Max_size> *)
                pthread_getspecific(_S_key))) {
        __a = _S_get_per_thread_state();
d145 3
a147 3
    __my_free_list = __a->__free_list + _S_freelist_index(__n);
    __q -> __free_list_link = *__my_free_list;
    *__my_free_list = __q;
d150 1
a150 1
  static void * reallocate(void *__p, size_t __old_sz, size_t __new_sz);
d154 1
a154 1
typedef _Pthread_alloc_template<> pthread_alloc;
d157 2
a158 2
template <size_t _Max_size>
void _Pthread_alloc_template<_Max_size>::_S_destructor(void * __instance)
d160 4
a163 5
    _M_lock __lock_instance;	// Need to acquire lock here.
    _Pthread_alloc_per_thread_state<_Max_size>* __s =
        (_Pthread_alloc_per_thread_state<_Max_size> *)__instance;
    __s -> __next = _S_free_per_thread_states;
    _S_free_per_thread_states = __s;
d166 8
a173 10
template <size_t _Max_size>
_Pthread_alloc_per_thread_state<_Max_size> *
_Pthread_alloc_template<_Max_size>::_S_new_per_thread_state()
{    
    /* lock already held here.	*/
    if (0 != _S_free_per_thread_states) {
        _Pthread_alloc_per_thread_state<_Max_size> *__result =
					_S_free_per_thread_states;
        _S_free_per_thread_states = _S_free_per_thread_states -> __next;
        return __result;
d175 1
a175 1
        return new _Pthread_alloc_per_thread_state<_Max_size>;
d179 14
a192 12
template <size_t _Max_size>
_Pthread_alloc_per_thread_state<_Max_size> *
_Pthread_alloc_template<_Max_size>::_S_get_per_thread_state()
{
    /*REFERENCED*/
    _M_lock __lock_instance;	// Need to acquire lock here.
    _Pthread_alloc_per_thread_state<_Max_size> * __result;
    if (!_S_key_initialized) {
        if (pthread_key_create(&_S_key, _S_destructor)) {
            abort();  // failed
        }
        _S_key_initialized = true;
d194 3
a196 3
    __result = _S_new_per_thread_state();
    if (pthread_setspecific(_S_key, __result)) abort();
    return __result;
d199 6
a204 6
/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
template <size_t _Max_size>
char *_Pthread_alloc_template<_Max_size>
::_S_chunk_alloc(size_t __size, int &__nobjs)
d207 3
a209 3
    char * __result;
    size_t __total_bytes;
    size_t __bytes_left;
d211 1
a211 1
    _M_lock __lock_instance;         // Acquire lock for this routine
d213 12
a224 12
    __total_bytes = __size * __nobjs;
    __bytes_left = _S_end_free - _S_start_free;
    if (__bytes_left >= __total_bytes) {
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return(__result);
    } else if (__bytes_left >= __size) {
        __nobjs = __bytes_left/__size;
        __total_bytes = __size * __nobjs;
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return(__result);
d226 26
a251 28
        size_t __bytes_to_get =
		2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
        // Try to make use of the left-over piece.
        if (__bytes_left > 0) {
            _Pthread_alloc_per_thread_state<_Max_size>* __a = 
                (_Pthread_alloc_per_thread_state<_Max_size>*)
			pthread_getspecific(_S_key);
            __obj * volatile * __my_free_list =
                        __a->__free_list + _S_freelist_index(__bytes_left);

            ((__obj *)_S_start_free) -> __free_list_link = *__my_free_list;
            *__my_free_list = (__obj *)_S_start_free;
        }
#       ifdef _SGI_SOURCE
          // Try to get memory that's aligned on something like a
          // cache line boundary, so as to avoid parceling out
          // parts of the same line to different threads and thus
          // possibly different processors.
          {
            const int __cache_line_size = 128;  // probable upper bound
            __bytes_to_get &= ~(__cache_line_size-1);
            _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get); 
            if (0 == _S_start_free) {
              _S_start_free = (char *)malloc_alloc::allocate(__bytes_to_get);
            }
          }
#       else  /* !_SGI_SOURCE */
          _S_start_free = (char *)malloc_alloc::allocate(__bytes_to_get);
d253 2
a254 2
        _S_heap_size += __bytes_to_get;
        _S_end_free = _S_start_free + __bytes_to_get;
d258 1
a258 1
  return(_S_chunk_alloc(__size, __nobjs));
d263 12
a274 13
/* We assume that n is properly aligned.                                */
/* We hold the allocation lock.                                         */
template <size_t _Max_size>
void *_Pthread_alloc_per_thread_state<_Max_size>
::_M_refill(size_t __n)
{
    int __nobjs = 128;
    char * __chunk =
	_Pthread_alloc_template<_Max_size>::_S_chunk_alloc(__n, __nobjs);
    __obj * volatile * __my_free_list;
    __obj * __result;
    __obj * __current_obj, * __next_obj;
    int __i;
d276 2
a277 2
    if (1 == __nobjs)  {
        return(__chunk);
d279 1
a279 2
    __my_free_list = __free_list
		 + _Pthread_alloc_template<_Max_size>::_S_freelist_index(__n);
d282 11
a292 11
      __result = (__obj *)__chunk;
      *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
      for (__i = 1; ; __i++) {
        __current_obj = __next_obj;
        __next_obj = (__obj *)((char *)__next_obj + __n);
        if (__nobjs - 1 == __i) {
            __current_obj -> __free_list_link = 0;
            break;
        } else {
            __current_obj -> __free_list_link = __next_obj;
        }
d294 1
a294 1
    return(__result);
d297 3
a299 3
template <size_t _Max_size>
void *_Pthread_alloc_template<_Max_size>
::reallocate(void *__p, size_t __old_sz, size_t __new_sz)
d301 2
a302 2
    void * __result;
    size_t __copy_sz;
d304 2
a305 3
    if (__old_sz > _Max_size
	&& __new_sz > _Max_size) {
        return(realloc(__p, __new_sz));
d307 6
a312 6
    if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return(__p);
    __result = allocate(__new_sz);
    __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
    memcpy(__result, __p, __copy_sz);
    deallocate(__p, __old_sz);
    return(__result);
d315 3
a317 3
template <size_t _Max_size>
_Pthread_alloc_per_thread_state<_Max_size> *
_Pthread_alloc_template<_Max_size>::_S_free_per_thread_states = 0;
d319 2
a320 2
template <size_t _Max_size>
pthread_key_t _Pthread_alloc_template<_Max_size>::_S_key;
d322 2
a323 2
template <size_t _Max_size>
bool _Pthread_alloc_template<_Max_size>::_S_key_initialized = false;
d325 2
a326 2
template <size_t _Max_size>
pthread_mutex_t _Pthread_alloc_template<_Max_size>::_S_chunk_allocator_lock
d329 11
a339 119
template <size_t _Max_size>
char *_Pthread_alloc_template<_Max_size>
::_S_start_free = 0;

template <size_t _Max_size>
char *_Pthread_alloc_template<_Max_size>
::_S_end_free = 0;

template <size_t _Max_size>
size_t _Pthread_alloc_template<_Max_size>
::_S_heap_size = 0;

#ifdef __STL_USE_STD_ALLOCATORS

template <class _Tp>
class pthread_allocator {
  typedef pthread_alloc _S_Alloc;          // The underlying allocator.
public:
  typedef size_t     size_type;
  typedef ptrdiff_t  difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

  template <class _Up> struct rebind {
    typedef pthread_allocator<_Up> other;
  };

  pthread_allocator() __STL_NOTHROW {}
  pthread_allocator(const pthread_allocator& a) __STL_NOTHROW {}
  template <class _Up> pthread_allocator(const pthread_allocator<_Up>&)
		__STL_NOTHROW {}
  ~pthread_allocator() __STL_NOTHROW {}

  pointer address(reference __x) const { return &__x; }
  const_pointer address(const_reference __x) const { return &__x; }

  // __n is permitted to be 0.  The C++ standard says nothing about what
  // the return value is when __n == 0.
  _Tp* allocate(size_type __n, const void* = 0) {
    return __n != 0 ? static_cast<_Tp*>(_S_Alloc::allocate(__n * sizeof(_Tp)))
                    : 0;
  }

  // p is not permitted to be a null pointer.
  void deallocate(pointer __p, size_type __n)
    { _S_Alloc::deallocate(__p, __n * sizeof(_Tp)); }

  size_type max_size() const __STL_NOTHROW 
    { return size_t(-1) / sizeof(_Tp); }

  void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
  void destroy(pointer _p) { _p->~_Tp(); }
};

template<>
class pthread_allocator<void> {
public:
  typedef size_t      size_type;
  typedef ptrdiff_t   difference_type;
  typedef void*       pointer;
  typedef const void* const_pointer;
  typedef void        value_type;

  template <class _Up> struct rebind {
    typedef pthread_allocator<_Up> other;
  };
};

template <size_t _Max_size>
inline bool operator==(const _Pthread_alloc_template<_Max_size>&,
                       const _Pthread_alloc_template<_Max_size>&)
{
  return true;
}

template <class _T1, class _T2>
inline bool operator==(const pthread_allocator<_T1>&,
                       const pthread_allocator<_T2>& a2) 
{
  return true;
}

template <class _T1, class _T2>
inline bool operator!=(const pthread_allocator<_T1>&,
                       const pthread_allocator<_T2>&)
{
  return false;
}

template <class _Tp, size_t _Max_size>
struct _Alloc_traits<_Tp, _Pthread_alloc_template<_Max_size> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, _Pthread_alloc_template<_Max_size> > _Alloc_type;
  typedef __allocator<_Tp, _Pthread_alloc_template<_Max_size> > 
          allocator_type;
};

template <class _Tp, class _Up, size_t _Max>
struct _Alloc_traits<_Tp, __allocator<_Up, _Pthread_alloc_template<_Max> > >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, _Pthread_alloc_template<_Max> > _Alloc_type;
  typedef __allocator<_Tp, _Pthread_alloc_template<_Max> > allocator_type;
};

template <class _Tp, class _Up>
struct _Alloc_traits<_Tp, pthread_allocator<_Up> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, _Pthread_alloc_template<> > _Alloc_type;
  typedef pthread_allocator<_Tp> allocator_type;
};


#endif /* __STL_USE_STD_ALLOCATORS */
@


1.1.1.3
log
@Remove files that are not part of GCC 3.4.x from the vendor branch.
@
text
@@

