#ifndef __SGI_STL_INTERNAL_THREADS_H
#define __SGI_STL_INTERNAL_THREADS_H

#include <cstddef>        // for size_t
#include <cstdlib>        // for abort
#include "bits/gthr.h"    // GCC's thread-model abstraction layer

namespace std
{
  // Class _Refcount_Base provides a counter type, _RC_t, a counter data
  // member, _M_ref_count, and the member functions _M_incr and _M_decr,
  // which perform atomic increment and decrement on the counter.  The
  // constructor initializes _M_ref_count to the given value.
  struct _Refcount_Base
  {
    // The type of the reference count.
    typedef size_t _RC_t;

    // The reference count itself.
    volatile _RC_t _M_ref_count;

    // Mutex guarding all updates of _M_ref_count.
    __gthread_mutex_t _M_ref_count_lock;

    _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    {
#ifdef __GTHREAD_MUTEX_INIT
      __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
      _M_ref_count_lock = __tmp;
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
      __GTHREAD_MUTEX_INIT_FUNCTION (&_M_ref_count_lock);
#else
#error __GTHREAD_MUTEX_INIT or __GTHREAD_MUTEX_INIT_FUNCTION should be defined by gthr.h abstraction layer, report problem to libstdc++@gcc.gnu.org.
#endif
    }

    // Atomically increment the reference count.
    void
    _M_incr()
    {
      __gthread_mutex_lock(&_M_ref_count_lock);
      ++_M_ref_count;
      __gthread_mutex_unlock(&_M_ref_count_lock);
    }

    // Atomically decrement the reference count and return the new value.
    _RC_t
    _M_decr()
    {
      __gthread_mutex_lock(&_M_ref_count_lock);
      volatile _RC_t __tmp = --_M_ref_count;
      __gthread_mutex_unlock(&_M_ref_count_lock);
      return __tmp;
    }
  };
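
  // A minimal usage sketch, not part of this header: a hypothetical
  // shared-buffer type could derive from _Refcount_Base so that all
  // owners share one atomically maintained count and the last owner to
  // call _M_decr frees the storage.  The names below (_My_shared_buf,
  // _My_release) are illustrative only.
  //
  //   struct _My_shared_buf : public _Refcount_Base
  //   {
  //     char* _M_data;
  //     _My_shared_buf(size_t __n)
  //     : _Refcount_Base(1), _M_data(new char[__n]) { }
  //   };
  //
  //   void _My_release(_My_shared_buf* __p)
  //   {
  //     if (__p->_M_decr() == 0)      // last reference just dropped
  //       { delete[] __p->_M_data; delete __p; }
  //   }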

  // _Atomic_swap is provided only when the gthr.h layer supplies a static
  // mutex initializer, because the swap lock below must be initializable
  // at compile time.
#if defined (__GTHREAD_MUTEX_INIT)
  // The class template exists only so that the static swap lock can be
  // defined in this header without violating the one-definition rule;
  // every instantiation shares the single _S_swap_lock object.
  template<int __dummy>
    struct _Swap_lock_struct
    { static __gthread_mutex_t _S_swap_lock; };

  template<int __dummy>
    __gthread_mutex_t
    _Swap_lock_struct<__dummy>::_S_swap_lock = __GTHREAD_MUTEX_INIT;

  // Atomically swap *__p and __q, returning the old value of *__p.  This
  // mutex-based fallback is portable but slow; platform-specific atomic
  // instructions would be preferable.
  inline unsigned long
  _Atomic_swap(unsigned long * __p, unsigned long __q)
  {
    __gthread_mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
    unsigned long __result = *__p;
    *__p = __q;
    __gthread_mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
    return __result;
  }
#endif
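
  // Illustrative only: a simple test-and-set flag built on _Atomic_swap.
  // The variable __busy and the functions _My_enter/_My_leave are
  // hypothetical names, not part of this header.
  //
  //   static unsigned long __busy = 0;
  //
  //   void _My_enter()
  //   {
  //     while (_Atomic_swap(&__busy, 1) == 1)
  //       { /* another thread holds the flag; spin or yield */ }
  //   }
  //
  //   void _My_leave()
  //   { _Atomic_swap(&__busy, 0); }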

  // Class _STL_mutex_lock is a simple mutex wrapper.  It deliberately has
  // no constructor: an _STL_mutex_lock object must be initialized either
  // statically, with __STL_MUTEX_INITIALIZER, or dynamically, by calling
  // _M_initialize().  This mirrors the way a raw pthreads mutex is
  // initialized and allows objects with static storage duration to be
  // usable before any constructors have run.

  // When the thread model has no static mutex initializer, these helper
  // objects and functions (defined in the library, not in this header)
  // perform the one-time dynamic initialization.
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
  extern __gthread_mutex_t _GLIBCPP_mutex;
  extern __gthread_mutex_t *_GLIBCPP_mutex_address;
  extern __gthread_once_t _GLIBCPP_once;
  extern void _GLIBCPP_mutex_init (void);
  extern void _GLIBCPP_mutex_address_init (void);
#endif

  struct _STL_mutex_lock
  {
    // With no static initializer available, record whether _M_lock has
    // been set up yet and guard that setup with a once-control.
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    volatile int _M_init_flag;
    __gthread_once_t _M_once;
#endif
    __gthread_mutex_t _M_lock;

    void
    _M_initialize()
    {
#ifdef __GTHREAD_MUTEX_INIT
      // The mutex was already initialized statically via
      // __STL_MUTEX_INITIALIZER; nothing to do here.
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
      if (_M_init_flag) return;
      if (__gthread_once (&_GLIBCPP_once, _GLIBCPP_mutex_init) != 0
          && __gthread_active_p ())
        abort ();
      __gthread_mutex_lock (&_GLIBCPP_mutex);
      if (!_M_init_flag)
        {
          // Even though the global lock is held, use __gthread_once so
          // that _M_lock is initialized exactly once, even on
          // multiprocessor systems.
          _GLIBCPP_mutex_address = &_M_lock;
          if (__gthread_once (&_M_once, _GLIBCPP_mutex_address_init) != 0
              && __gthread_active_p ())
            abort ();
          _M_init_flag = 1;
        }
      __gthread_mutex_unlock (&_GLIBCPP_mutex);
#endif
    }

    void
    _M_acquire_lock()
    {
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
      if (!_M_init_flag) _M_initialize();
#endif
      __gthread_mutex_lock(&_M_lock);
    }

    void
    _M_release_lock()
    {
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
      if (!_M_init_flag) _M_initialize();
#endif
      __gthread_mutex_unlock(&_M_lock);
    }
  };

  // __STL_MUTEX_INITIALIZER expands to an aggregate initializer matching
  // whichever set of data members _STL_mutex_lock has under the current
  // thread model.
#ifdef __GTHREAD_MUTEX_INIT
#define __STL_MUTEX_INITIALIZER = { __GTHREAD_MUTEX_INIT }
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
#ifdef __GTHREAD_MUTEX_INIT_DEFAULT
#define __STL_MUTEX_INITIALIZER \
  = { 0, __GTHREAD_ONCE_INIT, __GTHREAD_MUTEX_INIT_DEFAULT }
#else
#define __STL_MUTEX_INITIALIZER = { 0, __GTHREAD_ONCE_INIT }
#endif
#endif
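
  // A minimal sketch (hypothetical names, not part of this header) of the
  // intended usage: a mutex with static storage duration is declared with
  // __STL_MUTEX_INITIALIZER so it is valid before any constructor runs,
  // then acquired and released around a shared counter.
  //
  //   static _STL_mutex_lock _S_my_lock __STL_MUTEX_INITIALIZER;
  //   static unsigned long   _S_my_counter = 0;
  //
  //   void _My_bump()
  //   {
  //     _S_my_lock._M_acquire_lock();
  //     ++_S_my_counter;
  //     _S_my_lock._M_release_lock();
  //   }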

  // A scoped-locking helper built on _STL_mutex_lock: the constructor
  // acquires the lock and the destructor releases it, so the lock is held
  // for exactly the lifetime of the _STL_auto_lock object.  Copying and
  // assignment are declared private and left undefined to forbid them.
  struct _STL_auto_lock
  {
    _STL_mutex_lock& _M_lock;

    _STL_auto_lock(_STL_mutex_lock& __lock) : _M_lock(__lock)
    { _M_lock._M_acquire_lock(); }

    ~_STL_auto_lock() { _M_lock._M_release_lock(); }

  private:
    void operator=(const _STL_auto_lock&);
    _STL_auto_lock(const _STL_auto_lock&);
  };
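
  // Illustrative only (same hypothetical names as the sketch above): the
  // critical section rewritten with _STL_auto_lock, so the lock is
  // released on every exit path, including exceptions.
  //
  //   void _My_bump_scoped()
  //   {
  //     _STL_auto_lock __guard(_S_my_lock);
  //     ++_S_my_counter;
  //   }   // __guard's destructor releases _S_my_lock here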

} // namespace std

#endif /* __SGI_STL_INTERNAL_THREADS_H */