voidRemoveObserver(ObserverType *observer) { typename std::vector<ObserverType *>::iterator pos = std::find(observers_.begin(), observers_.end(), observer); if (pos == observers_.end()) return; /* this method may be called within a traversal of the list */ if (lazy_erase_ > 0) // 关键 *pos = NULL; else observers_.erase(pos); }
// The instance is destroyed by OnExit() (registered with the
// AtExitManager), so the destructor has nothing to release here.
~LazyInstance() {
  // DCHECK(instance_ == 0);
}
// Convenience accessor: dereferences the lazily-created instance.
Type& Get() {
  return *Pointer();
}
// Returns a pointer to the lazily-created instance, creating it on first
// use. Double-checked creation: the thread that wins the CAS from kNone to
// kCreating constructs the object and publishes it with a release store;
// a thread that loses the race spins (yielding) until an acquire load
// observes kCreated.
Type* Pointer() {
  using namespace base::subtle;
  if (Acquire_Load(&state_) != kCreated) {
    Atomic32 state = NoBarrier_CompareAndSwap(&state_, kNone, kCreating);
    if (state == kNone) {
      // We won the race: create the instance and register its deleter.
      instance_ = reinterpret_cast<AtomicWord>(new Type());
      AtExitManager::RegisterCallback(OnExit, this);
      Release_Store(&state_, kCreated);
    } else if (state != kCreated) {
      // Another thread is creating the instance; wait until it is published.
      while (Acquire_Load(&state_) != kCreated)
        Thread::YieldThread();
    }
  }
  // NOTE(review): the quoted snippet was truncated here; the conventional
  // implementation returns the published pointer as below — confirm against
  // the original source.
  return reinterpret_cast<Type*>(instance_);
}
// Atomically execute: // result = *ptr; // if (*ptr == old_value) // *ptr = new_value; // return result; // // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". // Always return the old value of "*ptr" // // This routine implies no memory barriers. Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value);
// Stores |value| into |*ptr| with release semantics. On x86 an ordinary
// store already has release ordering at the hardware level, so a plain
// assignment suffices here.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  // Works w/o barrier for current Intel chips as of June 2005.
  // See comments in Atomic64 version of Release_Store() below.
}
上面是 x86 架构下 MSVC 版本的内存屏障实现。
// Loads |*ptr| with acquire semantics. On x86 an ordinary load already has
// acquire ordering at the hardware level; the compiler barrier only keeps
// the compiler from moving later memory accesses above the load.
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  // An x86 load acts as an acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}
// Stores |value| into |*ptr| with release semantics: the compiler barrier
// before the store keeps the compiler from sinking earlier writes below it,
// and the x86 store itself provides the hardware release ordering.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;
  // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}