...

Source file src/sync/map.go

Documentation: sync

		 1  // Copyright 2016 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  package sync
		 6  
		 7  import (
		 8  	"sync/atomic"
		 9  	"unsafe"
		10  )
		11  
		12  // Map is like a Go map[interface{}]interface{} but is safe for concurrent use
		13  // by multiple goroutines without additional locking or coordination.
		14  // Loads, stores, and deletes run in amortized constant time.
		15  //
		16  // The Map type is specialized. Most code should use a plain Go map instead,
		17  // with separate locking or coordination, for better type safety and to make it
		18  // easier to maintain other invariants along with the map content.
		19  //
		20  // The Map type is optimized for two common use cases: (1) when the entry for a given
		21  // key is only ever written once but read many times, as in caches that only grow,
		22  // or (2) when multiple goroutines read, write, and overwrite entries for disjoint
		23  // sets of keys. In these two cases, use of a Map may significantly reduce lock
		24  // contention compared to a Go map paired with a separate Mutex or RWMutex.
		25  //
		26  // The zero Map is empty and ready for use. A Map must not be copied after first use.
		27  type Map struct {
		28  	mu Mutex
		29  
		30  	// read contains the portion of the map's contents that are safe for
		31  	// concurrent access (with or without mu held).
		32  	//
		33  	// The read field itself is always safe to load, but must only be stored with
		34  	// mu held.
		35  	//
		36  	// Entries stored in read may be updated concurrently without mu, but updating
		37  	// a previously-expunged entry requires that the entry be copied to the dirty
		38  	// map and unexpunged with mu held.
		39  	read atomic.Value // readOnly
		40  
		41  	// dirty contains the portion of the map's contents that require mu to be
		42  	// held. To ensure that the dirty map can be promoted to the read map quickly,
		43  	// it also includes all of the non-expunged entries in the read map.
		44  	//
		45  	// Expunged entries are not stored in the dirty map. An expunged entry in the
		46  	// clean map must be unexpunged and added to the dirty map before a new value
		47  	// can be stored to it.
		48  	//
		49  	// If the dirty map is nil, the next write to the map will initialize it by
		50  	// making a shallow copy of the clean map, omitting stale entries.
		51  	dirty map[interface{}]*entry
		52  
		53  	// misses counts the number of loads since the read map was last updated that
		54  	// needed to lock mu to determine whether the key was present.
		55  	//
		56  	// Once enough misses have occurred to cover the cost of copying the dirty
		57  	// map, the dirty map will be promoted to the read map (in the unamended
		58  	// state) and the next store to the map will make a new dirty copy.
		59  	misses int
		60  }
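
// An illustrative usage sketch, not from the original file, for the first
// case described above (a cache that only grows). The string key/value types
// and the compute function are assumptions made for the example; client code
// refers to the type as sync.Map.
//
//	var cache sync.Map
//
//	// get returns the cached value for key, computing and storing it on the
//	// first request. Once the entry has been promoted into the read map,
//	// repeated hits for the same key avoid locking entirely.
//	func get(key string, compute func(string) string) string {
//		if v, ok := cache.Load(key); ok {
//			return v.(string)
//		}
//		v, _ := cache.LoadOrStore(key, compute(key))
//		return v.(string)
//	}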
		61  
		62  // readOnly is an immutable struct stored atomically in the Map.read field.
		63  type readOnly struct {
		64  	m       map[interface{}]*entry
		65  	amended bool // true if the dirty map contains some key not in m.
		66  }
		67  
		68  // expunged is an arbitrary pointer that marks entries which have been deleted
		69  // from the dirty map.
		70  var expunged = unsafe.Pointer(new(interface{}))
		71  
		72  // An entry is a slot in the map corresponding to a particular key.
		73  type entry struct {
		74  	// p points to the interface{} value stored for the entry.
		75  	//
		76  	// If p == nil, the entry has been deleted, and either m.dirty == nil or
		77  	// m.dirty[key] is e.
		78  	//
		79  	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
		80  	// is missing from m.dirty.
		81  	//
		82  	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
		83  	// != nil, in m.dirty[key].
		84  	//
		85  	// An entry can be deleted by atomic replacement with nil: when m.dirty is
		86  	// next created, it will atomically replace nil with expunged and leave
		87  	// m.dirty[key] unset.
		88  	//
		89  	// An entry's associated value can be updated by atomic replacement, provided
		90  	// p != expunged. If p == expunged, an entry's associated value can be updated
		91  	// only after first setting m.dirty[key] = e so that lookups using the dirty
		92  	// map find the entry.
		93  	p unsafe.Pointer // *interface{}
		94  }
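
// In summary (restating the invariants above), an entry's p field encodes
// three states:
//
//	p == nil       deleted; m.dirty is nil or still maps the key to this entry
//	p == expunged  deleted; m.dirty is non-nil and does not contain the key
//	otherwise      live; present in m.read.m and, if m.dirty != nil, in m.dirty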
		95  
		96  func newEntry(i interface{}) *entry {
		97  	return &entry{p: unsafe.Pointer(&i)}
		98  }
		99  
	 100  // Load returns the value stored in the map for a key, or nil if no
	 101  // value is present.
	 102  // The ok result indicates whether value was found in the map.
	 103  func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	 104  	read, _ := m.read.Load().(readOnly)
	 105  	e, ok := read.m[key]
	 106  	if !ok && read.amended {
	 107  		m.mu.Lock()
	 108  		// Avoid reporting a spurious miss if m.dirty got promoted while we were
	 109  		// blocked on m.mu. (If further loads of the same key will not miss, it's
	 110  		// not worth copying the dirty map for this key.)
	 111  		read, _ = m.read.Load().(readOnly)
	 112  		e, ok = read.m[key]
	 113  		if !ok && read.amended {
	 114  			e, ok = m.dirty[key]
	 115  			// Regardless of whether the entry was present, record a miss: this key
	 116  			// will take the slow path until the dirty map is promoted to the read
	 117  			// map.
	 118  			m.missLocked()
	 119  		}
	 120  		m.mu.Unlock()
	 121  	}
	 122  	if !ok {
	 123  		return nil, false
	 124  	}
	 125  	return e.load()
	 126  }
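
// An illustrative call sketch, not from the original file; the key and the
// int value type are assumptions:
//
//	if v, ok := m.Load("requests"); ok {
//		n := v.(int) // values come back as interface{}, so assert the concrete type
//		_ = n
//	}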
	 127  
	 128  func (e *entry) load() (value interface{}, ok bool) {
	 129  	p := atomic.LoadPointer(&e.p)
	 130  	if p == nil || p == expunged {
	 131  		return nil, false
	 132  	}
	 133  	return *(*interface{})(p), true
	 134  }
	 135  
	 136  // Store sets the value for a key.
	 137  func (m *Map) Store(key, value interface{}) {
	 138  	read, _ := m.read.Load().(readOnly)
	 139  	if e, ok := read.m[key]; ok && e.tryStore(&value) {
	 140  		return
	 141  	}
	 142  
	 143  	m.mu.Lock()
	 144  	read, _ = m.read.Load().(readOnly)
	 145  	if e, ok := read.m[key]; ok {
	 146  		if e.unexpungeLocked() {
	 147  			// The entry was previously expunged, which implies that there is a
	 148  			// non-nil dirty map and this entry is not in it.
	 149  			m.dirty[key] = e
	 150  		}
	 151  		e.storeLocked(&value)
	 152  	} else if e, ok := m.dirty[key]; ok {
	 153  		e.storeLocked(&value)
	 154  	} else {
	 155  		if !read.amended {
	 156  			// We're adding the first new key to the dirty map.
	 157  			// Make sure it is allocated and mark the read-only map as incomplete.
	 158  			m.dirtyLocked()
	 159  			m.read.Store(readOnly{m: read.m, amended: true})
	 160  		}
	 161  		m.dirty[key] = newEntry(value)
	 162  	}
	 163  	m.mu.Unlock()
	 164  }
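
// An illustrative call sketch, not from the original file; the key and values
// are assumptions:
//
//	m.Store("requests", 1) // a brand-new key takes the locked path and lands in the dirty map
//	m.Store("requests", 2) // overwrites the existing entry; once the key is in the read map this needs no lock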
	 165  
	 166  // tryStore stores a value if the entry has not been expunged.
	 167  //
	 168  // If the entry is expunged, tryStore returns false and leaves the entry
	 169  // unchanged.
	 170  func (e *entry) tryStore(i *interface{}) bool {
	 171  	for {
	 172  		p := atomic.LoadPointer(&e.p)
	 173  		if p == expunged {
	 174  			return false
	 175  		}
	 176  		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
	 177  			return true
	 178  		}
	 179  	}
	 180  }
	 181  
	 182  // unexpungeLocked ensures that the entry is not marked as expunged.
	 183  //
	 184  // If the entry was previously expunged, it must be added to the dirty map
	 185  // before m.mu is unlocked.
	 186  func (e *entry) unexpungeLocked() (wasExpunged bool) {
	 187  	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
	 188  }
	 189  
	 190  // storeLocked unconditionally stores a value to the entry.
	 191  //
	 192  // The entry must be known not to be expunged.
	 193  func (e *entry) storeLocked(i *interface{}) {
	 194  	atomic.StorePointer(&e.p, unsafe.Pointer(i))
	 195  }
	 196  
	 197  // LoadOrStore returns the existing value for the key if present.
	 198  // Otherwise, it stores and returns the given value.
	 199  // The loaded result is true if the value was loaded, false if stored.
	 200  func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	 201  	// Avoid locking if it's a clean hit.
	 202  	read, _ := m.read.Load().(readOnly)
	 203  	if e, ok := read.m[key]; ok {
	 204  		actual, loaded, ok := e.tryLoadOrStore(value)
	 205  		if ok {
	 206  			return actual, loaded
	 207  		}
	 208  	}
	 209  
	 210  	m.mu.Lock()
	 211  	read, _ = m.read.Load().(readOnly)
	 212  	if e, ok := read.m[key]; ok {
	 213  		if e.unexpungeLocked() {
	 214  			m.dirty[key] = e
	 215  		}
	 216  		actual, loaded, _ = e.tryLoadOrStore(value)
	 217  	} else if e, ok := m.dirty[key]; ok {
	 218  		actual, loaded, _ = e.tryLoadOrStore(value)
	 219  		m.missLocked()
	 220  	} else {
	 221  		if !read.amended {
	 222  			// We're adding the first new key to the dirty map.
	 223  			// Make sure it is allocated and mark the read-only map as incomplete.
	 224  			m.dirtyLocked()
	 225  			m.read.Store(readOnly{m: read.m, amended: true})
	 226  		}
	 227  		m.dirty[key] = newEntry(value)
	 228  		actual, loaded = value, false
	 229  	}
	 230  	m.mu.Unlock()
	 231  
	 232  	return actual, loaded
	 233  }
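
// An illustrative call sketch, not from the original file; defaultConfig is a
// hypothetical value:
//
//	actual, loaded := m.LoadOrStore("config", defaultConfig)
//	if loaded {
//		// Some goroutine already stored a value for "config"; actual holds it
//		// and defaultConfig was not stored.
//	} else {
//		// actual == defaultConfig, which is now the stored value.
//	}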
	 234  
	 235  // tryLoadOrStore atomically loads or stores a value if the entry is not
	 236  // expunged.
	 237  //
	 238  // If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
	 239  // returns with ok==false.
	 240  func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
	 241  	p := atomic.LoadPointer(&e.p)
	 242  	if p == expunged {
	 243  		return nil, false, false
	 244  	}
	 245  	if p != nil {
	 246  		return *(*interface{})(p), true, true
	 247  	}
	 248  
	 249  	// Copy the interface after the first load to make this method more amenable
	 250  	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	 251  	// shouldn't bother heap-allocating.
	 252  	ic := i
	 253  	for {
	 254  		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
	 255  			return i, false, true
	 256  		}
	 257  		p = atomic.LoadPointer(&e.p)
	 258  		if p == expunged {
	 259  			return nil, false, false
	 260  		}
	 261  		if p != nil {
	 262  			return *(*interface{})(p), true, true
	 263  		}
	 264  	}
	 265  }
	 266  
	 267  // LoadAndDelete deletes the value for a key, returning the previous value if any.
	 268  // The loaded result reports whether the key was present.
	 269  func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
	 270  	read, _ := m.read.Load().(readOnly)
	 271  	e, ok := read.m[key]
	 272  	if !ok && read.amended {
	 273  		m.mu.Lock()
	 274  		read, _ = m.read.Load().(readOnly)
	 275  		e, ok = read.m[key]
	 276  		if !ok && read.amended {
	 277  			e, ok = m.dirty[key]
	 278  			delete(m.dirty, key)
	 279  			// Regardless of whether the entry was present, record a miss: this key
	 280  			// will take the slow path until the dirty map is promoted to the read
	 281  			// map.
	 282  			m.missLocked()
	 283  		}
	 284  		m.mu.Unlock()
	 285  	}
	 286  	if ok {
	 287  		return e.delete()
	 288  	}
	 289  	return nil, false
	 290  }
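
// An illustrative call sketch, not from the original file; release is a
// hypothetical cleanup helper:
//
//	if v, loaded := m.LoadAndDelete("session"); loaded {
//		release(v) // only one caller observes loaded == true for a given stored value
//	}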
	 291  
	 292  // Delete deletes the value for a key.
	 293  func (m *Map) Delete(key interface{}) {
	 294  	m.LoadAndDelete(key)
	 295  }
	 296  
	 297  func (e *entry) delete() (value interface{}, ok bool) {
	 298  	for {
	 299  		p := atomic.LoadPointer(&e.p)
	 300  		if p == nil || p == expunged {
	 301  			return nil, false
	 302  		}
	 303  		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
	 304  			return *(*interface{})(p), true
	 305  		}
	 306  	}
	 307  }
	 308  
	 309  // Range calls f sequentially for each key and value present in the map.
	 310  // If f returns false, range stops the iteration.
	 311  //
	 312  // Range does not necessarily correspond to any consistent snapshot of the Map's
	 313  // contents: no key will be visited more than once, but if the value for any key
	 314  // is stored or deleted concurrently, Range may reflect any mapping for that key
	 315  // from any point during the Range call.
	 316  //
	 317  // Range may be O(N) with the number of elements in the map even if f returns
	 318  // false after a constant number of calls.
	 319  func (m *Map) Range(f func(key, value interface{}) bool) {
	 320  	// We need to be able to iterate over all of the keys that were already
	 321  	// present at the start of the call to Range.
	 322  	// If read.amended is false, then read.m satisfies that property without
	 323  	// requiring us to hold m.mu for a long time.
	 324  	read, _ := m.read.Load().(readOnly)
	 325  	if read.amended {
	 326  		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
	 327  		// (assuming the caller does not break out early), so a call to Range
	 328  		// amortizes an entire copy of the map: we can promote the dirty copy
	 329  		// immediately!
	 330  		m.mu.Lock()
	 331  		read, _ = m.read.Load().(readOnly)
	 332  		if read.amended {
	 333  			read = readOnly{m: m.dirty}
	 334  			m.read.Store(read)
	 335  			m.dirty = nil
	 336  			m.misses = 0
	 337  		}
	 338  		m.mu.Unlock()
	 339  	}
	 340  
	 341  	for k, e := range read.m {
	 342  		v, ok := e.load()
	 343  		if !ok {
	 344  			continue
	 345  		}
	 346  		if !f(k, v) {
	 347  			break
	 348  		}
	 349  	}
	 350  }
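
// An illustrative call sketch, not from the original file: visiting at most
// ten entries. As the doc comment above notes, stopping early does not
// necessarily make the call cheaper than O(N).
//
//	count := 0
//	m.Range(func(key, value interface{}) bool {
//		count++
//		return count < 10 // returning false stops the iteration
//	})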
	 351  
	 352  func (m *Map) missLocked() {
	 353  	m.misses++
	 354  	if m.misses < len(m.dirty) {
	 355  		return
	 356  	}
	 357  	m.read.Store(readOnly{m: m.dirty})
	 358  	m.dirty = nil
	 359  	m.misses = 0
	 360  }
	 361  
	 362  func (m *Map) dirtyLocked() {
	 363  	if m.dirty != nil {
	 364  		return
	 365  	}
	 366  
	 367  	read, _ := m.read.Load().(readOnly)
	 368  	m.dirty = make(map[interface{}]*entry, len(read.m))
	 369  	for k, e := range read.m {
	 370  		if !e.tryExpungeLocked() {
	 371  			m.dirty[k] = e
	 372  		}
	 373  	}
	 374  }
	 375  
	 376  func (e *entry) tryExpungeLocked() (isExpunged bool) {
	 377  	p := atomic.LoadPointer(&e.p)
	 378  	for p == nil {
	 379  		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
	 380  			return true
	 381  		}
	 382  		p = atomic.LoadPointer(&e.p)
	 383  	}
	 384  	return p == expunged
	 385  }
	 386  
