...

Source file src/runtime/lock_sema.go


		 1  // Copyright 2011 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  //go:build aix || darwin || netbsd || openbsd || plan9 || solaris || windows
		 6  // +build aix darwin netbsd openbsd plan9 solaris windows
		 7  
		 8  package runtime
		 9  
		10  import (
		11  	"runtime/internal/atomic"
		12  	"unsafe"
		13  )
		14  
		15  // This implementation depends on OS-specific implementations of
		16  //
		17  //	func semacreate(mp *m)
		18  //		Create a semaphore for mp, if it does not already have one.
		19  //
		20  //	func semasleep(ns int64) int32
		21  //		If ns < 0, acquire m's semaphore and return 0.
		22  //		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
		23  //		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
		24  //
		25  //	func semawakeup(mp *m)
		26  //		Wake up mp, which is or will soon be sleeping on its semaphore.
		27  //
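A portable stand-in for this contract can be written with ordinary library code. The sketch below is illustrative only and is not the runtime's implementation: the worker type and these free functions are invented for the example, a one-slot channel plays the role of the per-worker OS semaphore, and a time.Duration replaces raw nanoseconds.

	package semasketch

	import "time"

	// worker stands in for an m; illustrative only.
	type worker struct {
		sema chan struct{}
	}

	// semacreate: allocate the semaphore once, if the worker lacks one.
	func semacreate(w *worker) {
		if w.sema == nil {
			w.sema = make(chan struct{}, 1)
		}
	}

	// semasleep: block for the semaphore; with a non-negative timeout,
	// give up after that long and report -1, as the contract describes.
	func semasleep(w *worker, d time.Duration) int32 {
		if d < 0 {
			<-w.sema
			return 0
		}
		t := time.NewTimer(d)
		defer t.Stop()
		select {
		case <-w.sema:
			return 0
		case <-t.C:
			return -1
		}
	}

	// semawakeup: post the semaphore; a wakeup that is already pending
	// is simply left in place, so it is never lost.
	func semawakeup(w *worker) {
		select {
		case w.sema <- struct{}{}:
		default:
		}
	}
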
		28  const (
		29  	locked uintptr = 1
		30  
		31  	active_spin     = 4
		32  	active_spin_cnt = 30
		33  	passive_spin    = 1
		34  )
		35  
		36  func lock(l *mutex) {
		37  	lockWithRank(l, getLockRank(l))
		38  }
		39  
		40  func lock2(l *mutex) {
		41  	gp := getg()
		42  	if gp.m.locks < 0 {
		43  		throw("runtime·lock: lock count")
		44  	}
		45  	gp.m.locks++
		46  
		47  	// Speculative grab for lock.
		48  	if atomic.Casuintptr(&l.key, 0, locked) {
		49  		return
		50  	}
		51  	semacreate(gp.m)
		52  
		53  	// On uniprocessors, no point spinning.
		54  	// On multiprocessors, spin for ACTIVE_SPIN attempts.
		55  	spin := 0
		56  	if ncpu > 1 {
		57  		spin = active_spin
		58  	}
		59  Loop:
		60  	for i := 0; ; i++ {
		61  		v := atomic.Loaduintptr(&l.key)
		62  		if v&locked == 0 {
		63  			// Unlocked. Try to lock.
		64  			if atomic.Casuintptr(&l.key, v, v|locked) {
		65  				return
		66  			}
		67  			i = 0
		68  		}
		69  		if i < spin {
		70  			procyield(active_spin_cnt)
		71  		} else if i < spin+passive_spin {
		72  			osyield()
		73  		} else {
		74  			// Someone else has it.
		75  			// l->waitm points to a linked list of M's waiting
		76  			// for this lock, chained through m->nextwaitm.
		77  			// Queue this M.
		78  			for {
		79  				gp.m.nextwaitm = muintptr(v &^ locked)
		80  				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
		81  					break
		82  				}
		83  				v = atomic.Loaduintptr(&l.key)
		84  				if v&locked == 0 {
		85  					continue Loop
		86  				}
		87  			}
		88  			if v&locked != 0 {
		89  				// Queued. Wait.
		90  				semasleep(-1)
		91  				i = 0
		92  			}
		93  		}
		94  	}
		95  }
		96  
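The spin-then-block shape of lock2 can be reproduced outside the runtime with library primitives. The sketch below is an assumption-laden analogue, not the runtime algorithm: spinMutex is an invented name, a one-slot channel replaces both the per-M semaphore and the waiter list, and runtime.Gosched stands in for osyield.

	package locksketch

	import (
		"runtime"
		"sync/atomic"
	)

	// spinMutex is an illustrative analogue of the runtime mutex.
	type spinMutex struct {
		state uint32        // 0 = unlocked, 1 = locked
		sema  chan struct{} // binary semaphore in place of semasleep/semawakeup
	}

	func newSpinMutex() *spinMutex {
		return &spinMutex{sema: make(chan struct{}, 1)}
	}

	func (m *spinMutex) Lock() {
		// Speculative grab, like the CAS on l.key in lock2.
		if atomic.CompareAndSwapUint32(&m.state, 0, 1) {
			return
		}
		const activeSpin, passiveSpin = 4, 1 // cf. active_spin, passive_spin
		for i := 0; ; i++ {
			if atomic.LoadUint32(&m.state) == 0 &&
				atomic.CompareAndSwapUint32(&m.state, 0, 1) {
				return
			}
			switch {
			case i < activeSpin:
				// Active spin: retry immediately; procyield is runtime-internal.
			case i < activeSpin+passiveSpin:
				runtime.Gosched() // passive spin, cf. osyield
			default:
				<-m.sema // block until an Unlock posts the semaphore
				i = 0    // woken: start spinning again, as lock2 does
			}
		}
	}

	func (m *spinMutex) Unlock() {
		atomic.StoreUint32(&m.state, 0)
		select {
		case m.sema <- struct{}{}: // wake one blocked waiter, cf. semawakeup
		default: // nobody is blocked, or a wakeup is already pending
		}
	}

The real lock2 instead threads waiting Ms through m.nextwaitm so that unlock2 can dequeue and wake exactly one of them; the channel here collapses that list into a single wakeup slot.
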
		97  func unlock(l *mutex) {
		98  	unlockWithRank(l)
		99  }
	 100  
	 101  //go:nowritebarrier
	 102  // We might not be holding a p in this code.
	 103  func unlock2(l *mutex) {
	 104  	gp := getg()
	 105  	var mp *m
	 106  	for {
	 107  		v := atomic.Loaduintptr(&l.key)
	 108  		if v == locked {
	 109  			if atomic.Casuintptr(&l.key, locked, 0) {
	 110  				break
	 111  			}
	 112  		} else {
	 113  			// Other M's are waiting for the lock.
	 114  			// Dequeue an M.
	 115  			mp = muintptr(v &^ locked).ptr()
	 116  			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
	 117  				// Dequeued an M. Wake it.
	 118  				semawakeup(mp)
	 119  				break
	 120  			}
	 121  		}
	 122  	}
	 123  	gp.m.locks--
	 124  	if gp.m.locks < 0 {
	 125  		throw("runtime·unlock: lock count")
	 126  	}
	 127  	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
	 128  		gp.stackguard0 = stackPreempt
	 129  	}
	 130  }
	 131  
	 132  // One-time notifications.
	 133  func noteclear(n *note) {
	 134  	if GOOS == "aix" {
	 135  		// On AIX, semaphores might not synchronize the memory in some
	 136  		// rare cases. See issue #30189.
	 137  		atomic.Storeuintptr(&n.key, 0)
	 138  	} else {
	 139  		n.key = 0
	 140  	}
	 141  }
	 142  
	 143  func notewakeup(n *note) {
	 144  	var v uintptr
	 145  	for {
	 146  		v = atomic.Loaduintptr(&n.key)
	 147  		if atomic.Casuintptr(&n.key, v, locked) {
	 148  			break
	 149  		}
	 150  	}
	 151  
	 152  	// Successfully set waitm to locked.
	 153  	// What was it before?
	 154  	switch {
	 155  	case v == 0:
	 156  		// Nothing was waiting. Done.
	 157  	case v == locked:
	 158  		// Two notewakeups! Not allowed.
	 159  		throw("notewakeup - double wakeup")
	 160  	default:
	 161  		// Must be the waiting m. Wake it up.
	 162  		semawakeup((*m)(unsafe.Pointer(v)))
	 163  	}
	 164  }
	 165  
	 166  func notesleep(n *note) {
	 167  	gp := getg()
	 168  	if gp != gp.m.g0 {
	 169  		throw("notesleep not on g0")
	 170  	}
	 171  	semacreate(gp.m)
	 172  	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
	 173  		// Must be locked (got wakeup).
	 174  		if n.key != locked {
	 175  			throw("notesleep - waitm out of sync")
	 176  		}
	 177  		return
	 178  	}
	 179  	// Queued. Sleep.
	 180  	gp.m.blocked = true
	 181  	if *cgo_yield == nil {
	 182  		semasleep(-1)
	 183  	} else {
	 184  		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
	 185  		const ns = 10e6
	 186  		for atomic.Loaduintptr(&n.key) == 0 {
	 187  			semasleep(ns)
	 188  			asmcgocall(*cgo_yield, nil)
	 189  		}
	 190  	}
	 191  	gp.m.blocked = false
	 192  }
	 193  
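Outside the runtime, the same one-shot notification is usually written with a channel that is closed exactly once. The sketch below is only an analogue (event is an invented name): a close-once replaces the CAS to locked, and a channel receive replaces parking on the M's semaphore.

	package notesketch

	import "sync"

	// event is an illustrative analogue of the runtime note.
	type event struct {
		once sync.Once
		done chan struct{}
	}

	func newEvent() *event { return &event{done: make(chan struct{})} }

	// wakeup corresponds to notewakeup: release the (single) sleeper.
	func (e *event) wakeup() { e.once.Do(func() { close(e.done) }) }

	// sleep corresponds to notesleep: block until wakeup has happened.
	func (e *event) sleep() { <-e.done }

Two differences from the runtime version: notewakeup throws on a double wakeup where the Once silently ignores it, and a note can be reset with noteclear while this sketch is single-use and must be reallocated.
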
	 194  //go:nosplit
	 195  func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	 196  	// gp and deadline are logically local variables, but they are written
	 197  	// as parameters so that the stack space they require is charged
	 198  	// to the caller.
	 199  	// This reduces the nosplit footprint of notetsleep_internal.
	 200  	gp = getg()
	 201  
	 202  	// Register for wakeup on n->waitm.
	 203  	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
	 204  		// Must be locked (got wakeup).
	 205  		if n.key != locked {
	 206  			throw("notetsleep - waitm out of sync")
	 207  		}
	 208  		return true
	 209  	}
	 210  	if ns < 0 {
	 211  		// Queued. Sleep.
	 212  		gp.m.blocked = true
	 213  		if *cgo_yield == nil {
	 214  			semasleep(-1)
	 215  		} else {
	 216  			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
	 217  			const ns = 10e6
	 218  			for semasleep(ns) < 0 {
	 219  				asmcgocall(*cgo_yield, nil)
	 220  			}
	 221  		}
	 222  		gp.m.blocked = false
	 223  		return true
	 224  	}
	 225  
	 226  	deadline = nanotime() + ns
	 227  	for {
	 228  		// Registered. Sleep.
	 229  		gp.m.blocked = true
	 230  		if *cgo_yield != nil && ns > 10e6 {
	 231  			ns = 10e6
	 232  		}
	 233  		if semasleep(ns) >= 0 {
	 234  			gp.m.blocked = false
	 235  			// Acquired semaphore, semawakeup unregistered us.
	 236  			// Done.
	 237  			return true
	 238  		}
	 239  		if *cgo_yield != nil {
	 240  			asmcgocall(*cgo_yield, nil)
	 241  		}
	 242  		gp.m.blocked = false
	 243  		// Interrupted or timed out. Still registered. Semaphore not acquired.
	 244  		ns = deadline - nanotime()
	 245  		if ns <= 0 {
	 246  			break
	 247  		}
	 248  		// Deadline hasn't arrived. Keep sleeping.
	 249  	}
	 250  
	 251  	// Deadline arrived. Still registered. Semaphore not acquired.
	 252  	// Want to give up and return, but have to unregister first,
	 253  	// so that any notewakeup racing with the return does not
	 254  	// try to grant us the semaphore when we don't expect it.
	 255  	for {
	 256  		v := atomic.Loaduintptr(&n.key)
	 257  		switch v {
	 258  		case uintptr(unsafe.Pointer(gp.m)):
	 259  			// No wakeup yet; unregister if possible.
	 260  			if atomic.Casuintptr(&n.key, v, 0) {
	 261  				return false
	 262  			}
	 263  		case locked:
	 264  			// Wakeup happened so semaphore is available.
	 265  			// Grab it to avoid getting out of sync.
	 266  			gp.m.blocked = true
	 267  			if semasleep(-1) < 0 {
	 268  				throw("runtime: unable to acquire - semaphore out of sync")
	 269  			}
	 270  			gp.m.blocked = false
	 271  			return true
	 272  		default:
	 273  			throw("runtime: unexpected waitm - semaphore out of sync")
	 274  		}
	 275  	}
	 276  }
	 277  
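The deadline bookkeeping in notetsleep_internal, recomputing the remaining budget after every early wakeup and giving up once it is spent, is a general pattern. A minimal sketch follows, with tryWait as a hypothetical stand-in for a bounded wait such as semasleep that reports whether it succeeded.

	package timeoutsketch

	import "time"

	// waitWithDeadline keeps retrying a bounded wait until it succeeds or the
	// total budget is exhausted, mirroring the loop over semasleep above.
	func waitWithDeadline(budget time.Duration, tryWait func(time.Duration) bool) bool {
		deadline := time.Now().Add(budget)
		for {
			remaining := time.Until(deadline)
			if remaining <= 0 {
				return false // budget spent: give up
			}
			if tryWait(remaining) {
				return true // acquired within this slice
			}
			// Interrupted or woke early: loop and recompute the remaining time.
		}
	}

The runtime version has one extra obligation the sketch omits: on timeout it must unregister itself from n.key so that a racing notewakeup does not hand it the semaphore after it has already returned.
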
	 278  func notetsleep(n *note, ns int64) bool {
	 279  	gp := getg()
	 280  	if gp != gp.m.g0 {
	 281  		throw("notetsleep not on g0")
	 282  	}
	 283  	semacreate(gp.m)
	 284  	return notetsleep_internal(n, ns, nil, 0)
	 285  }
	 286  
	 287  // same as runtime·notetsleep, but called on user g (not g0)
	 288  // calls only nosplit functions between entersyscallblock/exitsyscall
	 289  func notetsleepg(n *note, ns int64) bool {
	 290  	gp := getg()
	 291  	if gp == gp.m.g0 {
	 292  		throw("notetsleepg on g0")
	 293  	}
	 294  	semacreate(gp.m)
	 295  	entersyscallblock()
	 296  	ok := notetsleep_internal(n, ns, nil, 0)
	 297  	exitsyscall()
	 298  	return ok
	 299  }
	 300  
	 301  func beforeIdle(int64, int64) (*g, bool) {
	 302  	return nil, false
	 303  }
	 304  
	 305  func checkTimeouts() {}
	 306  
