
Source file src/runtime/mbarrier.go


// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: write barriers.
//
// For the concurrent garbage collector, the Go compiler implements
// updates to pointer-valued fields that may be in heap objects by
// emitting calls to write barriers. The main write barrier for
// individual pointer writes is gcWriteBarrier and is implemented in
// assembly. This file contains write barrier entry points for bulk
// operations. See also mwbbuf.go.

package runtime

import (
	"internal/abi"
	"runtime/internal/sys"
	"unsafe"
)

// Go uses a hybrid barrier that combines a Yuasa-style deletion
// barrier—which shades the object whose reference is being
// overwritten—with a Dijkstra-style insertion barrier—which shades
// the object whose reference is being written. The insertion part of
// the barrier is necessary while the calling goroutine's stack is
// grey. In pseudocode, the barrier is:
//
//	writePointer(slot, ptr):
//		shade(*slot)
//		if current stack is grey:
//			shade(ptr)
//		*slot = ptr
//
// slot is the destination in Go code.
// ptr is the value that goes into the slot in Go code.
//
// Shade indicates that it has seen a white pointer by adding the referent
// to wbuf as well as marking it.
//
// The two shades and the condition work together to prevent a mutator
// from hiding an object from the garbage collector:
//
// 1. shade(*slot) prevents a mutator from hiding an object by moving
// the sole pointer to it from the heap to its stack. If it attempts
// to unlink an object from the heap, this will shade it.
//
// 2. shade(ptr) prevents a mutator from hiding an object by moving
// the sole pointer to it from its stack into a black object in the
// heap. If it attempts to install the pointer into a black object,
// this will shade it.
//
// 3. Once a goroutine's stack is black, the shade(ptr) becomes
// unnecessary. shade(ptr) prevents hiding an object by moving it from
// the stack to the heap, but this requires first having a pointer
// hidden on the stack. Immediately after a stack is scanned, it only
// points to shaded objects, so it's not hiding anything, and the
// shade(*slot) prevents it from hiding any other pointers on its
// stack.
//
// For a detailed description of this barrier and proof of
// correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
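//
// As a rough Go rendering of that pseudocode (a conceptual sketch
// only: the real per-pointer barrier is gcWriteBarrier, implemented
// in assembly, and shade and stackIsGrey here are hypothetical
// helpers standing in for internal runtime machinery):
//
//	func writePointer(slot *unsafe.Pointer, ptr unsafe.Pointer) {
//		shade(*slot) // Yuasa: grey the object being unlinked.
//		if stackIsGrey() {
//			shade(ptr) // Dijkstra: grey the object being installed.
//		}
//		*slot = ptr // Publish only after both shades.
//	}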
//
//
// Dealing with memory ordering:
//
// Both the Yuasa and Dijkstra barriers can be made conditional on the
// color of the object containing the slot. We chose not to make these
// conditional because the cost of ensuring that the object holding
// the slot doesn't concurrently change color without the mutator
// noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
// a slot and then loads the slot's mark bit while the GC thread
// writes to the slot's mark bit and then as part of scanning reads
// the slot.
//
// Initially both [slot] and [slotmark] are 0 (nil)
//
// Mutator thread          GC thread
// st [slot], ptr          st [slotmark], 1
//
// ld r1, [slotmark]       ld r2, [slot]
//
// Without an expensive memory barrier between the st and the ld, the final
// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
// example of what can happen when loads are allowed to be reordered with older
// stores (avoiding such reorderings lies at the heart of the classic
// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
// barriers, which would slow down both the mutator and the GC, we always grey
// the ptr object regardless of the slot's color.
//
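// This reordering is observable from ordinary Go. The following
// standalone program (an illustration only, not part of the runtime;
// it contains a deliberate data race) can observe r1 == r2 == 0,
// typically within a modest number of iterations on a multicore
// amd64 machine, which sequential consistency would forbid:
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//	)
//
//	func main() {
//		for i := 0; ; i++ {
//			var slot, slotmark, r1, r2 int64
//			var wg sync.WaitGroup
//			wg.Add(2)
//			go func() { // plays the mutator
//				slot = 1      // st [slot], ptr
//				r1 = slotmark // ld r1, [slotmark]
//				wg.Done()
//			}()
//			go func() { // plays the GC thread
//				slotmark = 1 // st [slotmark], 1
//				r2 = slot    // ld r2, [slot]
//				wg.Done()
//			}()
//			wg.Wait()
//			if r1 == 0 && r2 == 0 {
//				fmt.Println("observed r1 == r2 == 0 at iteration", i)
//				return
//			}
//		}
//	}
//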
// Another place where we intentionally omit memory barriers is when
// accessing mheap_.arena_used to check if a pointer points into the
// heap. On relaxed memory machines, it's possible for a mutator to
// extend the size of the heap by updating arena_used, allocate an
// object from this new region, and publish a pointer to that object,
// but for tracing running on another processor to observe the pointer
// but use the old value of arena_used. In this case, tracing will not
// mark the object, even though it's reachable. However, the mutator
// is guaranteed to execute a write barrier when it publishes the
// pointer, so it will take care of marking the object. A general
// consequence of this is that the garbage collector may cache the
// value of mheap_.arena_used. (See issue #9984.)
//
//
// Stack writes:
//
// The compiler omits write barriers for writes to the current frame,
// but if a stack pointer has been passed down the call stack, the
// compiler will generate a write barrier for writes through that
// pointer (because it doesn't know it's not a heap pointer).
//
// One might be tempted to ignore the write barrier if slot points
// into the stack. Don't do it! Mark termination only re-scans
// frames that have potentially been active since the concurrent scan,
// so it depends on write barriers to track changes to pointers in
// stack frames that have not been active.
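//
// For example (an illustrative sketch, not code from this file):
//
//	func caller() {
//		var x *int // x lives in caller's frame
//		fill(&x)   // a pointer to a stack slot is passed down
//	}
//
//	func fill(p **int) {
//		// The compiler emits a write barrier for this store:
//		// it cannot prove that p points into a stack.
//		*p = new(int)
//	}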
//
//
// Global writes:
//
// The Go garbage collector requires write barriers when heap pointers
// are stored in globals. Many garbage collectors ignore writes to
// globals and instead pick up global -> heap pointers during
// termination. This increases pause time, so we instead rely on write
// barriers for writes to globals so that we don't have to rescan
// globals during mark termination.
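//
// For instance (illustrative only), with a package-level
//
//	var global *int
//
// declaration, the compiler emits a write barrier for the store in
//
//	global = new(int)
//
// just as it would for a store into a heap object.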
//
//
// Publication ordering:
//
// The write barrier is *pre-publication*, meaning that the write
// barrier happens prior to the *slot = ptr write that may make ptr
// reachable by some goroutine that currently cannot reach it.
//
//
// Signal handler pointer writes:
//
// In general, the signal handler cannot safely invoke the write
// barrier because it may run without a P or even during the write
// barrier.
//
// There is exactly one exception: profbuf.go omits a barrier during
// signal handler profile logging. That's safe only because of the
// deletion barrier. See profbuf.go for a detailed argument. If we
// remove the deletion barrier, we'll have to work out a new way to
// handle the profile logging.

// typedmemmove copies a value of type typ to dst from src.
// Must be nosplit, see #16026.
//
// TODO: Perfect for go:nosplitrec since we can't have a safe point
// anywhere in the bulk barrier or memmove.
//
//go:nosplit
func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	if dst == src {
		return
	}
	if writeBarrier.needed && typ.ptrdata != 0 {
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
	}
	// There's a race here: if some other goroutine can write to
	// src, it may change some pointer in src after we've
	// performed the write barrier but before we perform the
	// memory copy. This is safe because the write performed by that
	// other goroutine must also be accompanied by a write
	// barrier, so at worst we've unnecessarily greyed the old
	// pointer that was in src.
	memmove(dst, src, typ.size)
	if writeBarrier.cgo {
		cgoCheckMemmove(typ, dst, src, 0, typ.size)
	}
}
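
// As an illustration only (exact lowering is a compiler decision, and
// typeOfT below is shorthand for T's runtime type descriptor): for a
// pointer-bearing type such as
//
//	type T struct {
//		p *int
//		n int
//	}
//
// an assignment *d = *s between two *T values may compile to a call
// equivalent to typedmemmove(typeOfT, unsafe.Pointer(d),
// unsafe.Pointer(s)), so the bulk barrier above greys T's pointer
// word before the bytes are moved. (Small types may instead get
// inline per-pointer barriers.)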

//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	if raceenabled {
		raceWriteObjectPC(typ, dst, getcallerpc(), funcPC(reflect_typedmemmove))
		raceReadObjectPC(typ, src, getcallerpc(), funcPC(reflect_typedmemmove))
	}
	if msanenabled {
		msanwrite(dst, typ.size)
		msanread(src, typ.size)
	}
	typedmemmove(typ, dst, src)
}

//go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove
func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	reflect_typedmemmove(typ, dst, src)
}

// typedmemmovepartial is like typedmemmove but assumes that
// dst and src point off bytes into the value and only copies size bytes.
// off must be a multiple of sys.PtrSize.
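// For example (illustrative numbers only): with typ.ptrdata = 24,
// off = 8, and size = 40, pwsize = min(alignDown(40, 8), 24-8) = 16,
// so the barrier covers just the remaining pointer-bearing bytes of
// the value, at offsets [8, 24).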
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
	if writeBarrier.needed && typ.ptrdata > off && size >= sys.PtrSize {
		if off&(sys.PtrSize-1) != 0 {
			panic("reflect: internal error: misaligned offset")
		}
		pwsize := alignDown(size, sys.PtrSize)
		if poff := typ.ptrdata - off; pwsize > poff {
			pwsize = poff
		}
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), pwsize)
	}

	memmove(dst, src, size)
	if writeBarrier.cgo {
		cgoCheckMemmove(typ, dst, src, off, size)
	}
}

// reflectcallmove is invoked by reflectcall to copy the return values
// out of the stack and into the heap, invoking the necessary write
// barriers. dst, src, and size describe the return value area to
// copy. typ describes the entire frame (not just the return values).
// typ may be nil, which indicates write barriers are not needed.
//
// It must be nosplit and must only call nosplit functions because the
// stack map of reflectcall is wrong.
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
	if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= sys.PtrSize {
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
	}
	memmove(dst, src, size)

	// Move pointers returned in registers to a place where the GC can see them.
	for i := range regs.Ints {
		if regs.ReturnIsPtr.Get(i) {
			regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
		}
	}
}

//go:nosplit
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
	n := dstLen
	if n > srcLen {
		n = srcLen
	}
	if n == 0 {
		return 0
	}

	// The compiler emits calls to typedslicecopy before
	// instrumentation runs, so unlike the other copying and
	// assignment operations, it's not instrumented in the calling
	// code and needs its own instrumentation.
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(slicecopy)
		racewriterangepc(dstPtr, uintptr(n)*typ.size, callerpc, pc)
		racereadrangepc(srcPtr, uintptr(n)*typ.size, callerpc, pc)
	}
	if msanenabled {
		msanwrite(dstPtr, uintptr(n)*typ.size)
		msanread(srcPtr, uintptr(n)*typ.size)
	}

	if writeBarrier.cgo {
		cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
	}

	if dstPtr == srcPtr {
		return n
	}

	// Note: No point in checking typ.ptrdata here:
	// compiler only emits calls to typedslicecopy for types with pointers,
	// and growslice and reflect_typedslicecopy check for pointers
	// before calling typedslicecopy.
	size := uintptr(n) * typ.size
	if writeBarrier.needed {
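		// Shading only needs to reach the last pointer in the last
		// element copied: n-1 full elements plus the pointer-bearing
		// prefix (ptrdata) of the final one.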
		pwsize := size - typ.size + typ.ptrdata
		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
	}
	// See typedmemmove for a discussion of the race between the
	// barrier and memmove.
	memmove(dstPtr, srcPtr, size)
	return n
}
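
// As an illustration only (exact lowering is a compiler decision):
// user code such as
//
//	dst := make([]*int, 4)
//	src := []*int{new(int), new(int)}
//	n := copy(dst, src) // n == 2
//
// may compile to a call to typedslicecopy for the *int element type,
// since the elements contain pointers.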

//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
	if elemType.ptrdata == 0 {
		return slicecopy(dst.array, dst.len, src.array, src.len, elemType.size)
	}
	return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
}

// typedmemclr clears the typed memory at ptr with type typ. The
// memory at ptr must already be initialized (and hence in type-safe
// state). If the memory is being initialized for the first time, see
// memclrNoHeapPointers.
//
// If the caller knows that typ has pointers, it can alternatively
// call memclrHasPointers.
//
//go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer) {
	if writeBarrier.needed && typ.ptrdata != 0 {
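		// A src of 0 tells bulkBarrierPreWrite that there are no new
		// pointers to shade; only the old pointers about to be
		// overwritten in the destination are greyed.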
		bulkBarrierPreWrite(uintptr(ptr), 0, typ.ptrdata)
	}
	memclrNoHeapPointers(ptr, typ.size)
}

//go:linkname reflect_typedmemclr reflect.typedmemclr
func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
	typedmemclr(typ, ptr)
}

//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
	if writeBarrier.needed && typ.ptrdata != 0 {
		bulkBarrierPreWrite(uintptr(ptr), 0, size)
	}
	memclrNoHeapPointers(ptr, size)
}

// memclrHasPointers clears n bytes of typed memory starting at ptr.
// The caller must ensure that the type of the object at ptr has
// pointers, usually by checking typ.ptrdata. However, ptr
// does not have to point to the start of the allocation.
//
//go:nosplit
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
	bulkBarrierPreWrite(uintptr(ptr), 0, n)
	memclrNoHeapPointers(ptr, n)
}
