
Source file src/runtime/mfinal.go


		 1  // Copyright 2009 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  // Garbage collector: finalizers and block profiling.
		 6  
		 7  package runtime
		 8  
		 9  import (
		10  	"internal/abi"
		11  	"runtime/internal/atomic"
		12  	"runtime/internal/sys"
		13  	"unsafe"
		14  )
		15  
		16  // finblock is an array of finalizers to be executed. finblocks are
		17  // arranged in a linked list for the finalizer queue.
		18  //
		19  // finblock is allocated from non-GC'd memory, so any heap pointers
		20  // must be specially handled. GC currently assumes that the finalizer
		21  // queue does not grow during marking (but it can shrink).
		22  //
		23  //go:notinheap
		24  type finblock struct {
		25  	alllink *finblock
		26  	next    *finblock
		27  	cnt     uint32
		28  	_       int32
		29  	fin     [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
		30  }
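
(Editorial note, not part of the source: as a rough worked example of the fin array length above, assuming _FinBlockSize is 10 KiB as defined elsewhere in the runtime and pointers are 8 bytes, each finalizer record occupies 5*8 = 40 bytes, so one block holds (10240 - 2*8 - 2*4) / 40 = 255 finalizer entries.)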
		31  
		32  var finlock mutex  // protects the following variables
		33  var fing *g        // goroutine that runs finalizers
		34  var finq *finblock // list of finalizers that are to be executed
		35  var finc *finblock // cache of free blocks
		36  var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
		37  var fingwait bool
		38  var fingwake bool
		39  var allfin *finblock // list of all blocks
		40  
		41  // NOTE: Layout known to queuefinalizer.
		42  type finalizer struct {
		43  	fn   *funcval       // function to call (may be a heap pointer)
		44  	arg  unsafe.Pointer // ptr to object (may be a heap pointer)
		45  	nret uintptr        // bytes of return values from fn
		46  	fint *_type         // type of first argument of fn
		47  	ot   *ptrtype       // type of ptr to object (may be a heap pointer)
		48  }
		49  
		50  var finalizer1 = [...]byte{
		51  	// Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here)
		52  	// Each byte describes 8 words.
		53  	// Need 8 Finalizers described by 5 bytes before pattern repeats:
		54  	//	ptr ptr INT ptr ptr
		55  	//	ptr ptr INT ptr ptr
		56  	//	ptr ptr INT ptr ptr
		57  	//	ptr ptr INT ptr ptr
		58  	//	ptr ptr INT ptr ptr
		59  	//	ptr ptr INT ptr ptr
		60  	//	ptr ptr INT ptr ptr
		61  	//	ptr ptr INT ptr ptr
		62  	// aka
		63  	//
		64  	//	ptr ptr INT ptr ptr ptr ptr INT
		65  	//	ptr ptr ptr ptr INT ptr ptr ptr
		66  	//	ptr INT ptr ptr ptr ptr INT ptr
		67  	//	ptr ptr ptr INT ptr ptr ptr ptr
		68  	//	INT ptr ptr ptr ptr INT ptr ptr
		69  	//
		70  	// Assumptions about Finalizer layout checked below.
		71  	1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7,
		72  	1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7,
		73  	1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7,
		74  	1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7,
		75  	0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7,
		76  }
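
(Editorial sketch, not part of the source.) The five mask bytes above can be derived mechanically from the ptr/scalar layout of one 5-word finalizer record. A minimal standalone Go sketch, with illustrative names only:

	package main

	import "fmt"

	func main() {
		// One finalizer record, word by word: fn, arg, nret, fint, ot.
		// true = pointer word, false = scalar (nret) word.
		record := []bool{true, true, false, true, true}

		// 8 records cover 40 words = 5 mask bytes, after which the pattern repeats.
		var words []bool
		for i := 0; i < 8; i++ {
			words = append(words, record...)
		}

		mask := make([]byte, 5)
		for i, isPtr := range words {
			if isPtr {
				mask[i/8] |= 1 << (i % 8) // bit i%8 of byte i/8 describes word i
			}
		}
		fmt.Printf("%08b\n", mask) // byte values match finalizer1 above
	}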
		77  
		78  func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
		79  	if gcphase != _GCoff {
		80  		// Currently we assume that the finalizer queue won't
		81  		// grow during marking so we don't have to rescan it
		82  		// during mark termination. If we ever need to lift
		83  		// this assumption, we can do it by adding the
		84  		// necessary barriers to queuefinalizer (which it may
		85  		// have automatically).
		86  		throw("queuefinalizer during GC")
		87  	}
		88  
		89  	lock(&finlock)
		90  	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
		91  		if finc == nil {
		92  			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys))
		93  			finc.alllink = allfin
		94  			allfin = finc
		95  			if finptrmask[0] == 0 {
		96  				// Build pointer mask for Finalizer array in block.
		97  				// Check assumptions made in finalizer1 array above.
		98  				if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
		99  					unsafe.Offsetof(finalizer{}.fn) != 0 ||
	 100  					unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
	 101  					unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
	 102  					unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
	 103  					unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
	 104  					throw("finalizer out of sync")
	 105  				}
	 106  				for i := range finptrmask {
	 107  					finptrmask[i] = finalizer1[i%len(finalizer1)]
	 108  				}
	 109  			}
	 110  		}
	 111  		block := finc
	 112  		finc = block.next
	 113  		block.next = finq
	 114  		finq = block
	 115  	}
	 116  	f := &finq.fin[finq.cnt]
	 117  	atomic.Xadd(&finq.cnt, +1) // Sync with markroots
	 118  	f.fn = fn
	 119  	f.nret = nret
	 120  	f.fint = fint
	 121  	f.ot = ot
	 122  	f.arg = p
	 123  	fingwake = true
	 124  	unlock(&finlock)
	 125  }
	 126  
	 127  //go:nowritebarrier
	 128  func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
	 129  	for fb := allfin; fb != nil; fb = fb.alllink {
	 130  		for i := uint32(0); i < fb.cnt; i++ {
	 131  			f := &fb.fin[i]
	 132  			callback(f.fn, f.arg, f.nret, f.fint, f.ot)
	 133  		}
	 134  	}
	 135  }
	 136  
	 137  func wakefing() *g {
	 138  	var res *g
	 139  	lock(&finlock)
	 140  	if fingwait && fingwake {
	 141  		fingwait = false
	 142  		fingwake = false
	 143  		res = fing
	 144  	}
	 145  	unlock(&finlock)
	 146  	return res
	 147  }
	 148  
	 149  var (
	 150  	fingCreate  uint32
	 151  	fingRunning bool
	 152  )
	 153  
	 154  func createfing() {
	 155  	// start the finalizer goroutine exactly once
	 156  	if fingCreate == 0 && atomic.Cas(&fingCreate, 0, 1) {
	 157  		go runfinq()
	 158  	}
	 159  }
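
(Editorial sketch, not part of the source.) createfing uses a plain check followed by an atomic compare-and-swap so the finalizer goroutine is started at most once. The same start-exactly-once idiom written against the public sync/atomic API, with hypothetical names, might look like this (the runtime cannot import sync, so it uses its internal atomic.Cas instead):

	package once

	import "sync/atomic"

	var workerStarted uint32

	// EnsureWorker starts worker in a new goroutine at most once, no matter
	// how many goroutines call it concurrently (compare createfing above).
	func EnsureWorker(worker func()) {
		if atomic.LoadUint32(&workerStarted) == 0 &&
			atomic.CompareAndSwapUint32(&workerStarted, 0, 1) {
			go worker()
		}
	}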
	 160  
	 161  // This is the goroutine that runs all of the finalizers
	 162  func runfinq() {
	 163  	var (
	 164  		frame    unsafe.Pointer
	 165  		framecap uintptr
	 166  		argRegs  int
	 167  	)
	 168  
	 169  	for {
	 170  		lock(&finlock)
	 171  		fb := finq
	 172  		finq = nil
	 173  		if fb == nil {
	 174  			gp := getg()
	 175  			fing = gp
	 176  			fingwait = true
	 177  			goparkunlock(&finlock, waitReasonFinalizerWait, traceEvGoBlock, 1)
	 178  			continue
	 179  		}
	 180  		argRegs = intArgRegs
	 181  		unlock(&finlock)
	 182  		if raceenabled {
	 183  			racefingo()
	 184  		}
	 185  		for fb != nil {
	 186  			for i := fb.cnt; i > 0; i-- {
	 187  				f := &fb.fin[i-1]
	 188  
	 189  				var regs abi.RegArgs
	 190  				// The args may be passed in registers or on stack. Even for
	 191  				// the register case, we still need the spill slots.
	 192  				// TODO: revisit if we remove spill slots.
	 193  				//
	 194  				// Unfortunately because we can have an arbitrary
	 195  				// amount of returns and it would be complex to try and
	 196  				// figure out how many of those can get passed in registers,
	 197  				// just conservatively assume none of them do.
	 198  				framesz := unsafe.Sizeof((interface{})(nil)) + f.nret
	 199  				if framecap < framesz {
	 200  					// The frame does not contain pointers interesting for GC,
	 201  					// all not yet finalized objects are stored in finq.
	 202  					// If we do not mark it as FlagNoScan,
	 203  					// the last finalized object is not collected.
	 204  					frame = mallocgc(framesz, nil, true)
	 205  					framecap = framesz
	 206  				}
	 207  
	 208  				if f.fint == nil {
	 209  					throw("missing type in runfinq")
	 210  				}
	 211  				r := frame
	 212  				if argRegs > 0 {
	 213  					r = unsafe.Pointer(&regs.Ints)
	 214  				} else {
	 215  					// frame is effectively uninitialized
	 216  					// memory. That means we have to clear
	 217  					// it before writing to it to avoid
	 218  					// confusing the write barrier.
	 219  					*(*[2]uintptr)(frame) = [2]uintptr{}
	 220  				}
	 221  				switch f.fint.kind & kindMask {
	 222  				case kindPtr:
	 223  					// direct use of pointer
	 224  					*(*unsafe.Pointer)(r) = f.arg
	 225  				case kindInterface:
	 226  					ityp := (*interfacetype)(unsafe.Pointer(f.fint))
	 227  					// set up with empty interface
	 228  					(*eface)(r)._type = &f.ot.typ
	 229  					(*eface)(r).data = f.arg
	 230  					if len(ityp.mhdr) != 0 {
	 231  						// convert to interface with methods
	 232  						// this conversion is guaranteed to succeed - we checked in SetFinalizer
	 233  						(*iface)(r).tab = assertE2I(ityp, (*eface)(r)._type)
	 234  					}
	 235  				default:
	 236  					throw("bad kind in runfinq")
	 237  				}
	 238  				fingRunning = true
	 239  				reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), &regs)
	 240  				fingRunning = false
	 241  
	 242  				// Drop finalizer queue heap references
	 243  				// before hiding them from markroot.
	 244  				// This also ensures these will be
	 245  				// clear if we reuse the finalizer.
	 246  				f.fn = nil
	 247  				f.arg = nil
	 248  				f.ot = nil
	 249  				atomic.Store(&fb.cnt, i-1)
	 250  			}
	 251  			next := fb.next
	 252  			lock(&finlock)
	 253  			fb.next = finc
	 254  			finc = fb
	 255  			unlock(&finlock)
	 256  			fb = next
	 257  		}
	 258  	}
	 259  }
	 260  
	 261  // SetFinalizer sets the finalizer associated with obj to the provided
	 262  // finalizer function. When the garbage collector finds an unreachable block
	 263  // with an associated finalizer, it clears the association and runs
	 264  // finalizer(obj) in a separate goroutine. This makes obj reachable again,
	 265  // but now without an associated finalizer. Assuming that SetFinalizer
	 266  // is not called again, the next time the garbage collector sees
	 267  // that obj is unreachable, it will free obj.
	 268  //
	 269  // SetFinalizer(obj, nil) clears any finalizer associated with obj.
	 270  //
	 271  // The argument obj must be a pointer to an object allocated by calling
	 272  // new, by taking the address of a composite literal, or by taking the
	 273  // address of a local variable.
	 274  // The argument finalizer must be a function that takes a single argument
	 275  // to which obj's type can be assigned, and can have arbitrary ignored return
	 276  // values. If either of these is not true, SetFinalizer may abort the
	 277  // program.
	 278  //
	 279  // Finalizers are run in dependency order: if A points at B, both have
	 280  // finalizers, and they are otherwise unreachable, only the finalizer
	 281  // for A runs; once A is freed, the finalizer for B can run.
	 282  // If a cyclic structure includes a block with a finalizer, that
	 283  // cycle is not guaranteed to be garbage collected and the finalizer
	 284  // is not guaranteed to run, because there is no ordering that
	 285  // respects the dependencies.
	 286  //
	 287  // The finalizer is scheduled to run at some arbitrary time after the
	 288  // program can no longer reach the object to which obj points.
	 289  // There is no guarantee that finalizers will run before a program exits,
	 290  // so typically they are useful only for releasing non-memory resources
	 291  // associated with an object during a long-running program.
	 292  // For example, an os.File object could use a finalizer to close the
	 293  // associated operating system file descriptor when a program discards
	 294  // an os.File without calling Close, but it would be a mistake
	 295  // to depend on a finalizer to flush an in-memory I/O buffer such as a
	 296  // bufio.Writer, because the buffer would not be flushed at program exit.
	 297  //
	 298  // It is not guaranteed that a finalizer will run if the size of *obj is
	 299  // zero bytes.
	 300  //
	 301  // It is not guaranteed that a finalizer will run for objects allocated
	 302  // in initializers for package-level variables. Such objects may be
	 303  // linker-allocated, not heap-allocated.
	 304  //
	 305  // A finalizer may run as soon as an object becomes unreachable.
	 306  // In order to use finalizers correctly, the program must ensure that
	 307  // the object is reachable until it is no longer required.
	 308  // Objects stored in global variables, or that can be found by tracing
	 309  // pointers from a global variable, are reachable. For other objects,
	 310  // pass the object to a call of the KeepAlive function to mark the
	 311  // last point in the function where the object must be reachable.
	 312  //
	 313  // For example, if p points to a struct, such as os.File, that contains
	 314  // a file descriptor d, and p has a finalizer that closes that file
	 315  // descriptor, and if the last use of p in a function is a call to
	 316  // syscall.Write(p.d, buf, size), then p may be unreachable as soon as
	 317  // the program enters syscall.Write. The finalizer may run at that moment,
	 318  // closing p.d, causing syscall.Write to fail because it is writing to
	 319  // a closed file descriptor (or, worse, to an entirely different
	 320  // file descriptor opened by a different goroutine). To avoid this problem,
	 321  // call runtime.KeepAlive(p) after the call to syscall.Write.
	 322  //
	 323  // A single goroutine runs all finalizers for a program, sequentially.
	 324  // If a finalizer must run for a long time, it should do so by starting
	 325  // a new goroutine.
	 326  func SetFinalizer(obj interface{}, finalizer interface{}) {
	 327  	if debug.sbrk != 0 {
	 328  		// debug.sbrk never frees memory, so no finalizers run
	 329  		// (and we don't have the data structures to record them).
	 330  		return
	 331  	}
	 332  	e := efaceOf(&obj)
	 333  	etyp := e._type
	 334  	if etyp == nil {
	 335  		throw("runtime.SetFinalizer: first argument is nil")
	 336  	}
	 337  	if etyp.kind&kindMask != kindPtr {
	 338  		throw("runtime.SetFinalizer: first argument is " + etyp.string() + ", not pointer")
	 339  	}
	 340  	ot := (*ptrtype)(unsafe.Pointer(etyp))
	 341  	if ot.elem == nil {
	 342  		throw("nil elem type!")
	 343  	}
	 344  
	 345  	// find the containing object
	 346  	base, _, _ := findObject(uintptr(e.data), 0, 0)
	 347  
	 348  	if base == 0 {
	 349  		// 0-length objects are okay.
	 350  		if e.data == unsafe.Pointer(&zerobase) {
	 351  			return
	 352  		}
	 353  
	 354  		// Global initializers might be linker-allocated.
	 355  		//	var Foo = &Object{}
	 356  		//	func main() {
	 357  		//		runtime.SetFinalizer(Foo, nil)
	 358  		//	}
	 359  		// The relevant segments are: noptrdata, data, bss, noptrbss.
	 360  		// We cannot assume they are in any order or even contiguous,
	 361  		// due to external linking.
	 362  		for datap := &firstmoduledata; datap != nil; datap = datap.next {
	 363  			if datap.noptrdata <= uintptr(e.data) && uintptr(e.data) < datap.enoptrdata ||
	 364  				datap.data <= uintptr(e.data) && uintptr(e.data) < datap.edata ||
	 365  				datap.bss <= uintptr(e.data) && uintptr(e.data) < datap.ebss ||
	 366  				datap.noptrbss <= uintptr(e.data) && uintptr(e.data) < datap.enoptrbss {
	 367  				return
	 368  			}
	 369  		}
	 370  		throw("runtime.SetFinalizer: pointer not in allocated block")
	 371  	}
	 372  
	 373  	if uintptr(e.data) != base {
	 374  		// As an implementation detail we allow to set finalizers for an inner byte
	 375  		// of an object if it could come from tiny alloc (see mallocgc for details).
	 376  		if ot.elem == nil || ot.elem.ptrdata != 0 || ot.elem.size >= maxTinySize {
	 377  			throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
	 378  		}
	 379  	}
	 380  
	 381  	f := efaceOf(&finalizer)
	 382  	ftyp := f._type
	 383  	if ftyp == nil {
	 384  		// switch to system stack and remove finalizer
	 385  		systemstack(func() {
	 386  			removefinalizer(e.data)
	 387  		})
	 388  		return
	 389  	}
	 390  
	 391  	if ftyp.kind&kindMask != kindFunc {
	 392  		throw("runtime.SetFinalizer: second argument is " + ftyp.string() + ", not a function")
	 393  	}
	 394  	ft := (*functype)(unsafe.Pointer(ftyp))
	 395  	if ft.dotdotdot() {
	 396  		throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string() + " because dotdotdot")
	 397  	}
	 398  	if ft.inCount != 1 {
	 399  		throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
	 400  	}
	 401  	fint := ft.in()[0]
	 402  	switch {
	 403  	case fint == etyp:
	 404  		// ok - same type
	 405  		goto okarg
	 406  	case fint.kind&kindMask == kindPtr:
	 407  		if (fint.uncommon() == nil || etyp.uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
	 408  			// ok - not same type, but both pointers,
	 409  			// one or the other is unnamed, and same element type, so assignable.
	 410  			goto okarg
	 411  		}
	 412  	case fint.kind&kindMask == kindInterface:
	 413  		ityp := (*interfacetype)(unsafe.Pointer(fint))
	 414  		if len(ityp.mhdr) == 0 {
	 415  			// ok - satisfies empty interface
	 416  			goto okarg
	 417  		}
	 418  		if iface := assertE2I2(ityp, *efaceOf(&obj)); iface.tab != nil {
	 419  			goto okarg
	 420  		}
	 421  	}
	 422  	throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
	 423  okarg:
	 424  	// compute size needed for return parameters
	 425  	nret := uintptr(0)
	 426  	for _, t := range ft.out() {
	 427  		nret = alignUp(nret, uintptr(t.align)) + uintptr(t.size)
	 428  	}
	 429  	nret = alignUp(nret, sys.PtrSize)
	 430  
	 431  	// make sure we have a finalizer goroutine
	 432  	createfing()
	 433  
	 434  	systemstack(func() {
	 435  		if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
	 436  			throw("runtime.SetFinalizer: finalizer already set")
	 437  		}
	 438  	})
	 439  }
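
(Editorial sketch, not part of the source.) A minimal, hedged usage example of the API documented above; the file type and fd field are made up for illustration:

	package main

	import (
		"fmt"
		"runtime"
		"time"
	)

	type file struct{ fd int }

	func main() {
		f := &file{fd: 3}

		// The finalizer takes a single parameter to which *file is assignable.
		runtime.SetFinalizer(f, func(f *file) {
			fmt.Println("finalizer: closing fd", f.fd)
		})
		// runtime.SetFinalizer(f, nil) would clear the association again.

		f = nil      // drop the last reference
		runtime.GC() // the finalizer is queued once the object is found unreachable

		// Finalizers run on a single separate goroutine; there is no guarantee
		// they run before the program exits, so give it a moment here.
		time.Sleep(100 * time.Millisecond)
	}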
	 440  
	 441  // Mark KeepAlive as noinline so that it is easily detectable as an intrinsic.
	 442  //go:noinline
	 443  
	 444  // KeepAlive marks its argument as currently reachable.
	 445  // This ensures that the object is not freed, and its finalizer is not run,
	 446  // before the point in the program where KeepAlive is called.
	 447  //
	 448  // A very simplified example showing where KeepAlive is required:
	 449  // 	type File struct { d int }
	 450  // 	d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
	 451  // 	// ... do something if err != nil ...
	 452  // 	p := &File{d}
	 453  // 	runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
	 454  // 	var buf [10]byte
	 455  // 	n, err := syscall.Read(p.d, buf[:])
	 456  // 	// Ensure p is not finalized until Read returns.
	 457  // 	runtime.KeepAlive(p)
	 458  // 	// No more uses of p after this point.
	 459  //
	 460  // Without the KeepAlive call, the finalizer could run at the start of
	 461  // syscall.Read, closing the file descriptor before syscall.Read makes
	 462  // the actual system call.
	 463  //
	 464  // Note: KeepAlive should only be used to prevent finalizers from
	 465  // running prematurely. In particular, when used with unsafe.Pointer,
	 466  // the rules for valid uses of unsafe.Pointer still apply.
	 467  func KeepAlive(x interface{}) {
	 468  	// Introduce a use of x that the compiler can't eliminate.
	 469  	// This makes sure x is alive on entry. We need x to be alive
	 470  	// on entry for "defer runtime.KeepAlive(x)"; see issue 21402.
	 471  	if cgoAlwaysFalse {
	 472  		println(x)
	 473  	}
	 474  }
	 475  
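
(Editorial sketch, not part of the source.) The body comment above mentions keeping x alive on entry so that "defer runtime.KeepAlive(x)" works (issue 21402). A small hypothetical example of that pattern:

	package fileutil

	import (
		"runtime"
		"syscall"
	)

	type file struct{ fd int }

	// Write keeps f reachable for the whole call, even if the caller drops its
	// last reference while the system call is still in flight; otherwise a
	// finalizer that closes f.fd could run mid-call.
	func (f *file) Write(buf []byte) (int, error) {
		defer runtime.KeepAlive(f)
		return syscall.Write(f.fd, buf)
	}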
