Source file src/runtime/mfinal.go
1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Garbage collector: finalizers and block profiling. 6 7 package runtime 8 9 import ( 10 "internal/abi" 11 "internal/goarch" 12 "internal/runtime/atomic" 13 "internal/runtime/sys" 14 "unsafe" 15 ) 16 17 const finBlockSize = 4 * 1024 18 19 // finBlock is an block of finalizers/cleanups to be executed. finBlocks 20 // are arranged in a linked list for the finalizer queue. 21 // 22 // finBlock is allocated from non-GC'd memory, so any heap pointers 23 // must be specially handled. GC currently assumes that the finalizer 24 // queue does not grow during marking (but it can shrink). 25 type finBlock struct { 26 _ sys.NotInHeap 27 alllink *finBlock 28 next *finBlock 29 cnt uint32 30 _ int32 31 fin [(finBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer 32 } 33 34 var fingStatus atomic.Uint32 35 36 // finalizer goroutine status. 37 const ( 38 fingUninitialized uint32 = iota 39 fingCreated uint32 = 1 << (iota - 1) 40 fingRunningFinalizer 41 fingWait 42 fingWake 43 ) 44 45 var ( 46 finlock mutex // protects the following variables 47 fing *g // goroutine that runs finalizers 48 finq *finBlock // list of finalizers that are to be executed 49 finc *finBlock // cache of free blocks 50 finptrmask [finBlockSize / goarch.PtrSize / 8]byte 51 ) 52 53 var allfin *finBlock // list of all blocks 54 55 // NOTE: Layout known to queuefinalizer. 56 type finalizer struct { 57 fn *funcval // function to call (may be a heap pointer) 58 arg unsafe.Pointer // ptr to object (may be a heap pointer) 59 nret uintptr // bytes of return values from fn 60 fint *_type // type of first argument of fn 61 ot *ptrtype // type of ptr to object (may be a heap pointer) 62 } 63 64 var finalizer1 = [...]byte{ 65 // Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here) 66 // Each byte describes 8 words. 67 // Need 8 Finalizers described by 5 bytes before pattern repeats: 68 // ptr ptr INT ptr ptr 69 // ptr ptr INT ptr ptr 70 // ptr ptr INT ptr ptr 71 // ptr ptr INT ptr ptr 72 // ptr ptr INT ptr ptr 73 // ptr ptr INT ptr ptr 74 // ptr ptr INT ptr ptr 75 // ptr ptr INT ptr ptr 76 // aka 77 // 78 // ptr ptr INT ptr ptr ptr ptr INT 79 // ptr ptr ptr ptr INT ptr ptr ptr 80 // ptr INT ptr ptr ptr ptr INT ptr 81 // ptr ptr ptr INT ptr ptr ptr ptr 82 // INT ptr ptr ptr ptr INT ptr ptr 83 // 84 // Assumptions about Finalizer layout checked below. 85 1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7, 86 1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7, 87 1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7, 88 1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7, 89 0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7, 90 } 91 92 // lockRankMayQueueFinalizer records the lock ranking effects of a 93 // function that may call queuefinalizer. 94 func lockRankMayQueueFinalizer() { 95 lockWithRankMayAcquire(&finlock, getLockRank(&finlock)) 96 } 97 98 func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) { 99 if gcphase != _GCoff { 100 // Currently we assume that the finalizer queue won't 101 // grow during marking so we don't have to rescan it 102 // during mark termination. If we ever need to lift 103 // this assumption, we can do it by adding the 104 // necessary barriers to queuefinalizer (which it may 105 // have automatically). 106 throw("queuefinalizer during GC") 107 } 108 109 lock(&finlock) 110 if finq == nil || finq.cnt == uint32(len(finq.fin)) { 111 if finc == nil { 112 finc = (*finBlock)(persistentalloc(finBlockSize, 0, &memstats.gcMiscSys)) 113 finc.alllink = allfin 114 allfin = finc 115 if finptrmask[0] == 0 { 116 // Build pointer mask for Finalizer array in block. 117 // Check assumptions made in finalizer1 array above. 118 if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize || 119 unsafe.Offsetof(finalizer{}.fn) != 0 || 120 unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize || 121 unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize || 122 unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize || 123 unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) { 124 throw("finalizer out of sync") 125 } 126 for i := range finptrmask { 127 finptrmask[i] = finalizer1[i%len(finalizer1)] 128 } 129 } 130 } 131 block := finc 132 finc = block.next 133 block.next = finq 134 finq = block 135 } 136 f := &finq.fin[finq.cnt] 137 atomic.Xadd(&finq.cnt, +1) // Sync with markroots 138 f.fn = fn 139 f.nret = nret 140 f.fint = fint 141 f.ot = ot 142 f.arg = p 143 unlock(&finlock) 144 fingStatus.Or(fingWake) 145 } 146 147 //go:nowritebarrier 148 func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) { 149 for fb := allfin; fb != nil; fb = fb.alllink { 150 for i := uint32(0); i < fb.cnt; i++ { 151 f := &fb.fin[i] 152 callback(f.fn, f.arg, f.nret, f.fint, f.ot) 153 } 154 } 155 } 156 157 func wakefing() *g { 158 if ok := fingStatus.CompareAndSwap(fingCreated|fingWait|fingWake, fingCreated); ok { 159 return fing 160 } 161 return nil 162 } 163 164 func createfing() { 165 // start the finalizer goroutine exactly once 166 if fingStatus.Load() == fingUninitialized && fingStatus.CompareAndSwap(fingUninitialized, fingCreated) { 167 go runFinalizersAndCleanups() 168 } 169 } 170 171 func finalizercommit(gp *g, lock unsafe.Pointer) bool { 172 unlock((*mutex)(lock)) 173 // fingStatus should be modified after fing is put into a waiting state 174 // to avoid waking fing in running state, even if it is about to be parked. 175 fingStatus.Or(fingWait) 176 return true 177 } 178 179 // This is the goroutine that runs all of the finalizers and cleanups. 180 func runFinalizersAndCleanups() { 181 var ( 182 frame unsafe.Pointer 183 framecap uintptr 184 argRegs int 185 ) 186 187 gp := getg() 188 lock(&finlock) 189 fing = gp 190 unlock(&finlock) 191 192 for { 193 lock(&finlock) 194 fb := finq 195 finq = nil 196 if fb == nil { 197 gopark(finalizercommit, unsafe.Pointer(&finlock), waitReasonFinalizerWait, traceBlockSystemGoroutine, 1) 198 continue 199 } 200 argRegs = intArgRegs 201 unlock(&finlock) 202 if raceenabled { 203 racefingo() 204 } 205 for fb != nil { 206 for i := fb.cnt; i > 0; i-- { 207 f := &fb.fin[i-1] 208 209 // arg will only be nil when a cleanup has been queued. 210 if f.arg == nil { 211 var cleanup func() 212 fn := unsafe.Pointer(f.fn) 213 cleanup = *(*func())(unsafe.Pointer(&fn)) 214 fingStatus.Or(fingRunningFinalizer) 215 cleanup() 216 fingStatus.And(^fingRunningFinalizer) 217 218 f.fn = nil 219 f.arg = nil 220 f.ot = nil 221 atomic.Store(&fb.cnt, i-1) 222 continue 223 } 224 225 var regs abi.RegArgs 226 // The args may be passed in registers or on stack. Even for 227 // the register case, we still need the spill slots. 228 // TODO: revisit if we remove spill slots. 229 // 230 // Unfortunately because we can have an arbitrary 231 // amount of returns and it would be complex to try and 232 // figure out how many of those can get passed in registers, 233 // just conservatively assume none of them do. 234 framesz := unsafe.Sizeof((any)(nil)) + f.nret 235 if framecap < framesz { 236 // The frame does not contain pointers interesting for GC, 237 // all not yet finalized objects are stored in finq. 238 // If we do not mark it as FlagNoScan, 239 // the last finalized object is not collected. 240 frame = mallocgc(framesz, nil, true) 241 framecap = framesz 242 } 243 // cleanups also have a nil fint. Cleanups should have been processed before 244 // reaching this point. 245 if f.fint == nil { 246 throw("missing type in finalizer") 247 } 248 r := frame 249 if argRegs > 0 { 250 r = unsafe.Pointer(®s.Ints) 251 } else { 252 // frame is effectively uninitialized 253 // memory. That means we have to clear 254 // it before writing to it to avoid 255 // confusing the write barrier. 256 *(*[2]uintptr)(frame) = [2]uintptr{} 257 } 258 switch f.fint.Kind_ & abi.KindMask { 259 case abi.Pointer: 260 // direct use of pointer 261 *(*unsafe.Pointer)(r) = f.arg 262 case abi.Interface: 263 ityp := (*interfacetype)(unsafe.Pointer(f.fint)) 264 // set up with empty interface 265 (*eface)(r)._type = &f.ot.Type 266 (*eface)(r).data = f.arg 267 if len(ityp.Methods) != 0 { 268 // convert to interface with methods 269 // this conversion is guaranteed to succeed - we checked in SetFinalizer 270 (*iface)(r).tab = assertE2I(ityp, (*eface)(r)._type) 271 } 272 default: 273 throw("bad type kind in finalizer") 274 } 275 fingStatus.Or(fingRunningFinalizer) 276 reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), ®s) 277 fingStatus.And(^fingRunningFinalizer) 278 279 // Drop finalizer queue heap references 280 // before hiding them from markroot. 281 // This also ensures these will be 282 // clear if we reuse the finalizer. 283 f.fn = nil 284 f.arg = nil 285 f.ot = nil 286 atomic.Store(&fb.cnt, i-1) 287 } 288 next := fb.next 289 lock(&finlock) 290 fb.next = finc 291 finc = fb 292 unlock(&finlock) 293 fb = next 294 } 295 } 296 } 297 298 func isGoPointerWithoutSpan(p unsafe.Pointer) bool { 299 // 0-length objects are okay. 300 if p == unsafe.Pointer(&zerobase) { 301 return true 302 } 303 304 // Global initializers might be linker-allocated. 305 // var Foo = &Object{} 306 // func main() { 307 // runtime.SetFinalizer(Foo, nil) 308 // } 309 // The relevant segments are: noptrdata, data, bss, noptrbss. 310 // We cannot assume they are in any order or even contiguous, 311 // due to external linking. 312 for datap := &firstmoduledata; datap != nil; datap = datap.next { 313 if datap.noptrdata <= uintptr(p) && uintptr(p) < datap.enoptrdata || 314 datap.data <= uintptr(p) && uintptr(p) < datap.edata || 315 datap.bss <= uintptr(p) && uintptr(p) < datap.ebss || 316 datap.noptrbss <= uintptr(p) && uintptr(p) < datap.enoptrbss { 317 return true 318 } 319 } 320 return false 321 } 322 323 // blockUntilEmptyFinalizerQueue blocks until either the finalizer 324 // queue is emptied (and the finalizers have executed) or the timeout 325 // is reached. Returns true if the finalizer queue was emptied. 326 // This is used by the runtime and sync tests. 327 func blockUntilEmptyFinalizerQueue(timeout int64) bool { 328 start := nanotime() 329 for nanotime()-start < timeout { 330 lock(&finlock) 331 // We know the queue has been drained when both finq is nil 332 // and the finalizer g has stopped executing. 333 empty := finq == nil 334 empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait 335 unlock(&finlock) 336 if empty { 337 return true 338 } 339 Gosched() 340 } 341 return false 342 } 343 344 // SetFinalizer sets the finalizer associated with obj to the provided 345 // finalizer function. When the garbage collector finds an unreachable block 346 // with an associated finalizer, it clears the association and runs 347 // finalizer(obj) in a separate goroutine. This makes obj reachable again, 348 // but now without an associated finalizer. Assuming that SetFinalizer 349 // is not called again, the next time the garbage collector sees 350 // that obj is unreachable, it will free obj. 351 // 352 // SetFinalizer(obj, nil) clears any finalizer associated with obj. 353 // 354 // New Go code should consider using [AddCleanup] instead, which is much 355 // less error-prone than SetFinalizer. 356 // 357 // The argument obj must be a pointer to an object allocated by calling 358 // new, by taking the address of a composite literal, or by taking the 359 // address of a local variable. 360 // The argument finalizer must be a function that takes a single argument 361 // to which obj's type can be assigned, and can have arbitrary ignored return 362 // values. If either of these is not true, SetFinalizer may abort the 363 // program. 364 // 365 // Finalizers are run in dependency order: if A points at B, both have 366 // finalizers, and they are otherwise unreachable, only the finalizer 367 // for A runs; once A is freed, the finalizer for B can run. 368 // If a cyclic structure includes a block with a finalizer, that 369 // cycle is not guaranteed to be garbage collected and the finalizer 370 // is not guaranteed to run, because there is no ordering that 371 // respects the dependencies. 372 // 373 // The finalizer is scheduled to run at some arbitrary time after the 374 // program can no longer reach the object to which obj points. 375 // There is no guarantee that finalizers will run before a program exits, 376 // so typically they are useful only for releasing non-memory resources 377 // associated with an object during a long-running program. 378 // For example, an [os.File] object could use a finalizer to close the 379 // associated operating system file descriptor when a program discards 380 // an os.File without calling Close, but it would be a mistake 381 // to depend on a finalizer to flush an in-memory I/O buffer such as a 382 // [bufio.Writer], because the buffer would not be flushed at program exit. 383 // 384 // It is not guaranteed that a finalizer will run if the size of *obj is 385 // zero bytes, because it may share same address with other zero-size 386 // objects in memory. See https://go.dev/ref/spec#Size_and_alignment_guarantees. 387 // 388 // It is not guaranteed that a finalizer will run for objects allocated 389 // in initializers for package-level variables. Such objects may be 390 // linker-allocated, not heap-allocated. 391 // 392 // Note that because finalizers may execute arbitrarily far into the future 393 // after an object is no longer referenced, the runtime is allowed to perform 394 // a space-saving optimization that batches objects together in a single 395 // allocation slot. The finalizer for an unreferenced object in such an 396 // allocation may never run if it always exists in the same batch as a 397 // referenced object. Typically, this batching only happens for tiny 398 // (on the order of 16 bytes or less) and pointer-free objects. 399 // 400 // A finalizer may run as soon as an object becomes unreachable. 401 // In order to use finalizers correctly, the program must ensure that 402 // the object is reachable until it is no longer required. 403 // Objects stored in global variables, or that can be found by tracing 404 // pointers from a global variable, are reachable. A function argument or 405 // receiver may become unreachable at the last point where the function 406 // mentions it. To make an unreachable object reachable, pass the object 407 // to a call of the [KeepAlive] function to mark the last point in the 408 // function where the object must be reachable. 409 // 410 // For example, if p points to a struct, such as os.File, that contains 411 // a file descriptor d, and p has a finalizer that closes that file 412 // descriptor, and if the last use of p in a function is a call to 413 // syscall.Write(p.d, buf, size), then p may be unreachable as soon as 414 // the program enters [syscall.Write]. The finalizer may run at that moment, 415 // closing p.d, causing syscall.Write to fail because it is writing to 416 // a closed file descriptor (or, worse, to an entirely different 417 // file descriptor opened by a different goroutine). To avoid this problem, 418 // call KeepAlive(p) after the call to syscall.Write. 419 // 420 // A single goroutine runs all finalizers for a program, sequentially. 421 // If a finalizer must run for a long time, it should do so by starting 422 // a new goroutine. 423 // 424 // In the terminology of the Go memory model, a call 425 // SetFinalizer(x, f) “synchronizes before” the finalization call f(x). 426 // However, there is no guarantee that KeepAlive(x) or any other use of x 427 // “synchronizes before” f(x), so in general a finalizer should use a mutex 428 // or other synchronization mechanism if it needs to access mutable state in x. 429 // For example, consider a finalizer that inspects a mutable field in x 430 // that is modified from time to time in the main program before x 431 // becomes unreachable and the finalizer is invoked. 432 // The modifications in the main program and the inspection in the finalizer 433 // need to use appropriate synchronization, such as mutexes or atomic updates, 434 // to avoid read-write races. 435 func SetFinalizer(obj any, finalizer any) { 436 e := efaceOf(&obj) 437 etyp := e._type 438 if etyp == nil { 439 throw("runtime.SetFinalizer: first argument is nil") 440 } 441 if etyp.Kind_&abi.KindMask != abi.Pointer { 442 throw("runtime.SetFinalizer: first argument is " + toRType(etyp).string() + ", not pointer") 443 } 444 ot := (*ptrtype)(unsafe.Pointer(etyp)) 445 if ot.Elem == nil { 446 throw("nil elem type!") 447 } 448 if inUserArenaChunk(uintptr(e.data)) { 449 // Arena-allocated objects are not eligible for finalizers. 450 throw("runtime.SetFinalizer: first argument was allocated into an arena") 451 } 452 if debug.sbrk != 0 { 453 // debug.sbrk never frees memory, so no finalizers run 454 // (and we don't have the data structures to record them). 455 return 456 } 457 458 // find the containing object 459 base, span, _ := findObject(uintptr(e.data), 0, 0) 460 461 if base == 0 { 462 if isGoPointerWithoutSpan(e.data) { 463 return 464 } 465 throw("runtime.SetFinalizer: pointer not in allocated block") 466 } 467 468 // Move base forward if we've got an allocation header. 469 if !span.spanclass.noscan() && !heapBitsInSpan(span.elemsize) && span.spanclass.sizeclass() != 0 { 470 base += mallocHeaderSize 471 } 472 473 if uintptr(e.data) != base { 474 // As an implementation detail we allow to set finalizers for an inner byte 475 // of an object if it could come from tiny alloc (see mallocgc for details). 476 if ot.Elem == nil || ot.Elem.Pointers() || ot.Elem.Size_ >= maxTinySize { 477 throw("runtime.SetFinalizer: pointer not at beginning of allocated block") 478 } 479 } 480 481 f := efaceOf(&finalizer) 482 ftyp := f._type 483 if ftyp == nil { 484 // switch to system stack and remove finalizer 485 systemstack(func() { 486 removefinalizer(e.data) 487 }) 488 return 489 } 490 491 if ftyp.Kind_&abi.KindMask != abi.Func { 492 throw("runtime.SetFinalizer: second argument is " + toRType(ftyp).string() + ", not a function") 493 } 494 ft := (*functype)(unsafe.Pointer(ftyp)) 495 if ft.IsVariadic() { 496 throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string() + " because dotdotdot") 497 } 498 if ft.InCount != 1 { 499 throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string()) 500 } 501 fint := ft.InSlice()[0] 502 switch { 503 case fint == etyp: 504 // ok - same type 505 goto okarg 506 case fint.Kind_&abi.KindMask == abi.Pointer: 507 if (fint.Uncommon() == nil || etyp.Uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).Elem == ot.Elem { 508 // ok - not same type, but both pointers, 509 // one or the other is unnamed, and same element type, so assignable. 510 goto okarg 511 } 512 case fint.Kind_&abi.KindMask == abi.Interface: 513 ityp := (*interfacetype)(unsafe.Pointer(fint)) 514 if len(ityp.Methods) == 0 { 515 // ok - satisfies empty interface 516 goto okarg 517 } 518 if itab := assertE2I2(ityp, efaceOf(&obj)._type); itab != nil { 519 goto okarg 520 } 521 } 522 throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string()) 523 okarg: 524 // compute size needed for return parameters 525 nret := uintptr(0) 526 for _, t := range ft.OutSlice() { 527 nret = alignUp(nret, uintptr(t.Align_)) + t.Size_ 528 } 529 nret = alignUp(nret, goarch.PtrSize) 530 531 // make sure we have a finalizer goroutine 532 createfing() 533 534 systemstack(func() { 535 if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) { 536 throw("runtime.SetFinalizer: finalizer already set") 537 } 538 }) 539 } 540 541 // Mark KeepAlive as noinline so that it is easily detectable as an intrinsic. 542 // 543 //go:noinline 544 545 // KeepAlive marks its argument as currently reachable. 546 // This ensures that the object is not freed, and its finalizer is not run, 547 // before the point in the program where KeepAlive is called. 548 // 549 // A very simplified example showing where KeepAlive is required: 550 // 551 // type File struct { d int } 552 // d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0) 553 // // ... do something if err != nil ... 554 // p := &File{d} 555 // runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) }) 556 // var buf [10]byte 557 // n, err := syscall.Read(p.d, buf[:]) 558 // // Ensure p is not finalized until Read returns. 559 // runtime.KeepAlive(p) 560 // // No more uses of p after this point. 561 // 562 // Without the KeepAlive call, the finalizer could run at the start of 563 // [syscall.Read], closing the file descriptor before syscall.Read makes 564 // the actual system call. 565 // 566 // Note: KeepAlive should only be used to prevent finalizers from 567 // running prematurely. In particular, when used with [unsafe.Pointer], 568 // the rules for valid uses of unsafe.Pointer still apply. 569 func KeepAlive(x any) { 570 // Introduce a use of x that the compiler can't eliminate. 571 // This makes sure x is alive on entry. We need x to be alive 572 // on entry for "defer runtime.KeepAlive(x)"; see issue 21402. 573 if cgoAlwaysFalse { 574 println(x) 575 } 576 } 577