Source file src/runtime/mgc.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector (GC).
     6  //
     7  // The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
     8  // GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
     9  // non-generational and non-compacting. Allocation is done using size segregated per P allocation
    10  // areas to minimize fragmentation while eliminating locks in the common case.
    11  //
    12  // The algorithm decomposes into several steps.
    13  // This is a high level description of the algorithm being used. For an overview of GC a good
    14  // place to start is Richard Jones' gchandbook.org.
    15  //
    16  // The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
    17  // Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
    18  // On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
    19  // 966-975.
    20  // For journal quality proofs that these steps are complete, correct, and terminate see
    21  // Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
    22  // Concurrency and Computation: Practice and Experience 15(3-5), 2003.
    23  //
    24  // 1. GC performs sweep termination.
    25  //
    26  //    a. Stop the world. This causes all Ps to reach a GC safe-point.
    27  //
    28  //    b. Sweep any unswept spans. There will only be unswept spans if
    29  //    this GC cycle was forced before the expected time.
    30  //
    31  // 2. GC performs the mark phase.
    32  //
    33  //    a. Prepare for the mark phase by setting gcphase to _GCmark
    34  //    (from _GCoff), enabling the write barrier, enabling mutator
    35  //    assists, and enqueueing root mark jobs. No objects may be
    36  //    scanned until all Ps have enabled the write barrier, which is
    37  //    accomplished using STW.
    38  //
    39  //    b. Start the world. From this point, GC work is done by mark
    40  //    workers started by the scheduler and by assists performed as
    41  //    part of allocation. The write barrier shades both the
    42  //    overwritten pointer and the new pointer value for any pointer
    43  //    writes (see mbarrier.go for details, and the sketch following
    44  //    this overview). Newly allocated objects are immediately marked black.
    45  //
    46  //    c. GC performs root marking jobs. This includes scanning all
    47  //    stacks, shading all globals, and shading any heap pointers in
    48  //    off-heap runtime data structures. Scanning a stack stops a
    49  //    goroutine, shades any pointers found on its stack, and then
    50  //    resumes the goroutine.
    51  //
    52  //    d. GC drains the work queue of grey objects, scanning each grey
    53  //    object to black and shading all pointers found in the object
    54  //    (which in turn may add those pointers to the work queue).
    55  //
    56  //    e. Because GC work is spread across local caches, GC uses a
    57  //    distributed termination algorithm to detect when there are no
    58  //    more root marking jobs or grey objects (see gcMarkDone). At this
    59  //    point, GC transitions to mark termination.
    60  //
    61  // 3. GC performs mark termination.
    62  //
    63  //    a. Stop the world.
    64  //
    65  //    b. Set gcphase to _GCmarktermination, and disable workers and
    66  //    assists.
    67  //
    68  //    c. Perform housekeeping like flushing mcaches.
    69  //
    70  // 4. GC performs the sweep phase.
    71  //
    72  //    a. Prepare for the sweep phase by setting gcphase to _GCoff,
    73  //    setting up sweep state and disabling the write barrier.
    74  //
    75  //    b. Start the world. From this point on, newly allocated objects
    76  //    are white, and allocating sweeps spans before use if necessary.
    77  //
    78  //    c. GC does concurrent sweeping in the background and in response
    79  //    to allocation. See description below.
    80  //
    81  // 5. When sufficient allocation has taken place, replay the sequence
    82  // starting with 1 above. See discussion of GC rate below.
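        //
        // As an illustration of the write barrier mentioned in step 2b, the hybrid
        // barrier described in mbarrier.go is, in pseudocode:
        //
        //	writePointer(slot, ptr):
        //	    shade(*slot)
        //	    if current stack is grey:
        //	        shade(ptr)
        //	    *slot = ptr
        //
        // (In practice the runtime shades ptr unconditionally; this is only a
        // sketch, and mbarrier.go remains the authoritative description.)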
    83  
    84  // Concurrent sweep.
    85  //
    86  // The sweep phase proceeds concurrently with normal program execution.
    87  // The heap is swept span-by-span both lazily (when a goroutine needs another span)
    88  // and concurrently in a background goroutine (this helps programs that are not CPU bound).
    89  // At the end of STW mark termination all spans are marked as "needs sweeping".
    90  //
    91  // The background sweeper goroutine simply sweeps spans one-by-one.
    92  //
    93  // To avoid requesting more OS memory while there are unswept spans, when a
    94  // goroutine needs another span, it first attempts to reclaim that much memory
    95  // by sweeping. When a goroutine needs to allocate a new small-object span, it
    96  // sweeps small-object spans for the same object size until it frees at least
    97  // one object. When a goroutine needs to allocate a large-object span from the heap,
    98  // it sweeps spans until it frees at least that many pages into the heap. There is
    99  // one case where this may not suffice: if a goroutine sweeps and frees two
   100  // nonadjacent one-page spans to the heap, it will allocate a new two-page
   101  // span, but there can still be other one-page unswept spans which could be
   102  // combined into a two-page span.
   103  //
   104  // It's critical to ensure that no operations proceed on unswept spans (that would corrupt
   105  // mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
   106  // so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
   107  // When a goroutine explicitly frees an object or sets a finalizer, it ensures that
   108  // the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
   109  // The finalizer goroutine is kicked off only when all spans are swept.
   110  // When the next GC starts, it sweeps all not-yet-swept spans (if any).
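        //
        // The allocation-time reclaim described above amounts, roughly, to a loop
        // like the following (an illustrative sketch only; see mgcsweep.go and
        // deductSweepCredit for the real proportional-sweep accounting):
        //
        //	for !isSweepDone() && stillNeedMemory {
        //		if sweepone() == ^uintptr(0) {
        //			break // nothing left to sweep
        //		}
        //	}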
   111  
   112  // GC rate.
   113  // Next GC is after we've allocated an extra amount of memory proportional to
   114  // the amount already in use. The proportion is controlled by GOGC environment variable
   115  // (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
   116  // (this mark is computed by the gcController.heapGoal method). This keeps the GC cost in
   117  // linear proportion to the allocation cost. Adjusting GOGC just changes the linear constant
   118  // (and also the amount of extra memory used).
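        //
        // As a rough sketch of that arithmetic (the real goal also folds in terms
        // for stack and global scan work; see gcController.heapGoal in mgcpacer.go):
        //
        //	goal ≈ heapMarked + heapMarked*GOGC/100
        //
        // so with GOGC=100 and 4M of live heap after the last mark, the next cycle
        // is triggered as the heap approaches roughly 8M.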
   119  
   120  // Oblets
   121  //
   122  // In order to prevent long pauses while scanning large objects and to
   123  // improve parallelism, the garbage collector breaks up scan jobs for
   124  // objects larger than maxObletBytes into "oblets" of at most
   125  // maxObletBytes. When scanning encounters the beginning of a large
   126  // object, it scans only the first oblet and enqueues the remaining
   127  // oblets as new scan jobs.
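        //
        // A rough sketch of that split, as performed by scanobject in mgcmark.go
        // (illustrative only, not the exact code):
        //
        //	if size > maxObletBytes {
        //		if this is the start of the object {
        //			enqueue [b+maxObletBytes, b+size) as new oblet jobs
        //		}
        //		size = maxObletBytes // scan only the first oblet now
        //	}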
   128  
   129  package runtime
   130  
   131  import (
   132  	"internal/cpu"
   133  	"internal/goarch"
   134  	"internal/goexperiment"
   135  	"internal/runtime/atomic"
   136  	"internal/runtime/gc"
   137  	"unsafe"
   138  )
   139  
   140  const (
   141  	_DebugGC = 0
   142  
   143  	// concurrentSweep is a debug flag. Disabling this flag
   144  	// ensures all spans are swept while the world is stopped.
   145  	concurrentSweep = true
   146  
   147  	// debugScanConservative enables debug logging for stack
   148  	// frames that are scanned conservatively.
   149  	debugScanConservative = false
   150  
   151  	// sweepMinHeapDistance is a lower bound on the heap distance
   152  	// (in bytes) reserved for concurrent sweeping between GC
   153  	// cycles.
   154  	sweepMinHeapDistance = 1024 * 1024
   155  )
   156  
   157  // heapObjectsCanMove always returns false in the current garbage collector.
   158  // It exists for go4.org/unsafe/assume-no-moving-gc, which is an
   159  // unfortunate idea that had an even more unfortunate implementation.
   160  // Every time a new Go release happened, the package stopped building,
   161  // and the authors had to add a new file with a new //go:build line, and
   162  // then the entire ecosystem of packages with that as a dependency had to
   163  // explicitly update to the new version. Many packages depend on
   164  // assume-no-moving-gc transitively, through paths like
   165  // inet.af/netaddr -> go4.org/intern -> assume-no-moving-gc.
   166  // This was causing a significant amount of friction around each new
   167  // release, so we added this bool for the package to //go:linkname
   168  // instead. The bool is still unfortunate, but it's not as bad as
   169  // breaking the ecosystem on every new release.
   170  //
   171  // If the Go garbage collector ever does move heap objects, we can set
   172  // this to true to break all the programs using assume-no-moving-gc.
   173  //
   174  //go:linkname heapObjectsCanMove
   175  func heapObjectsCanMove() bool {
   176  	return false
   177  }
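
        // A consuming package is expected to pull this in with a linkname stub
        // roughly like the following (an illustrative sketch, not part of this file;
        // the consumer also needs to import "unsafe" and, for a body-less
        // declaration, typically an empty .s file):
        //
        //	//go:linkname heapObjectsCanMove runtime.heapObjectsCanMove
        //	func heapObjectsCanMove() bool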
   178  
   179  func gcinit() {
   180  	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
   181  		throw("size of Workbuf is suboptimal")
   182  	}
   183  	// No sweep on the first cycle.
   184  	sweep.active.state.Store(sweepDrainedMask)
   185  
   186  	// Initialize GC pacer state.
   187  	// Use the environment variable GOGC for the initial gcPercent value.
   188  	// Use the environment variable GOMEMLIMIT for the initial memoryLimit value.
   189  	gcController.init(readGOGC(), readGOMEMLIMIT())
   190  
   191  	// Set up the cleanup block ptr mask.
   192  	for i := range cleanupBlockPtrMask {
   193  		cleanupBlockPtrMask[i] = 0xff
   194  	}
   195  
   196  	work.startSema = 1
   197  	work.markDoneSema = 1
   198  	work.spanSPMCs.list.init(unsafe.Offsetof(spanSPMC{}.allnode))
   199  	lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
   200  	lockInit(&work.assistQueue.lock, lockRankAssistQueue)
   201  	lockInit(&work.strongFromWeak.lock, lockRankStrongFromWeakQueue)
   202  	lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
   203  	lockInit(&work.spanSPMCs.lock, lockRankSpanSPMCs)
   204  	lockInit(&gcCleanups.lock, lockRankCleanupQueue)
   205  }
   206  
   207  // gcenable is called after the bulk of the runtime initialization,
   208  // just before we're about to start letting user code run.
   209  // It kicks off the background sweeper goroutine, the background
   210  // scavenger goroutine, and enables GC.
   211  func gcenable() {
   212  	// Kick off sweeping and scavenging.
   213  	c := make(chan int, 2)
   214  	go bgsweep(c)
   215  	go bgscavenge(c)
   216  	<-c
   217  	<-c
   218  	memstats.enablegc = true // now that runtime is initialized, GC is okay
   219  }
   220  
   221  // Garbage collector phase.
   222  // Indicates to the write barrier and synchronization code which task to perform.
   223  var gcphase uint32
   224  
   225  // The compiler knows about this variable.
   226  // If you change it, you must change builtin/runtime.go, too.
   227  // If you change the first four bytes, you must also change the write
   228  // barrier insertion code.
   229  //
   230  // writeBarrier should be an internal detail,
   231  // but widely used packages access it using linkname.
   232  // Notable members of the hall of shame include:
   233  //   - github.com/bytedance/sonic
   234  //
   235  // Do not remove or change the type signature.
   236  // See go.dev/issue/67401.
   237  //
   238  //go:linkname writeBarrier
   239  var writeBarrier struct {
   240  	enabled bool    // compiler emits a check of this before calling write barrier
   241  	pad     [3]byte // compiler uses 32-bit load for "enabled" field
   242  	alignme uint64  // guarantee alignment so that compiler can use a 32 or 64-bit load
   243  }
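
        // Conceptually, the guard the compiler emits around a pointer write is
        // roughly (a sketch only; the real sequence calls gcWriteBarrier through a
        // special register-based ABI and batches pointers in a per-P buffer, see
        // mbarrier.go and mwbbuf.go):
        //
        //	if writeBarrier.enabled {
        //		// record *slot and ptr in the write barrier buffer
        //	}
        //	*slot = ptr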
   244  
   245  // gcBlackenEnabled is 1 if mutator assists and background mark
   246  // workers are allowed to blacken objects. This must only be set when
   247  // gcphase == _GCmark.
   248  var gcBlackenEnabled uint32
   249  
   250  const (
   251  	_GCoff             = iota // GC not running; sweeping in background, write barrier disabled
   252  	_GCmark                   // GC marking roots and workbufs: allocate black, write barrier ENABLED
   253  	_GCmarktermination        // GC mark termination: allocate black, P's help GC, write barrier ENABLED
   254  )
   255  
   256  //go:nosplit
   257  func setGCPhase(x uint32) {
   258  	atomic.Store(&gcphase, x)
   259  	writeBarrier.enabled = gcphase == _GCmark || gcphase == _GCmarktermination
   260  }
   261  
   262  // gcMarkWorkerMode represents the mode that a concurrent mark worker
   263  // should operate in.
   264  //
   265  // Concurrent marking happens through four different mechanisms. One
   266  // is mutator assists, which happen in response to allocations and are
   267  // not scheduled. The other three are variations in the per-P mark
   268  // workers and are distinguished by gcMarkWorkerMode.
   269  type gcMarkWorkerMode int
   270  
   271  const (
   272  	// gcMarkWorkerNotWorker indicates that the next scheduled G is not
   273  	// starting work and the mode should be ignored.
   274  	gcMarkWorkerNotWorker gcMarkWorkerMode = iota
   275  
   276  	// gcMarkWorkerDedicatedMode indicates that the P of a mark
   277  	// worker is dedicated to running that mark worker. The mark
   278  	// worker should run without preemption.
   279  	gcMarkWorkerDedicatedMode
   280  
   281  	// gcMarkWorkerFractionalMode indicates that a P is currently
   282  	// running the "fractional" mark worker. The fractional worker
   283  	// is necessary when GOMAXPROCS*gcBackgroundUtilization is not
   284  	// an integer and using only dedicated workers would result in
   285  	// utilization too far from the target of gcBackgroundUtilization.
   286  	// The fractional worker should run until it is preempted and
   287  	// will be scheduled to pick up the fractional part of
   288  	// GOMAXPROCS*gcBackgroundUtilization.
   289  	gcMarkWorkerFractionalMode
   290  
   291  	// gcMarkWorkerIdleMode indicates that a P is running the mark
   292  	// worker because it has nothing else to do. The idle worker
   293  	// should run until it is preempted and account its time
   294  	// against gcController.idleMarkTime.
   295  	gcMarkWorkerIdleMode
   296  )
   297  
   298  // gcMarkWorkerModeStrings are the string labels of gcMarkWorkerModes
   299  // to use in execution traces.
   300  var gcMarkWorkerModeStrings = [...]string{
   301  	"Not worker",
   302  	"GC (dedicated)",
   303  	"GC (fractional)",
   304  	"GC (idle)",
   305  }
   306  
   307  // pollFractionalWorkerExit reports whether a fractional mark worker
   308  // should self-preempt. It assumes it is called from the fractional
   309  // worker.
   310  func pollFractionalWorkerExit() bool {
   311  	// This should be kept in sync with the fractional worker
   312  	// scheduler logic in findRunnableGCWorker.
   313  	now := nanotime()
   314  	delta := now - gcController.markStartTime
   315  	if delta <= 0 {
   316  		return true
   317  	}
   318  	p := getg().m.p.ptr()
   319  	selfTime := p.gcFractionalMarkTime.Load() + (now - p.gcMarkWorkerStartTime)
   320  	// Add some slack to the utilization goal so that the
   321  	// fractional worker isn't behind again the instant it exits.
   322  	return float64(selfTime)/float64(delta) > 1.2*gcController.fractionalUtilizationGoal
   323  }
   324  
   325  var work workType
   326  
   327  type workType struct {
   328  	full  lfstack          // lock-free list of full blocks workbuf
   329  	_     cpu.CacheLinePad // prevents false-sharing between full and empty
   330  	empty lfstack          // lock-free list of empty blocks workbuf
   331  	_     cpu.CacheLinePad // prevents false-sharing between empty and wbufSpans
   332  
   333  	wbufSpans struct {
   334  		lock mutex
   335  		// free is a list of spans dedicated to workbufs, but
   336  		// that don't currently contain any workbufs.
   337  		free mSpanList
   338  		// busy is a list of all spans containing workbufs on
   339  		// one of the workbuf lists.
   340  		busy mSpanList
   341  	}
   342  	_ cpu.CacheLinePad // prevents false-sharing between wbufSpans and spanWorkMask
   343  
   344  	// spanqMask is a bitmap indicating which Ps have local work worth stealing.
   345  	// Set or cleared by the owning P, cleared by stealing Ps.
   346  	//
   347  	// spanqMask is like a proxy for a global queue. An important invariant is that
   348  	// forced flushing like gcw.dispose must set this bit on any P that has local
   349  	// span work.
   350  	spanqMask pMask
   351  	_         cpu.CacheLinePad // prevents false-sharing between spanqMask and everything else
   352  
   353  	// List of all spanSPMCs.
   354  	//
   355  	// Only used if goexperiment.GreenTeaGC.
   356  	spanSPMCs struct {
   357  		lock mutex
   358  		list listHeadManual // *spanSPMC
   359  	}
   360  
   361  	// Restore 64-bit alignment on 32-bit.
   362  	// _ uint32
   363  
   364  	// bytesMarked is the number of bytes marked this cycle. This
   365  	// includes bytes blackened in scanned objects, noscan objects
   366  	// that go straight to black, objects allocated as black during
   367  	// the cycle, and permagrey objects scanned by markroot during
   368  	// the concurrent scan phase.
   369  	//
   370  	// This is updated atomically during the cycle. Updates may be batched
   371  	// arbitrarily, since the value is only read at the end of the cycle.
   372  	//
   373  	// Because of benign races during marking, this number may not
   374  	// be the exact number of marked bytes, but it should be very
   375  	// close.
   376  	//
   377  	// Put this field here because it needs 64-bit atomic access
   378  	// (and thus 8-byte alignment even on 32-bit architectures).
   379  	bytesMarked uint64
   380  
   381  	markrootNext atomic.Uint32 // next markroot job
   382  	markrootJobs atomic.Uint32 // number of markroot jobs
   383  
   384  	nproc  uint32
   385  	tstart int64
   386  	nwait  uint32
   387  
   388  	// Number of roots of various root types. Set by gcPrepareMarkRoots.
   389  	//
   390  	// During a normal GC cycle, nStackRoots == nMaybeRunnableStackRoots;
   391  	// during goroutine leak detection, nMaybeRunnableStackRoots is the number
   392  	// of stackRoots currently scheduled for marking.
   393  	// In both cases, nStackRoots == len(stackRoots).
   394  	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nMaybeRunnableStackRoots int
   395  
   396  	// The following fields track the state of goroutine leak detection
   397  	// during the current GC cycle.
   398  	goroutineLeak struct {
   399  		// Once set, it indicates that the GC will perform goroutine leak detection during
   400  		// the next GC cycle; it is set by goroutineLeakGC and unset during gcStart.
   401  		pending atomic.Bool
   402  		// Once set, it indicates that the GC has started a goroutine leak detection run;
   403  		// it is set during gcStart and unset during gcMarkTermination.
   404  		//
   405  		// Protected by STW.
   406  		enabled bool
   407  		// Once set, it indicates that the GC has performed goroutine leak detection during
   408  		// the current GC cycle; it is set during gcMarkDone, right after goroutine leak detection,
   409  		// and unset during gcMarkTermination.
   410  		//
   411  		// Protected by STW.
   412  		done bool
   413  		// The number of leaked goroutines during the last leak detection GC cycle.
   414  		//
   415  		// Write-protected by STW in findGoroutineLeaks.
   416  		count int
   417  	}
   418  
   419  	// Base indexes of each root type. Set by gcPrepareMarkRoots.
   420  	baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32
   421  
   422  	// stackRoots is a snapshot of all of the Gs that existed before the
   423  	// beginning of concurrent marking. During goroutine leak detection, stackRoots
   424  	// is partitioned into two sets: to the left of nMaybeRunnableStackRoots are the
   425  	// stackRoots of running/runnable goroutines, and to the right are the stackRoots
   426  	// of unmarked/not-runnable goroutines.
   427  	// The stackRoots array is re-partitioned after each marking phase iteration.
   428  	stackRoots []*g
   429  
   430  	// Each type of GC state transition is protected by a lock.
   431  	// Since multiple threads can simultaneously detect the state
   432  	// transition condition, any thread that detects a transition
   433  	// condition must acquire the appropriate transition lock,
   434  	// re-check the transition condition and return if it no
   435  	// longer holds or perform the transition if it does.
   436  	// Likewise, any transition must invalidate the transition
   437  	// condition before releasing the lock. This ensures that each
   438  	// transition is performed by exactly one thread and threads
   439  	// that need the transition to happen block until it has
   440  	// happened.
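        	//
        	// In code, that pattern is roughly (a sketch with placeholder names;
        	// compare gcStart's use of startSema below):
        	//
        	//	semacquire(&transitionSema)
        	//	if !transitionCondition() {
        	//		// Lost the race; the transition happened or will happen elsewhere.
        	//		semrelease(&transitionSema)
        	//		return
        	//	}
        	//	// ... perform the transition, invalidating the condition ...
        	//	semrelease(&transitionSema)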
   441  	//
   442  	// startSema protects the transition from "off" to mark or
   443  	// mark termination.
   444  	startSema uint32
   445  	// markDoneSema protects transitions from mark to mark termination.
   446  	markDoneSema uint32
   447  
   448  	// Background mark completion signaling.
   449  	bgMarkDone uint32 // cas to 1 when at a background mark completion point
   450  
   451  	// mode is the concurrency mode of the current GC cycle.
   452  	mode gcMode
   453  
   454  	// userForced indicates the current GC cycle was forced by an
   455  	// explicit user call.
   456  	userForced bool
   457  
   458  	// initialHeapLive is the value of gcController.heapLive at the
   459  	// beginning of this GC cycle.
   460  	initialHeapLive uint64
   461  
   462  	// assistQueue is a queue of assists that are blocked because
   463  	// there was neither enough credit to steal nor enough work to
   464  	// do.
   465  	assistQueue struct {
   466  		lock mutex
   467  		q    gQueue
   468  	}
   469  
   470  	// sweepWaiters is a list of blocked goroutines to wake when
   471  	// we transition from mark termination to sweep.
   472  	sweepWaiters struct {
   473  		lock mutex
   474  		list gList
   475  	}
   476  
   477  	// strongFromWeak controls how the GC interacts with weak->strong
   478  	// pointer conversions.
   479  	strongFromWeak struct {
   480  		// block is a flag set during mark termination that prevents
   481  		// new weak->strong conversions from executing by blocking the
   482  		// goroutine and enqueuing it onto q.
   483  		//
   484  		// Mutated only by one goroutine at a time in gcMarkDone,
   485  		// with globally-synchronizing events like forEachP and
   486  		// stopTheWorld.
   487  		block bool
   488  
   489  		// q is a queue of goroutines that attempted to perform a
   490  		// weak->strong conversion during mark termination.
   491  		//
   492  		// Protected by lock.
   493  		lock mutex
   494  		q    gQueue
   495  	}
   496  
   497  	// cycles is the number of completed GC cycles, where a GC
   498  	// cycle is sweep termination, mark, mark termination, and
   499  	// sweep. This differs from memstats.numgc, which is
   500  	// incremented at mark termination.
   501  	cycles atomic.Uint32
   502  
   503  	// Timing/utilization stats for this cycle.
   504  	stwprocs, maxprocs                 int32
   505  	tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start
   506  
   507  	// pauseNS is the total STW time this cycle, measured as the time between
   508  	// when stopping began (just before trying to stop Ps) and just after the
   509  	// world started again.
   510  	pauseNS int64
   511  
   512  	// debug.gctrace heap sizes for this cycle.
   513  	heap0, heap1, heap2 uint64
   514  
   515  	// Cumulative estimated CPU usage.
   516  	cpuStats
   517  }
   518  
   519  // GC runs a garbage collection and blocks the caller until the
   520  // garbage collection is complete. It may also block the entire
   521  // program.
   522  func GC() {
   523  	// We consider a cycle to be: sweep termination, mark, mark
   524  	// termination, and sweep. This function shouldn't return
   525  	// until a full cycle has been completed, from beginning to
   526  	// end. Hence, we always want to finish up the current cycle
   527  	// and start a new one. That means:
   528  	//
   529  	// 1. In sweep termination, mark, or mark termination of cycle
   530  	// N, wait until mark termination N completes and transitions
   531  	// to sweep N.
   532  	//
   533  	// 2. In sweep N, help with sweep N.
   534  	//
   535  	// At this point we can begin a full cycle N+1.
   536  	//
   537  	// 3. Trigger cycle N+1 by starting sweep termination N+1.
   538  	//
   539  	// 4. Wait for mark termination N+1 to complete.
   540  	//
   541  	// 5. Help with sweep N+1 until it's done.
   542  	//
   543  	// This all has to be written to deal with the fact that the
   544  	// GC may move ahead on its own. For example, when we block
   545  	// until mark termination N, we may wake up in cycle N+2.
   546  
   547  	// Wait until the current sweep termination, mark, and mark
   548  	// termination complete.
   549  	n := work.cycles.Load()
   550  	gcWaitOnMark(n)
   551  
   552  	// We're now in sweep N or later. Trigger GC cycle N+1, which
   553  	// will first finish sweep N if necessary and then enter sweep
   554  	// termination N+1.
   555  	gcStart(gcTrigger{kind: gcTriggerCycle, n: n + 1})
   556  
   557  	// Wait for mark termination N+1 to complete.
   558  	gcWaitOnMark(n + 1)
   559  
   560  	// Finish sweep N+1 before returning. We do this both to
   561  	// complete the cycle and because runtime.GC() is often used
   562  	// as part of tests and benchmarks to get the system into a
   563  	// relatively stable and isolated state.
   564  	for work.cycles.Load() == n+1 && sweepone() != ^uintptr(0) {
   565  		Gosched()
   566  	}
   567  
   568  	// Callers may assume that the heap profile reflects the
   569  	// just-completed cycle when this returns (historically this
   570  	// happened because this was a STW GC), but right now the
   571  	// profile still reflects mark termination N, not N+1.
   572  	//
   573  	// As soon as all of the sweep frees from cycle N+1 are done,
   574  	// we can go ahead and publish the heap profile.
   575  	//
   576  	// First, wait for sweeping to finish. (We know there are no
   577  	// more spans on the sweep queue, but we may be concurrently
   578  	// sweeping spans, so we have to wait.)
   579  	for work.cycles.Load() == n+1 && !isSweepDone() {
   580  		Gosched()
   581  	}
   582  
   583  	// Now we're really done with sweeping, so we can publish the
   584  	// stable heap profile. Only do this if we haven't already hit
   585  	// another mark termination.
   586  	mp := acquirem()
   587  	cycle := work.cycles.Load()
   588  	if cycle == n+1 || (gcphase == _GCmark && cycle == n+2) {
   589  		mProf_PostSweep()
   590  	}
   591  	releasem(mp)
   592  }
   593  
   594  // goroutineLeakGC runs a GC cycle that performs goroutine leak detection.
   595  //
   596  //go:linkname goroutineLeakGC runtime/pprof.runtime_goroutineLeakGC
   597  func goroutineLeakGC() {
   598  	// Set the pending flag to true, instructing the next GC cycle to
   599  	// perform goroutine leak detection.
   600  	work.goroutineLeak.pending.Store(true)
   601  
   602  	// Spin GC cycles until the pending flag is unset.
   603  	// This ensures that goroutineLeakGC waits for a GC cycle that
   604  	// actually performs goroutine leak detection.
   605  	//
   606  	// This is needed because multiple concurrent calls to GC may be
   607  	// fired by the system simultaneously, and some of them may be
   608  	// dropped.
   609  	//
   610  	// In the vast majority of cases, only one loop iteration is needed;
   611  	// however, multiple concurrent calls to goroutineLeakGC could lead to
   612  	// the execution of additional GC cycles.
   613  	//
   614  	// Examples:
   615  	//
   616  	// pending? |   G1                    | G2
   617  	// ---------|-------------------------|-----------------------
   618  	//     -    | goroutineLeakGC()       | goroutineLeakGC()
   619  	//     -    | pending.Store(true)     | .
   620  	//     X    | for pending.Load()      | .
   621  	//     X    | GC()                    | .
   622  	//     X    | > gcStart()             | .
   623  	//     X    |   pending.Store(false)  | .
   624  	// ...
   625  	//     -    | > gcMarkDone()          | .
   626  	//     -    |   .                     | pending.Store(true)
   627  	// ...
   628  	//     X    | > gcMarkTermination()   | .
   629  	//     X    |   ...
   630  	//     X    | < GC returns            | .
   631  	//     X    | for pending.Load        | .
   632  	//     X    | GC()                    | .
   633  	//     X    | .                       | for pending.Load()
   634  	//     X    | .                       | GC()
   635  	// ...
   636  	// The first to pick up the pending flag will start a
   637  	// leak detection cycle.
   638  	for work.goroutineLeak.pending.Load() {
   639  		GC()
   640  	}
   641  }
   642  
   643  // gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has
   644  // already completed this mark phase, it returns immediately.
   645  func gcWaitOnMark(n uint32) {
   646  	for {
   647  		// Disable phase transitions.
   648  		lock(&work.sweepWaiters.lock)
   649  		nMarks := work.cycles.Load()
   650  		if gcphase != _GCmark {
   651  			// We've already completed this cycle's mark.
   652  			nMarks++
   653  		}
   654  		if nMarks > n {
   655  			// We're done.
   656  			unlock(&work.sweepWaiters.lock)
   657  			return
   658  		}
   659  
   660  		// Wait until sweep termination, mark, and mark
   661  		// termination of cycle N complete.
   662  		work.sweepWaiters.list.push(getg())
   663  		goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceBlockUntilGCEnds, 1)
   664  	}
   665  }
   666  
   667  // gcMode indicates how concurrent a GC cycle should be.
   668  type gcMode int
   669  
   670  const (
   671  	gcBackgroundMode gcMode = iota // concurrent GC and sweep
   672  	gcForceMode                    // stop-the-world GC now, concurrent sweep
   673  	gcForceBlockMode               // stop-the-world GC now and STW sweep (forced by user)
   674  )
   675  
   676  // A gcTrigger is a predicate for starting a GC cycle. Specifically,
   677  // it is an exit condition for the _GCoff phase.
   678  type gcTrigger struct {
   679  	kind gcTriggerKind
   680  	now  int64  // gcTriggerTime: current time
   681  	n    uint32 // gcTriggerCycle: cycle number to start
   682  }
   683  
   684  type gcTriggerKind int
   685  
   686  const (
   687  	// gcTriggerHeap indicates that a cycle should be started when
   688  	// the heap size reaches the trigger heap size computed by the
   689  	// controller.
   690  	gcTriggerHeap gcTriggerKind = iota
   691  
   692  	// gcTriggerTime indicates that a cycle should be started when
   693  	// it's been more than forcegcperiod nanoseconds since the
   694  	// previous GC cycle.
   695  	gcTriggerTime
   696  
   697  	// gcTriggerCycle indicates that a cycle should be started if
   698  	// we have not yet started cycle number gcTrigger.n (relative
   699  	// to work.cycles).
   700  	gcTriggerCycle
   701  )
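
        // Typical trigger constructions, as used by callers elsewhere in the
        // runtime (shown here for illustration):
        //
        //	gcTrigger{kind: gcTriggerHeap}                  // malloc-triggered GC
        //	gcTrigger{kind: gcTriggerTime, now: nanotime()} // periodic forced GC
        //	gcTrigger{kind: gcTriggerCycle, n: n + 1}       // runtime.GC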
   702  
   703  // test reports whether the trigger condition is satisfied, meaning
   704  // that the exit condition for the _GCoff phase has been met. The exit
   705  // condition should be tested when allocating.
   706  func (t gcTrigger) test() bool {
   707  	if !memstats.enablegc || panicking.Load() != 0 || gcphase != _GCoff {
   708  		return false
   709  	}
   710  	switch t.kind {
   711  	case gcTriggerHeap:
   712  		trigger, _ := gcController.trigger()
   713  		return gcController.heapLive.Load() >= trigger
   714  	case gcTriggerTime:
   715  		if gcController.gcPercent.Load() < 0 {
   716  			return false
   717  		}
   718  		lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime))
   719  		return lastgc != 0 && t.now-lastgc > forcegcperiod
   720  	case gcTriggerCycle:
   721  		// t.n > work.cycles, but accounting for wraparound.
   722  		return int32(t.n-work.cycles.Load()) > 0
   723  	}
   724  	return true
   725  }
   726  
   727  // gcStart starts the GC. It transitions from _GCoff to _GCmark (if
   728  // debug.gcstoptheworld == 0) or performs all of GC (if
   729  // debug.gcstoptheworld != 0).
   730  //
   731  // This may return without performing this transition in some cases,
   732  // such as when called on a system stack or with locks held.
   733  func gcStart(trigger gcTrigger) {
   734  	// Since this is called from malloc and malloc is called in
   735  	// the guts of a number of libraries that might be holding
   736  	// locks, don't attempt to start GC in non-preemptible or
   737  	// potentially unstable situations.
   738  	mp := acquirem()
   739  	if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" {
   740  		releasem(mp)
   741  		return
   742  	}
   743  	releasem(mp)
   744  	mp = nil
   745  
   746  	if gp := getg(); gp.bubble != nil {
   747  		// Disassociate the G from its synctest bubble while allocating.
   748  		// This is less elegant than incrementing the group's active count,
   749  		// but avoids any contamination between GC and synctest.
   750  		bubble := gp.bubble
   751  		gp.bubble = nil
   752  		defer func() {
   753  			gp.bubble = bubble
   754  		}()
   755  	}
   756  
   757  	// Pick up the remaining unswept/not being swept spans concurrently
   758  	//
   759  	// This shouldn't happen if we're being invoked in background
   760  	// mode since proportional sweep should have just finished
   761  	// sweeping everything, but rounding errors, etc, may leave a
   762  	// few spans unswept. In forced mode, this is necessary since
   763  	// GC can be forced at any point in the sweeping cycle.
   764  	//
   765  	// We check the transition condition continuously here in case
   766  	// this G gets delayed into the next GC cycle.
   767  	for trigger.test() && sweepone() != ^uintptr(0) {
   768  	}
   769  
   770  	// Perform GC initialization and the sweep termination
   771  	// transition.
   772  	semacquire(&work.startSema)
   773  	// Re-check transition condition under transition lock.
   774  	if !trigger.test() {
   775  		semrelease(&work.startSema)
   776  		return
   777  	}
   778  
   779  	// In gcstoptheworld debug mode, upgrade the mode accordingly.
   780  	// We do this after re-checking the transition condition so
   781  	// that multiple goroutines that detect the heap trigger don't
   782  	// start multiple STW GCs.
   783  	mode := gcBackgroundMode
   784  	if debug.gcstoptheworld == 1 {
   785  		mode = gcForceMode
   786  	} else if debug.gcstoptheworld == 2 {
   787  		mode = gcForceBlockMode
   788  	}
   789  
   790  	// Ok, we're doing it! Stop everybody else
   791  	semacquire(&gcsema)
   792  	semacquire(&worldsema)
   793  
   794  	// For stats, check if this GC was forced by the user.
   795  	// Update it under gcsema to avoid gctrace getting wrong values.
   796  	work.userForced = trigger.kind == gcTriggerCycle
   797  
   798  	trace := traceAcquire()
   799  	if trace.ok() {
   800  		trace.GCStart()
   801  		traceRelease(trace)
   802  	}
   803  
   804  	// Check and setup per-P state.
   805  	for _, p := range allp {
   806  		// Check that all Ps have finished deferred mcache flushes.
   807  		if fg := p.mcache.flushGen.Load(); fg != mheap_.sweepgen {
   808  			println("runtime: p", p.id, "flushGen", fg, "!= sweepgen", mheap_.sweepgen)
   809  			throw("p mcache not flushed")
   810  		}
   811  		// Initialize ptrBuf if necessary.
   812  		if goexperiment.GreenTeaGC && p.gcw.ptrBuf == nil {
   813  			p.gcw.ptrBuf = (*[gc.PageSize / goarch.PtrSize]uintptr)(persistentalloc(gc.PageSize, goarch.PtrSize, &memstats.gcMiscSys))
   814  		}
   815  	}
   816  
   817  	gcBgMarkStartWorkers()
   818  
   819  	systemstack(gcResetMarkState)
   820  
   821  	work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
   822  	if work.stwprocs > numCPUStartup {
   823  		// This is used to compute CPU time of the STW phases, so it
   824  		// can't be more than the CPU count, even if GOMAXPROCS is.
   825  		work.stwprocs = numCPUStartup
   826  	}
   827  	work.heap0 = gcController.heapLive.Load()
   828  	work.pauseNS = 0
   829  	work.mode = mode
   830  
   831  	now := nanotime()
   832  	work.tSweepTerm = now
   833  	var stw worldStop
   834  	systemstack(func() {
   835  		stw = stopTheWorldWithSema(stwGCSweepTerm)
   836  	})
   837  
   838  	// Accumulate fine-grained stopping time.
   839  	work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1)
   840  
   841  	// Finish sweep before we start concurrent scan.
   842  	systemstack(func() {
   843  		finishsweep_m()
   844  	})
   845  
   846  	// clearpools before we start the GC. If we wait, the memory will not be
   847  	// reclaimed until the next GC cycle.
   848  	clearpools()
   849  
   850  	work.cycles.Add(1)
   851  
   852  	// Assists and workers can start the moment we start
   853  	// the world.
   854  	gcController.startCycle(now, int(gomaxprocs), trigger)
   855  
   856  	// Notify the CPU limiter that assists may begin.
   857  	gcCPULimiter.startGCTransition(true, now)
   858  
   859  	// In STW mode, disable scheduling of user Gs. This may also
   860  	// disable scheduling of this goroutine, so it may block as
   861  	// soon as we start the world again.
   862  	if mode != gcBackgroundMode {
   863  		schedEnableUser(false)
   864  	}
   865  
   866  	// If goroutine leak detection is pending, enable it for this GC cycle.
   867  	if work.goroutineLeak.pending.Load() {
   868  		work.goroutineLeak.enabled = true
   869  		work.goroutineLeak.pending.Store(false)
   870  		// Set all sync objects of blocked goroutines as untraceable
   871  		// by the GC. They are only set traceable again at the end of the GC cycle.
   872  		setSyncObjectsUntraceable()
   873  	}
   874  
   875  	// Enter concurrent mark phase and enable
   876  	// write barriers.
   877  	//
   878  	// Because the world is stopped, all Ps will
   879  	// observe that write barriers are enabled by
   880  	// the time we start the world and begin
   881  	// scanning.
   882  	//
   883  	// Write barriers must be enabled before assists are
   884  	// enabled because they must be enabled before
   885  	// any non-leaf heap objects are marked. Since
   886  	// allocations are blocked until assists can
   887  	// happen, we want to enable assists as early as
   888  	// possible.
   889  	setGCPhase(_GCmark)
   890  
   891  	gcBgMarkPrepare() // Must happen before assists are enabled.
   892  	gcPrepareMarkRoots()
   893  
   894  	// Mark all active tinyalloc blocks. Since we're
   895  	// allocating from these, they need to be black like
   896  	// other allocations. The alternative is to blacken
   897  	// the tiny block on every allocation from it, which
   898  	// would slow down the tiny allocator.
   899  	gcMarkTinyAllocs()
   900  
   901  	// At this point all Ps have enabled the write
   902  	// barrier, thus maintaining the no white to
   903  	// black invariant. Enable mutator assists to
   904  	// put back-pressure on fast allocating
   905  	// mutators.
   906  	atomic.Store(&gcBlackenEnabled, 1)
   907  
   908  	// In STW mode, we could block the instant systemstack
   909  	// returns, so make sure we're not preemptible.
   910  	mp = acquirem()
   911  
   912  	// Update the CPU stats pause time.
   913  	//
   914  	// Use maxprocs instead of stwprocs here because the total time
   915  	// computed in the CPU stats is based on maxprocs, and we want them
   916  	// to be comparable.
   917  	work.cpuStats.accumulateGCPauseTime(nanotime()-stw.finishedStopping, work.maxprocs)
   918  
   919  	// Concurrent mark.
   920  	systemstack(func() {
   921  		now = startTheWorldWithSema(0, stw)
   922  		work.pauseNS += now - stw.startedStopping
   923  		work.tMark = now
   924  
   925  		// Release the CPU limiter.
   926  		gcCPULimiter.finishGCTransition(now)
   927  	})
   928  
   929  	// Release the world sema before Gosched() in STW mode
   930  	// because we will need to reacquire it later but before
   931  	// this goroutine becomes runnable again, and we could
   932  	// self-deadlock otherwise.
   933  	semrelease(&worldsema)
   934  	releasem(mp)
   935  
   936  	// Make sure we block instead of returning to user code
   937  	// in STW mode.
   938  	if mode != gcBackgroundMode {
   939  		Gosched()
   940  	}
   941  
   942  	semrelease(&work.startSema)
   943  }
   944  
   945  // gcMarkDoneFlushed counts the number of P's with flushed work.
   946  //
   947  // Ideally this would be a captured local in gcMarkDone, but forEachP
   948  // escapes its callback closure, so it can't capture anything.
   949  //
   950  // This is protected by markDoneSema.
   951  var gcMarkDoneFlushed uint32
   952  
   953  // gcDebugMarkDone contains fields used to debug/test mark termination.
   954  var gcDebugMarkDone struct {
   955  	// spinAfterRaggedBarrier forces gcMarkDone to spin after it executes
   956  	// the ragged barrier.
   957  	spinAfterRaggedBarrier atomic.Bool
   958  
   959  	// restartedDueTo27993 indicates that we restarted mark termination
   960  	// due to the bug described in issue #27993.
   961  	//
   962  	// Protected by worldsema.
   963  	restartedDueTo27993 bool
   964  }
   965  
   966  // gcMarkDone transitions the GC from mark to mark termination if all
   967  // reachable objects have been marked (that is, there are no grey
   968  // objects and can be no more in the future). Otherwise, it flushes
   969  // all local work to the global queues where it can be discovered by
   970  // other workers.
   971  //
   972  // All goroutines performing GC work must call gcBeginWork to signal
   973  // that they're executing GC work. They must call gcEndWork when done.
   974  // gcMarkDone should be called when all local mark work has been drained
   975  // and there are no remaining workers; specifically, when gcEndWork
   976  // returns true.
   977  //
   978  // The calling context must be preemptible.
   979  //
   980  // Flushing local work is important because idle Ps may have local
   981  // work queued. This is the only way to make that work visible and
   982  // drive GC to completion.
   983  //
   984  // It is explicitly okay to have write barriers in this function. If
   985  // it does transition to mark termination, then all reachable objects
   986  // have been marked, so the write barrier cannot shade any more
   987  // objects.
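        //
        // A rough sketch of the expected calling pattern, inferred from the
        // description above (the gcBeginWork/gcEndWork signatures are assumed):
        //
        //	gcBeginWork()
        //	// ... drain local mark work ...
        //	if gcEndWork() {
        //		gcMarkDone()
        //	}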
   988  func gcMarkDone() {
   989  	// Ensure only one thread is running the ragged barrier at a
   990  	// time.
   991  	semacquire(&work.markDoneSema)
   992  
   993  top:
   994  	// Re-check transition condition under transition lock.
   995  	//
   996  	// It's critical that this checks the global work queues are
   997  	// empty before performing the ragged barrier. Otherwise,
   998  	// there could be global work that a P could take after the P
   999  	// has passed the ragged barrier.
  1000  	if !(gcphase == _GCmark && gcIsMarkDone()) {
  1001  		semrelease(&work.markDoneSema)
  1002  		return
  1003  	}
  1004  
  1005  	// forEachP needs worldsema to execute, and we'll need it to
  1006  	// stop the world later, so acquire worldsema now.
  1007  	semacquire(&worldsema)
  1008  
  1009  	// Prevent weak->strong conversions from generating additional
  1010  	// GC work. forEachP will guarantee that it is observed globally.
  1011  	work.strongFromWeak.block = true
  1012  
  1013  	// Flush all local buffers and collect flushedWork flags.
  1014  	gcMarkDoneFlushed = 0
  1015  	forEachP(waitReasonGCMarkTermination, func(pp *p) {
  1016  		// Flush the write barrier buffer, since this may add
  1017  		// work to the gcWork.
  1018  		wbBufFlush1(pp)
  1019  
  1020  		// Flush the gcWork, since this may create global work
  1021  		// and set the flushedWork flag.
  1022  		//
  1023  		// TODO(austin): Break up these workbufs to
  1024  		// better distribute work.
  1025  		pp.gcw.dispose()
  1026  
  1027  		// Collect the flushedWork flag.
  1028  		if pp.gcw.flushedWork {
  1029  			atomic.Xadd(&gcMarkDoneFlushed, 1)
  1030  			pp.gcw.flushedWork = false
  1031  		}
  1032  	})
  1033  
  1034  	if gcMarkDoneFlushed != 0 {
  1035  		// More grey objects were discovered since the
  1036  		// previous termination check, so there may be more
  1037  		// work to do. Keep going. It's possible the
  1038  		// transition condition became true again during the
  1039  		// ragged barrier, so re-check it.
  1040  		semrelease(&worldsema)
  1041  		goto top
  1042  	}
  1043  
  1044  	// For debugging/testing.
  1045  	for gcDebugMarkDone.spinAfterRaggedBarrier.Load() {
  1046  	}
  1047  
  1048  	// There was no global work, no local work, and no Ps
  1049  	// communicated work since we took markDoneSema. Therefore
  1050  	// there are no grey objects and no more objects can be
  1051  	// shaded. Transition to mark termination.
  1052  	now := nanotime()
  1053  	work.tMarkTerm = now
  1054  	getg().m.preemptoff = "gcing"
  1055  	var stw worldStop
  1056  	systemstack(func() {
  1057  		stw = stopTheWorldWithSema(stwGCMarkTerm)
  1058  	})
  1059  	// The gcphase is _GCmark; it will transition to _GCmarktermination
  1060  	// below. The important thing is that the wb remains active until
  1061  	// all marking is complete. This includes writes made by the GC.
  1062  
  1063  	// Accumulate fine-grained stopping time.
  1064  	work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1)
  1065  
  1066  	// There is sometimes work left over when we enter mark termination due
  1067  	// to write barriers performed after the completion barrier above.
  1068  	// Detect this and resume concurrent mark. This is obviously
  1069  	// unfortunate.
  1070  	//
  1071  	// See issue #27993 for details.
  1072  	//
  1073  	// Switch to the system stack to call wbBufFlush1, though in this case
  1074  	// it doesn't matter because we're non-preemptible anyway.
  1075  	restart := false
  1076  	systemstack(func() {
  1077  		for _, p := range allp {
  1078  			wbBufFlush1(p)
  1079  			if !p.gcw.empty() {
  1080  				restart = true
  1081  				break
  1082  			}
  1083  		}
  1084  	})
  1085  
  1086  	// Check whether we need to resume the marking phase because of issue #27993
  1087  	// or because of goroutine leak detection.
  1088  	if restart || (work.goroutineLeak.enabled && !work.goroutineLeak.done) {
  1089  		if restart {
  1090  			// Restart because of issue #27993.
  1091  			gcDebugMarkDone.restartedDueTo27993 = true
  1092  		} else {
  1093  			// Marking has reached a fixed-point. Attempt to detect goroutine leaks.
  1094  			//
  1095  			// If the returned value is true, then detection already concluded for this cycle.
  1096  			// Otherwise, more runnable goroutines were discovered, requiring additional mark work.
  1097  			work.goroutineLeak.done = findGoroutineLeaks()
  1098  		}
  1099  
  1100  		getg().m.preemptoff = ""
  1101  		systemstack(func() {
  1102  			// Accumulate the time we were stopped before we had to start again.
  1103  			work.cpuStats.accumulateGCPauseTime(nanotime()-stw.finishedStopping, work.maxprocs)
  1104  
  1105  			// Start the world again.
  1106  			now := startTheWorldWithSema(0, stw)
  1107  			work.pauseNS += now - stw.startedStopping
  1108  		})
  1109  		semrelease(&worldsema)
  1110  		goto top
  1111  	}
  1112  
  1113  	gcComputeStartingStackSize()
  1114  
  1115  	// Disable assists and background workers. We must do
  1116  	// this before waking blocked assists.
  1117  	atomic.Store(&gcBlackenEnabled, 0)
  1118  
  1119  	// Notify the CPU limiter that GC assists will now cease.
  1120  	gcCPULimiter.startGCTransition(false, now)
  1121  
  1122  	// Wake all blocked assists. These will run when we
  1123  	// start the world again.
  1124  	gcWakeAllAssists()
  1125  
  1126  	// Wake all blocked weak->strong conversions. These will run
  1127  	// when we start the world again.
  1128  	work.strongFromWeak.block = false
  1129  	gcWakeAllStrongFromWeak()
  1130  
  1131  	// Likewise, release the transition lock. Blocked
  1132  	// workers and assists will run when we start the
  1133  	// world again.
  1134  	semrelease(&work.markDoneSema)
  1135  
  1136  	// In STW mode, re-enable user goroutines. These will be
  1137  	// queued to run after we start the world.
  1138  	schedEnableUser(true)
  1139  
  1140  	// endCycle depends on all gcWork cache stats being flushed.
  1141  	// The termination algorithm above ensured this, up to
  1142  	// allocations since the ragged barrier.
  1143  	gcController.endCycle(now, int(gomaxprocs), work.userForced)
  1144  
  1145  	// Perform mark termination. This will restart the world.
  1146  	gcMarkTermination(stw)
  1147  }
  1148  
  1149  // isMaybeRunnable reports whether a goroutine may still be semantically runnable.
  1150  // For goroutines that are semantically runnable, this will eventually return true
  1151  // as the GC marking phase progresses. It returns false for leaked goroutines and for
  1152  // goroutines that the GC has not yet determined to be possibly runnable.
  1153  func (gp *g) isMaybeRunnable() bool {
  1154  	// Check whether the goroutine is actually in a waiting state first.
  1155  	if readgstatus(gp) != _Gwaiting {
  1156  		// If the goroutine is not waiting, then clearly it is maybe runnable.
  1157  		return true
  1158  	}
  1159  
  1160  	switch gp.waitreason {
  1161  	case waitReasonSelectNoCases,
  1162  		waitReasonChanSendNilChan,
  1163  		waitReasonChanReceiveNilChan:
  1164  		// Select with no cases or communicating on nil channels
  1165  		// make goroutines unrunnable by definition.
  1166  		return false
  1167  	case waitReasonChanReceive,
  1168  		waitReasonSelect,
  1169  		waitReasonChanSend:
  1170  		// Cycle through all *sudogs to check whether
  1171  		// the goroutine is waiting on a marked channel.
  1172  		for sg := gp.waiting; sg != nil; sg = sg.waitlink {
  1173  			if isMarkedOrNotInHeap(unsafe.Pointer(sg.c.get())) {
  1174  				return true
  1175  			}
  1176  		}
  1177  		return false
  1178  	case waitReasonSyncCondWait,
  1179  		waitReasonSyncWaitGroupWait,
  1180  		waitReasonSyncMutexLock,
  1181  		waitReasonSyncRWMutexLock,
  1182  		waitReasonSyncRWMutexRLock:
  1183  		// If waiting on mutexes, wait groups, or condition variables,
  1184  		// check if the synchronization primitive attached to the sudog is marked.
  1185  		if gp.waiting != nil {
  1186  			return isMarkedOrNotInHeap(gp.waiting.elem.get())
  1187  		}
  1188  	}
  1189  	return true
  1190  }
  1191  
  1192  // findMaybeRunnableGoroutines checks whether more blocked but maybe-runnable goroutines exist.
  1193  // If so, it adds them to the root set and increments work.markrootJobs accordingly.
  1194  // Returns true if we need to run another phase of markroots; returns false otherwise.
  1195  func findMaybeRunnableGoroutines() (moreWork bool) {
  1196  	oldRootJobs := work.markrootJobs.Load()
  1197  
  1198  	// To begin with we have a set of unchecked stackRoots between
  1199  	// vIndex and ivIndex. During the loop, anything < vIndex should be
  1200  	// valid stackRoots and anything >= ivIndex should be invalid stackRoots.
  1201  	// The loop terminates when the two indices meet.
  1202  	var vIndex, ivIndex int = work.nMaybeRunnableStackRoots, work.nStackRoots
  1203  	// Reorder goroutine list
  1204  	for vIndex < ivIndex {
  1205  		if work.stackRoots[vIndex].isMaybeRunnable() {
  1206  			vIndex = vIndex + 1
  1207  			continue
  1208  		}
  1209  		for ivIndex = ivIndex - 1; ivIndex != vIndex; ivIndex = ivIndex - 1 {
  1210  			if gp := work.stackRoots[ivIndex]; gp.isMaybeRunnable() {
  1211  				work.stackRoots[ivIndex] = work.stackRoots[vIndex]
  1212  				work.stackRoots[vIndex] = gp
  1213  				vIndex = vIndex + 1
  1214  				break
  1215  			}
  1216  		}
  1217  	}
  1218  
  1219  	newRootJobs := work.baseStacks + uint32(vIndex)
  1220  	if newRootJobs > oldRootJobs {
  1221  		work.nMaybeRunnableStackRoots = vIndex
  1222  		work.markrootJobs.Store(newRootJobs)
  1223  	}
  1224  	return newRootJobs > oldRootJobs
  1225  }
  1226  
  1227  // setSyncObjectsUntraceable scans allgs and sets the elem and c fields of all sudogs to
  1228  // an untraceable pointer. This prevents the GC from marking these objects as live in memory
  1229  // by following these pointers when running goroutine leak detection.
  1230  func setSyncObjectsUntraceable() {
  1231  	assertWorldStopped()
  1232  
  1233  	forEachGRace(func(gp *g) {
  1234  		// Set as untraceable all synchronization objects of goroutines
  1235  		// blocked at concurrency operations that could leak.
  1236  		switch {
  1237  		case gp.waitreason.isSyncWait():
  1238  			// Synchronization primitives are reachable from the *sudog
  1239  			// via the elem field.
  1240  			for sg := gp.waiting; sg != nil; sg = sg.waitlink {
  1241  				sg.elem.setUntraceable()
  1242  			}
  1243  		case gp.waitreason.isChanWait():
  1244  			// Channels and select statements are reachable from the *sudog via the c field.
  1245  			for sg := gp.waiting; sg != nil; sg = sg.waitlink {
  1246  				sg.c.setUntraceable()
  1247  			}
  1248  		}
  1249  	})
  1250  }
  1251  
  1252  // gcRestoreSyncObjects restores the elem and c fields of all sudogs to their original values.
  1253  // Should be invoked after the goroutine leak detection phase.
  1254  func gcRestoreSyncObjects() {
  1255  	assertWorldStopped()
  1256  
  1257  	forEachGRace(func(gp *g) {
  1258  		for sg := gp.waiting; sg != nil; sg = sg.waitlink {
  1259  			sg.elem.setTraceable()
  1260  			sg.c.setTraceable()
  1261  		}
  1262  	})
  1263  }
  1264  
  1265  // findGoroutineLeaks scans the remaining stackRoots and marks any that are
  1266  // blocked exclusively on unreachable concurrency primitives as leaked (deadlocked).
  1267  // It returns true if the goroutine leak check was performed (or was unnecessary).
  1268  // It returns false if the GC cycle has not yet computed all maybe-runnable goroutines.
  1269  func findGoroutineLeaks() bool {
  1270  	assertWorldStopped()
  1271  
  1272  	// Report goroutine leaks, mark them unreachable, and resume marking.
  1273  	// We still need to mark these unreachable *g structs, since they
  1274  	// get reused, but their stacks won't get scanned.
  1275  	if work.nMaybeRunnableStackRoots == work.nStackRoots {
  1276  		// nMaybeRunnableStackRoots == nStackRoots means that all goroutines are marked.
  1277  		return true
  1278  	}
  1279  
  1280  	// Check whether any more maybe-runnable goroutines can be found by the GC.
  1281  	if findMaybeRunnableGoroutines() {
  1282  		// We found more work, so we need to resume the marking phase.
  1283  		return false
  1284  	}
  1285  
  1286  	// For the remaining goroutines, mark them as unreachable and leaked.
  1287  	work.goroutineLeak.count = work.nStackRoots - work.nMaybeRunnableStackRoots
  1288  
  1289  	for i := work.nMaybeRunnableStackRoots; i < work.nStackRoots; i++ {
  1290  		gp := work.stackRoots[i]
  1291  		casgstatus(gp, _Gwaiting, _Gleaked)
  1292  
  1293  		// Add the primitives causing the goroutine leaks
  1294  		// to the GC work queue, to ensure they are marked.
  1295  		//
  1296  		// NOTE(vsaioc): these primitives should also be reachable
  1297  		// from the goroutine's stack, but let's play it safe.
  1298  		switch {
  1299  		case gp.waitreason.isChanWait():
  1300  			for sg := gp.waiting; sg != nil; sg = sg.waitlink {
  1301  				shade(sg.c.uintptr())
  1302  			}
  1303  		case gp.waitreason.isSyncWait():
  1304  			for sg := gp.waiting; sg != nil; sg = sg.waitlink {
  1305  				shade(sg.elem.uintptr())
  1306  			}
  1307  		}
  1308  	}
  1309  	// Make the remaining roots available for marking so they get drained.
  1310  	work.markrootJobs.Add(int32(work.nStackRoots - work.nMaybeRunnableStackRoots))
  1311  	work.nMaybeRunnableStackRoots = work.nStackRoots
  1312  	return true
  1313  }
  1314  
  1315  // World must be stopped and mark assists and background workers must be
  1316  // disabled.
  1317  func gcMarkTermination(stw worldStop) {
  1318  	// Start marktermination (write barrier remains enabled for now).
  1319  	setGCPhase(_GCmarktermination)
  1320  
  1321  	work.heap1 = gcController.heapLive.Load()
  1322  	startTime := nanotime()
  1323  
  1324  	mp := acquirem()
  1325  	mp.preemptoff = "gcing"
  1326  	mp.traceback = 2
  1327  	curgp := mp.curg
  1328  	// N.B. The execution tracer is not aware of this status
  1329  	// transition and handles it specially based on the
  1330  	// wait reason.
  1331  	casGToWaitingForSuspendG(curgp, _Grunning, waitReasonGarbageCollection)
  1332  
  1333  	// Run gc on the g0 stack. We do this so that the g stack
  1334  	// we're currently running on will no longer change. Cuts
  1335  	// the root set down a bit (g0 stacks are not scanned, and
  1336  	// we don't need to scan gc's internal state).  We also
  1337  	// need to switch to g0 so we can shrink the stack.
  1338  	systemstack(func() {
  1339  		gcMark(startTime)
  1340  		// Must return immediately.
  1341  		// The outer function's stack may have moved
  1342  		// during gcMark (it shrinks stacks, including the
  1343  		// outer function's stack), so we must not refer
  1344  		// to any of its variables. Return back to the
  1345  		// non-system stack to pick up the new addresses
  1346  		// before continuing.
  1347  	})
  1348  
  1349  	var stwSwept bool
  1350  	systemstack(func() {
  1351  		work.heap2 = work.bytesMarked
  1352  		if debug.gccheckmark > 0 {
  1353  			runCheckmark(func(_ *gcWork) { gcPrepareMarkRoots() })
  1354  		}
  1355  		if debug.checkfinalizers > 0 {
  1356  			checkFinalizersAndCleanups()
  1357  		}
  1358  
  1359  		// marking is complete so we can turn the write barrier off
  1360  		setGCPhase(_GCoff)
  1361  		stwSwept = gcSweep(work.mode)
  1362  	})
  1363  
  1364  	mp.traceback = 0
  1365  	casgstatus(curgp, _Gwaiting, _Grunning)
  1366  
  1367  	trace := traceAcquire()
  1368  	if trace.ok() {
  1369  		trace.GCDone()
  1370  		traceRelease(trace)
  1371  	}
  1372  
  1373  	// all done
  1374  	mp.preemptoff = ""
  1375  
  1376  	if gcphase != _GCoff {
  1377  		throw("gc done but gcphase != _GCoff")
  1378  	}
  1379  
  1380  	// Record heapInUse for scavenger.
  1381  	memstats.lastHeapInUse = gcController.heapInUse.load()
  1382  
  1383  	// Update GC trigger and pacing, as well as downstream consumers
  1384  	// of this pacing information, for the next cycle.
  1385  	systemstack(gcControllerCommit)
  1386  
  1387  	// Update timing memstats
  1388  	now := nanotime()
  1389  	sec, nsec, _ := time_now()
  1390  	unixNow := sec*1e9 + int64(nsec)
  1391  	work.pauseNS += now - stw.startedStopping
  1392  	work.tEnd = now
  1393  	atomic.Store64(&memstats.last_gc_unix, uint64(unixNow)) // must be Unix time to make sense to user
  1394  	atomic.Store64(&memstats.last_gc_nanotime, uint64(now)) // monotonic time for us
  1395  	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
  1396  	memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
  1397  	memstats.pause_total_ns += uint64(work.pauseNS)
  1398  
  1399  	// Accumulate CPU stats.
  1400  	//
  1401  	// Use maxprocs instead of stwprocs for GC pause time because the total time
  1402  	// computed in the CPU stats is based on maxprocs, and we want them to be
  1403  	// comparable.
  1404  	//
  1405  	// Pass gcMarkPhase=true to accumulate so we can get all the latest GC CPU stats
  1406  	// in there too.
  1407  	work.cpuStats.accumulateGCPauseTime(now-stw.finishedStopping, work.maxprocs)
  1408  	work.cpuStats.accumulate(now, true)
  1409  
  1410  	// Compute overall GC CPU utilization.
  1411  	// Omit idle marking time from the overall utilization here since it's "free".
  1412  	memstats.gc_cpu_fraction = float64(work.cpuStats.GCTotalTime-work.cpuStats.GCIdleTime) / float64(work.cpuStats.TotalTime)
  1413  
  1414  	// Reset assist time and background time stats.
  1415  	//
  1416  	// Do this now, instead of at the start of the next GC cycle, because
  1417  	// these two may keep accumulating even if the GC is not active.
  1418  	scavenge.assistTime.Store(0)
  1419  	scavenge.backgroundTime.Store(0)
  1420  
  1421  	// Reset idle time stat.
  1422  	sched.idleTime.Store(0)
  1423  
  1424  	if work.userForced {
  1425  		memstats.numforcedgc++
  1426  	}
  1427  
  1428  	// Bump GC cycle count and wake goroutines waiting on sweep.
  1429  	lock(&work.sweepWaiters.lock)
  1430  	memstats.numgc++
  1431  	injectglist(&work.sweepWaiters.list)
  1432  	unlock(&work.sweepWaiters.lock)
  1433  
  1434  	// Increment the scavenge generation now.
  1435  	//
  1436  	// This moment represents peak heap in use because we're
  1437  	// about to start sweeping.
  1438  	mheap_.pages.scav.index.nextGen()
  1439  
  1440  	// Release the CPU limiter.
  1441  	gcCPULimiter.finishGCTransition(now)
  1442  
  1443  	// Finish the current heap profiling cycle and start a new
  1444  	// heap profiling cycle. We do this before starting the world
  1445  	// so events don't leak into the wrong cycle.
  1446  	mProf_NextCycle()
  1447  
  1448  	// There may be stale spans in mcaches that need to be swept.
  1449  	// Those aren't tracked in any sweep lists, so we need to
  1450  	// count them against sweep completion until we ensure all
  1451  	// those spans have been forced out.
  1452  	//
  1453  	// If gcSweep fully swept the heap (for example if the sweep
  1454  	// is not concurrent due to a GODEBUG setting), then we expect
  1455  	// the sweepLocker to be invalid, since sweeping is done.
  1456  	//
  1457  	// N.B. Below we might duplicate some work from gcSweep; this is
  1458  	// fine as all that work is idempotent within a GC cycle, and
  1459  	// we're still holding worldsema so a new cycle can't start.
  1460  	sl := sweep.active.begin()
  1461  	if !stwSwept && !sl.valid {
  1462  		throw("failed to set sweep barrier")
  1463  	} else if stwSwept && sl.valid {
  1464  		throw("non-concurrent sweep failed to drain all sweep queues")
  1465  	}
  1466  
  1467  	if work.goroutineLeak.enabled {
  1468  		// Restore the elem and c fields of all sudogs to their original values.
  1469  		gcRestoreSyncObjects()
  1470  	}
  1471  
  1472  	var goroutineLeakDone bool
  1473  	systemstack(func() {
  1474  		// Pull the GC out of goroutine leak detection mode.
  1475  		work.goroutineLeak.enabled = false
  1476  		goroutineLeakDone = work.goroutineLeak.done
  1477  		work.goroutineLeak.done = false
  1478  
  1479  		// The memstats updated above must be updated with the world
  1480  		// stopped to ensure consistency of some values, such as
  1481  		// sched.idleTime and sched.totaltime. memstats also include
  1482  		// the pause time (work.pauseNS), forcing computation of the
  1483  		// total pause time before the pause actually ends.
  1484  		//
  1485  		// Here we reuse the same now when starting the world so that
  1486  		// the time added to /sched/pauses/total/gc:seconds is
  1487  		// consistent with the value in memstats.
  1488  		startTheWorldWithSema(now, stw)
  1489  	})
  1490  
  1491  	// Flush the heap profile so we can start a new cycle next GC.
  1492  	// This is relatively expensive, so we don't do it with the
  1493  	// world stopped.
  1494  	mProf_Flush()
  1495  
  1496  	// Prepare workbufs for freeing by the sweeper. We do this
  1497  	// asynchronously because it can take non-trivial time.
  1498  	prepareFreeWorkbufs()
  1499  
  1500  	// Free stack spans. This must be done between GC cycles.
  1501  	systemstack(freeStackSpans)
  1502  
  1503  	// Ensure all mcaches are flushed. Each P will flush its own
  1504  	// mcache before allocating, but idle Ps may not. Since this
  1505  	// is necessary to sweep all spans, we need to ensure all
  1506  	// mcaches are flushed before we start the next GC cycle.
  1507  	//
  1508  	// While we're here, flush the page cache for idle Ps to avoid
  1509  	// having pages get stuck on them. These pages are hidden from
  1510  	// the scavenger, so in small idle heaps a significant amount
  1511  	// of additional memory might be held onto.
  1512  	//
  1513  	// Also, flush the pinner cache, to avoid leaking that memory
  1514  	// indefinitely.
  1515  	if debug.gctrace > 1 {
  1516  		clear(memstats.lastScanStats[:])
  1517  	}
  1518  	forEachP(waitReasonFlushProcCaches, func(pp *p) {
  1519  		pp.mcache.prepareForSweep()
  1520  		if pp.status == _Pidle {
  1521  			systemstack(func() {
  1522  				lock(&mheap_.lock)
  1523  				pp.pcache.flush(&mheap_.pages)
  1524  				unlock(&mheap_.lock)
  1525  			})
  1526  		}
  1527  		if debug.gctrace > 1 {
  1528  			pp.gcw.flushScanStats(&memstats.lastScanStats)
  1529  		}
  1530  		pp.pinnerCache = nil
  1531  	})
  1532  	if sl.valid {
  1533  		// Now that we've swept stale spans in mcaches, they don't
  1534  		// count against unswept spans.
  1535  		//
  1536  		// Note: this sweepLocker may not be valid if sweeping had
  1537  		// already completed during the STW. See the corresponding
  1538  		// begin() call that produced sl.
  1539  		sweep.active.end(sl)
  1540  	}
  1541  
  1542  	// Print gctrace before dropping worldsema. As soon as we drop
  1543  	// worldsema another cycle could start and smash the stats
  1544  	// we're trying to print.
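        	//
        	// The line printed below has roughly the form documented for
        	// GODEBUG=gctrace=1 in package runtime:
        	//
        	//	gc # @#s #%: #+#+# ms clock, #+#/#/#+# ms cpu, #->#-># MB, # MB goal, # MB stacks, # MB globals, # P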
  1545  	if debug.gctrace > 0 {
  1546  		util := int(memstats.gc_cpu_fraction * 100)
  1547  
  1548  		var sbuf [24]byte
  1549  		printlock()
  1550  		print("gc ", memstats.numgc,
  1551  			" @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
  1552  			util, "%")
  1553  		if goroutineLeakDone {
  1554  			print(" (checking for goroutine leaks)")
  1555  		}
  1556  		print(": ")
  1557  		prev := work.tSweepTerm
  1558  		for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
  1559  			if i != 0 {
  1560  				print("+")
  1561  			}
  1562  			print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
  1563  			prev = ns
  1564  		}
  1565  		print(" ms clock, ")
  1566  		for i, ns := range []int64{
  1567  			int64(work.stwprocs) * (work.tMark - work.tSweepTerm),
  1568  			gcController.assistTime.Load(),
  1569  			gcController.dedicatedMarkTime.Load() + gcController.fractionalMarkTime.Load(),
  1570  			gcController.idleMarkTime.Load(),
  1571  			int64(work.stwprocs) * (work.tEnd - work.tMarkTerm),
  1572  		} {
  1573  			if i == 2 || i == 3 {
  1574  				// Separate mark time components with /.
  1575  				print("/")
  1576  			} else if i != 0 {
  1577  				print("+")
  1578  			}
  1579  			print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
  1580  		}
  1581  		print(" ms cpu, ",
  1582  			work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
  1583  			gcController.lastHeapGoal>>20, " MB goal, ",
  1584  			gcController.lastStackScan.Load()>>20, " MB stacks, ",
  1585  			gcController.globalsScan.Load()>>20, " MB globals, ",
  1586  			work.maxprocs, " P")
  1587  		if work.userForced {
  1588  			print(" (forced)")
  1589  		}
  1590  		print("\n")
  1591  
  1592  		if debug.gctrace > 1 {
  1593  			dumpScanStats()
  1594  		}
  1595  		printunlock()
  1596  	}
  1597  
  1598  	// Print finalizer/cleanup queue length. Like gctrace, do this before the next GC can start.
  1599  	// Holding off the next GC is not strictly necessary here, but it acts as a convenient
  1600  	// lock on printing this information (so it cannot overlap with itself from the next GC cycle).
  1601  	if debug.checkfinalizers > 0 {
  1602  		fq, fe := finReadQueueStats()
  1603  		fn := max(int64(fq)-int64(fe), 0)
  1604  
  1605  		cq, ce := gcCleanups.readQueueStats()
  1606  		cn := max(int64(cq)-int64(ce), 0)
  1607  
  1608  		println("checkfinalizers: queue:", fn, "finalizers +", cn, "cleanups")
  1609  	}
  1610  
  1611  	// Set any arena chunks that were deferred to fault.
  1612  	lock(&userArenaState.lock)
  1613  	faultList := userArenaState.fault
  1614  	userArenaState.fault = nil
  1615  	unlock(&userArenaState.lock)
  1616  	for _, lc := range faultList {
  1617  		lc.mspan.setUserArenaChunkToFault()
  1618  	}
  1619  
  1620  	// Enable huge pages on some metadata if we cross a heap threshold.
  1621  	if gcController.heapGoal() > minHeapForMetadataHugePages {
  1622  		systemstack(func() {
  1623  			mheap_.enableMetadataHugePages()
  1624  		})
  1625  	}
  1626  
  1627  	semrelease(&worldsema)
  1628  	semrelease(&gcsema)
  1629  	// Careful: another GC cycle may start now.
  1630  
  1631  	releasem(mp)
  1632  	mp = nil
  1633  
  1634  	// now that gc is done, kick off finalizer thread if needed
  1635  	if !concurrentSweep {
  1636  		// give the queued finalizers, if any, a chance to run
  1637  		Gosched()
  1638  	}
  1639  }
  1640  
  1641  // gcBgMarkStartWorkers prepares background mark worker goroutines. These
  1642  // goroutines will not run until the mark phase, but they must be started while
  1643  // the world is not stopped and from a regular G stack. The caller must hold
  1644  // worldsema.
  1645  func gcBgMarkStartWorkers() {
  1646  	// Background marking is performed by per-P G's. Ensure that each P has
  1647  	// a background GC G.
  1648  	//
  1649  	// Worker Gs don't exit if gomaxprocs is reduced. If it is raised
  1650  	// again, we can reuse the old workers; no need to create new workers.
  1651  	if gcBgMarkWorkerCount >= gomaxprocs {
  1652  		return
  1653  	}
  1654  
  1655  	// Increment mp.locks when allocating. We are called within gcStart,
  1656  	// and thus must not trigger another gcStart via an allocation. gcStart
  1657  	// bails when allocating with locks held, so simulate that for these
  1658  	// allocations.
  1659  	//
  1660  	// TODO(prattmic): cleanup gcStart to use a more explicit "in gcStart"
  1661  	// check for bailing.
  1662  	mp := acquirem()
  1663  	ready := make(chan struct{}, 1)
  1664  	releasem(mp)
  1665  
  1666  	for gcBgMarkWorkerCount < gomaxprocs {
  1667  		mp := acquirem() // See above, we allocate a closure here.
  1668  		go gcBgMarkWorker(ready)
  1669  		releasem(mp)
  1670  
  1671  		// N.B. we intentionally wait on each goroutine individually
  1672  		// rather than starting all in a batch and then waiting once
  1673  		// afterwards. By running one goroutine at a time, we can take
  1674  		// advantage of runnext to bounce back and forth between
  1675  		// workers and this goroutine. In an overloaded application,
  1676  		// this can reduce GC start latency by prioritizing these
  1677  		// goroutines rather than waiting on the end of the run queue.
  1678  		<-ready
  1679  		// The worker is now guaranteed to be added to the pool before
  1680  		// its P's next findRunnableGCWorker.
  1681  
  1682  		gcBgMarkWorkerCount++
  1683  	}
  1684  }
  1685  
  1686  // gcBgMarkPrepare sets up state for background marking.
  1687  // Mutator assists must not yet be enabled.
  1688  func gcBgMarkPrepare() {
  1689  	// Background marking will stop when the work queues are empty
  1690  	// and there are no more workers (note that, since this is
  1691  	// concurrent, this may be a transient state, but mark
  1692  	// termination will clean it up). Between background workers
  1693  	// and assists, we don't really know how many workers there
  1694  	// will be, so we pretend to have an arbitrarily large number
  1695  	// of workers, almost all of which are "waiting". While a
  1696  	// worker is working it decrements nwait. If nproc == nwait,
  1697  	// there are no workers.
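        	//
        	// Concretely, gcBeginWork decrements nwait when a worker starts
        	// processing GC work and gcEndWork increments it again when the worker
        	// finishes; when that increment brings nwait back up to nproc and no
        	// mark work remains, the finishing worker is the last one out and must
        	// call gcMarkDone.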
  1698  	work.nproc = ^uint32(0)
  1699  	work.nwait = ^uint32(0)
  1700  }
  1701  
  1702  // gcBgMarkWorkerNode is an entry in the gcBgMarkWorkerPool. It points to a single
  1703  // gcBgMarkWorker goroutine.
  1704  type gcBgMarkWorkerNode struct {
  1705  	// Unused workers are managed in a lock-free stack. This field must be first.
  1706  	node lfnode
  1707  
  1708  	// The g of this worker.
  1709  	gp guintptr
  1710  
  1711  	// Release this m on park. This is used to communicate with the unlock
  1712  	// function, which cannot access the G's stack. It is unused outside of
  1713  	// gcBgMarkWorker().
  1714  	m muintptr
  1715  }
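
        // gcBgMarkWorkerNodePadded pads a gcBgMarkWorkerNode out to tagAlign bytes
        // (less the ASan red zone, when ASan is enabled), presumably so that heap
        // allocations of the node keep the alignment assumed by the tagged-pointer
        // scheme used for the lock-free worker pool; see the comment in tagptr.go.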
  1716  type gcBgMarkWorkerNodePadded struct {
  1717  	gcBgMarkWorkerNode
  1718  	pad [tagAlign - unsafe.Sizeof(gcBgMarkWorkerNode{}) - gcBgMarkWorkerNodeRedZoneSize]byte
  1719  }
  1720  
  1721  const gcBgMarkWorkerNodeRedZoneSize = (16 << 2) * asanenabledBit // redZoneSize(512)
  1722  
  1723  func gcBgMarkWorker(ready chan struct{}) {
  1724  	gp := getg()
  1725  
  1726  	// We pass node to a gopark unlock function, so it can't be on
  1727  	// the stack (see gopark). Prevent deadlock from recursively
  1728  	// starting GC by disabling preemption.
  1729  	gp.m.preemptoff = "GC worker init"
  1730  	node := &new(gcBgMarkWorkerNodePadded).gcBgMarkWorkerNode // TODO: technically not allowed in the heap. See comment in tagptr.go.
  1731  	gp.m.preemptoff = ""
  1732  
  1733  	node.gp.set(gp)
  1734  
  1735  	node.m.set(acquirem())
  1736  
  1737  	ready <- struct{}{}
  1738  	// After this point, the background mark worker is generally scheduled
  1739  	// cooperatively by gcController.findRunnableGCWorker. While performing
  1740  	// work on the P, preemption is disabled because we are working on
  1741  	// P-local work buffers. When the preempt flag is set, this puts itself
  1742  	// into _Gwaiting to be woken up by gcController.findRunnableGCWorker
  1743  	// at the appropriate time.
  1744  	//
  1745  	// When preemption is enabled (e.g., while in gcMarkDone), this worker
  1746  	// may be preempted and schedule as a _Grunnable G from a runq. That is
  1747  	// fine; it will eventually gopark again for further scheduling via
  1748  	// findRunnableGCWorker.
  1749  	//
  1750  	// Since we disable preemption before notifying ready, we guarantee that
  1751  	// this G will be in the worker pool for the next findRunnableGCWorker.
  1752  	// This isn't strictly necessary, but it reduces latency between
  1753  	// _GCmark starting and the workers starting.
  1754  
  1755  	for {
  1756  		// Go to sleep until woken by
  1757  		// gcController.findRunnableGCWorker.
  1758  		gopark(func(g *g, nodep unsafe.Pointer) bool {
  1759  			node := (*gcBgMarkWorkerNode)(nodep)
  1760  
  1761  			if mp := node.m.ptr(); mp != nil {
  1762  				// The worker G is no longer running; release
  1763  				// the M.
  1764  				//
  1765  				// N.B. it is _safe_ to release the M as soon
  1766  				// as we are no longer performing P-local mark
  1767  				// work.
  1768  				//
  1769  				// However, since we cooperatively stop work
  1770  				// when gp.preempt is set, if we releasem in
  1771  				// the loop then the following call to gopark
  1772  				// would immediately preempt the G. This is
  1773  				// also safe, but inefficient: the G must
  1774  				// schedule again only to enter gopark and park
  1775  				// again. Thus, we defer the release until
  1776  				// after parking the G.
  1777  				releasem(mp)
  1778  			}
  1779  
  1780  			// Release this G to the pool.
  1781  			gcBgMarkWorkerPool.push(&node.node)
  1782  			// Note that at this point, the G may immediately be
  1783  			// rescheduled and may be running.
  1784  			return true
  1785  		}, unsafe.Pointer(node), waitReasonGCWorkerIdle, traceBlockSystemGoroutine, 0)
  1786  
  1787  		// Preemption must not occur here, or another G might see
  1788  		// p.gcMarkWorkerMode.
  1789  
  1790  		// Disable preemption so we can use the gcw. If the
  1791  		// scheduler wants to preempt us, we'll stop draining,
  1792  		// dispose the gcw, and then preempt.
  1793  		node.m.set(acquirem())
  1794  		pp := gp.m.p.ptr() // P can't change with preemption disabled.
  1795  
  1796  		if gcBlackenEnabled == 0 {
  1797  			println("worker mode", pp.gcMarkWorkerMode)
  1798  			throw("gcBgMarkWorker: blackening not enabled")
  1799  		}
  1800  
  1801  		if pp.gcMarkWorkerMode == gcMarkWorkerNotWorker {
  1802  			throw("gcBgMarkWorker: mode not set")
  1803  		}
  1804  
  1805  		startTime := nanotime()
  1806  		pp.gcMarkWorkerStartTime = startTime
  1807  		var trackLimiterEvent bool
  1808  		if pp.gcMarkWorkerMode == gcMarkWorkerIdleMode {
  1809  			trackLimiterEvent = pp.limiterEvent.start(limiterEventIdleMarkWork, startTime)
  1810  		}
  1811  
  1812  		gcBeginWork()
  1813  
  1814  		systemstack(func() {
  1815  			// Mark our goroutine preemptible so its stack can be scanned or observed
  1816  			// by the execution tracer. This, for example, lets two mark workers scan
  1817  			// each other (otherwise, they would deadlock).
  1818  			//
  1819  			// casGToWaitingForSuspendG marks the goroutine as ineligible for a
  1820  			// stack shrink, effectively pinning the stack in memory for the duration.
  1821  			//
  1822  			// N.B. The execution tracer is not aware of this status transition and
  1823  			// handles it specially based on the wait reason.
  1824  			casGToWaitingForSuspendG(gp, _Grunning, waitReasonGCWorkerActive)
  1825  			switch pp.gcMarkWorkerMode {
  1826  			default:
  1827  				throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
  1828  			case gcMarkWorkerDedicatedMode:
  1829  				gcDrainMarkWorkerDedicated(&pp.gcw, true)
  1830  				if gp.preempt {
  1831  					// We were preempted. This is
  1832  					// a useful signal to kick
  1833  					// everything out of the run
  1834  					// queue so it can run
  1835  					// somewhere else.
  1836  					if drainQ := runqdrain(pp); !drainQ.empty() {
  1837  						lock(&sched.lock)
  1838  						globrunqputbatch(&drainQ)
  1839  						unlock(&sched.lock)
  1840  					}
  1841  				}
  1842  				// Go back to draining, this time
  1843  				// without preemption.
  1844  				gcDrainMarkWorkerDedicated(&pp.gcw, false)
  1845  			case gcMarkWorkerFractionalMode:
  1846  				gcDrainMarkWorkerFractional(&pp.gcw)
  1847  			case gcMarkWorkerIdleMode:
  1848  				gcDrainMarkWorkerIdle(&pp.gcw)
  1849  			}
  1850  			casgstatus(gp, _Gwaiting, _Grunning)
  1851  		})
  1852  
  1853  		// Account for time and mark us as stopped.
  1854  		now := nanotime()
  1855  		duration := now - startTime
  1856  		gcController.markWorkerStop(pp.gcMarkWorkerMode, duration)
  1857  		if trackLimiterEvent {
  1858  			pp.limiterEvent.stop(limiterEventIdleMarkWork, now)
  1859  		}
  1860  		if pp.gcMarkWorkerMode == gcMarkWorkerFractionalMode {
  1861  			pp.gcFractionalMarkTime.Add(duration)
  1862  		}
  1863  
  1864  		// We'll releasem after this point and thus this P may run
  1865  		// something else. We must clear the worker mode to avoid
  1866  		// attributing the mode to a different (non-worker) G in
  1867  		// tracev2.GoStart.
  1868  		pp.gcMarkWorkerMode = gcMarkWorkerNotWorker
  1869  
  1870  		// If this worker reached a background mark completion
  1871  		// point, signal the main GC goroutine.
  1872  		if gcEndWork() {
  1873  			// We don't need the P-local buffers here, allow
  1874  			// preemption because we may schedule like a regular
  1875  			// goroutine in gcMarkDone (block on locks, etc).
  1876  			releasem(node.m.ptr())
  1877  			node.m.set(nil)
  1878  
  1879  			gcMarkDone()
  1880  		}
  1881  	}
  1882  }
  1883  
  1884  // gcShouldScheduleWorker reports whether executing a mark worker
  1885  // on p is potentially useful. p may be nil.
  1886  func gcShouldScheduleWorker(p *p) bool {
  1887  	if p != nil && !p.gcw.empty() {
  1888  		return true
  1889  	}
  1890  	return gcMarkWorkAvailable()
  1891  }
  1892  
  1893  // gcIsMarkDone reports whether the mark phase is (probably) done.
  1894  func gcIsMarkDone() bool {
  1895  	return work.nwait == work.nproc && !gcMarkWorkAvailable()
  1896  }
  1897  
  1898  // gcBeginWork signals to the garbage collector that a new worker is
  1899  // about to process GC work.
  1900  func gcBeginWork() {
  1901  	decnwait := atomic.Xadd(&work.nwait, -1)
  1902  	if decnwait == work.nproc {
  1903  		println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
  1904  		throw("work.nwait was > work.nproc")
  1905  	}
  1906  }
  1907  
  1908  // gcEndWork signals to the garbage collector that a worker has just finished
  1909  // its work. It reports whether it was the last worker and there's no more work
  1910  // to do. If it returns true, the caller must call gcMarkDone.
  1911  func gcEndWork() (last bool) {
  1912  	incnwait := atomic.Xadd(&work.nwait, +1)
  1913  	if incnwait > work.nproc {
  1914  		println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
  1915  		throw("work.nwait > work.nproc")
  1916  	}
  1917  	return incnwait == work.nproc && !gcMarkWorkAvailable()
  1918  }
  1919  
  1920  // gcMark runs the mark (or, for concurrent GC, mark termination).
  1921  // All gcWork caches must be empty.
  1922  // STW is in effect at this point.
  1923  func gcMark(startTime int64) {
  1924  	if gcphase != _GCmarktermination {
  1925  		throw("in gcMark expecting to see gcphase as _GCmarktermination")
  1926  	}
  1927  	work.tstart = startTime
  1928  
  1929  	// Check that there's no marking work remaining.
  1930  	if next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); work.full != 0 || next < jobs {
  1931  		print("runtime: full=", hex(work.full), " next=", next, " jobs=", jobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
  1932  		panic("non-empty mark queue after concurrent mark")
  1933  	}
  1934  
  1935  	if debug.gccheckmark > 0 {
  1936  		// This is expensive when there's a large number of
  1937  		// Gs, so only do it if checkmark is also enabled.
  1938  		gcMarkRootCheck()
  1939  	}
  1940  
  1941  	// Drop allg snapshot. allgs may have grown, in which case
  1942  	// this is the only reference to the old backing store and
  1943  	// there's no need to keep it around.
  1944  	work.stackRoots = nil
  1945  
  1946  	// Clear out buffers and double-check that all gcWork caches
  1947  	// are empty. This should be ensured by gcMarkDone before we
  1948  	// enter mark termination.
  1949  	//
  1950  	// TODO: We could clear out buffers just before mark if this
  1951  	// has a non-negligible impact on STW time.
  1952  	for _, p := range allp {
  1953  		// The write barrier may have buffered pointers since
  1954  		// the gcMarkDone barrier. However, since the barrier
  1955  		// ensured all reachable objects were marked, all of
  1956  		// these must be pointers to black objects. Hence we
  1957  		// can just discard the write barrier buffer.
  1958  		if debug.gccheckmark > 0 {
  1959  			// For debugging, flush the buffer and make
  1960  			// sure it really was all marked.
  1961  			wbBufFlush1(p)
  1962  		} else {
  1963  			p.wbBuf.reset()
  1964  		}
  1965  
  1966  		gcw := &p.gcw
  1967  		if !gcw.empty() {
  1968  			printlock()
  1969  			print("runtime: P ", p.id, " flushedWork ", gcw.flushedWork)
  1970  			if gcw.wbuf1 == nil {
  1971  				print(" wbuf1=<nil>")
  1972  			} else {
  1973  				print(" wbuf1.n=", gcw.wbuf1.nobj)
  1974  			}
  1975  			if gcw.wbuf2 == nil {
  1976  				print(" wbuf2=<nil>")
  1977  			} else {
  1978  				print(" wbuf2.n=", gcw.wbuf2.nobj)
  1979  			}
  1980  			print("\n")
  1981  			throw("P has cached GC work at end of mark termination")
  1982  		}
  1983  		// There may still be cached empty buffers, which we
  1984  		// need to flush since we're going to free them. Also,
  1985  		// there may be non-zero stats because we allocated
  1986  		// black after the gcMarkDone barrier.
  1987  		gcw.dispose()
  1988  	}
  1989  
  1990  	// Flush scanAlloc from each mcache since we're about to modify
  1991  	// heapScan directly. If we were to flush this later, then scanAlloc
  1992  	// might have incorrect information.
  1993  	//
  1994  	// Note that it's not important to retain this information; we know
  1995  	// exactly what heapScan is at this point via scanWork.
  1996  	for _, p := range allp {
  1997  		c := p.mcache
  1998  		if c == nil {
  1999  			continue
  2000  		}
  2001  		c.scanAlloc = 0
  2002  	}
  2003  
  2004  	// Reset controller state.
  2005  	gcController.resetLive(work.bytesMarked)
  2006  }
  2007  
  2008  // gcSweep must be called on the system stack because it acquires the heap
  2009  // lock. See mheap for details.
  2010  //
  2011  // Returns true if the heap was fully swept by this function.
  2012  //
  2013  // The world must be stopped.
  2014  //
  2015  //go:systemstack
  2016  func gcSweep(mode gcMode) bool {
  2017  	assertWorldStopped()
  2018  
  2019  	if gcphase != _GCoff {
  2020  		throw("gcSweep being done but phase is not GCoff")
  2021  	}
  2022  
  2023  	lock(&mheap_.lock)
  2024  	mheap_.sweepgen += 2
  2025  	sweep.active.reset()
  2026  	mheap_.pagesSwept.Store(0)
  2027  	mheap_.sweepArenas = mheap_.heapArenas
  2028  	mheap_.reclaimIndex.Store(0)
  2029  	mheap_.reclaimCredit.Store(0)
  2030  	unlock(&mheap_.lock)
  2031  
  2032  	sweep.centralIndex.clear()
  2033  
  2034  	if !concurrentSweep || mode == gcForceBlockMode {
  2035  		// Special case synchronous sweep.
  2036  		// Record that no proportional sweeping has to happen.
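        		// (This path is taken when concurrentSweep is disabled at build
        		// time or when the cycle was forced into a blocking sweep, e.g.
        		// via a GODEBUG setting; see the gcSweep call in gcMarkTermination.)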
  2037  		lock(&mheap_.lock)
  2038  		mheap_.sweepPagesPerByte = 0
  2039  		unlock(&mheap_.lock)
  2040  		// Flush all mcaches.
  2041  		for _, pp := range allp {
  2042  			pp.mcache.prepareForSweep()
  2043  		}
  2044  		// Sweep all spans eagerly.
  2045  		for sweepone() != ^uintptr(0) {
  2046  		}
  2047  		// Free workbufs and span rings eagerly.
  2048  		prepareFreeWorkbufs()
  2049  		for freeSomeWbufs(false) {
  2050  		}
  2051  		freeDeadSpanSPMCs()
  2052  		// All "free" events for this mark/sweep cycle have
  2053  		// now happened, so we can make this profile cycle
  2054  		// available immediately.
  2055  		mProf_NextCycle()
  2056  		mProf_Flush()
  2057  		return true
  2058  	}
  2059  
  2060  	// Background sweep.
  2061  	lock(&sweep.lock)
  2062  	if sweep.parked {
  2063  		sweep.parked = false
  2064  		ready(sweep.g, 0, true)
  2065  	}
  2066  	unlock(&sweep.lock)
  2067  	return false
  2068  }
  2069  
  2070  // gcResetMarkState resets global state prior to marking (concurrent
  2071  // or STW) and resets the stack scan state of all Gs.
  2072  //
  2073  // This is safe to do without the world stopped because any Gs created
  2074  // during or after this will start out in the reset state.
  2075  //
  2076  // gcResetMarkState must be called on the system stack because it acquires
  2077  // the heap lock. See mheap for details.
  2078  //
  2079  //go:systemstack
  2080  func gcResetMarkState() {
  2081  	// This may be called during a concurrent phase, so lock to make sure
  2082  	// allgs doesn't change.
  2083  	forEachG(func(gp *g) {
  2084  		gp.gcscandone = false // set to true in gcphasework
  2085  		gp.gcAssistBytes = 0
  2086  	})
  2087  
  2088  	// Clear page marks. This is just 1MB per 64GB of heap, so the
  2089  	// time here is pretty trivial.
  2090  	lock(&mheap_.lock)
  2091  	arenas := mheap_.heapArenas
  2092  	unlock(&mheap_.lock)
  2093  	for _, ai := range arenas {
  2094  		ha := mheap_.arenas[ai.l1()][ai.l2()]
  2095  		clear(ha.pageMarks[:])
  2096  	}
  2097  
  2098  	work.bytesMarked = 0
  2099  	work.initialHeapLive = gcController.heapLive.Load()
  2100  }
  2101  
  2102  // Hooks for other packages
  2103  
  2104  var poolcleanup func()
  2105  var boringCaches []unsafe.Pointer // for crypto/internal/boring
  2106  
  2107  // sync_runtime_registerPoolCleanup should be an internal detail,
  2108  // but widely used packages access it using linkname.
  2109  // Notable members of the hall of shame include:
  2110  //   - github.com/bytedance/gopkg
  2111  //   - github.com/songzhibin97/gkit
  2112  //
  2113  // Do not remove or change the type signature.
  2114  // See go.dev/issue/67401.
  2115  //
  2116  //go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
  2117  func sync_runtime_registerPoolCleanup(f func()) {
  2118  	poolcleanup = f
  2119  }
  2120  
  2121  //go:linkname boring_registerCache crypto/internal/boring/bcache.registerCache
  2122  func boring_registerCache(p unsafe.Pointer) {
  2123  	boringCaches = append(boringCaches, p)
  2124  }
  2125  
  2126  func clearpools() {
  2127  	// clear sync.Pools
  2128  	if poolcleanup != nil {
  2129  		poolcleanup()
  2130  	}
  2131  
  2132  	// clear boringcrypto caches
  2133  	for _, p := range boringCaches {
  2134  		atomicstorep(p, nil)
  2135  	}
  2136  
  2137  	// Clear central sudog cache.
  2138  	// Leave per-P caches alone, they have strictly bounded size.
  2139  	// Disconnect cached list before dropping it on the floor,
  2140  	// so that a dangling ref to one entry does not pin all of them.
  2141  	lock(&sched.sudoglock)
  2142  	var sg, sgnext *sudog
  2143  	for sg = sched.sudogcache; sg != nil; sg = sgnext {
  2144  		sgnext = sg.next
  2145  		sg.next = nil
  2146  	}
  2147  	sched.sudogcache = nil
  2148  	unlock(&sched.sudoglock)
  2149  
  2150  	// Clear central defer pool.
  2151  	// Leave per-P pools alone, they have strictly bounded size.
  2152  	lock(&sched.deferlock)
  2153  	// disconnect cached list before dropping it on the floor,
  2154  	// so that a dangling ref to one entry does not pin all of them.
  2155  	var d, dlink *_defer
  2156  	for d = sched.deferpool; d != nil; d = dlink {
  2157  		dlink = d.link
  2158  		d.link = nil
  2159  	}
  2160  	sched.deferpool = nil
  2161  	unlock(&sched.deferlock)
  2162  }
  2163  
  2164  // Timing
  2165  
  2166  // itoaDiv formats val/(10**dec) into buf.
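        //
        // For example, itoaDiv(buf, 12345, 3) produces "12.345" and
        // itoaDiv(buf, 42, 0) produces "42".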
  2167  func itoaDiv(buf []byte, val uint64, dec int) []byte {
  2168  	i := len(buf) - 1
  2169  	idec := i - dec
  2170  	for val >= 10 || i >= idec {
  2171  		buf[i] = byte(val%10 + '0')
  2172  		i--
  2173  		if i == idec {
  2174  			buf[i] = '.'
  2175  			i--
  2176  		}
  2177  		val /= 10
  2178  	}
  2179  	buf[i] = byte(val + '0')
  2180  	return buf[i:]
  2181  }
  2182  
  2183  // fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
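        //
        // For example, 25e6 ns formats as "25" and 2.5e6 ns as "2.5".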
  2184  func fmtNSAsMS(buf []byte, ns uint64) []byte {
  2185  	if ns >= 10e6 {
  2186  		// Format as whole milliseconds.
  2187  		return itoaDiv(buf, ns/1e6, 0)
  2188  	}
  2189  	// Format two digits of precision, with at most three decimal places.
  2190  	x := ns / 1e3
  2191  	if x == 0 {
  2192  		buf[0] = '0'
  2193  		return buf[:1]
  2194  	}
  2195  	dec := 3
  2196  	for x >= 100 {
  2197  		x /= 10
  2198  		dec--
  2199  	}
  2200  	return itoaDiv(buf, x, dec)
  2201  }
  2202  
  2203  // Helpers for testing GC.
  2204  
  2205  // gcTestMoveStackOnNextCall causes the stack to be moved on a call
  2206  // immediately following the call to this. It may not work correctly
  2207  // if any other work appears after this call (such as returning).
  2208  // Typically the following call should be marked go:noinline so it
  2209  // performs a stack check.
  2210  //
  2211  // In rare cases this may not cause the stack to move, specifically if
  2212  // there's a preemption between this call and the next.
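        //
        // A typical (hypothetical) use in a test:
        //
        //	gcTestMoveStackOnNextCall()
        //	grow() // some //go:noinline function; its stack check moves the stack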
  2213  func gcTestMoveStackOnNextCall() {
  2214  	gp := getg()
  2215  	gp.stackguard0 = stackForceMove
  2216  }
  2217  
  2218  // gcTestIsReachable performs a GC and returns a bit set where bit i
  2219  // is set if ptrs[i] is reachable.
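        //
        // For example (hypothetical), a test that wants to know whether the
        // objects pointed to by x and y survive a GC might do:
        //
        //	mask := gcTestIsReachable(unsafe.Pointer(x), unsafe.Pointer(y))
        //	// mask&1 reports x's reachability; mask&2 reports y's.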
  2220  func gcTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
  2221  	// This takes the pointers as unsafe.Pointers in order to keep
  2222  	// them live long enough for us to attach specials. After
  2223  	// that, we drop our references to them.
  2224  
  2225  	if len(ptrs) > 64 {
  2226  		panic("too many pointers for uint64 mask")
  2227  	}
  2228  
  2229  	// Block GC while we attach specials and drop our references
  2230  	// to ptrs. Otherwise, if a GC is in progress, it could mark
  2231  	// them reachable via this function before we have a chance to
  2232  	// drop them.
  2233  	semacquire(&gcsema)
  2234  
  2235  	// Create reachability specials for ptrs.
  2236  	specials := make([]*specialReachable, len(ptrs))
  2237  	for i, p := range ptrs {
  2238  		lock(&mheap_.speciallock)
  2239  		s := (*specialReachable)(mheap_.specialReachableAlloc.alloc())
  2240  		unlock(&mheap_.speciallock)
  2241  		s.special.kind = _KindSpecialReachable
  2242  		if !addspecial(p, &s.special, false) {
  2243  			throw("already have a reachable special (duplicate pointer?)")
  2244  		}
  2245  		specials[i] = s
  2246  		// Make sure we don't retain ptrs.
  2247  		ptrs[i] = nil
  2248  	}
  2249  
  2250  	semrelease(&gcsema)
  2251  
  2252  	// Force a full GC and sweep.
  2253  	GC()
  2254  
  2255  	// Process specials.
  2256  	for i, s := range specials {
  2257  		if !s.done {
  2258  			printlock()
  2259  			println("runtime: object", i, "was not swept")
  2260  			throw("IsReachable failed")
  2261  		}
  2262  		if s.reachable {
  2263  			mask |= 1 << i
  2264  		}
  2265  		lock(&mheap_.speciallock)
  2266  		mheap_.specialReachableAlloc.free(unsafe.Pointer(s))
  2267  		unlock(&mheap_.speciallock)
  2268  	}
  2269  
  2270  	return mask
  2271  }
  2272  
  2273  // gcTestPointerClass returns the category of what p points to, one of:
  2274  // "heap", "stack", "data", "bss", "other". This is useful for checking
  2275  // that a test is doing what it's intended to do.
  2276  //
  2277  // This is nosplit simply to avoid extra pointer shuffling that may
  2278  // complicate a test.
  2279  //
  2280  //go:nosplit
  2281  func gcTestPointerClass(p unsafe.Pointer) string {
  2282  	p2 := uintptr(noescape(p))
  2283  	gp := getg()
  2284  	if gp.stack.lo <= p2 && p2 < gp.stack.hi {
  2285  		return "stack"
  2286  	}
  2287  	if base, _, _ := findObject(p2, 0, 0); base != 0 {
  2288  		return "heap"
  2289  	}
  2290  	for _, datap := range activeModules() {
  2291  		if datap.data <= p2 && p2 < datap.edata || datap.noptrdata <= p2 && p2 < datap.enoptrdata {
  2292  			return "data"
  2293  		}
  2294  		if datap.bss <= p2 && p2 < datap.ebss || datap.noptrbss <= p2 && p2 <= datap.enoptrbss {
  2295  			return "bss"
  2296  		}
  2297  	}
  2298  	KeepAlive(p)
  2299  	return "other"
  2300  }
  2301  