Source file src/runtime/mheap.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Page heap.
     6  //
     7  // See malloc.go for overview.
     8  
     9  package runtime
    10  
    11  import (
    12  	"internal/abi"
    13  	"internal/cpu"
    14  	"internal/goarch"
    15  	"internal/goexperiment"
    16  	"internal/runtime/atomic"
    17  	"internal/runtime/gc"
    18  	"internal/runtime/sys"
    19  	"unsafe"
    20  )
    21  
    22  const (
    23  	// minPhysPageSize is a lower-bound on the physical page size. The
    24  	// true physical page size may be larger than this. In contrast,
    25  	// sys.PhysPageSize is an upper-bound on the physical page size.
    26  	minPhysPageSize = 4096
    27  
    28  	// maxPhysPageSize is the maximum page size the runtime supports.
    29  	maxPhysPageSize = 512 << 10
    30  
    31  	// maxPhysHugePageSize sets an upper-bound on the maximum huge page size
    32  	// that the runtime supports.
    33  	maxPhysHugePageSize = pallocChunkBytes
    34  
    35  	// pagesPerReclaimerChunk indicates how many pages to scan from the
    36  	// pageInUse bitmap at a time. Used by the page reclaimer.
    37  	//
    38  	// Higher values reduce contention on scanning indexes (such as
    39  	// h.reclaimIndex), but increase the minimum latency of the
    40  	// operation.
    41  	//
    42  	// The time required to scan this many pages can vary a lot depending
    43  	// on how many spans are actually freed. Experimentally, it can
    44  	// scan for pages at ~300 GB/ms on a 2.6GHz Core i7, but can only
    45  	// free spans at ~32 MB/ms. Using 512 pages bounds this at
    46  	// roughly 100µs.
    47  	//
    48  	// Must be a multiple of the pageInUse bitmap element size and
    49  	// must also evenly divide pagesPerArena.
    50  	pagesPerReclaimerChunk = min(512, pagesPerArena)
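	// (With the runtime's 8 KiB pages, 512 pages cover 4 MiB of spans, so
	// freeing them at the ~32 MB/ms rate above takes on the order of
	// 0.125 ms, which is where the ~100µs bound comes from.)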
    51  
    52  	// physPageAlignedStacks indicates whether stack allocations must be
    53  	// physical page aligned. This is a requirement for MAP_STACK on
    54  	// OpenBSD.
    55  	physPageAlignedStacks = GOOS == "openbsd"
    56  )
    57  
    58  // Main malloc heap.
    59  // The heap uses pageAlloc to manage free and scavenged pages,
    60  // but all the other global data is here too.
    61  //
    62  // mheap must not be heap-allocated because it contains mSpanLists,
    63  // which must not be heap-allocated.
    64  type mheap struct {
    65  	_ sys.NotInHeap
    66  
    67  	// lock must only be acquired on the system stack, otherwise a g
    68  	// could self-deadlock if its stack grows with the lock held.
    69  	lock mutex
    70  
    71  	pages pageAlloc // page allocation data structure
    72  
    73  	sweepgen uint32 // sweep generation, see comment in mspan; written during STW
    74  
    75  	// allspans is a slice of all mspans ever created. Each mspan
    76  	// appears exactly once.
    77  	//
    78  	// The memory for allspans is manually managed and can be
    79  	// reallocated and moved as the heap grows.
    80  	//
    81  	// In general, allspans is protected by mheap_.lock, which
    82  	// prevents concurrent access as well as freeing the backing
    83  	// store. Accesses during STW might not hold the lock, but
    84  	// must ensure that allocation cannot happen around the
    85  	// access (since that may free the backing store).
    86  	allspans []*mspan // all spans out there
    87  
    88  	// Proportional sweep
    89  	//
    90  	// These parameters represent a linear function from gcController.heapLive
    91  	// to page sweep count. The proportional sweep system works to
    92  	// stay in the black by keeping the current page sweep count
    93  	// above this line at the current gcController.heapLive.
    94  	//
    95  	// The line has slope sweepPagesPerByte and passes through a
    96  	// basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
    97  	// any given time, the system is at (gcController.heapLive,
    98  	// pagesSwept) in this space.
    99  	//
   100  	// It is important that the line pass through a point we
   101  	// control rather than simply starting at a 0,0 origin
   102  	// because that lets us adjust sweep pacing at any time while
   103  	// accounting for current progress. If we could only adjust
   104  	// the slope, it would create a discontinuity in debt if any
   105  	// progress has already been made.
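	//
	// As a rough sketch of how the sweep pacer evaluates this line (the
	// real logic lives in the sweep pacing code in mgcsweep.go; the names
	// and details here are simplified):
	//
	//	target := h.sweepPagesPerByte * float64(gcController.heapLive.Load()-h.sweepHeapLiveBasis)
	//	swept := float64(h.pagesSwept.Load() - h.pagesSweptBasis.Load())
	//	// Allocating goroutines must help sweep whenever swept < target.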
   106  	pagesInUse         atomic.Uintptr // pages of spans in stats mSpanInUse
   107  	pagesSwept         atomic.Uint64  // pages swept this cycle
   108  	pagesSweptBasis    atomic.Uint64  // pagesSwept to use as the origin of the sweep ratio
   109  	sweepHeapLiveBasis uint64         // value of gcController.heapLive to use as the origin of sweep ratio; written with lock, read without
   110  	sweepPagesPerByte  float64        // proportional sweep ratio; written with lock, read without
   111  
   112  	// Page reclaimer state
   113  
   114  	// reclaimIndex is the page index in heapArenas of the next page to
   115  	// reclaim. Specifically, it refers to page (i %
   116  	// pagesPerArena) of arena heapArenas[i / pagesPerArena].
   117  	//
   118  	// If this is >= 1<<63, the page reclaimer is done scanning
   119  	// the page marks.
   120  	reclaimIndex atomic.Uint64
   121  
   122  	// reclaimCredit is spare credit for extra pages swept. Since
   123  	// the page reclaimer works in large chunks, it may reclaim
   124  	// more than requested. Any spare pages released go to this
   125  	// credit pool.
   126  	reclaimCredit atomic.Uintptr
   127  
   128  	_ cpu.CacheLinePad // prevents false-sharing between arenas and preceding variables
   129  
   130  	// arenas is the heap arena map. It points to the metadata for
   131  	// the heap for every arena frame of the entire usable virtual
   132  	// address space.
   133  	//
   134  	// Use arenaIndex to compute indexes into this array.
   135  	//
   136  	// For regions of the address space that are not backed by the
   137  	// Go heap, the arena map contains nil.
   138  	//
   139  	// Modifications are protected by mheap_.lock. Reads can be
   140  	// performed without locking; however, a given entry can
   141  	// transition from nil to non-nil at any time when the lock
   142  	// isn't held. (Entries never transition back to nil.)
   143  	//
   144  	// In general, this is a two-level mapping consisting of an L1
   145  	// map and possibly many L2 maps. This saves space when there
   146  	// are a huge number of arena frames. However, on many
   147  	// platforms (even 64-bit), arenaL1Bits is 0, making this
   148  	// effectively a single-level map. In this case, arenas[0]
   149  	// will never be nil.
   150  	arenas [1 << arenaL1Bits]*[1 << arenaL2Bits]*heapArena
   151  
   152  	// arenasHugePages indicates whether arenas' L2 entries are eligible
   153  	// to be backed by huge pages.
   154  	arenasHugePages bool
   155  
   156  	// heapArenaAlloc is pre-reserved space for allocating heapArena
   157  	// objects. This is only used on 32-bit, where we pre-reserve
   158  	// this space to avoid interleaving it with the heap itself.
   159  	heapArenaAlloc linearAlloc
   160  
   161  	// arenaHints is a list of addresses at which to attempt to
   162  	// add more heap arenas. This is initially populated with a
   163  	// set of general hint addresses, and grown with the bounds of
   164  	// actual heap arena ranges.
   165  	arenaHints *arenaHint
   166  
   167  	// arena is a pre-reserved space for allocating heap arenas
   168  	// (the actual arenas). This is only used on 32-bit.
   169  	arena linearAlloc
   170  
   171  	// heapArenas is the arenaIndex of every arena mapped for the heap.
   172  	// This can be used to iterate through the heap address space.
   173  	//
   174  	// Access is protected by mheap_.lock. However, since this is
   175  	// append-only and old backing arrays are never freed, it is
   176  	// safe to acquire mheap_.lock, copy the slice header, and
   177  	// then release mheap_.lock.
   178  	heapArenas []arenaIdx
   179  
   180  	// userArenaArenas is the arenaIndex of every arena mapped for
   181  	// user arenas.
   182  	//
   183  	// Access is protected by mheap_.lock. However, since this is
   184  	// append-only and old backing arrays are never freed, it is
   185  	// safe to acquire mheap_.lock, copy the slice header, and
   186  	// then release mheap_.lock.
   187  	userArenaArenas []arenaIdx
   188  
   189  	// sweepArenas is a snapshot of heapArenas taken at the
   190  	// beginning of the sweep cycle. This can be read safely by
   191  	// simply blocking GC (by disabling preemption).
   192  	sweepArenas []arenaIdx
   193  
   194  	// markArenas is a snapshot of heapArenas taken at the beginning
   195  	// of the mark cycle. Because heapArenas is append-only, neither
   196  	// this slice nor its contents will change during the mark, so
   197  	// it can be read safely.
   198  	markArenas []arenaIdx
   199  
   200  	// curArena is the arena that the heap is currently growing
   201  	// into. This should always be physPageSize-aligned.
   202  	curArena struct {
   203  		base, end uintptr
   204  	}
   205  
   206  	// central free lists for small size classes.
   207  	// the padding makes sure that the mcentrals are
   208  	// spaced CacheLinePadSize bytes apart, so that each mcentral.lock
   209  	// gets its own cache line.
   210  	// central is indexed by spanClass.
   211  	central [numSpanClasses]struct {
   212  		mcentral mcentral
   213  		pad      [(cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
   214  	}
   215  
   216  	spanalloc                  fixalloc // allocator for span
   217  	spanSPMCAlloc              fixalloc // allocator for spanSPMC, protected by work.spanSPMCs.lock
   218  	cachealloc                 fixalloc // allocator for mcache
   219  	specialfinalizeralloc      fixalloc // allocator for specialfinalizer
   220  	specialCleanupAlloc        fixalloc // allocator for specialCleanup
   221  	specialCheckFinalizerAlloc fixalloc // allocator for specialCheckFinalizer
   222  	specialTinyBlockAlloc      fixalloc // allocator for specialTinyBlock
   223  	specialprofilealloc        fixalloc // allocator for specialprofile
   224  	specialReachableAlloc      fixalloc // allocator for specialReachable
   225  	specialPinCounterAlloc     fixalloc // allocator for specialPinCounter
   226  	specialWeakHandleAlloc     fixalloc // allocator for specialWeakHandle
   227  	specialBubbleAlloc         fixalloc // allocator for specialBubble
   228  	specialSecretAlloc         fixalloc // allocator for specialSecret
   229  	speciallock                mutex    // lock for special record allocators.
   230  	arenaHintAlloc             fixalloc // allocator for arenaHints
   231  
   232  	// User arena state.
   233  	//
   234  	// Protected by mheap_.lock.
   235  	userArena struct {
   236  		// arenaHints is a list of addresses at which to attempt to
   237  		// add more heap arenas for user arena chunks. This is initially
   238  		// populated with a set of general hint addresses, and grown with
   239  		// the bounds of actual heap arena ranges.
   240  		arenaHints *arenaHint
   241  
   242  		// quarantineList is a list of user arena spans that have been set to fault, but
   243  		// are waiting for all pointers into them to go away. Sweeping handles
   244  		// identifying when this is true, and moves the span to the ready list.
   245  		quarantineList mSpanList
   246  
   247  		// readyList is a list of empty user arena spans that are ready for reuse.
   248  		readyList mSpanList
   249  	}
   250  
   251  	// cleanupID is a counter which is incremented each time a cleanup special is added
   252  	// to a span. It's used to create globally unique identifiers for individual cleanups.
   253  	// cleanupID is protected by mheap_.speciallock. It must only be incremented while holding
   254  	// the lock. ID 0 is reserved. Users should increment first, then read the value.
   255  	cleanupID uint64
   256  
   257  	_ cpu.CacheLinePad
   258  
   259  	immortalWeakHandles immortalWeakHandleMap
   260  
   261  	unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
   262  }
   263  
   264  var mheap_ mheap
   265  
   266  // A heapArena stores metadata for a heap arena. heapArenas are stored
   267  // outside of the Go heap and accessed via the mheap_.arenas index.
   268  type heapArena struct {
   269  	_ sys.NotInHeap
   270  
   271  	// spans maps from virtual address page ID within this arena to *mspan.
   272  	// For allocated spans, their pages map to the span itself.
   273  	// For free spans, only the lowest and highest pages map to the span itself.
   274  	// Internal pages map to an arbitrary span.
   275  	// For pages that have never been allocated, spans entries are nil.
   276  	//
   277  	// Modifications are protected by mheap.lock. Reads can be
   278  	// performed without locking, but ONLY from indexes that are
   279  	// known to contain in-use or stack spans. This means there
   280  	// must not be a safe-point between establishing that an
   281  	// address is live and looking it up in the spans array.
   282  	spans [pagesPerArena]*mspan
   283  
   284  	// pageInUse is a bitmap that indicates which spans are in
   285  	// state mSpanInUse. This bitmap is indexed by page number,
   286  	// but only the bit corresponding to the first page in each
   287  	// span is used.
   288  	//
   289  	// Reads and writes are atomic.
   290  	pageInUse [pagesPerArena / 8]uint8
   291  
   292  	// pageMarks is a bitmap that indicates which spans have any
   293  	// marked objects on them. Like pageInUse, only the bit
   294  	// corresponding to the first page in each span is used.
   295  	//
   296  	// Writes are done atomically during marking. Reads are
   297  	// non-atomic and lock-free since they only occur during
   298  	// sweeping (and hence never race with writes).
   299  	//
   300  	// This is used to quickly find whole spans that can be freed.
   301  	//
   302  	// TODO(austin): It would be nice if this was uint64 for
   303  	// faster scanning, but we don't have 64-bit atomic bit
   304  	// operations.
   305  	pageMarks [pagesPerArena / 8]uint8
   306  
   307  	// pageSpecials is a bitmap that indicates which spans have
   308  	// specials (finalizers or other). Like pageInUse, only the bit
   309  	// corresponding to the first page in each span is used.
   310  	//
   311  	// Writes are done atomically whenever a special is added to
   312  	// a span and whenever the last special is removed from a span.
   313  	// Reads are done atomically to find spans containing specials
   314  	// during marking.
   315  	pageSpecials [pagesPerArena / 8]uint8
   316  
   317  	// pageUseSpanInlineMarkBits is a bitmap where each bit corresponds
   318  	// to a span, as only spans one page in size can have inline mark bits.
   319  	// The bit indicates that the span has a spanInlineMarkBits struct
   320  	// stored directly at the top end of the span's memory.
   321  	pageUseSpanInlineMarkBits [pagesPerArena / 8]uint8
   322  
   323  	// checkmarks stores the debug.gccheckmark state. It is only
   324  	// used if debug.gccheckmark > 0 or debug.checkfinalizers > 0.
   325  	checkmarks *checkmarksMap
   326  
   327  	// zeroedBase marks the first byte of the first page in this
   328  	// arena which hasn't been used yet and is therefore already
   329  	// zero. zeroedBase is relative to the arena base.
   330  	// Increases monotonically until it hits heapArenaBytes.
   331  	//
   332  	// This field is sufficient to determine if an allocation
   333  	// needs to be zeroed because the page allocator follows an
   334  	// address-ordered first-fit policy.
   335  	//
   336  	// Read atomically and written with an atomic CAS.
   337  	zeroedBase uintptr
   338  }
   339  
   340  // arenaHint is a hint for where to grow the heap arenas. See
   341  // mheap_.arenaHints.
   342  type arenaHint struct {
   343  	_    sys.NotInHeap
   344  	addr uintptr
   345  	down bool
   346  	next *arenaHint
   347  }
   348  
   349  // An mspan is a run of pages.
   350  //
   351  // When an mspan is in the heap free treap, state == mSpanFree
   352  // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
   353  // If the mspan is in the heap scav treap, then in addition to the
   354  // above scavenged == true. scavenged == false in all other cases.
   355  //
   356  // When an mspan is allocated, state == mSpanInUse or mSpanManual
   357  // and heapmap(i) == span for all s->start <= i < s->start+s->npages.
   358  
   359  // Every mspan is in one doubly-linked list, either in the mheap's
   360  // busy list or one of the mcentral's span lists.
   361  
   362  // An mspan representing actual memory has state mSpanInUse,
   363  // mSpanManual, or mSpanFree. Transitions between these states are
   364  // constrained as follows:
   365  //
   366  //   - A span may transition from free to in-use or manual during any GC
   367  //     phase.
   368  //
   369  //   - During sweeping (gcphase == _GCoff), a span may transition from
   370  //     in-use to free (as a result of sweeping) or manual to free (as a
   371  //     result of stacks being freed).
   372  //
   373  //   - During GC (gcphase != _GCoff), a span *must not* transition from
   374  //     manual or in-use to free. Because concurrent GC may read a pointer
   375  //     and then look up its span, the span state must be monotonic.
   376  //
   377  // Setting mspan.state to mSpanInUse or mSpanManual must be done
   378  // atomically and only after all other span fields are valid.
   379  // Likewise, if inspecting a span is contingent on it being
   380  // mSpanInUse, the state should be loaded atomically and checked
   381  // before depending on other fields. This allows the garbage collector
   382  // to safely deal with potentially invalid pointers, since resolving
   383  // such pointers may race with a span being allocated.
   384  type mSpanState uint8
   385  
   386  const (
   387  	mSpanDead   mSpanState = iota
   388  	mSpanInUse             // allocated for garbage collected heap
   389  	mSpanManual            // allocated for manual management (e.g., stack allocator)
   390  )
   391  
   392  // mSpanStateNames are the names of the span states, indexed by
   393  // mSpanState.
   394  var mSpanStateNames = []string{
   395  	"mSpanDead",
   396  	"mSpanInUse",
   397  	"mSpanManual",
   398  }
   399  
   400  // mSpanStateBox holds an atomic.Uint8 to provide atomic operations on
   401  // an mSpanState. This is a separate type to disallow accidental comparison
   402  // or assignment with mSpanState.
   403  type mSpanStateBox struct {
   404  	s atomic.Uint8
   405  }
   406  
   407  // It is nosplit to match get, below.
   408  
   409  //go:nosplit
   410  func (b *mSpanStateBox) set(s mSpanState) {
   411  	b.s.Store(uint8(s))
   412  }
   413  
   414  // It is nosplit because it's called indirectly by typedmemclr,
   415  // which must not be preempted.
   416  
   417  //go:nosplit
   418  func (b *mSpanStateBox) get() mSpanState {
   419  	return mSpanState(b.s.Load())
   420  }
   421  
   422  type mspan struct {
   423  	_    sys.NotInHeap
   424  	next *mspan     // next span in list, or nil if none
   425  	prev *mspan     // previous span in list, or nil if none
   426  	list *mSpanList // For debugging.
   427  
   428  	startAddr uintptr // address of first byte of span aka s.base()
   429  	npages    uintptr // number of pages in span
   430  
   431  	manualFreeList gclinkptr // list of free objects in mSpanManual spans
   432  
   433  	// freeindex is the slot index between 0 and nelems at which to begin scanning
   434  	// for the next free object in this span.
   435  	// Each allocation scans allocBits starting at freeindex until it encounters a 0
   436  	// indicating a free object. freeindex is then adjusted so that subsequent scans begin
   437  	// just past the newly discovered free object.
   438  	//
   439  	// If freeindex == nelems, this span has no free objects, though it might have reusable objects.
   440  	//
   441  	// allocBits is a bitmap of objects in this span.
   442  	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
   443  	// then object n is free;
   444  	// otherwise, object n is allocated. Bits starting at nelems are
   445  	// undefined and should never be referenced.
   446  	//
   447  	// Object n starts at address n*elemsize + (start << pageShift).
   448  	freeindex uint16
   449  	// TODO: Look up nelems from sizeclass and remove this field if it
   450  	// helps performance.
   451  	nelems uint16 // number of objects in the span.
   452  	// freeIndexForScan is like freeindex, except that freeindex is
   453  	// used by the allocator whereas freeIndexForScan is used by the
   454  	// GC scanner. They are two fields so that the GC sees the object
   455  	// is allocated only when the object and the heap bits are
   456  	// initialized (see also the assignment of freeIndexForScan in
   457  	// mallocgc, and issue 54596).
   458  	freeIndexForScan uint16
   459  
   460  	// Temporary storage for the object index that caused this span to
   461  	// be queued for scanning.
   462  	//
   463  	// Used only with goexperiment.GreenTeaGC.
   464  	scanIdx uint16
   465  
   466  	// Cache of the allocBits at freeindex. allocCache is shifted
   467  	// such that the lowest bit corresponds to the bit freeindex.
   468  	// allocCache holds the complement of allocBits, thus allowing
   469  	// ctz (count trailing zero) to use it directly.
   470  	// allocCache may contain bits beyond s.nelems; the caller must ignore
   471  	// these.
   472  	allocCache uint64
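
	// A sketch of the fast-path lookup that allocCache enables (the real
	// version is nextFreeFast in malloc.go):
	//
	//	theBit := sys.TrailingZeros64(s.allocCache)
	//	if theBit < 64 {
	//		// Object s.freeindex+theBit is the next free slot.
	//	}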
   473  
   474  	// allocBits and gcmarkBits hold pointers to a span's mark and
   475  	// allocation bits. The pointers are 8 byte aligned.
   476  	// There are three arenas where this data is held.
   477  	// free: Dirty arenas that are no longer accessed
   478  	//       and can be reused.
   479  	// next: Holds information to be used in the next GC cycle.
   480  	// current: Information being used during this GC cycle.
   481  	// previous: Information being used during the last GC cycle.
   482  	// A new GC cycle starts with the call to finishsweep_m.
   483  	// finishsweep_m moves the previous arena to the free arena,
   484  	// the current arena to the previous arena, and
   485  	// the next arena to the current arena.
   486  	// The next arena is populated as the spans request
   487  	// memory to hold gcmarkBits for the next GC cycle as well
   488  	// as allocBits for newly allocated spans.
   489  	//
   490  	// The pointer arithmetic is done "by hand" instead of using
   491  	// arrays to avoid bounds checks along critical performance
   492  	// paths.
   493  	// The sweep will free the old allocBits and set allocBits to the
   494  	// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
   495  	// out memory.
   496  	allocBits  *gcBits
   497  	gcmarkBits *gcBits
   498  	pinnerBits *gcBits // bitmap for pinned objects; accessed atomically
   499  
   500  	// sweep generation:
   501  	// if sweepgen == h->sweepgen - 2, the span needs sweeping
   502  	// if sweepgen == h->sweepgen - 1, the span is currently being swept
   503  	// if sweepgen == h->sweepgen, the span is swept and ready to use
   504  	// if sweepgen == h->sweepgen + 1, the span was cached before sweep began and is still cached, and needs sweeping
   505  	// if sweepgen == h->sweepgen + 3, the span was swept and then cached and is still cached
   506  	// h->sweepgen is incremented by 2 after every GC
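	//
	// For example, the sweeper claims an unswept span with a CAS on
	// sweepgen (a simplified sketch of sweepLocker.tryAcquire in
	// mgcsweep.go):
	//
	//	if atomic.Load(&s.sweepgen) == h.sweepgen-2 &&
	//		atomic.Cas(&s.sweepgen, h.sweepgen-2, h.sweepgen-1) {
	//		// This goroutine now owns sweeping s.
	//	}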
   507  
   508  	sweepgen              uint32
   509  	divMul                uint32        // for divide by elemsize
   510  	allocCount            uint16        // number of allocated objects
   511  	spanclass             spanClass     // size class and noscan (uint8)
   512  	state                 mSpanStateBox // mSpanInUse etc; accessed atomically (get/set methods)
   513  	needzero              uint8         // needs to be zeroed before allocation
   514  	isUserArenaChunk      bool          // whether or not this span represents a user arena
   515  	allocCountBeforeCache uint16        // a copy of allocCount that is stored just before this span is cached
   516  	elemsize              uintptr       // computed from sizeclass or from npages
   517  	limit                 uintptr       // end of data in span
   518  	speciallock           mutex         // guards specials list and changes to pinnerBits
   519  	specials              *special      // linked list of special records sorted by offset.
   520  	userArenaChunkFree    addrRange     // interval for managing chunk allocation
   521  	largeType             *_type        // malloc header for large objects.
   522  }
   523  
   524  func (s *mspan) base() uintptr {
   525  	return s.startAddr
   526  }
   527  
   528  func (s *mspan) layout() (size, n, total uintptr) {
   529  	total = s.npages << gc.PageShift
   530  	size = s.elemsize
   531  	if size > 0 {
   532  		n = total / size
   533  	}
   534  	return
   535  }
   536  
   537  // recordspan adds a newly allocated span to h.allspans.
   538  //
   539  // This only happens the first time a span is allocated from
   540  // mheap.spanalloc (it is not called when a span is reused).
   541  //
   542  // Write barriers are disallowed here because it can be called from
   543  // gcWork when allocating new workbufs. However, because it's an
   544  // indirect call from the fixalloc initializer, the compiler can't see
   545  // this.
   546  //
   547  // The heap lock must be held.
   548  //
   549  //go:nowritebarrierrec
   550  func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
   551  	h := (*mheap)(vh)
   552  	s := (*mspan)(p)
   553  
   554  	assertLockHeld(&h.lock)
   555  
   556  	if len(h.allspans) >= cap(h.allspans) {
   557  		n := 64 * 1024 / goarch.PtrSize
   558  		if n < cap(h.allspans)*3/2 {
   559  			n = cap(h.allspans) * 3 / 2
   560  		}
   561  		var new []*mspan
   562  		sp := (*slice)(unsafe.Pointer(&new))
   563  		sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys, "allspans array")
   564  		if sp.array == nil {
   565  			throw("runtime: cannot allocate memory")
   566  		}
   567  		sp.len = len(h.allspans)
   568  		sp.cap = n
   569  		if len(h.allspans) > 0 {
   570  			copy(new, h.allspans)
   571  		}
   572  		oldAllspans := h.allspans
   573  		*(*notInHeapSlice)(unsafe.Pointer(&h.allspans)) = *(*notInHeapSlice)(unsafe.Pointer(&new))
   574  		if len(oldAllspans) != 0 {
   575  			sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
   576  		}
   577  	}
   578  	h.allspans = h.allspans[:len(h.allspans)+1]
   579  	h.allspans[len(h.allspans)-1] = s
   580  }
   581  
   582  // A spanClass represents the size class and noscan-ness of a span.
   583  //
   584  // Each size class has a noscan spanClass and a scan spanClass. The
   585  // noscan spanClass contains only noscan objects, which do not contain
   586  // pointers and thus do not need to be scanned by the garbage
   587  // collector.
   588  type spanClass uint8
   589  
   590  const (
   591  	numSpanClasses = gc.NumSizeClasses << 1
   592  	tinySpanClass  = spanClass(tinySizeClass<<1 | 1)
   593  )
   594  
   595  func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
   596  	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
   597  }
   598  
   599  //go:nosplit
   600  func (sc spanClass) sizeclass() int8 {
   601  	return int8(sc >> 1)
   602  }
   603  
   604  //go:nosplit
   605  func (sc spanClass) noscan() bool {
   606  	return sc&1 != 0
   607  }
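
// As a worked example of the encoding: makeSpanClass(5, true) yields
// spanClass(5<<1|1) == 11, for which sizeclass() == 5 and noscan() == true,
// while makeSpanClass(5, false) yields 10.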
   608  
   609  // arenaIndex returns the index into mheap_.arenas of the arena
   610  // containing metadata for p. This index combines an index into the
   611  // L1 map and an index into the L2 map and should be used as
   612  // mheap_.arenas[ai.l1()][ai.l2()].
   613  //
   614  // If p is outside the range of valid heap addresses, either l1() or
   615  // l2() will be out of bounds.
   616  //
   617  // It is nosplit because it's called by spanOf and several other
   618  // nosplit functions.
   619  //
   620  //go:nosplit
   621  func arenaIndex(p uintptr) arenaIdx {
   622  	return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
   623  }
   624  
   625  // arenaBase returns the low address of the region covered by heap
   626  // arena i.
   627  func arenaBase(i arenaIdx) uintptr {
   628  	return uintptr(i)*heapArenaBytes + arenaBaseOffset
   629  }
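
// As a sketch of how these fit together: for a pointer p into a mapped
// heap arena, with
//
//	ai := arenaIndex(p)
//	base := arenaBase(ai)
//
// it holds that base <= p && p < base+heapArenaBytes, and p's arena
// metadata is mheap_.arenas[ai.l1()][ai.l2()].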
   630  
   631  type arenaIdx uint
   632  
   633  // l1 returns the "l1" portion of an arenaIdx.
   634  //
   635  // Marked nosplit because it's called by spanOf and other nosplit
   636  // functions.
   637  //
   638  //go:nosplit
   639  func (i arenaIdx) l1() uint {
   640  	if arenaL1Bits == 0 {
   641  		// Let the compiler optimize this away if there's no
   642  		// L1 map.
   643  		return 0
   644  	} else {
   645  		return uint(i) >> arenaL1Shift
   646  	}
   647  }
   648  
   649  // l2 returns the "l2" portion of an arenaIdx.
   650  //
   651  // Marked nosplit because it's called by spanOf and other nosplit
   652  // functions.
   653  //
   654  //go:nosplit
   655  func (i arenaIdx) l2() uint {
   656  	if arenaL1Bits == 0 {
   657  		return uint(i)
   658  	} else {
   659  		return uint(i) & (1<<arenaL2Bits - 1)
   660  	}
   661  }
   662  
   663  // inheap reports whether b is a pointer into a (potentially dead) heap object.
   664  // It returns false for pointers into mSpanManual spans.
   665  // Non-preemptible because it is used by write barriers.
   666  //
   667  //go:nowritebarrier
   668  //go:nosplit
   669  func inheap(b uintptr) bool {
   670  	return spanOfHeap(b) != nil
   671  }
   672  
   673  // inHeapOrStack is a variant of inheap that returns true for pointers
   674  // into any allocated heap span.
   675  //
   676  //go:nowritebarrier
   677  //go:nosplit
   678  func inHeapOrStack(b uintptr) bool {
   679  	s := spanOf(b)
   680  	if s == nil || b < s.base() {
   681  		return false
   682  	}
   683  	switch s.state.get() {
   684  	case mSpanInUse, mSpanManual:
   685  		return b < s.limit
   686  	default:
   687  		return false
   688  	}
   689  }
   690  
   691  // spanOf returns the span of p. If p does not point into the heap
   692  // arena or no span has ever contained p, spanOf returns nil.
   693  //
   694  // If p does not point to allocated memory, this may return a non-nil
   695  // span that does *not* contain p. If this is a possibility, the
   696  // caller should either call spanOfHeap or check the span bounds
   697  // explicitly.
   698  //
   699  // Must be nosplit because it has callers that are nosplit.
   700  //
   701  //go:nosplit
   702  func spanOf(p uintptr) *mspan {
   703  	// This function looks big, but we use a lot of constant
   704  	// folding around arenaL1Bits to get it under the inlining
   705  	// budget. Also, many of the checks here are safety checks
   706  	// that Go needs to do anyway, so the generated code is quite
   707  	// short.
   708  	ri := arenaIndex(p)
   709  	if arenaL1Bits == 0 {
   710  		// If there's no L1, then ri.l1() can't be out of bounds but ri.l2() can.
   711  		if ri.l2() >= uint(len(mheap_.arenas[0])) {
   712  			return nil
   713  		}
   714  	} else {
   715  		// If there's an L1, then ri.l1() can be out of bounds but ri.l2() can't.
   716  		if ri.l1() >= uint(len(mheap_.arenas)) {
   717  			return nil
   718  		}
   719  	}
   720  	l2 := mheap_.arenas[ri.l1()]
   721  	if arenaL1Bits != 0 && l2 == nil { // Should never happen if there's no L1.
   722  		return nil
   723  	}
   724  	ha := l2[ri.l2()]
   725  	if ha == nil {
   726  		return nil
   727  	}
   728  	return ha.spans[(p/pageSize)%pagesPerArena]
   729  }
   730  
   731  // spanOfUnchecked is equivalent to spanOf, but the caller must ensure
   732  // that p points into an allocated heap arena.
   733  //
   734  // Must be nosplit because it has callers that are nosplit.
   735  //
   736  //go:nosplit
   737  func spanOfUnchecked(p uintptr) *mspan {
   738  	ai := arenaIndex(p)
   739  	return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
   740  }
   741  
   742  // spanOfHeap is like spanOf, but returns nil if p does not point to a
   743  // heap object.
   744  //
   745  // Must be nosplit because it has callers that are nosplit.
   746  //
   747  //go:nosplit
   748  func spanOfHeap(p uintptr) *mspan {
   749  	s := spanOf(p)
   750  	// s is nil if it's never been allocated. Otherwise, we check
   751  	// its state first because we don't trust this pointer, so we
   752  	// have to synchronize with span initialization. Then, it's
   753  	// still possible we picked up a stale span pointer, so we
   754  	// have to check the span's bounds.
   755  	if s == nil || s.state.get() != mSpanInUse || p < s.base() || p >= s.limit {
   756  		return nil
   757  	}
   758  	return s
   759  }
   760  
   761  // pageIndexOf returns the arena, page index, and page mask for pointer p.
   762  // The caller must ensure p is in the heap.
   763  func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8) {
   764  	ai := arenaIndex(p)
   765  	arena = mheap_.arenas[ai.l1()][ai.l2()]
   766  	pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse))
   767  	pageMask = byte(1 << ((p / pageSize) % 8))
   768  	return
   769  }
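
// A sketch of a typical use of pageIndexOf (for instance, the GC marks a
// span's page as containing marked objects roughly like this):
//
//	arena, pageIdx, pageMask := pageIndexOf(s.base())
//	if arena.pageMarks[pageIdx]&pageMask == 0 {
//		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
//	}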
   770  
   771  // heapArenaOf returns the heap arena for p, if one exists.
   772  func heapArenaOf(p uintptr) *heapArena {
   773  	ri := arenaIndex(p)
   774  	if arenaL1Bits == 0 {
   775  		// If there's no L1, then ri.l1() can't be out of bounds but ri.l2() can.
   776  		if ri.l2() >= uint(len(mheap_.arenas[0])) {
   777  			return nil
   778  		}
   779  	} else {
   780  		// If there's an L1, then ri.l1() can be out of bounds but ri.l2() can't.
   781  		if ri.l1() >= uint(len(mheap_.arenas)) {
   782  			return nil
   783  		}
   784  	}
   785  	l2 := mheap_.arenas[ri.l1()]
   786  	if arenaL1Bits != 0 && l2 == nil { // Should never happen if there's no L1.
   787  		return nil
   788  	}
   789  	return l2[ri.l2()]
   790  }
   791  
   792  // Initialize the heap.
   793  func (h *mheap) init() {
   794  	lockInit(&h.lock, lockRankMheap)
   795  	lockInit(&h.speciallock, lockRankMheapSpecial)
   796  
   797  	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
   798  	h.spanSPMCAlloc.init(unsafe.Sizeof(spanSPMC{}), nil, nil, &memstats.gcMiscSys)
   799  	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
   800  	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
   801  	h.specialCleanupAlloc.init(unsafe.Sizeof(specialCleanup{}), nil, nil, &memstats.other_sys)
   802  	h.specialCheckFinalizerAlloc.init(unsafe.Sizeof(specialCheckFinalizer{}), nil, nil, &memstats.other_sys)
   803  	h.specialTinyBlockAlloc.init(unsafe.Sizeof(specialTinyBlock{}), nil, nil, &memstats.other_sys)
   804  	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
   805  	h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
   806  	h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys)
   807  	h.specialSecretAlloc.init(unsafe.Sizeof(specialSecret{}), nil, nil, &memstats.other_sys)
   808  	h.specialWeakHandleAlloc.init(unsafe.Sizeof(specialWeakHandle{}), nil, nil, &memstats.gcMiscSys)
   809  	h.specialBubbleAlloc.init(unsafe.Sizeof(specialBubble{}), nil, nil, &memstats.other_sys)
   810  	h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)
   811  
   812  	// Don't zero mspan allocations. Background sweeping can
   813  	// inspect a span concurrently with allocating it, so it's
   814  	// important that the span's sweepgen survive across freeing
   815  	// and re-allocating a span to prevent background sweeping
   816  	// from improperly cas'ing it from 0.
   817  	//
   818  	// This is safe because mspan contains no heap pointers.
   819  	h.spanalloc.zero = false
   820  
   821  	// h->mapcache needs no init
   822  
   823  	for i := range h.central {
   824  		h.central[i].mcentral.init(spanClass(i))
   825  	}
   826  
   827  	h.pages.init(&h.lock, &memstats.gcMiscSys, false)
   828  
   829  	xRegInitAlloc()
   830  }
   831  
   832  // reclaim sweeps and reclaims at least npage pages into the heap.
   833  // It is called before allocating npage pages to keep growth in check.
   834  //
   835  // reclaim implements the page-reclaimer half of the sweeper.
   836  //
   837  // h.lock must NOT be held.
   838  func (h *mheap) reclaim(npage uintptr) {
   839  	// TODO(austin): Half of the time spent freeing spans is in
   840  	// locking/unlocking the heap (even with low contention). We
   841  	// could make the slow path here several times faster by
   842  	// batching heap frees.
   843  
   844  	// Bail early if there's no more reclaim work.
   845  	if h.reclaimIndex.Load() >= 1<<63 {
   846  		return
   847  	}
   848  
   849  	// Disable preemption so the GC can't start while we're
   850  	// sweeping, so we can read h.sweepArenas, and so
   851  	// traceGCSweepStart/Done pair on the P.
   852  	mp := acquirem()
   853  
   854  	trace := traceAcquire()
   855  	if trace.ok() {
   856  		trace.GCSweepStart()
   857  		traceRelease(trace)
   858  	}
   859  
   860  	arenas := h.sweepArenas
   861  	locked := false
   862  	for npage > 0 {
   863  		// Pull from accumulated credit first.
   864  		if credit := h.reclaimCredit.Load(); credit > 0 {
   865  			take := credit
   866  			if take > npage {
   867  				// Take only what we need.
   868  				take = npage
   869  			}
   870  			if h.reclaimCredit.CompareAndSwap(credit, credit-take) {
   871  				npage -= take
   872  			}
   873  			continue
   874  		}
   875  
   876  		// Claim a chunk of work.
   877  		idx := uintptr(h.reclaimIndex.Add(pagesPerReclaimerChunk) - pagesPerReclaimerChunk)
   878  		if idx/pagesPerArena >= uintptr(len(arenas)) {
   879  			// Page reclaiming is done.
   880  			h.reclaimIndex.Store(1 << 63)
   881  			break
   882  		}
   883  
   884  		if !locked {
   885  			// Lock the heap for reclaimChunk.
   886  			lock(&h.lock)
   887  			locked = true
   888  		}
   889  
   890  		// Scan this chunk.
   891  		nfound := h.reclaimChunk(arenas, idx, pagesPerReclaimerChunk)
   892  		if nfound <= npage {
   893  			npage -= nfound
   894  		} else {
   895  			// Put spare pages toward global credit.
   896  			h.reclaimCredit.Add(nfound - npage)
   897  			npage = 0
   898  		}
   899  	}
   900  	if locked {
   901  		unlock(&h.lock)
   902  	}
   903  
   904  	trace = traceAcquire()
   905  	if trace.ok() {
   906  		trace.GCSweepDone()
   907  		traceRelease(trace)
   908  	}
   909  	releasem(mp)
   910  }
   911  
   912  // reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n).
   913  // It returns the number of pages returned to the heap.
   914  //
   915  // h.lock must be held and the caller must be non-preemptible. Note: h.lock may be
   916  // temporarily unlocked and re-locked in order to do sweeping or if tracing is
   917  // enabled.
   918  func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
   919  	// The heap lock must be held because this accesses the
   920  	// heapArena.spans arrays using potentially non-live pointers.
   921  	// In particular, if a span were freed and merged concurrently
   922  	// with this probing heapArena.spans, it would be possible to
   923  	// observe arbitrary, stale span pointers.
   924  	assertLockHeld(&h.lock)
   925  
   926  	n0 := n
   927  	var nFreed uintptr
   928  	sl := sweep.active.begin()
   929  	if !sl.valid {
   930  		return 0
   931  	}
   932  	for n > 0 {
   933  		ai := arenas[pageIdx/pagesPerArena]
   934  		ha := h.arenas[ai.l1()][ai.l2()]
   935  
   936  		// Get a chunk of the bitmap to work on.
   937  		arenaPage := uint(pageIdx % pagesPerArena)
   938  		inUse := ha.pageInUse[arenaPage/8:]
   939  		marked := ha.pageMarks[arenaPage/8:]
   940  		if uintptr(len(inUse)) > n/8 {
   941  			inUse = inUse[:n/8]
   942  			marked = marked[:n/8]
   943  		}
   944  
   945  		// Scan this bitmap chunk for spans that are in-use
   946  		// but have no marked objects on them.
   947  		for i := range inUse {
   948  			inUseUnmarked := atomic.Load8(&inUse[i]) &^ marked[i]
   949  			if inUseUnmarked == 0 {
   950  				continue
   951  			}
   952  
   953  			for j := uint(0); j < 8; j++ {
   954  				if inUseUnmarked&(1<<j) != 0 {
   955  					s := ha.spans[arenaPage+uint(i)*8+j]
   956  					if s, ok := sl.tryAcquire(s); ok {
   957  						npages := s.npages
   958  						unlock(&h.lock)
   959  						if s.sweep(false) {
   960  							nFreed += npages
   961  						}
   962  						lock(&h.lock)
   963  						// Reload inUse. It's possible nearby
   964  						// spans were freed when we dropped the
   965  						// lock and we don't want to get stale
   966  						// pointers from the spans array.
   967  						inUseUnmarked = atomic.Load8(&inUse[i]) &^ marked[i]
   968  					}
   969  				}
   970  			}
   971  		}
   972  
   973  		// Advance.
   974  		pageIdx += uintptr(len(inUse) * 8)
   975  		n -= uintptr(len(inUse) * 8)
   976  	}
   977  	sweep.active.end(sl)
   978  	trace := traceAcquire()
   979  	if trace.ok() {
   980  		unlock(&h.lock)
   981  		// Account for pages scanned but not reclaimed.
   982  		trace.GCSweepSpan((n0 - nFreed) * pageSize)
   983  		traceRelease(trace)
   984  		lock(&h.lock)
   985  	}
   986  
   987  	assertLockHeld(&h.lock) // Must be locked on return.
   988  	return nFreed
   989  }
   990  
   991  // spanAllocType represents the type of allocation to make, or
   992  // the type of allocation to be freed.
   993  type spanAllocType uint8
   994  
   995  const (
   996  	spanAllocHeap    spanAllocType = iota // heap span
   997  	spanAllocStack                        // stack span
   998  	spanAllocWorkBuf                      // work buf span
   999  )
  1000  
  1001  // manual returns true if the span allocation is manually managed.
  1002  func (s spanAllocType) manual() bool {
  1003  	return s != spanAllocHeap
  1004  }
  1005  
  1006  // alloc allocates a new span of npage pages from the GC'd heap.
  1007  //
  1008  // spanclass indicates the span's size class and scannability.
  1009  //
  1010  // Returns a span that has been fully initialized. span.needzero indicates
  1011  // whether the span has been zeroed. Note that it may not be.
  1012  func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan {
  1013  	// Don't do any operations that lock the heap on the G stack.
  1014  	// It might trigger stack growth, and the stack growth code needs
  1015  	// to be able to allocate heap.
  1016  	var s *mspan
  1017  	systemstack(func() {
  1018  		// To prevent excessive heap growth, before allocating n pages
  1019  		// we need to sweep and reclaim at least n pages.
  1020  		if !isSweepDone() {
  1021  			h.reclaim(npages)
  1022  		}
  1023  		s = h.allocSpan(npages, spanAllocHeap, spanclass)
  1024  	})
  1025  	return s
  1026  }
  1027  
  1028  // allocManual allocates a manually-managed span of npage pages.
  1029  // allocManual returns nil if allocation fails.
  1030  //
  1031  // allocManual adds the bytes used to *stat, which should be a
  1032  // memstats in-use field. Unlike allocations in the GC'd heap, the
  1033  // allocation does *not* count toward heapInUse.
  1034  //
  1035  // The memory backing the returned span may not be zeroed if
  1036  // span.needzero is set.
  1037  //
  1038  // allocManual must be called on the system stack because it may
  1039  // acquire the heap lock via allocSpan. See mheap for details.
  1040  //
  1041  // If new code is written to call allocManual, do NOT use an
  1042  // existing spanAllocType value and instead declare a new one.
  1043  //
  1044  //go:systemstack
  1045  func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan {
  1046  	if !typ.manual() {
  1047  		throw("manual span allocation called with non-manually-managed type")
  1048  	}
  1049  	return h.allocSpan(npages, typ, 0)
  1050  }
  1051  
  1052  // setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
  1053  // is s.
  1054  func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
  1055  	p := base / pageSize
  1056  	ai := arenaIndex(base)
  1057  	ha := h.arenas[ai.l1()][ai.l2()]
  1058  	for n := uintptr(0); n < npage; n++ {
  1059  		i := (p + n) % pagesPerArena
  1060  		if i == 0 {
  1061  			ai = arenaIndex(base + n*pageSize)
  1062  			ha = h.arenas[ai.l1()][ai.l2()]
  1063  		}
  1064  		ha.spans[i] = s
  1065  	}
  1066  }
  1067  
  1068  // allocNeedsZero checks if the region of address space [base, base+npage*pageSize),
  1069  // assumed to be allocated, needs to be zeroed, updating heap arena metadata for
  1070  // future allocations.
  1071  //
  1072  // This must be called each time pages are allocated from the heap, even if the page
  1073  // allocator can otherwise prove the memory it's allocating is already zero because
  1074  // it's fresh from the operating system. It updates heapArena metadata that is
  1075  // critical for future page allocations.
  1076  //
  1077  // There are no locking constraints on this method.
  1078  func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
  1079  	for npage > 0 {
  1080  		ai := arenaIndex(base)
  1081  		ha := h.arenas[ai.l1()][ai.l2()]
  1082  
  1083  		zeroedBase := atomic.Loaduintptr(&ha.zeroedBase)
  1084  		arenaBase := base % heapArenaBytes
  1085  		if arenaBase < zeroedBase {
  1086  			// We extended into the non-zeroed part of the
  1087  			// arena, so this region needs to be zeroed before use.
  1088  			//
  1089  			// zeroedBase is monotonically increasing, so if we see this now then
  1090  			// we can be sure we need to zero this memory region.
  1091  			//
  1092  			// We still need to update zeroedBase for this arena, and
  1093  			// potentially more arenas.
  1094  			needZero = true
  1095  		}
  1096  		// We may observe arenaBase > zeroedBase if we're racing with one or more
  1097  		// allocations which are acquiring memory directly before us in the address
  1098  		// space. But, because we know no one else is acquiring *this* memory, it's
  1099  		// still safe to not zero.
  1100  
  1101  		// Compute how far we extend into the arena, capped
  1102  		// at heapArenaBytes.
  1103  		arenaLimit := arenaBase + npage*pageSize
  1104  		if arenaLimit > heapArenaBytes {
  1105  			arenaLimit = heapArenaBytes
  1106  		}
  1107  		// Increase ha.zeroedBase so it's >= arenaLimit.
  1108  		// We may be racing with other updates.
  1109  		for arenaLimit > zeroedBase {
  1110  			if atomic.Casuintptr(&ha.zeroedBase, zeroedBase, arenaLimit) {
  1111  				break
  1112  			}
  1113  			zeroedBase = atomic.Loaduintptr(&ha.zeroedBase)
  1114  			// Double check basic conditions of zeroedBase.
  1115  			if zeroedBase <= arenaLimit && zeroedBase > arenaBase {
  1116  				// The zeroedBase moved into the space we were trying to
  1117  				// claim. That's very bad, and indicates someone allocated
  1118  				// the same region we did.
  1119  				throw("potentially overlapping in-use allocations detected")
  1120  			}
  1121  		}
  1122  
  1123  		// Move base forward and subtract from npage to move into
  1124  		// the next arena, or finish.
  1125  		base += arenaLimit - arenaBase
  1126  		npage -= (arenaLimit - arenaBase) / pageSize
  1127  	}
  1128  	return
  1129  }
  1130  
  1131  // tryAllocMSpan attempts to allocate an mspan object from
  1132  // the P-local cache, but may fail.
  1133  //
  1134  // h.lock need not be held.
  1135  //
  1136  // The caller must ensure that its P won't change underneath
  1137  // it during this function. Currently we enforce this by requiring
  1138  // that the function run on the system stack, because that's
  1139  // the only place it is used now. In the future, this requirement
  1140  // may be relaxed if its use is necessary elsewhere.
  1141  //
  1142  //go:systemstack
  1143  func (h *mheap) tryAllocMSpan() *mspan {
  1144  	pp := getg().m.p.ptr()
  1145  	// If we don't have a p or the cache is empty, we can't do
  1146  	// anything here.
  1147  	if pp == nil || pp.mspancache.len == 0 {
  1148  		return nil
  1149  	}
  1150  	// Pull off the last entry in the cache.
  1151  	s := pp.mspancache.buf[pp.mspancache.len-1]
  1152  	pp.mspancache.len--
  1153  	return s
  1154  }
  1155  
  1156  // allocMSpanLocked allocates an mspan object.
  1157  //
  1158  // h.lock must be held.
  1159  //
  1160  // allocMSpanLocked must be called on the system stack because
  1161  // its caller holds the heap lock. See mheap for details.
  1162  // Running on the system stack also ensures that we won't
  1163  // switch Ps during this function. See tryAllocMSpan for details.
  1164  //
  1165  //go:systemstack
  1166  func (h *mheap) allocMSpanLocked() *mspan {
  1167  	assertLockHeld(&h.lock)
  1168  
  1169  	pp := getg().m.p.ptr()
  1170  	if pp == nil {
  1171  		// We don't have a p so just do the normal thing.
  1172  		return (*mspan)(h.spanalloc.alloc())
  1173  	}
  1174  	// Refill the cache if necessary.
  1175  	if pp.mspancache.len == 0 {
  1176  		const refillCount = len(pp.mspancache.buf) / 2
  1177  		for i := 0; i < refillCount; i++ {
  1178  			pp.mspancache.buf[i] = (*mspan)(h.spanalloc.alloc())
  1179  		}
  1180  		pp.mspancache.len = refillCount
  1181  	}
  1182  	// Pull off the last entry in the cache.
  1183  	s := pp.mspancache.buf[pp.mspancache.len-1]
  1184  	pp.mspancache.len--
  1185  	return s
  1186  }
  1187  
  1188  // freeMSpanLocked frees an mspan object.
  1189  //
  1190  // h.lock must be held.
  1191  //
  1192  // freeMSpanLocked must be called on the system stack because
  1193  // its caller holds the heap lock. See mheap for details.
  1194  // Running on the system stack also ensures that we won't
  1195  // switch Ps during this function. See tryAllocMSpan for details.
  1196  //
  1197  //go:systemstack
  1198  func (h *mheap) freeMSpanLocked(s *mspan) {
  1199  	assertLockHeld(&h.lock)
  1200  
  1201  	pp := getg().m.p.ptr()
  1202  	// First try to free the mspan directly to the cache.
  1203  	if pp != nil && pp.mspancache.len < len(pp.mspancache.buf) {
  1204  		pp.mspancache.buf[pp.mspancache.len] = s
  1205  		pp.mspancache.len++
  1206  		return
  1207  	}
  1208  	// Failing that (or if we don't have a p), just free it to
  1209  	// the heap.
  1210  	h.spanalloc.free(unsafe.Pointer(s))
  1211  }
  1212  
  1213  // allocSpan allocates an mspan which owns npages worth of memory.
  1214  //
  1215  // If typ.manual() == false, allocSpan allocates a heap span of class spanclass
  1216  // and updates heap accounting. If typ.manual() == true, allocSpan allocates a
  1217  // manually-managed span (spanclass is ignored), and the caller is
  1218  // responsible for any accounting related to its use of the span. Either
  1219  // way, allocSpan will atomically add the bytes in the newly allocated
  1220  // span to *sysStat.
  1221  //
  1222  // The returned span is fully initialized.
  1223  //
  1224  // h.lock must not be held.
  1225  //
  1226  // allocSpan must be called on the system stack both because it acquires
  1227  // the heap lock and because it must block GC transitions.
  1228  //
  1229  //go:systemstack
  1230  func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
  1231  	// Function-global state.
  1232  	gp := getg()
  1233  	base, scav := uintptr(0), uintptr(0)
  1234  	growth := uintptr(0)
  1235  
  1236  	// On some platforms we need to provide physical page aligned stack
  1237  	// allocations. Where the page size is less than the physical page
  1238  	// size, we already manage to do this by default.
  1239  	needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize
  1240  
  1241  	// If the allocation is small enough, try the page cache!
  1242  	// The page cache does not support aligned allocations, so we cannot use
  1243  	// it if we need to provide a physical page aligned stack allocation.
  1244  	pp := gp.m.p.ptr()
  1245  	if !needPhysPageAlign && pp != nil && npages < pageCachePages/4 {
  1246  		c := &pp.pcache
  1247  
  1248  		// If the cache is empty, refill it.
  1249  		if c.empty() {
  1250  			lock(&h.lock)
  1251  			*c = h.pages.allocToCache()
  1252  			unlock(&h.lock)
  1253  		}
  1254  
  1255  		// Try to allocate from the cache.
  1256  		base, scav = c.alloc(npages)
  1257  		if base != 0 {
  1258  			s = h.tryAllocMSpan()
  1259  			if s != nil {
  1260  				goto HaveSpan
  1261  			}
  1262  			// We have a base but no mspan, so we need
  1263  			// to lock the heap.
  1264  		}
  1265  	}
  1266  
  1267  	// For one reason or another, we couldn't get the
  1268  	// whole job done without the heap lock.
  1269  	lock(&h.lock)
  1270  
  1271  	if needPhysPageAlign {
  1272  		// Overallocate by a physical page to allow for later alignment.
  1273  		extraPages := physPageSize / pageSize
  1274  
  1275  		// Find a big enough region first, but then only allocate the
  1276  		// aligned portion. We can't just allocate and then free the
  1277  		// edges because we need to account for scavenged memory, and
  1278  		// that's difficult with alloc.
  1279  		//
  1280  		// Note that we skip updates to searchAddr here. It's OK if
  1281  		// it's stale and higher than normal; it'll operate correctly,
  1282  		// just come with a performance cost.
  1283  		base, _ = h.pages.find(npages + extraPages)
  1284  		if base == 0 {
  1285  			var ok bool
  1286  			growth, ok = h.grow(npages + extraPages)
  1287  			if !ok {
  1288  				unlock(&h.lock)
  1289  				return nil
  1290  			}
  1291  			base, _ = h.pages.find(npages + extraPages)
  1292  			if base == 0 {
  1293  				throw("grew heap, but no adequate free space found")
  1294  			}
  1295  		}
  1296  		base = alignUp(base, physPageSize)
  1297  		scav = h.pages.allocRange(base, npages)
  1298  	}
  1299  
  1300  	if base == 0 {
  1301  		// Try to acquire a base address.
  1302  		base, scav = h.pages.alloc(npages)
  1303  		if base == 0 {
  1304  			var ok bool
  1305  			growth, ok = h.grow(npages)
  1306  			if !ok {
  1307  				unlock(&h.lock)
  1308  				return nil
  1309  			}
  1310  			base, scav = h.pages.alloc(npages)
  1311  			if base == 0 {
  1312  				throw("grew heap, but no adequate free space found")
  1313  			}
  1314  		}
  1315  	}
  1316  	if s == nil {
  1317  		// We failed to get an mspan earlier, so grab
  1318  		// one now that we have the heap lock.
  1319  		s = h.allocMSpanLocked()
  1320  	}
  1321  	unlock(&h.lock)
  1322  
  1323  HaveSpan:
  1324  	// Decide if we need to scavenge in response to what we just allocated.
  1325  	// Specifically, we track the maximum amount of memory to scavenge of all
  1326  	// the alternatives below, assuming that the maximum satisfies *all*
  1327  	// conditions we check (e.g. if we need to scavenge X to satisfy the
  1328  	// memory limit and Y to satisfy heap-growth scavenging, and Y > X, then
  1329  	// it's fine to pick Y, because the memory limit is still satisfied).
  1330  	//
  1331  	// It's fine to do this after allocating because we expect any scavenged
  1332  	// pages not to get touched until we return. Simultaneously, it's important
  1333  	// to do this before calling sysUsed because that may commit address space.
  1334  	bytesToScavenge := uintptr(0)
  1335  	forceScavenge := false
  1336  	if limit := gcController.memoryLimit.Load(); !gcCPULimiter.limiting() {
  1337  		// Assist with scavenging to maintain the memory limit by the amount
  1338  		// that we expect to page in.
  1339  		inuse := gcController.mappedReady.Load()
  1340  		// Be careful about overflow, especially with uintptrs. Even on 32-bit platforms
  1341  		// someone can set a really big memory limit that isn't math.MaxInt64.
  1342  		if uint64(scav)+inuse > uint64(limit) {
  1343  			bytesToScavenge = uintptr(uint64(scav) + inuse - uint64(limit))
  1344  			forceScavenge = true
  1345  		}
  1346  	}
  1347  	if goal := scavenge.gcPercentGoal.Load(); goal != ^uint64(0) && growth > 0 {
  1348  		// We just caused a heap growth, so scavenge down what will soon be used.
  1349  		// By scavenging inline we deal with the failure to allocate out of
  1350  		// memory fragments by scavenging the memory fragments that are least
  1351  		// likely to be re-used.
  1352  		//
  1353  		// Only bother with this because we're not using a memory limit. We don't
  1354  		// care about heap growths as long as we're under the memory limit, and the
  1355  		// previous check for scavenging already handles that.
  1356  		if retained := heapRetained(); retained+uint64(growth) > goal {
  1357  			// The scavenging algorithm requires the heap lock to be dropped so it
  1358  			// can acquire it only sparingly. Scavenging is potentially expensive, so
  1359  			// dropping the lock also frees up other goroutines to allocate in the
  1360  			// meantime. In fact, they can make use of the growth we just created.
  1361  			todo := growth
  1362  			if overage := uintptr(retained + uint64(growth) - goal); todo > overage {
  1363  				todo = overage
  1364  			}
  1365  			if todo > bytesToScavenge {
  1366  				bytesToScavenge = todo
  1367  			}
  1368  		}
  1369  	}
  1370  	// There are a few very limited circumstances where we won't have a P here.
  1371  	// It's OK to simply skip scavenging in these cases. Something else will notice
  1372  	// and pick up the tab.
  1373  	var now int64
  1374  	if pp != nil && bytesToScavenge > 0 {
  1375  		// Measure how long we spent scavenging and add that measurement to the assist
  1376  		// time so we can track it for the GC CPU limiter.
  1377  		//
  1378  		// Limiter event tracking might be disabled if we end up here
  1379  		// while on a mark worker.
  1380  		start := nanotime()
  1381  		track := pp.limiterEvent.start(limiterEventScavengeAssist, start)
  1382  
  1383  		// Scavenge, but back out if the limiter turns on.
  1384  		released := h.pages.scavenge(bytesToScavenge, func() bool {
  1385  			return gcCPULimiter.limiting()
  1386  		}, forceScavenge)
  1387  
  1388  		mheap_.pages.scav.releasedEager.Add(released)
  1389  
  1390  		// Finish up accounting.
  1391  		now = nanotime()
  1392  		if track {
  1393  			pp.limiterEvent.stop(limiterEventScavengeAssist, now)
  1394  		}
  1395  		scavenge.assistTime.Add(now - start)
  1396  	}
  1397  
  1398  	// Initialize the span.
  1399  	h.initSpan(s, typ, spanclass, base, npages, scav)
  1400  
  1401  	if valgrindenabled {
  1402  		valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize)
  1403  	}
  1404  
  1405  	// Commit and account for any scavenged memory that the span now owns.
  1406  	nbytes := npages * pageSize
  1407  	if scav != 0 {
  1408  		// sysUsed all the pages that are actually available
  1409  		// in the span since some of them might be scavenged.
  1410  		sysUsed(unsafe.Pointer(base), nbytes, scav)
  1411  		gcController.heapReleased.add(-int64(scav))
  1412  	}
  1413  	// Update stats.
  1414  	gcController.heapFree.add(-int64(nbytes - scav))
  1415  	if typ == spanAllocHeap {
  1416  		gcController.heapInUse.add(int64(nbytes))
  1417  	}
  1418  	// Update consistent stats.
  1419  	stats := memstats.heapStats.acquire()
  1420  	atomic.Xaddint64(&stats.committed, int64(scav))
  1421  	atomic.Xaddint64(&stats.released, -int64(scav))
  1422  	switch typ {
  1423  	case spanAllocHeap:
  1424  		atomic.Xaddint64(&stats.inHeap, int64(nbytes))
  1425  	case spanAllocStack:
  1426  		atomic.Xaddint64(&stats.inStacks, int64(nbytes))
  1427  	case spanAllocWorkBuf:
  1428  		atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes))
  1429  	}
  1430  	memstats.heapStats.release()
  1431  
  1432  	// Trace the span alloc.
  1433  	if traceAllocFreeEnabled() {
  1434  		trace := traceAcquire()
  1435  		if trace.ok() {
  1436  			trace.SpanAlloc(s)
  1437  			traceRelease(trace)
  1438  		}
  1439  	}
  1440  	return s
  1441  }
  1442  
  1443  // initSpan initializes a blank span s which will represent the range
  1444  // [base, base+npages*pageSize). typ is the type of span being allocated.
  1445  func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages, scav uintptr) {
  1446  	// At this point, both s != nil and base != 0, and the heap
  1447  	// lock is no longer held. Initialize the span.
  1448  	s.init(base, npages)
  1449  	// Always call allocNeedsZero to update the arena's zeroedBase watermark
  1450  	// and determine if the memory is considered dirty.
  1451  	needZero := h.allocNeedsZero(base, npages)
  1452  	// If these pages were scavenged (returned to the OS), the kernel guarantees
  1453  	// they will be zero-filled on next use (fault-in), so we can treat them as
  1454  	// already zeroed and skip explicit clearing.
  1455  	if (needZeroAfterSysUnused() || scav != npages*pageSize) && needZero {
  1456  		s.needzero = 1
  1457  	}
  1458  	nbytes := npages * pageSize
  1459  	if typ.manual() {
  1460  		s.manualFreeList = 0
  1461  		s.nelems = 0
  1462  		s.state.set(mSpanManual)
  1463  	} else {
  1464  		// We must set span properties before the span is published anywhere
  1465  		// since we're not holding the heap lock.
  1466  		s.spanclass = spanclass
  1467  		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
  1468  			s.elemsize = nbytes
  1469  			s.nelems = 1
  1470  			s.divMul = 0
  1471  		} else {
  1472  			s.elemsize = uintptr(gc.SizeClassToSize[sizeclass])
  1473  			if goexperiment.GreenTeaGC {
  1474  				var reserve uintptr
  1475  				if gcUsesSpanInlineMarkBits(s.elemsize) {
  1476  					// Reserve space for the inline mark bits.
  1477  					reserve += unsafe.Sizeof(spanInlineMarkBits{})
  1478  				}
  1479  				if heapBitsInSpan(s.elemsize) && !s.spanclass.noscan() {
  1480  					// Reserve space for the pointer/scan bitmap at the end.
  1481  					reserve += nbytes / goarch.PtrSize / 8
  1482  				}
  1483  				s.nelems = uint16((nbytes - reserve) / s.elemsize)
  1484  			} else {
  1485  				if !s.spanclass.noscan() && heapBitsInSpan(s.elemsize) {
  1486  					// Reserve space for the pointer/scan bitmap at the end.
  1487  					s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize)
  1488  				} else {
  1489  					s.nelems = uint16(nbytes / s.elemsize)
  1490  				}
  1491  			}
  1492  			s.divMul = gc.SizeClassToDivMagic[sizeclass]
  1493  		}
  1494  
  1495  		// Initialize mark and allocation structures.
  1496  		s.freeindex = 0
  1497  		s.freeIndexForScan = 0
  1498  		s.allocCache = ^uint64(0) // all 1s indicating all free.
  1499  		s.gcmarkBits = newMarkBits(uintptr(s.nelems))
  1500  		s.allocBits = newAllocBits(uintptr(s.nelems))
  1501  
  1502  		// Adjust s.limit down to the object-containing part of the span.
  1503  		s.limit = s.base() + s.elemsize*uintptr(s.nelems)
  1504  
  1505  		// It's safe to access h.sweepgen without the heap lock because it's
  1506  		// only ever updated with the world stopped and we run on the
  1507  		// systemstack which blocks a STW transition.
  1508  		atomic.Store(&s.sweepgen, h.sweepgen)
  1509  
  1510  		// Now that the span is filled in, set its state. This
  1511  		// is a publication barrier for the other fields in
  1512  		// the span. While valid pointers into this span
  1513  		// should never be visible until the span is returned,
  1514  		// if the garbage collector finds an invalid pointer,
  1515  		// access to the span may race with initialization of
  1516  		// the span. We resolve this race by atomically
  1517  		// setting the state after the span is fully
  1518  		// initialized, and atomically checking the state in
  1519  		// any situation where a pointer is suspect.
  1520  		s.state.set(mSpanInUse)
  1521  	}
  1522  
  1523  	// Publish the span in various locations.
  1524  
  1525  	// This is safe to call without the lock held because the slots
  1526  	// related to this span will only ever be read or modified by
  1527  	// this thread until pointers into the span are published (and
  1528  	// we execute a publication barrier at the end of this function
  1529  	// before that happens) or pageInUse is updated.
  1530  	h.setSpans(s.base(), npages, s)
  1531  
  1532  	if !typ.manual() {
  1533  		// Mark in-use span in arena page bitmap.
  1534  		//
  1535  		// This publishes the span to the page sweeper, so
  1536  		// it's imperative that the span be completely initialized
  1537  		// prior to this line.
  1538  		arena, pageIdx, pageMask := pageIndexOf(s.base())
  1539  		atomic.Or8(&arena.pageInUse[pageIdx], pageMask)
  1540  
  1541  		// Mark packed span.
  1542  		if gcUsesSpanInlineMarkBits(s.elemsize) {
  1543  			atomic.Or8(&arena.pageUseSpanInlineMarkBits[pageIdx], pageMask)
  1544  		}
  1545  
  1546  		// Update related page sweeper stats.
  1547  		h.pagesInUse.Add(npages)
  1548  	}
  1549  
  1550  	// Make sure the newly allocated span will be observed
  1551  	// by the GC before pointers into the span are published.
  1552  	publicationBarrier()
  1553  }
  1554  
  1555  // Try to add at least npage pages of memory to the heap,
  1556  // returning how much the heap grew by and whether it worked.
  1557  //
  1558  // h.lock must be held.
  1559  func (h *mheap) grow(npage uintptr) (uintptr, bool) {
  1560  	assertLockHeld(&h.lock)
  1561  
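        	// curArena.base is zero only before the heap's first growth, so this
        	// records whether we're growing the heap for the very first time.
        	// That matters below for heap base randomization.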
  1562  	firstGrow := h.curArena.base == 0
  1563  
  1564  	// We must grow the heap in whole palloc chunks.
  1565  	// We call sysMap below, but because we round up to
  1566  	// pallocChunkPages, which is on the order of MiB
  1567  	// (generally at least the huge page size), we won't
  1568  	// be calling it too often.
  1569  	ask := alignUp(npage, pallocChunkPages) * pageSize
  1570  
  1571  	totalGrowth := uintptr(0)
  1572  	// This may overflow because ask could be very large
  1573  	// and is otherwise unrelated to h.curArena.base.
  1574  	end := h.curArena.base + ask
  1575  	nBase := alignUp(end, physPageSize)
  1576  	if nBase > h.curArena.end || /* overflow */ end < h.curArena.base {
  1577  		// Not enough room in the current arena. Allocate more
  1578  		// arena space. This may not be contiguous with the
  1579  		// current arena, so we have to request the full ask.
  1580  		av, asize := h.sysAlloc(ask, &h.arenaHints, &h.heapArenas)
  1581  		if av == nil {
  1582  			inUse := gcController.heapFree.load() + gcController.heapReleased.load() + gcController.heapInUse.load()
  1583  			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", inUse, " in use)\n")
  1584  			return 0, false
  1585  		}
  1586  
  1587  		if uintptr(av) == h.curArena.end {
  1588  			// The new space is contiguous with the old
  1589  			// space, so just extend the current space.
  1590  			h.curArena.end = uintptr(av) + asize
  1591  		} else {
  1592  			// The new space is discontiguous. Track what
  1593  			// remains of the current space and switch to
  1594  			// the new space. This should be rare.
  1595  			if size := h.curArena.end - h.curArena.base; size != 0 {
  1596  				// Transition this space from Reserved to Prepared and mark it
  1597  				// as released, since we'll be able to start using it at any time
  1598  				// after updating the page allocator and releasing the lock.
  1599  				sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased, "heap")
  1600  				// Update stats.
  1601  				stats := memstats.heapStats.acquire()
  1602  				atomic.Xaddint64(&stats.released, int64(size))
  1603  				memstats.heapStats.release()
  1604  				// Update the page allocator's structures to make this
  1605  				// space ready for allocation.
  1606  				h.pages.grow(h.curArena.base, size)
  1607  				totalGrowth += size
  1608  			}
  1609  			// Switch to the new space.
  1610  			h.curArena.base = uintptr(av)
  1611  			h.curArena.end = uintptr(av) + asize
  1612  
  1613  			if firstGrow && randomizeHeapBase {
  1614  				// The top heapAddrBits-logHeapArenaBytes bits are randomized; we
  1615  				// now want to randomize the next
  1616  				// logHeapArenaBytes-log2(pallocChunkBytes) bits, making sure
  1617  				// h.curArena.base is aligned to pallocChunkBytes.
  1618  				bits := logHeapArenaBytes - logPallocChunkBytes
  1619  				offset := nextHeapRandBits(bits)
  1620  				h.curArena.base = alignDown(h.curArena.base|(offset<<logPallocChunkBytes), pallocChunkBytes)
  1621  			}
  1622  		}
  1623  
  1624  		// Recalculate nBase.
  1625  		// We know this won't overflow, because sysAlloc returned
  1626  		// a valid region starting at h.curArena.base which is at
  1627  		// least ask bytes in size.
  1628  		nBase = alignUp(h.curArena.base+ask, physPageSize)
  1629  	}
  1630  
  1631  	// Grow into the current arena.
  1632  	v := h.curArena.base
  1633  	h.curArena.base = nBase
  1634  
  1635  	// Transition the space we're going to use from Reserved to Prepared.
  1636  	//
  1637  	// The allocation is always aligned to the heap arena
  1638  	// size which is always > physPageSize, so it's safe to
  1639  	// just add directly to heapReleased.
  1640  	sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased, "heap")
  1641  
  1642  	// The memory just allocated counts as both released
  1643  	// and idle, even though it's not yet backed by spans.
  1644  	stats := memstats.heapStats.acquire()
  1645  	atomic.Xaddint64(&stats.released, int64(nBase-v))
  1646  	memstats.heapStats.release()
  1647  
  1648  	// Update the page allocator's structures to make this
  1649  	// space ready for allocation.
  1650  	h.pages.grow(v, nBase-v)
  1651  	totalGrowth += nBase - v
  1652  
  1653  	if firstGrow && randomizeHeapBase {
  1654  		// The top heapAddrBits-log2(pallocChunkBytes) bits are now randomized;
  1655  		// we finally want to randomize the next
  1656  		// log2(pallocChunkBytes)-log2(pageSize) bits, while maintaining
  1657  		// alignment to pageSize. We do this by calculating a random number of
  1658  		// pages into the current arena, and marking them as allocated. The
  1659  		// address of the next available page becomes our fully randomized base
  1660  		// heap address.
  1661  		randOffset := nextHeapRandBits(logPallocChunkBytes)
  1662  		randNumPages := alignDown(randOffset, pageSize) / pageSize
  1663  		if randNumPages != 0 {
  1664  			h.pages.markRandomPaddingPages(v, randNumPages)
  1665  		}
  1666  	}
  1667  
  1668  	return totalGrowth, true
  1669  }
  1670  
  1671  // Free the span back into the heap.
  1672  func (h *mheap) freeSpan(s *mspan) {
  1673  	systemstack(func() {
  1674  		// Trace the span free.
  1675  		if traceAllocFreeEnabled() {
  1676  			trace := traceAcquire()
  1677  			if trace.ok() {
  1678  				trace.SpanFree(s)
  1679  				traceRelease(trace)
  1680  			}
  1681  		}
  1682  
  1683  		lock(&h.lock)
  1684  		if msanenabled {
  1685  			// Tell msan that this entire span is no longer in use.
  1686  			base := unsafe.Pointer(s.base())
  1687  			bytes := s.npages << gc.PageShift
  1688  			msanfree(base, bytes)
  1689  		}
  1690  		if asanenabled {
  1691  			// Tell asan that this entire span is no longer in use.
  1692  			base := unsafe.Pointer(s.base())
  1693  			bytes := s.npages << gc.PageShift
  1694  			asanpoison(base, bytes)
  1695  		}
  1696  		if valgrindenabled {
  1697  			base := s.base()
  1698  			valgrindMempoolFree(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base))
  1699  		}
  1700  		h.freeSpanLocked(s, spanAllocHeap)
  1701  		unlock(&h.lock)
  1702  	})
  1703  }
  1704  
  1705  // freeManual frees a manually-managed span returned by allocManual.
  1706  // typ must be the same as the spanAllocType passed to the allocManual that
  1707  // allocated s.
  1708  //
  1709  // This must only be called when gcphase == _GCoff. See mSpanState for
  1710  // an explanation.
  1711  //
  1712  // freeManual must be called on the system stack because it acquires
  1713  // the heap lock. See mheap for details.
  1714  //
  1715  //go:systemstack
  1716  func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
  1717  	// Trace the span free.
  1718  	if traceAllocFreeEnabled() {
  1719  		trace := traceAcquire()
  1720  		if trace.ok() {
  1721  			trace.SpanFree(s)
  1722  			traceRelease(trace)
  1723  		}
  1724  	}
  1725  
  1726  	s.needzero = 1
  1727  	lock(&h.lock)
  1728  	if valgrindenabled {
  1729  		base := s.base()
  1730  		valgrindMempoolFree(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base))
  1731  	}
  1732  	h.freeSpanLocked(s, typ)
  1733  	unlock(&h.lock)
  1734  }
  1735  
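        // freeSpanLocked returns the span s to the page allocator, updates the
        // allocator and GC statistics, and frees the mspan structure itself.
        //
        // h.lock must be held.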
  1736  func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
  1737  	assertLockHeld(&h.lock)
  1738  
  1739  	switch s.state.get() {
  1740  	case mSpanManual:
  1741  		if s.allocCount != 0 {
  1742  			throw("mheap.freeSpanLocked - invalid stack free")
  1743  		}
  1744  	case mSpanInUse:
  1745  		if s.isUserArenaChunk {
  1746  			throw("mheap.freeSpanLocked - invalid free of user arena chunk")
  1747  		}
  1748  		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
  1749  			print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
  1750  			throw("mheap.freeSpanLocked - invalid free")
  1751  		}
  1752  		h.pagesInUse.Add(-s.npages)
  1753  
  1754  		// Clear in-use bit in arena page bitmap.
  1755  		arena, pageIdx, pageMask := pageIndexOf(s.base())
  1756  		atomic.And8(&arena.pageInUse[pageIdx], ^pageMask)
  1757  
  1758  		// Clear small heap span bit if necessary.
  1759  		if gcUsesSpanInlineMarkBits(s.elemsize) {
  1760  			atomic.And8(&arena.pageUseSpanInlineMarkBits[pageIdx], ^pageMask)
  1761  		}
  1762  	default:
  1763  		throw("mheap.freeSpanLocked - invalid span state")
  1764  	}
  1765  
  1766  	// Update stats.
  1767  	//
  1768  	// Mirrors the code in allocSpan.
  1769  	nbytes := s.npages * pageSize
  1770  	gcController.heapFree.add(int64(nbytes))
  1771  	if typ == spanAllocHeap {
  1772  		gcController.heapInUse.add(-int64(nbytes))
  1773  	}
  1774  	// Update consistent stats.
  1775  	stats := memstats.heapStats.acquire()
  1776  	switch typ {
  1777  	case spanAllocHeap:
  1778  		atomic.Xaddint64(&stats.inHeap, -int64(nbytes))
  1779  	case spanAllocStack:
  1780  		atomic.Xaddint64(&stats.inStacks, -int64(nbytes))
  1781  	case spanAllocWorkBuf:
  1782  		atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes))
  1783  	}
  1784  	memstats.heapStats.release()
  1785  
  1786  	// Mark the space as free.
  1787  	h.pages.free(s.base(), s.npages)
  1788  
  1789  	// Free the span structure. We no longer have a use for it.
  1790  	s.state.set(mSpanDead)
  1791  	h.freeMSpanLocked(s)
  1792  }
  1793  
  1794  // scavengeAll acquires the heap lock (blocking any additional
  1795  // manipulation of the page allocator) and iterates over the whole
  1796  // heap, scavenging every free page available.
  1797  //
  1798  // Must run on the system stack because it acquires the heap lock.
  1799  //
  1800  //go:systemstack
  1801  func (h *mheap) scavengeAll() {
  1802  	// Disallow malloc or panic while holding the heap lock. We do
  1803  	// this here because this is a non-mallocgc entry-point to
  1804  	// the mheap API.
  1805  	gp := getg()
  1806  	gp.m.mallocing++
  1807  
  1808  	// Force scavenge everything.
  1809  	released := h.pages.scavenge(^uintptr(0), nil, true)
  1810  
  1811  	gp.m.mallocing--
  1812  
  1813  	if debug.scavtrace > 0 {
  1814  		printScavTrace(0, released, true)
  1815  	}
  1816  }
  1817  
  1818  //go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
  1819  func runtime_debug_freeOSMemory() {
  1820  	GC()
  1821  	systemstack(func() { mheap_.scavengeAll() })
  1822  }
  1823  
  1824  // Initialize a new span with the given start and npages.
  1825  func (span *mspan) init(base uintptr, npages uintptr) {
  1826  	// span is *not* zeroed.
  1827  	span.next = nil
  1828  	span.prev = nil
  1829  	span.list = nil
  1830  	span.startAddr = base
  1831  	span.npages = npages
  1832  	span.limit = base + npages*gc.PageSize // see go.dev/issue/74288; adjusted later for heap spans
  1833  	span.allocCount = 0
  1834  	span.spanclass = 0
  1835  	span.elemsize = 0
  1836  	span.speciallock.key = 0
  1837  	span.specials = nil
  1838  	span.needzero = 0
  1839  	span.freeindex = 0
  1840  	span.freeIndexForScan = 0
  1841  	span.allocBits = nil
  1842  	span.gcmarkBits = nil
  1843  	span.pinnerBits = nil
  1844  	span.state.set(mSpanDead)
  1845  	lockInit(&span.speciallock, lockRankMspanSpecial)
  1846  }
  1847  
  1848  func (span *mspan) inList() bool {
  1849  	return span.list != nil
  1850  }
  1851  
  1852  // mSpanList heads a linked list of spans.
  1853  type mSpanList struct {
  1854  	_     sys.NotInHeap
  1855  	first *mspan // first span in list, or nil if none
  1856  	last  *mspan // last span in list, or nil if none
  1857  }
  1858  
  1859  // Initialize an empty doubly-linked list.
  1860  func (list *mSpanList) init() {
  1861  	list.first = nil
  1862  	list.last = nil
  1863  }
  1864  
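        // remove removes span from list. It throws if span is not on list.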
  1865  func (list *mSpanList) remove(span *mspan) {
  1866  	if span.list != list {
  1867  		print("runtime: failed mSpanList.remove span.npages=", span.npages,
  1868  			" span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
  1869  		throw("mSpanList.remove")
  1870  	}
  1871  	if list.first == span {
  1872  		list.first = span.next
  1873  	} else {
  1874  		span.prev.next = span.next
  1875  	}
  1876  	if list.last == span {
  1877  		list.last = span.prev
  1878  	} else {
  1879  		span.next.prev = span.prev
  1880  	}
  1881  	span.next = nil
  1882  	span.prev = nil
  1883  	span.list = nil
  1884  }
  1885  
  1886  func (list *mSpanList) isEmpty() bool {
  1887  	return list.first == nil
  1888  }
  1889  
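        // insert adds span to the front of list. span must not already be on a list.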
  1890  func (list *mSpanList) insert(span *mspan) {
  1891  	if span.next != nil || span.prev != nil || span.list != nil {
  1892  		println("runtime: failed mSpanList.insert", span, span.next, span.prev, span.list)
  1893  		throw("mSpanList.insert")
  1894  	}
  1895  	span.next = list.first
  1896  	if list.first != nil {
  1897  		// The list contains at least one span; link it in.
  1898  		// The last span in the list doesn't change.
  1899  		list.first.prev = span
  1900  	} else {
  1901  		// The list contains no spans, so this is also the last span.
  1902  		list.last = span
  1903  	}
  1904  	list.first = span
  1905  	span.list = list
  1906  }
  1907  
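        // insertBack adds span to the back of list. span must not already be on a list.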
  1908  func (list *mSpanList) insertBack(span *mspan) {
  1909  	if span.next != nil || span.prev != nil || span.list != nil {
  1910  		println("runtime: failed mSpanList.insertBack", span, span.next, span.prev, span.list)
  1911  		throw("mSpanList.insertBack")
  1912  	}
  1913  	span.prev = list.last
  1914  	if list.last != nil {
  1915  		// The list contains at least one span.
  1916  		list.last.next = span
  1917  	} else {
  1918  		// The list contains no spans, so this is also the first span.
  1919  		list.first = span
  1920  	}
  1921  	list.last = span
  1922  	span.list = list
  1923  }
  1924  
  1925  // takeAll removes all spans from other and inserts them at the front
  1926  // of list.
  1927  func (list *mSpanList) takeAll(other *mSpanList) {
  1928  	if other.isEmpty() {
  1929  		return
  1930  	}
  1931  
  1932  	// Reparent everything in other to list.
  1933  	for s := other.first; s != nil; s = s.next {
  1934  		s.list = list
  1935  	}
  1936  
  1937  	// Concatenate the lists.
  1938  	if list.isEmpty() {
  1939  		*list = *other
  1940  	} else {
  1941  		// Neither list is empty. Put other before list.
  1942  		other.last.next = list.first
  1943  		list.first.prev = other.last
  1944  		list.first = other.first
  1945  	}
  1946  
  1947  	other.first, other.last = nil, nil
  1948  }
  1949  
  1950  const (
  1951  	// _KindSpecialTinyBlock indicates that a given allocation is a tiny block.
  1952  	// Ordered before _KindSpecialFinalizer and _KindSpecialCleanup so that it
  1953  	// always appears first in the specials list.
  1954  	// Used only if debug.checkfinalizers != 0.
  1955  	_KindSpecialTinyBlock = 1
  1956  	// _KindSpecialFinalizer is for tracking finalizers.
  1957  	_KindSpecialFinalizer = 2
  1958  	// _KindSpecialWeakHandle is used for creating weak pointers.
  1959  	_KindSpecialWeakHandle = 3
  1960  	// _KindSpecialProfile is for memory profiling.
  1961  	_KindSpecialProfile = 4
  1962  	// _KindSpecialReachable is a special used for tracking
  1963  	// reachability during testing.
  1964  	_KindSpecialReachable = 5
  1965  	// _KindSpecialPinCounter is a special used for objects that are pinned
  1966  	// multiple times
  1967  	_KindSpecialPinCounter = 6
  1968  	// _KindSpecialCleanup is for tracking cleanups.
  1969  	_KindSpecialCleanup = 7
  1970  	// _KindSpecialCheckFinalizer adds additional context to a finalizer or cleanup.
  1971  	// Used only if debug.checkfinalizers != 0.
  1972  	_KindSpecialCheckFinalizer = 8
  1973  	// _KindSpecialBubble is used to associate objects with synctest bubbles.
  1974  	_KindSpecialBubble = 9
  1975  	// _KindSpecialSecret is a special used to mark an object
  1976  	// as needing zeroing immediately upon freeing.
  1977  	_KindSpecialSecret = 10
  1978  )
  1979  
  1980  type special struct {
  1981  	_      sys.NotInHeap
  1982  	next   *special // linked list in span
  1983  	offset uintptr  // span offset of object
  1984  	kind   byte     // kind of special
  1985  }
  1986  
  1987  // spanHasSpecials marks a span as having specials in the arena bitmap.
  1988  func spanHasSpecials(s *mspan) {
  1989  	arenaPage := (s.base() / pageSize) % pagesPerArena
  1990  	ai := arenaIndex(s.base())
  1991  	ha := mheap_.arenas[ai.l1()][ai.l2()]
  1992  	atomic.Or8(&ha.pageSpecials[arenaPage/8], uint8(1)<<(arenaPage%8))
  1993  }
  1994  
  1995  // spanHasNoSpecials marks a span as having no specials in the arena bitmap.
  1996  func spanHasNoSpecials(s *mspan) {
  1997  	arenaPage := (s.base() / pageSize) % pagesPerArena
  1998  	ai := arenaIndex(s.base())
  1999  	ha := mheap_.arenas[ai.l1()][ai.l2()]
  2000  	atomic.And8(&ha.pageSpecials[arenaPage/8], ^(uint8(1) << (arenaPage % 8)))
  2001  }
  2002  
  2003  // addspecial adds the special record s to the list of special records for
  2004  // the object p. All fields of s should be filled in except for
  2005  // offset & next, which this routine will fill in.
  2006  // Returns true if the special was successfully added, false otherwise.
  2007  // (The add fails only if a record with the same p and s->kind
  2008  // already exists and force is not set.)
  2009  func addspecial(p unsafe.Pointer, s *special, force bool) bool {
  2010  	span := spanOfHeap(uintptr(p))
  2011  	if span == nil {
  2012  		throw("addspecial on invalid pointer")
  2013  	}
  2014  
  2015  	// Ensure that the span is swept.
  2016  	// Sweeping accesses the specials list w/o locks, so we have
  2017  	// to synchronize with it. And it's just much safer.
  2018  	mp := acquirem()
  2019  	span.ensureSwept()
  2020  
  2021  	offset := uintptr(p) - span.base()
  2022  	kind := s.kind
  2023  
  2024  	lock(&span.speciallock)
  2025  
  2026  	// Find splice point, check for existing record.
  2027  	iter, exists := span.specialFindSplicePoint(offset, kind)
  2028  	if !exists || force {
  2029  		// Splice in record, fill in offset.
  2030  		s.offset = offset
  2031  		s.next = *iter
  2032  		*iter = s
  2033  		spanHasSpecials(span)
  2034  	}
  2035  
  2036  	unlock(&span.speciallock)
  2037  	releasem(mp)
  2038  	// We're converting p to a uintptr and looking it up, and we
  2039  	// don't want it to die and get swept while we're doing so.
  2040  	KeepAlive(p)
  2041  	return !exists || force // report whether s was spliced in (no existing record, or forced)
  2042  }
  2043  
  2044  // removespecial removes the special record of the given kind for the object p.
  2045  // Returns the record if it existed, nil otherwise.
  2046  // The caller must free the result via the appropriate fixalloc.
  2047  func removespecial(p unsafe.Pointer, kind uint8) *special {
  2048  	span := spanOfHeap(uintptr(p))
  2049  	if span == nil {
  2050  		throw("removespecial on invalid pointer")
  2051  	}
  2052  
  2053  	// Ensure that the span is swept.
  2054  	// Sweeping accesses the specials list w/o locks, so we have
  2055  	// to synchronize with it. And it's just much safer.
  2056  	mp := acquirem()
  2057  	span.ensureSwept()
  2058  
  2059  	offset := uintptr(p) - span.base()
  2060  
  2061  	var result *special
  2062  	lock(&span.speciallock)
  2063  
  2064  	iter, exists := span.specialFindSplicePoint(offset, kind)
  2065  	if exists {
  2066  		s := *iter
  2067  		*iter = s.next
  2068  		result = s
  2069  	}
  2070  	if span.specials == nil {
  2071  		spanHasNoSpecials(span)
  2072  	}
  2073  	unlock(&span.speciallock)
  2074  	releasem(mp)
  2075  	return result
  2076  }
  2077  
  2078  // Find a splice point in the sorted list and check for an already existing
  2079  // record. Returns a pointer to the next-reference in the list predecessor.
  2080  // Returns true if the referenced item is an exact match.
  2081  func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special, bool) {
  2082  	// Find splice point, check for existing record.
  2083  	iter := &span.specials
  2084  	found := false
  2085  	for {
  2086  		s := *iter
  2087  		if s == nil {
  2088  			break
  2089  		}
  2090  		if offset == s.offset && kind == s.kind {
  2091  			found = true
  2092  			break
  2093  		}
  2094  		if offset < s.offset || (offset == s.offset && kind < s.kind) {
  2095  			break
  2096  		}
  2097  		iter = &s.next
  2098  	}
  2099  	return iter, found
  2100  }
  2101  
  2102  // The described object has a finalizer set for it.
  2103  //
  2104  // specialfinalizer is allocated from non-GC'd memory, so any heap
  2105  // pointers must be specially handled.
  2106  type specialfinalizer struct {
  2107  	_       sys.NotInHeap
  2108  	special special
  2109  	fn      *funcval // May be a heap pointer.
  2110  	nret    uintptr
  2111  	fint    *_type   // May be a heap pointer, but always live.
  2112  	ot      *ptrtype // May be a heap pointer, but always live.
  2113  }
  2114  
  2115  // Adds a finalizer to the object p. Returns true if it succeeded.
  2116  func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
  2117  	lock(&mheap_.speciallock)
  2118  	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
  2119  	unlock(&mheap_.speciallock)
  2120  	s.special.kind = _KindSpecialFinalizer
  2121  	s.fn = f
  2122  	s.nret = nret
  2123  	s.fint = fint
  2124  	s.ot = ot
  2125  	if addspecial(p, &s.special, false) {
  2126  		// This is responsible for maintaining the same
  2127  		// GC-related invariants as markrootSpans in any
  2128  		// situation where it's possible that markrootSpans
  2129  		// has already run but mark termination hasn't yet.
  2130  		if gcphase != _GCoff {
  2131  			base, span, _ := findObject(uintptr(p), 0, 0)
  2132  			mp := acquirem()
  2133  			gcw := &mp.p.ptr().gcw
  2134  			// Mark everything reachable from the object
  2135  			// so it's retained for the finalizer.
  2136  			if !span.spanclass.noscan() {
  2137  				scanObject(base, gcw)
  2138  			}
  2139  			// Mark the finalizer itself, since the
  2140  			// special isn't part of the GC'd heap.
  2141  			scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
  2142  			releasem(mp)
  2143  		}
  2144  		return true
  2145  	}
  2146  
  2147  	// There was an old finalizer
  2148  	lock(&mheap_.speciallock)
  2149  	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
  2150  	unlock(&mheap_.speciallock)
  2151  	return false
  2152  }
  2153  
  2154  // Removes the finalizer (if any) from the object p.
  2155  func removefinalizer(p unsafe.Pointer) {
  2156  	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
  2157  	if s == nil {
  2158  		return // there wasn't a finalizer to remove
  2159  	}
  2160  	lock(&mheap_.speciallock)
  2161  	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
  2162  	unlock(&mheap_.speciallock)
  2163  }
  2164  
  2165  // The described object has a cleanup set for it.
  2166  type specialCleanup struct {
  2167  	_       sys.NotInHeap
  2168  	special special
  2169  	cleanup cleanupFn
  2170  	// Globally unique ID for the cleanup, obtained from mheap_.cleanupID.
  2171  	id uint64
  2172  }
  2173  
  2174  // addCleanup attaches a cleanup function to the object. Multiple
  2175  // cleanups are allowed on an object, and even the same pointer.
  2176  // A cleanup id is returned which can be used to uniquely identify
  2177  // the cleanup.
  2178  func addCleanup(p unsafe.Pointer, c cleanupFn) uint64 {
  2179  	// TODO(mknyszek): Consider pooling specialCleanups on the P
  2180  	// so we don't have to take the lock every time. Just locking
  2181  	// is a considerable part of the cost of AddCleanup. This
  2182  	// would also require reserving some cleanup IDs on the P.
  2183  	lock(&mheap_.speciallock)
  2184  	s := (*specialCleanup)(mheap_.specialCleanupAlloc.alloc())
  2185  	mheap_.cleanupID++ // Increment first. ID 0 is reserved.
  2186  	id := mheap_.cleanupID
  2187  	unlock(&mheap_.speciallock)
  2188  	s.special.kind = _KindSpecialCleanup
  2189  	s.cleanup = c
  2190  	s.id = id
  2191  
  2192  	mp := acquirem()
  2193  	addspecial(p, &s.special, true)
  2194  	// This is responsible for maintaining the same
  2195  	// GC-related invariants as markrootSpans in any
  2196  	// situation where it's possible that markrootSpans
  2197  	// has already run but mark termination hasn't yet.
  2198  	if gcphase != _GCoff {
  2199  		// Mark the cleanup itself, since the
  2200  		// special isn't part of the GC'd heap.
  2201  		gcScanCleanup(s, &mp.p.ptr().gcw)
  2202  	}
  2203  	releasem(mp)
  2204  	// Keep c and its referents alive. There's a window in this function
  2205  	// where it's only reachable via the special while the special hasn't
  2206  	// been added to the specials list yet. This is similar to a bug
  2207  	// discovered for weak handles, see #70455.
  2208  	KeepAlive(c)
  2209  	return id
  2210  }
  2211  
  2212  // specialCheckFinalizer is always paired with a specialCleanup or specialfinalizer, adding context.
  2213  type specialCheckFinalizer struct {
  2214  	_         sys.NotInHeap
  2215  	special   special
  2216  	cleanupID uint64 // Needed to disambiguate cleanups.
  2217  	createPC  uintptr
  2218  	funcPC    uintptr
  2219  	ptrType   *_type
  2220  }
  2221  
  2222  // setFinalizerContext adds a specialCheckFinalizer to ptr. ptr must already have a
  2223  // finalizer special attached.
  2224  func setFinalizerContext(ptr unsafe.Pointer, ptrType *_type, createPC, funcPC uintptr) {
  2225  	setCleanupContext(ptr, ptrType, createPC, funcPC, 0)
  2226  }
  2227  
  2228  // setCleanupContext adds a specialCheckFinalizer to ptr. ptr must already have a
  2229  // finalizer or cleanup special attached. Pass 0 for the cleanupID to indicate
  2230  // a finalizer.
  2231  func setCleanupContext(ptr unsafe.Pointer, ptrType *_type, createPC, funcPC uintptr, cleanupID uint64) {
  2232  	lock(&mheap_.speciallock)
  2233  	s := (*specialCheckFinalizer)(mheap_.specialCheckFinalizerAlloc.alloc())
  2234  	unlock(&mheap_.speciallock)
  2235  	s.special.kind = _KindSpecialCheckFinalizer
  2236  	s.cleanupID = cleanupID
  2237  	s.createPC = createPC
  2238  	s.funcPC = funcPC
  2239  	s.ptrType = ptrType
  2240  
  2241  	mp := acquirem()
  2242  	addspecial(ptr, &s.special, true)
  2243  	releasem(mp)
  2244  	KeepAlive(ptr)
  2245  }
  2246  
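        // getCleanupContext finds the specialCheckFinalizer attached to ptr with the
        // given cleanup ID (0 for a finalizer), unlinks it from the span's specials
        // list, and returns it. It returns nil if no such special exists.
        //
        // The world must be stopped.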
  2247  func getCleanupContext(ptr uintptr, cleanupID uint64) *specialCheckFinalizer {
  2248  	assertWorldStopped()
  2249  
  2250  	span := spanOfHeap(ptr)
  2251  	if span == nil {
  2252  		return nil
  2253  	}
  2254  	var found *specialCheckFinalizer
  2255  	offset := ptr - span.base()
  2256  	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCheckFinalizer)
  2257  	if exists {
  2258  		for {
  2259  			s := *iter
  2260  			if s == nil {
  2261  				// Reached the end of the linked list. Stop searching at this point.
  2262  				break
  2263  			}
  2264  			if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
  2265  				(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
  2266  				// The special is a cleanup and contains a matching cleanup id.
  2267  				*iter = s.next
  2268  				found = (*specialCheckFinalizer)(unsafe.Pointer(s))
  2269  				break
  2270  			}
  2271  			if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
  2272  				// The special is outside the region specified for that kind of
  2273  				// special. The specials are sorted by kind.
  2274  				break
  2275  			}
  2276  			// Try the next special.
  2277  			iter = &s.next
  2278  		}
  2279  	}
  2280  	return found
  2281  }
  2282  
  2283  // clearFinalizerContext removes the specialCheckFinalizer for the given pointer, if any.
  2284  func clearFinalizerContext(ptr uintptr) {
  2285  	clearCleanupContext(ptr, 0)
  2286  }
  2287  
  2288  // clearCleanupContext removes the specialCheckFinalizer for the given pointer and cleanup ID, if any.
  2289  func clearCleanupContext(ptr uintptr, cleanupID uint64) {
  2290  	// The following block removes the Special record of type cleanup for the object c.ptr.
  2291  	span := spanOfHeap(ptr)
  2292  	if span == nil {
  2293  		return
  2294  	}
  2295  	// Ensure that the span is swept.
  2296  	// Sweeping accesses the specials list w/o locks, so we have
  2297  	// to synchronize with it. And it's just much safer.
  2298  	mp := acquirem()
  2299  	span.ensureSwept()
  2300  
  2301  	offset := ptr - span.base()
  2302  
  2303  	var found *special
  2304  	lock(&span.speciallock)
  2305  
  2306  	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCheckFinalizer)
  2307  	if exists {
  2308  		for {
  2309  			s := *iter
  2310  			if s == nil {
  2311  				// Reached the end of the linked list. Stop searching at this point.
  2312  				break
  2313  			}
  2314  			if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
  2315  				(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
  2316  				// The special is a cleanup and contains a matching cleanup id.
  2317  				*iter = s.next
  2318  				found = s
  2319  				break
  2320  			}
  2321  			if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
  2322  				// The special is outside the region specified for that kind of
  2323  				// special. The specials are sorted by kind.
  2324  				break
  2325  			}
  2326  			// Try the next special.
  2327  			iter = &s.next
  2328  		}
  2329  	}
  2330  	if span.specials == nil {
  2331  		spanHasNoSpecials(span)
  2332  	}
  2333  	unlock(&span.speciallock)
  2334  	releasem(mp)
  2335  
  2336  	if found == nil {
  2337  		return
  2338  	}
  2339  	lock(&mheap_.speciallock)
  2340  	mheap_.specialCheckFinalizerAlloc.free(unsafe.Pointer(found))
  2341  	unlock(&mheap_.speciallock)
  2342  }
  2343  
  2344  // Indicates that an allocation is a tiny block.
  2345  // Used only if debug.checkfinalizers != 0.
  2346  type specialTinyBlock struct {
  2347  	_       sys.NotInHeap
  2348  	special special
  2349  }
  2350  
  2351  // setTinyBlockContext marks an allocation as a tiny block for diagnostics like
  2352  // checkfinalizers.
  2353  //
  2354  // A tiny block is only marked if it actually contains more than one distinct
  2355  // value, since we're using this for debugging.
  2356  func setTinyBlockContext(ptr unsafe.Pointer) {
  2357  	lock(&mheap_.speciallock)
  2358  	s := (*specialTinyBlock)(mheap_.specialTinyBlockAlloc.alloc())
  2359  	unlock(&mheap_.speciallock)
  2360  	s.special.kind = _KindSpecialTinyBlock
  2361  
  2362  	mp := acquirem()
  2363  	addspecial(ptr, &s.special, false)
  2364  	releasem(mp)
  2365  	KeepAlive(ptr)
  2366  }
  2367  
  2368  // inTinyBlock returns whether ptr is in a tiny alloc block, at one point grouped
  2369  // with other distinct values.
  2370  func inTinyBlock(ptr uintptr) bool {
  2371  	assertWorldStopped()
  2372  
  2373  	ptr = alignDown(ptr, maxTinySize)
  2374  	span := spanOfHeap(ptr)
  2375  	if span == nil {
  2376  		return false
  2377  	}
  2378  	offset := ptr - span.base()
  2379  	_, exists := span.specialFindSplicePoint(offset, _KindSpecialTinyBlock)
  2380  	return exists
  2381  }
  2382  
  2383  // The described object has a weak pointer.
  2384  //
  2385  // Weak pointers in the GC have the following invariants:
  2386  //
  2387  //   - Strong-to-weak conversions must ensure the strong pointer
  2388  //     remains live until the weak handle is installed. This ensures
  2389  //     that creating a weak pointer cannot fail.
  2390  //
  2391  //   - Weak-to-strong conversions require the weakly-referenced
  2392  //     object to be swept before the conversion may proceed. This
  2393  //     ensures that weak-to-strong conversions cannot resurrect
  2394  //     dead objects by sweeping them before that happens.
  2395  //
  2396  //   - Weak handles are unique and canonical for each byte offset into
  2397  //     an object that a strong pointer may point to, until an object
  2398  //     becomes unreachable.
  2399  //
  2400  //   - Weak handles contain nil as soon as an object becomes unreachable
  2401  //     the first time, before a finalizer makes it reachable again. New
  2402  //     weak handles created after resurrection are newly unique.
  2403  //
  2404  // specialWeakHandle is allocated from non-GC'd memory, so any heap
  2405  // pointers must be specially handled.
  2406  type specialWeakHandle struct {
  2407  	_       sys.NotInHeap
  2408  	special special
  2409  	// handle is a reference to the actual weak pointer.
  2410  	// It is always heap-allocated and must be explicitly kept
  2411  	// live so long as this special exists.
  2412  	handle *atomic.Uintptr
  2413  }
  2414  
  2415  //go:linkname internal_weak_runtime_registerWeakPointer weak.runtime_registerWeakPointer
  2416  func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer {
  2417  	return unsafe.Pointer(getOrAddWeakHandle(p))
  2418  }
  2419  
  2420  //go:linkname internal_weak_runtime_makeStrongFromWeak weak.runtime_makeStrongFromWeak
  2421  func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer {
  2422  	handle := (*atomic.Uintptr)(u)
  2423  
  2424  	// Prevent preemption. We want to make sure that another GC cycle can't start
  2425  	// and that work.strongFromWeak.block can't change out from under us.
  2426  	mp := acquirem()
  2427  
  2428  	// Yield to the GC if necessary.
  2429  	if work.strongFromWeak.block {
  2430  		releasem(mp)
  2431  
  2432  		// Try to park and wait for mark termination.
  2433  		// N.B. gcParkStrongFromWeak calls acquirem before returning.
  2434  		mp = gcParkStrongFromWeak()
  2435  	}
  2436  
  2437  	p := handle.Load()
  2438  	if p == 0 {
  2439  		releasem(mp)
  2440  		return nil
  2441  	}
  2442  	// Be careful. p may or may not refer to valid memory anymore, as it could've been
  2443  	// swept and released already. It's always safe to ensure a span is swept, though,
  2444  	// even if it's just some random span.
  2445  	span := spanOfHeap(p)
  2446  	if span == nil {
  2447  		// If it's immortal, then just return the pointer.
  2448  		//
  2449  		// Stay non-preemptible so the GC can't see us convert this potentially
  2450  		// completely bogus value to an unsafe.Pointer.
  2451  		if isGoPointerWithoutSpan(unsafe.Pointer(p)) {
  2452  			releasem(mp)
  2453  			return unsafe.Pointer(p)
  2454  		}
  2455  		// It's heap-allocated, so the span probably just got swept and released.
  2456  		releasem(mp)
  2457  		return nil
  2458  	}
  2459  	// Ensure the span is swept.
  2460  	span.ensureSwept()
  2461  
  2462  	// Now we can trust whatever we get from handle, so make a strong pointer.
  2463  	//
  2464  	// Even if we just swept some random span that doesn't contain this object, because
  2465  	// this object is long dead and its memory has since been reused, we'll just observe nil.
  2466  	ptr := unsafe.Pointer(handle.Load())
  2467  
  2468  	// This is responsible for maintaining the same GC-related
  2469  	// invariants as the Yuasa part of the write barrier. During
  2470  	// the mark phase, it's possible that we just created the only
  2471  	// valid pointer to the object pointed to by ptr. If it's only
  2472  	// ever referenced from our stack, and our stack is blackened
  2473  	// already, we could fail to mark it. So, mark it now.
  2474  	if gcphase != _GCoff {
  2475  		shade(uintptr(ptr))
  2476  	}
  2477  	releasem(mp)
  2478  
  2479  	// Explicitly keep ptr alive. This seems unnecessary since we return ptr,
  2480  	// but let's be explicit since it's important we keep ptr alive across the
  2481  	// call to shade.
  2482  	KeepAlive(ptr)
  2483  	return ptr
  2484  }
  2485  
  2486  // gcParkStrongFromWeak puts the current goroutine on the weak->strong queue and parks.
  2487  func gcParkStrongFromWeak() *m {
  2488  	// Prevent preemption as we check strongFromWeak, so it can't change out from under us.
  2489  	mp := acquirem()
  2490  
  2491  	for work.strongFromWeak.block {
  2492  		lock(&work.strongFromWeak.lock)
  2493  		releasem(mp) // N.B. Holding the lock prevents preemption.
  2494  
  2495  		// Queue ourselves up.
  2496  		work.strongFromWeak.q.pushBack(getg())
  2497  
  2498  		// Park.
  2499  		goparkunlock(&work.strongFromWeak.lock, waitReasonGCWeakToStrongWait, traceBlockGCWeakToStrongWait, 2)
  2500  
  2501  		// Re-acquire the current M since we're going to check the condition again.
  2502  		mp = acquirem()
  2503  
  2504  		// Re-check condition. We may have awoken in the next GC's mark termination phase.
  2505  	}
  2506  	return mp
  2507  }
  2508  
  2509  // gcWakeAllStrongFromWeak wakes all currently blocked weak->strong
  2510  // conversions. This is used at the end of a GC cycle.
  2511  //
  2512  // work.strongFromWeak.block must be false to prevent woken goroutines
  2513  // from immediately going back to sleep.
  2514  func gcWakeAllStrongFromWeak() {
  2515  	lock(&work.strongFromWeak.lock)
  2516  	list := work.strongFromWeak.q.popList()
  2517  	injectglist(&list)
  2518  	unlock(&work.strongFromWeak.lock)
  2519  }
  2520  
  2521  // Retrieves or creates a weak pointer handle for the object p.
  2522  func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
  2523  	if debug.sbrk != 0 {
  2524  		// debug.sbrk never frees memory, so it'll never go nil. However, we do still
  2525  		// need a weak handle that's specific to p. Use the immortal weak handle map.
  2526  		// Keep p alive across the call to getOrAdd defensively, though it doesn't
  2527  		// really matter in this particular case.
  2528  		handle := mheap_.immortalWeakHandles.getOrAdd(uintptr(p))
  2529  		KeepAlive(p)
  2530  		return handle
  2531  	}
  2532  
  2533  	// First try to retrieve without allocating.
  2534  	if handle := getWeakHandle(p); handle != nil {
  2535  		// Keep p alive for the duration of the function to ensure
  2536  		// that it cannot die while we're trying to do this.
  2537  		KeepAlive(p)
  2538  		return handle
  2539  	}
  2540  
  2541  	lock(&mheap_.speciallock)
  2542  	s := (*specialWeakHandle)(mheap_.specialWeakHandleAlloc.alloc())
  2543  	unlock(&mheap_.speciallock)
  2544  
  2545  	// N.B. Pad the weak handle to ensure it doesn't share a tiny
  2546  	// block with any other allocations. This can lead to leaks, such
  2547  	// as in go.dev/issue/76007. As an alternative, we could consider
  2548  	// using the currently-unused 8-byte noscan size class.
  2549  	type weakHandleBox struct {
  2550  		h atomic.Uintptr
  2551  		_ [maxTinySize - unsafe.Sizeof(atomic.Uintptr{})]byte
  2552  	}
  2553  	handle := &(new(weakHandleBox).h)
  2554  	s.special.kind = _KindSpecialWeakHandle
  2555  	s.handle = handle
  2556  	handle.Store(uintptr(p))
  2557  	if addspecial(p, &s.special, false) {
  2558  		// This is responsible for maintaining the same
  2559  		// GC-related invariants as markrootSpans in any
  2560  		// situation where it's possible that markrootSpans
  2561  		// has already run but mark termination hasn't yet.
  2562  		if gcphase != _GCoff {
  2563  			mp := acquirem()
  2564  			gcw := &mp.p.ptr().gcw
  2565  			// Mark the weak handle itself, since the
  2566  			// special isn't part of the GC'd heap.
  2567  			scanblock(uintptr(unsafe.Pointer(&s.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
  2568  			releasem(mp)
  2569  		}
  2570  
  2571  		// Keep p alive for the duration of the function to ensure
  2572  		// that it cannot die while we're trying to do this.
  2573  		//
  2574  		// Same for handle, which is only stored in the special.
  2575  		// There's a window where it might die if we don't keep it
  2576  		// alive explicitly. Returning it here is probably good enough,
  2577  		// but let's be defensive and explicit. See #70455.
  2578  		KeepAlive(p)
  2579  		KeepAlive(handle)
  2580  		return handle
  2581  	}
  2582  
  2583  	// There was an existing handle. Free the special
  2584  	// and try again. We must succeed because we're explicitly
  2585  	// keeping p live until the end of this function. Either
  2586  	// we, or someone else, must have succeeded, because we can
  2587  	// only fail in the event of a race, and p will still be
  2588  	// valid no matter how much time we spend here.
  2589  	lock(&mheap_.speciallock)
  2590  	mheap_.specialWeakHandleAlloc.free(unsafe.Pointer(s))
  2591  	unlock(&mheap_.speciallock)
  2592  
  2593  	handle = getWeakHandle(p)
  2594  	if handle == nil {
  2595  		throw("failed to get or create weak handle")
  2596  	}
  2597  
  2598  	// Keep p alive for the duration of the function to ensure
  2599  	// that it cannot die while we're trying to do this.
  2600  	//
  2601  	// Same for handle, just to be defensive.
  2602  	KeepAlive(p)
  2603  	KeepAlive(handle)
  2604  	return handle
  2605  }
  2606  
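        // getWeakHandle returns the weak handle for p if one exists, or nil otherwise.
        // For pointers without a heap span (immortal, linker-allocated memory) it
        // falls back to the immortal weak handle map. Throws if p is not a valid
        // Go heap or immortal pointer.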
  2607  func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
  2608  	span := spanOfHeap(uintptr(p))
  2609  	if span == nil {
  2610  		if isGoPointerWithoutSpan(p) {
  2611  			return mheap_.immortalWeakHandles.getOrAdd(uintptr(p))
  2612  		}
  2613  		throw("getWeakHandle on invalid pointer")
  2614  	}
  2615  
  2616  	// Ensure that the span is swept.
  2617  	// Sweeping accesses the specials list w/o locks, so we have
  2618  	// to synchronize with it. And it's just much safer.
  2619  	mp := acquirem()
  2620  	span.ensureSwept()
  2621  
  2622  	offset := uintptr(p) - span.base()
  2623  
  2624  	lock(&span.speciallock)
  2625  
  2626  	// Find the existing record and return the handle if one exists.
  2627  	var handle *atomic.Uintptr
  2628  	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialWeakHandle)
  2629  	if exists {
  2630  		handle = ((*specialWeakHandle)(unsafe.Pointer(*iter))).handle
  2631  	}
  2632  	unlock(&span.speciallock)
  2633  	releasem(mp)
  2634  
  2635  	// Keep p alive for the duration of the function to ensure
  2636  	// that it cannot die while we're trying to do this.
  2637  	KeepAlive(p)
  2638  	return handle
  2639  }
  2640  
  2641  type immortalWeakHandleMap struct {
  2642  	root atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap)
  2643  }
  2644  
  2645  // immortalWeakHandle is a lock-free append-only hash-trie.
  2646  //
  2647  // Key features:
  2648  //   - 2-ary trie. Child nodes are indexed by the highest bit (remaining) of the hash of the address.
  2649  //   - New nodes are placed at the first empty level encountered.
  2650  //   - When the first child is added to a node, the existing value is not moved into a child.
  2651  //     This means that we must check the value at each level, not just at the leaf.
  2652  //   - No deletion or rebalancing.
  2653  //   - Intentionally devolves into a linked list on hash collisions (the hash bits will all
  2654  //     get shifted out during iteration, and new nodes will just be appended to the 0th child).
  2655  type immortalWeakHandle struct {
  2656  	_ sys.NotInHeap
  2657  
  2658  	children [2]atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap)
  2659  	ptr      uintptr                 // &ptr is the weak handle
  2660  }
  2661  
  2662  // handle returns a canonical weak handle.
  2663  func (h *immortalWeakHandle) handle() *atomic.Uintptr {
  2664  	// N.B. Since we just need an *atomic.Uintptr that never changes, we can trivially
  2665  	// reference ptr to save on some memory in immortalWeakHandle and avoid extra atomics
  2666  	// in getOrAdd.
  2667  	return (*atomic.Uintptr)(unsafe.Pointer(&h.ptr))
  2668  }
  2669  
  2670  // getOrAdd introduces p, which must be a pointer to immortal memory (for example, a linker-allocated
  2671  // object) and returns a weak handle. The weak handle will never become nil.
  2672  func (tab *immortalWeakHandleMap) getOrAdd(p uintptr) *atomic.Uintptr {
  2673  	var newNode *immortalWeakHandle
  2674  	m := &tab.root
  2675  	hash := memhash(abi.NoEscape(unsafe.Pointer(&p)), 0, goarch.PtrSize)
  2676  	hashIter := hash
  2677  	for {
  2678  		n := (*immortalWeakHandle)(m.Load())
  2679  		if n == nil {
  2680  			// Try to insert a new map node. We may end up discarding
  2681  			// this node if we fail to insert because it turns out the
  2682  			// value is already in the map.
  2683  			//
  2684  			// The discard will only happen if two threads race on inserting
  2685  			// the same value. Both might create nodes, but only one will
  2686  			// succeed on insertion. If two threads race to insert two
  2687  			// different values, then both nodes will *always* get inserted,
  2688  			// because the equality checking below will always fail.
  2689  			//
  2690  			// Performance note: contention on insertion is likely to be
  2691  			// higher for small maps, but since this data structure is
  2692  			// append-only, either the map stays small because there isn't
  2693  			// much activity, or the map gets big and races to insert on
  2694  			// the same node are much less likely.
  2695  			if newNode == nil {
  2696  				newNode = (*immortalWeakHandle)(persistentalloc(unsafe.Sizeof(immortalWeakHandle{}), goarch.PtrSize, &memstats.gcMiscSys))
  2697  				newNode.ptr = p
  2698  			}
  2699  			if m.CompareAndSwapNoWB(nil, unsafe.Pointer(newNode)) {
  2700  				return newNode.handle()
  2701  			}
  2702  			// Reload n. Because pointers are only stored once,
  2703  			// we must have lost the race, and therefore n is not nil
  2704  			// anymore.
  2705  			n = (*immortalWeakHandle)(m.Load())
  2706  		}
  2707  		if n.ptr == p {
  2708  			return n.handle()
  2709  		}
  2710  		m = &n.children[hashIter>>(8*goarch.PtrSize-1)]
  2711  		hashIter <<= 1
  2712  	}
  2713  }
  2714  
  2715  // The described object is being heap profiled.
  2716  type specialprofile struct {
  2717  	_       sys.NotInHeap
  2718  	special special
  2719  	b       *bucket
  2720  }
  2721  
  2722  // setprofilebucket sets the heap profile bucket associated with p to b.
  2723  func setprofilebucket(p unsafe.Pointer, b *bucket) {
  2724  	lock(&mheap_.speciallock)
  2725  	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
  2726  	unlock(&mheap_.speciallock)
  2727  	s.special.kind = _KindSpecialProfile
  2728  	s.b = b
  2729  	if !addspecial(p, &s.special, false) {
  2730  		throw("setprofilebucket: profile already set")
  2731  	}
  2732  }
  2733  
  2734  // specialReachable tracks whether an object is reachable on the next
  2735  // GC cycle. This is used by testing.
  2736  type specialReachable struct {
  2737  	special   special
  2738  	done      bool
  2739  	reachable bool
  2740  }
  2741  
  2742  // specialPinCounter tracks whether an object is pinned multiple times.
  2743  type specialPinCounter struct {
  2744  	special special
  2745  	counter uintptr
  2746  }
  2747  
  2748  // specialsIter helps iterate over specials lists.
  2749  type specialsIter struct {
  2750  	pprev **special
  2751  	s     *special
  2752  }
  2753  
  2754  func newSpecialsIter(span *mspan) specialsIter {
  2755  	return specialsIter{&span.specials, span.specials}
  2756  }
  2757  
  2758  func (i *specialsIter) valid() bool {
  2759  	return i.s != nil
  2760  }
  2761  
  2762  func (i *specialsIter) next() {
  2763  	i.pprev = &i.s.next
  2764  	i.s = *i.pprev
  2765  }
  2766  
  2767  // unlinkAndNext removes the current special from the list and moves
  2768  // the iterator to the next special. It returns the unlinked special.
  2769  func (i *specialsIter) unlinkAndNext() *special {
  2770  	cur := i.s
  2771  	i.s = cur.next
  2772  	*i.pprev = i.s
  2773  	return cur
  2774  }
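
        // For illustration only (not part of the runtime): the pprev technique
        // above allows O(1) removal of the current node while walking a singly
        // linked list, without tracking a separate "previous node" pointer. A
        // minimal sketch with hypothetical names:
        //
        //	type node struct {
        //		val  int
        //		next *node
        //	}
        //
        //	// removeNegatives deletes every node whose value is negative.
        //	func removeNegatives(head **node) {
        //		pprev, s := head, *head
        //		for s != nil {
        //			if s.val < 0 {
        //				// Unlink: the previous link now bypasses s.
        //				*pprev = s.next
        //			} else {
        //				// Keep: advance pprev to s's own next field.
        //				pprev = &s.next
        //			}
        //			s = *pprev
        //		}
        //	}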
  2775  
  2776  // freeSpecial performs any cleanup on special s and deallocates it.
  2777  // s must already be unlinked from the specials list.
  2778  func freeSpecial(s *special, p unsafe.Pointer, size uintptr) {
  2779  	switch s.kind {
  2780  	case _KindSpecialFinalizer:
  2781  		sf := (*specialfinalizer)(unsafe.Pointer(s))
  2782  		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
  2783  		lock(&mheap_.speciallock)
  2784  		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
  2785  		unlock(&mheap_.speciallock)
  2786  	case _KindSpecialWeakHandle:
  2787  		sw := (*specialWeakHandle)(unsafe.Pointer(s))
  2788  		sw.handle.Store(0)
  2789  		lock(&mheap_.speciallock)
  2790  		mheap_.specialWeakHandleAlloc.free(unsafe.Pointer(s))
  2791  		unlock(&mheap_.speciallock)
  2792  	case _KindSpecialProfile:
  2793  		sp := (*specialprofile)(unsafe.Pointer(s))
  2794  		mProf_Free(sp.b, size)
  2795  		lock(&mheap_.speciallock)
  2796  		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
  2797  		unlock(&mheap_.speciallock)
  2798  	case _KindSpecialReachable:
  2799  		sp := (*specialReachable)(unsafe.Pointer(s))
  2800  		sp.done = true
  2801  		// The creator frees these.
  2802  	case _KindSpecialPinCounter:
  2803  		lock(&mheap_.speciallock)
  2804  		mheap_.specialPinCounterAlloc.free(unsafe.Pointer(s))
  2805  		unlock(&mheap_.speciallock)
  2806  	case _KindSpecialCleanup:
  2807  		sc := (*specialCleanup)(unsafe.Pointer(s))
  2808  		// Cleanups, unlike finalizers, do not resurrect the objects
  2809  		// they're attached to, so we only need to pass the cleanup
  2810  		// function, not the object.
  2811  		gcCleanups.enqueue(sc.cleanup)
  2812  		lock(&mheap_.speciallock)
  2813  		mheap_.specialCleanupAlloc.free(unsafe.Pointer(sc))
  2814  		unlock(&mheap_.speciallock)
  2815  	case _KindSpecialCheckFinalizer:
  2816  		sc := (*specialCheckFinalizer)(unsafe.Pointer(s))
  2817  		lock(&mheap_.speciallock)
  2818  		mheap_.specialCheckFinalizerAlloc.free(unsafe.Pointer(sc))
  2819  		unlock(&mheap_.speciallock)
  2820  	case _KindSpecialTinyBlock:
  2821  		st := (*specialTinyBlock)(unsafe.Pointer(s))
  2822  		lock(&mheap_.speciallock)
  2823  		mheap_.specialTinyBlockAlloc.free(unsafe.Pointer(st))
  2824  		unlock(&mheap_.speciallock)
  2825  	case _KindSpecialBubble:
  2826  		st := (*specialBubble)(unsafe.Pointer(s))
  2827  		lock(&mheap_.speciallock)
  2828  		mheap_.specialBubbleAlloc.free(unsafe.Pointer(st))
  2829  		unlock(&mheap_.speciallock)
  2830  	case _KindSpecialSecret:
  2831  		memclrNoHeapPointers(p, size)
  2832  		lock(&mheap_.speciallock)
  2833  		mheap_.specialSecretAlloc.free(unsafe.Pointer(s))
  2834  		unlock(&mheap_.speciallock)
  2835  	default:
  2836  		throw("bad special kind")
  2837  		panic("not reached")
  2838  	}
  2839  }
  2840  
  2841  // gcBits is an alloc/mark bitmap. This is always used as gcBits.x.
  2842  type gcBits struct {
  2843  	_ sys.NotInHeap
  2844  	x uint8
  2845  }
  2846  
  2847  // bytep returns a pointer to the n'th byte of b.
  2848  func (b *gcBits) bytep(n uintptr) *uint8 {
  2849  	return addb(&b.x, n)
  2850  }
  2851  
  2852  // bitp returns a pointer to the byte containing bit n and a mask for
  2853  // selecting that bit from *bytep.
  2854  func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
  2855  	return b.bytep(n / 8), 1 << (n % 8)
  2856  }
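
        // For example, for bit n = 10: bitp returns the second byte,
        // b.bytep(10/8) = b.bytep(1), and the mask 1<<(10%8) = 1<<2 = 0x04.
        // A caller can then test the bit with *bytep&mask != 0 or set it
        // with *bytep |= mask.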
  2857  
  2858  const gcBitsChunkBytes = uintptr(64 << 10)
  2859  const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
  2860  
  2861  type gcBitsHeader struct {
  2862  	free uintptr // free is the index into bits of the next free byte.
  2863  	next uintptr // *gcBits triggers recursive type bug. (issue 14620)
  2864  }
  2865  
  2866  type gcBitsArena struct {
  2867  	_ sys.NotInHeap
  2868  	// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
  2869  	free uintptr // free is the index into bits of the next free byte; read/write atomically
  2870  	next *gcBitsArena
  2871  	bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
  2872  }
  2873  
  2874  var gcBitsArenas struct {
  2875  	lock     mutex
  2876  	free     *gcBitsArena
  2877  	next     *gcBitsArena // Read atomically. Write atomically under lock.
  2878  	current  *gcBitsArena
  2879  	previous *gcBitsArena
  2880  }
  2881  
  2882  // tryAlloc allocates from b or returns nil if b does not have enough room.
  2883  // This is safe to call concurrently.
  2884  func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
  2885  	if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
  2886  		return nil
  2887  	}
  2888  	// Try to allocate from this block.
  2889  	end := atomic.Xadduintptr(&b.free, bytes)
  2890  	if end > uintptr(len(b.bits)) {
  2891  		return nil
  2892  	}
  2893  	// There was enough room.
  2894  	start := end - bytes
  2895  	return &b.bits[start]
  2896  }
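
        // For illustration only (not part of the runtime): tryAlloc is a
        // lock-free bump allocator. A minimal, self-contained sketch of the
        // same pattern using sync/atomic, with hypothetical names; note that a
        // failed allocation may leave the cursor past the end of the buffer,
        // which is harmless because the cursor is only ever compared against
        // the length, never used directly as an index.
        //
        //	type bumpArena struct {
        //		free atomic.Uintptr // index of the next free byte in buf
        //		buf  [1 << 16]byte
        //	}
        //
        //	// tryAlloc claims n bytes from a, or returns nil if a lacks room.
        //	func (a *bumpArena) tryAlloc(n uintptr) []byte {
        //		if a == nil || a.free.Load()+n > uintptr(len(a.buf)) {
        //			return nil // cheap, racy early-out
        //		}
        //		end := a.free.Add(n) // atomically claim [end-n, end)
        //		if end > uintptr(len(a.buf)) {
        //			return nil // lost a race near the end of the arena
        //		}
        //		return a.buf[end-n : end]
        //	}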
  2897  
  2898  // newMarkBits returns a pointer to 8 byte aligned bytes
  2899  // to be used for a span's mark bits.
  2900  func newMarkBits(nelems uintptr) *gcBits {
  2901  	blocksNeeded := (nelems + 63) / 64
  2902  	bytesNeeded := blocksNeeded * 8
  2903  
  2904  	// Try directly allocating from the current head arena.
  2905  	head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
  2906  	if p := head.tryAlloc(bytesNeeded); p != nil {
  2907  		return p
  2908  	}
  2909  
  2910  	// There's not enough room in the head arena. We may need to
  2911  	// allocate a new arena.
  2912  	lock(&gcBitsArenas.lock)
  2913  	// Try the head arena again, since it may have changed. Now
  2914  	// that we hold the lock, the list head can't change, but its
  2915  	// free position still can.
  2916  	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
  2917  		unlock(&gcBitsArenas.lock)
  2918  		return p
  2919  	}
  2920  
  2921  	// Allocate a new arena. This may temporarily drop the lock.
  2922  	fresh := newArenaMayUnlock()
  2923  	// If newArenaMayUnlock dropped the lock, another thread may
  2924  	// have put a fresh arena on the "next" list. Try allocating
  2925  	// from next again.
  2926  	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
  2927  		// Put fresh back on the free list.
  2928  		// TODO: Mark it "already zeroed"
  2929  		fresh.next = gcBitsArenas.free
  2930  		gcBitsArenas.free = fresh
  2931  		unlock(&gcBitsArenas.lock)
  2932  		return p
  2933  	}
  2934  
  2935  	// Allocate from the fresh arena. We haven't linked it in yet, so
  2936  	// this cannot race and is guaranteed to succeed.
  2937  	p := fresh.tryAlloc(bytesNeeded)
  2938  	if p == nil {
  2939  		throw("markBits overflow")
  2940  	}
  2941  
  2942  	// Add the fresh arena to the "next" list.
  2943  	fresh.next = gcBitsArenas.next
  2944  	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))
  2945  
  2946  	unlock(&gcBitsArenas.lock)
  2947  	return p
  2948  }
  2949  
  2950  // newAllocBits returns a pointer to 8 byte aligned bytes
  2951  // to be used for this span's alloc bits.
  2952  // newAllocBits is used to provide newly initialized spans
  2953  // with allocation bits. For spans that are not being
  2954  // initialized, the mark bits are repurposed as allocation
  2955  // bits when the span is swept.
  2956  func newAllocBits(nelems uintptr) *gcBits {
  2957  	return newMarkBits(nelems)
  2958  }
  2959  
  2960  // nextMarkBitArenaEpoch establishes a new epoch for the arenas
  2961  // holding the mark bits. The arenas are named relative to the
  2962  // current GC cycle, which is demarcated by the call to finishweep_m.
  2963  //
  2964  // All current spans have been swept.
  2965  // During that sweep, each span allocated room for its gcmarkBits in
  2966  // the gcBitsArenas.next block. gcBitsArenas.next becomes gcBitsArenas.current,
  2967  // where the GC will mark objects, and after each span is swept these
  2968  // bits will be used to allocate objects.
  2969  // gcBitsArenas.current becomes gcBitsArenas.previous, where the spans'
  2970  // gcAllocBits live until all the spans have been swept during this GC cycle.
  2971  // Sweeping a span extinguishes its reference to gcBitsArenas.previous
  2972  // by pointing its gcAllocBits into gcBitsArenas.current.
  2973  // gcBitsArenas.previous is then released to the gcBitsArenas.free list.
  2974  func nextMarkBitArenaEpoch() {
  2975  	lock(&gcBitsArenas.lock)
  2976  	if gcBitsArenas.previous != nil {
  2977  		if gcBitsArenas.free == nil {
  2978  			gcBitsArenas.free = gcBitsArenas.previous
  2979  		} else {
  2980  			// Find end of previous arenas.
  2981  			last := gcBitsArenas.previous
  2982  			for ; last.next != nil; last = last.next {
  2983  			}
  2984  			last.next = gcBitsArenas.free
  2985  			gcBitsArenas.free = gcBitsArenas.previous
  2986  		}
  2987  	}
  2988  	gcBitsArenas.previous = gcBitsArenas.current
  2989  	gcBitsArenas.current = gcBitsArenas.next
  2990  	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed
  2991  	unlock(&gcBitsArenas.lock)
  2992  }
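
        // Schematically, each call rotates the arena lists one step (with the
        // old previous list prepended onto free):
        //
        //	free     ← previous ← current ← next ← nil
        //
        // and newMarkBits repopulates next lazily via newArenaMayUnlock.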
  2993  
  2994  // newArenaMayUnlock allocates and zeroes a gcBits arena.
  2995  // The caller must hold gcBitsArenas.lock. This may temporarily release it.
  2996  func newArenaMayUnlock() *gcBitsArena {
  2997  	var result *gcBitsArena
  2998  	if gcBitsArenas.free == nil {
  2999  		unlock(&gcBitsArenas.lock)
  3000  		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys, "gc bits"))
  3001  		if result == nil {
  3002  			throw("runtime: cannot allocate memory")
  3003  		}
  3004  		lock(&gcBitsArenas.lock)
  3005  	} else {
  3006  		result = gcBitsArenas.free
  3007  		gcBitsArenas.free = gcBitsArenas.free.next
  3008  		memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
  3009  	}
  3010  	result.next = nil
  3011  	// If result.bits is not 8 byte aligned, adjust the index so
  3012  	// that &result.bits[result.free] is 8 byte aligned.
  3013  	if unsafe.Offsetof(gcBitsArena{}.bits)&7 == 0 {
  3014  		result.free = 0
  3015  	} else {
  3016  		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
  3017  	}
  3018  	return result
  3019  }
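
        // For example, if the bits array starts at an address ending in ...5
        // (addr&7 == 5), then result.free = 8-5 = 3, so &result.bits[3] is the
        // first 8 byte aligned slot and the initial 3 bytes are skipped.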
  3020  
