Source file src/runtime/malloc.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Memory allocator.
     6  //
     7  // This was originally based on tcmalloc, but has diverged quite a bit.
     8  // http://goog-perftools.sourceforge.net/doc/tcmalloc.html
     9  
    10  // The main allocator works in runs of pages.
    11  // Small allocation sizes (up to and including 32 kB) are
    12  // rounded to one of about 70 size classes, each of which
    13  // has its own free set of objects of exactly that size.
    14  // Any free page of memory can be split into a set of objects
    15  // of one size class, which are then managed using a free bitmap.
    16  //
    17  // The allocator's data structures are:
    18  //
    19  //	fixalloc: a free-list allocator for fixed-size off-heap objects,
    20  //		used to manage storage used by the allocator.
    21  //	mheap: the malloc heap, managed at page (8192-byte) granularity.
    22  //	mspan: a run of in-use pages managed by the mheap.
    23  //	mcentral: collects all spans of a given size class.
    24  //	mcache: a per-P cache of mspans with free space.
    25  //	mstats: allocation statistics.
    26  //
    27  // Allocating a small object proceeds up a hierarchy of caches:
    28  //
    29  //	1. Round the size up to one of the small size classes
    30  //	   and look in the corresponding mspan in this P's mcache.
    31  //	   Scan the mspan's free bitmap to find a free slot.
    32  //	   If there is a free slot, allocate it.
    33  //	   This can all be done without acquiring a lock.
    34  //
    35  //	2. If the mspan has no free slots, obtain a new mspan
    36  //	   from the mcentral's list of mspans of the required size
    37  //	   class that have free space.
    38  //	   Obtaining a whole span amortizes the cost of locking
    39  //	   the mcentral.
    40  //
    41  //	3. If the mcentral's mspan list is empty, obtain a run
    42  //	   of pages from the mheap to use for the mspan.
    43  //
    44  //	4. If the mheap is empty or has no page runs large enough,
    45  //	   allocate a new group of pages (at least 1MB) from the
    46  //	   operating system. Allocating a large run of pages
    47  //	   amortizes the cost of talking to the operating system.
    48  //
    49  // Sweeping an mspan and freeing objects on it proceeds up a similar
    50  // hierarchy:
    51  //
    52  //	1. If the mspan is being swept in response to allocation, it
    53  //	   is returned to the mcache to satisfy the allocation.
    54  //
    55  //	2. Otherwise, if the mspan still has allocated objects in it,
    56  //	   it is placed on the mcentral free list for the mspan's size
    57  //	   class.
    58  //
    59  //	3. Otherwise, if all objects in the mspan are free, the mspan's
    60  //	   pages are returned to the mheap and the mspan is now dead.
    61  //
    62  // Allocating and freeing a large object uses the mheap
    63  // directly, bypassing the mcache and mcentral.
    64  //
    65  // If mspan.needzero is false, then free object slots in the mspan are
    66  // already zeroed. Otherwise if needzero is true, objects are zeroed as
    67  // they are allocated. There are various benefits to delaying zeroing
    68  // this way:
    69  //
    70  //	1. Stack frame allocation can avoid zeroing altogether.
    71  //
    72  //	2. It exhibits better temporal locality, since the program is
    73  //	   probably about to write to the memory.
    74  //
    75  //	3. We don't zero pages that never get reused.
    76  
    77  // Virtual memory layout
    78  //
    79  // The heap consists of a set of arenas, which are 64MB on 64-bit and
    80  // 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
    81  // aligned to the arena size.
    82  //
    83  // Each arena has an associated heapArena object that stores the
    84  // metadata for that arena: the heap bitmap for all words in the arena
    85  // and the span map for all pages in the arena. heapArena objects are
    86  // themselves allocated off-heap.
    87  //
    88  // Since arenas are aligned, the address space can be viewed as a
    89  // series of arena frames. The arena map (mheap_.arenas) maps from
    90  // arena frame number to *heapArena, or nil for parts of the address
    91  // space not backed by the Go heap. The arena map is structured as a
    92  // two-level array consisting of a "L1" arena map and many "L2" arena
    93  // maps; however, since arenas are large, on many architectures, the
    94  // arena map consists of a single, large L2 map.
    95  //
    96  // The arena map covers the entire possible address space, allowing
    97  // the Go heap to use any part of the address space. The allocator
    98  // attempts to keep arenas contiguous so that large spans (and hence
    99  // large objects) can cross arenas.
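        //
        // As an illustrative sketch (not code from this file; arenaIndex and the
        // arenaIdx helpers are defined elsewhere in the runtime), looking up the
        // metadata for an address p conceptually proceeds as:
        //
        //	ai := arenaIdx((p - arenaBaseOffset) / heapArenaBytes) // arena frame number
        //	ha := mheap_.arenas[ai.l1()][ai.l2()]                   // *heapArena metadata (nil checks omitted)
        //
        // where l1() and l2() split the frame number into the two map levels.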
   100  
   101  package runtime
   102  
   103  import (
   104  	"internal/goarch"
   105  	"internal/goos"
   106  	"internal/runtime/atomic"
   107  	"internal/runtime/gc"
   108  	"internal/runtime/math"
   109  	"internal/runtime/sys"
   110  	"unsafe"
   111  )
   112  
   113  const (
   114  	maxTinySize   = _TinySize
   115  	tinySizeClass = _TinySizeClass
   116  	maxSmallSize  = gc.MaxSmallSize
   117  	pageSize      = 1 << gc.PageShift
   118  	pageMask      = pageSize - 1
   119  
   120  	// Unused. Left for viewcore.
   121  	_PageSize              = pageSize
   122  	minSizeForMallocHeader = gc.MinSizeForMallocHeader
   123  	mallocHeaderSize       = gc.MallocHeaderSize
   124  
   125  	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
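        	// (On 64-bit, ^uintptr(0)>>63 is 1, so the expression below is
        	// (1<<1)/2 = 1; on 32-bit the shift yields 0, giving (1<<0)/2 = 0.)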
   126  	_64bit = 1 << (^uintptr(0) >> 63) / 2
   127  
   128  	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
   129  	_TinySize      = 16
   130  	_TinySizeClass = int8(2)
   131  
   132  	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
   133  
   134  	// Per-P, per order stack segment cache size.
   135  	_StackCacheSize = 32 * 1024
   136  
   137  	// Number of orders that get caching. Order 0 is FixedStack
   138  	// and each successive order is twice as large.
   139  	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
   140  	// will be allocated directly.
   141  	// Since FixedStack is different on different systems, we
   142  	// must vary NumStackOrders to keep the same maximum cached size.
   143  	//   OS               | FixedStack | NumStackOrders
   144  	//   -----------------+------------+---------------
   145  	//   linux/darwin/bsd | 2KB        | 4
   146  	//   windows/32       | 4KB        | 3
   147  	//   windows/64       | 8KB        | 2
   148  	//   plan9            | 4KB        | 3
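        	//
        	// For instance, the formula below gives 4 - 8/4*0 - 0 = 4 on linux/amd64,
        	// 4 - 8/4*1 = 2 on windows/amd64, 4 - 4/4*1 = 3 on windows/386, and
        	// 4 - 0 - 1 = 3 on plan9, matching the table.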
   149  	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
   150  
   151  	// heapAddrBits is the number of bits in a heap address. On
   152  	// amd64, addresses are sign-extended beyond heapAddrBits. On
   153  	// other arches, they are zero-extended.
   154  	//
   155  	// On most 64-bit platforms, we limit this to 48 bits based on a
   156  	// combination of hardware and OS limitations.
   157  	//
   158  	// amd64 hardware limits addresses to 48 bits, sign-extended
   159  	// to 64 bits. Addresses where the top 16 bits are not either
   160  	// all 0 or all 1 are "non-canonical" and invalid. Because of
   161  	// these "negative" addresses, we offset addresses by 1<<47
   162  	// (arenaBaseOffset) on amd64 before computing indexes into
   163  	// the heap arenas index. In 2017, amd64 hardware added
   164  	// support for 57 bit addresses; however, currently only Linux
   165  	// supports this extension and the kernel will never choose an
   166  	// address above 1<<47 unless mmap is called with a hint
   167  	// address above 1<<47 (which we never do).
   168  	//
   169  	// arm64 hardware (as of ARMv8) limits user addresses to 48
   170  	// bits, in the range [0, 1<<48).
   171  	//
   172  	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
   173  	// in hardware. On Linux, Go leans on stricter OS limits. Based
   174  	// on Linux's processor.h, the user address space is limited as
   175  	// follows on 64-bit architectures:
   176  	//
   177  	// Architecture  Name              Maximum Value (exclusive)
   178  	// ---------------------------------------------------------------------
   179  	// amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
   180  	// arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
   181  	// ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
   182  	// mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
   183  	// s390x         TASK_SIZE         1<<64 (64 bit addresses)
   184  	//
   185  	// These limits may increase over time, but are currently at
   186  	// most 48 bits except on s390x. On all architectures, Linux
   187  	// starts placing mmap'd regions at addresses that are
   188  	// significantly below 48 bits, so even if it's possible to
   189  	// exceed Go's 48 bit limit, it's extremely unlikely in
   190  	// practice.
   191  	//
   192  	// On 32-bit platforms, we accept the full 32-bit address
   193  	// space because doing so is cheap.
   194  	// mips32 only has access to the low 2GB of virtual memory, so
   195  	// we further limit it to 31 bits.
   196  	//
   197  	// On ios/arm64, although 64-bit pointers are presumably
   198  	// available, pointers are truncated to 33 bits in iOS <14.
   199  	// Furthermore, only the top 4 GiB of the address space are
   200  	// actually available to the application. In iOS >=14, more
   201  	// of the address space is available, and the OS can now
   202  	// provide addresses outside of those 33 bits. Pick 40 bits
   203  	// as a reasonable balance between address space usage by the
   204  	// page allocator, and flexibility for what mmap'd regions
   205  	// we'll accept for the heap. We can't just move to the full
   206  	// 48 bits because this uses too much address space for older
   207  	// iOS versions.
   208  	// TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
   209  	// to a 48-bit address space like every other arm64 platform.
   210  	//
   211  	// WebAssembly currently has a limit of 4GB linear memory.
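        	//
        	// For instance, the expression below evaluates to 1*1*1*48 = 48 on
        	// linux/amd64, (1-1+1)*32 = 32 on wasm, 40*1*1 = 40 on ios/arm64, and
        	// (1-0+0)*(32-1) = 31 on mips/mipsle.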
   212  	heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64
   213  
   214  	// maxAlloc is the maximum size of an allocation. On 64-bit,
   215  	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
   216  	// 32-bit, however, this is one less than 1<<32 because the
   217  	// number of bytes in the address space doesn't actually fit
   218  	// in a uintptr.
   219  	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
   220  
   221  	// The number of bits in a heap address, the size of heap
   222  	// arenas, and the L1 and L2 arena map sizes are related by
   223  	//
   224  	//   (1 << addr bits) = arena size * L1 entries * L2 entries
   225  	//
   226  	// Currently, we balance these as follows:
   227  	//
   228  	//       Platform  Addr bits  Arena size  L1 entries   L2 entries
   229  	// --------------  ---------  ----------  ----------  -----------
   230  	//       */64-bit         48        64MB           1    4M (32MB)
   231  	// windows/64-bit         48         4MB          64    1M  (8MB)
   232  	//      ios/arm64         40         4MB           1  256K  (2MB)
   233  	//       */32-bit         32         4MB           1  1024  (4KB)
   234  	//     */mips(le)         31         4MB           1   512  (2KB)
   235  
   236  	// heapArenaBytes is the size of a heap arena. The heap
   237  	// consists of mappings of size heapArenaBytes, aligned to
   238  	// heapArenaBytes. The initial heap mapping is one arena.
   239  	//
   240  	// This is currently 64MB on 64-bit non-Windows and 4MB on
   241  	// 32-bit and on Windows. We use smaller arenas on Windows
   242  	// because all committed memory is charged to the process,
   243  	// even if it's not touched. Hence, for processes with small
   244  	// heaps, the mapped arena space needs to be commensurate.
   245  	// This is particularly important with the race detector,
   246  	// since it significantly amplifies the cost of committed
   247  	// memory.
   248  	heapArenaBytes = 1 << logHeapArenaBytes
   249  
   250  	heapArenaWords = heapArenaBytes / goarch.PtrSize
   251  
   252  	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
   253  	// prefer using heapArenaBytes where possible (we need the
   254  	// constant to compute some other constants).
   255  	logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
   256  
   257  	// heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
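        	// On 64-bit non-Windows, for example, a 64MB arena holds 8M words, so
        	// its bitmap occupies 8M/64 = 128K uintptrs (1MB).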
   258  	heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
   259  
   260  	pagesPerArena = heapArenaBytes / pageSize
   261  
   262  	// arenaL1Bits is the number of bits of the arena number
   263  	// covered by the first level arena map.
   264  	//
   265  	// This number should be small, since the first level arena
   266  	// map requires PtrSize*(1<<arenaL1Bits) of space in the
   267  	// binary's BSS. It can be zero, in which case the first level
   268  	// index is effectively unused. There is a performance benefit
   269  	// to this, since the generated code can be more efficient,
   270  	// but this comes at the cost of having a large L2 mapping.
   271  	//
   272  	// We use the L1 map on 64-bit Windows because the arena size
   273  	// is small, but the address space is still 48 bits, and
   274  	// there's a high cost to having a large L2.
   275  	arenaL1Bits = 6 * (_64bit * goos.IsWindows)
   276  
   277  	// arenaL2Bits is the number of bits of the arena number
   278  	// covered by the second level arena index.
   279  	//
   280  	// The size of each arena map allocation is proportional to
   281  	// 1<<arenaL2Bits, so it's important that this not be too
   282  	// large. 48 bits leads to 32MB arena index allocations, which
   283  	// is about the practical threshold.
   284  	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
   285  
   286  	// arenaL1Shift is the number of bits to shift an arena frame
   287  	// number by to compute an index into the first level arena map.
   288  	arenaL1Shift = arenaL2Bits
   289  
   290  	// arenaBits is the total bits in a combined arena map index.
   291  	// This is split between the index into the L1 arena map and
   292  	// the L2 arena map.
   293  	arenaBits = arenaL1Bits + arenaL2Bits
   294  
   295  	// arenaBaseOffset is the pointer value that corresponds to
   296  	// index 0 in the heap arena map.
   297  	//
   298  	// On amd64, the address space is 48 bits, sign extended to 64
   299  	// bits. This offset lets us handle "negative" addresses (or
   300  	// high addresses if viewed as unsigned).
   301  	//
   302  	// On aix/ppc64, this offset allows us to keep heapAddrBits at
   303  	// 48. Otherwise, it would be 60 in order to handle mmap addresses
   304  	// (in range 0x0a00000000000000 - 0x0afffffffffffff). But in that
   305  	// case, the memory reserved in (s *pageAlloc).init for chunks
   306  	// causes significant slowdowns.
   307  	//
   308  	// On other platforms, the user address space is contiguous
   309  	// and starts at 0, so no offset is necessary.
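        	//
        	// On amd64 the constant equals -(1<<47) modulo 2^64, so offsetting by it
        	// maps the two canonical halves of the address space,
        	// [0xffff800000000000, 1<<64) followed by [0, 1<<47), onto one
        	// contiguous range of arena frames.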
   310  	arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
   311  	// A typed version of this constant that will make it into DWARF (for viewcore).
   312  	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
   313  
   314  	// Max number of threads to run garbage collection.
   315  	// 2, 3, and 4 are all plausible maximums depending
   316  	// on the hardware details of the machine. The garbage
   317  	// collector scales well to 32 cpus.
   318  	_MaxGcproc = 32
   319  
   320  	// minLegalPointer is the smallest possible legal pointer.
   321  	// This is the smallest possible architectural page size,
   322  	// since we assume that the first page is never mapped.
   323  	//
   324  	// This should agree with minZeroPage in the compiler.
   325  	minLegalPointer uintptr = 4096
   326  
   327  	// minHeapForMetadataHugePages sets a threshold on when certain kinds of
   328  	// heap metadata, currently the arenas map L2 entries and page alloc bitmap
   329  	// mappings, are allowed to be backed by huge pages. If the heap goal ever
   330  	// exceeds this threshold, then huge pages are enabled.
   331  	//
   332  	// These numbers are chosen with the assumption that huge pages are on the
   333  	// order of a few MiB in size.
   334  	//
   335  	// The kinds of metadata this applies to have very low overhead compared
   336  	// to the address space used, but their constant overhead for small heaps would
   337  	// be very high if they were backed by huge pages (e.g. a few MiB makes
   338  	// a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB
   339  	// heap). The benefit of huge pages is also not worth it for small heaps,
   340  	// because only a very small part of the metadata is used for small heaps.
   341  	//
   342  	// N.B. If the heap goal exceeds the threshold and then shrinks back to a very small
   343  	// size, huge pages will still be enabled for this mapping. The reason is that
   344  	// there's no point in disabling them unless we're also returning the physical memory
   345  	// for these metadata mappings to the OS. That would be quite complex to do in general
   346  	// as the heap is likely fragmented after a reduction in heap size.
   347  	minHeapForMetadataHugePages = 1 << 30
   348  )
   349  
   350  // physPageSize is the size in bytes of the OS's physical pages.
   351  // Mapping and unmapping operations must be done at multiples of
   352  // physPageSize.
   353  //
   354  // This must be set by the OS init code (typically in osinit) before
   355  // mallocinit.
   356  var physPageSize uintptr
   357  
   358  // physHugePageSize is the size in bytes of the OS's default physical huge
   359  // page size whose allocation is opaque to the application. It is assumed
   360  // and verified to be a power of two.
   361  //
   362  // If set, this must be set by the OS init code (typically in osinit) before
   363  // mallocinit. However, setting it at all is optional, and leaving the default
   364  // value is always safe (though potentially less efficient).
   365  //
   366  // Since physHugePageSize is always assumed to be a power of two,
   367  // physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
   368  // The purpose of physHugePageShift is to avoid doing divisions in
   369  // performance critical functions.
   370  var (
   371  	physHugePageSize  uintptr
   372  	physHugePageShift uint
   373  )
   374  
   375  func mallocinit() {
   376  	if gc.SizeClassToSize[tinySizeClass] != maxTinySize {
   377  		throw("bad TinySizeClass")
   378  	}
   379  
   380  	if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 {
   381  		// heapBits expects modular arithmetic on bitmap
   382  		// addresses to work.
   383  		throw("heapArenaBitmapWords not a power of 2")
   384  	}
   385  
   386  	// Check physPageSize.
   387  	if physPageSize == 0 {
   388  		// The OS init code failed to fetch the physical page size.
   389  		throw("failed to get system page size")
   390  	}
   391  	if physPageSize > maxPhysPageSize {
   392  		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
   393  		throw("bad system page size")
   394  	}
   395  	if physPageSize < minPhysPageSize {
   396  		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
   397  		throw("bad system page size")
   398  	}
   399  	if physPageSize&(physPageSize-1) != 0 {
   400  		print("system page size (", physPageSize, ") must be a power of 2\n")
   401  		throw("bad system page size")
   402  	}
   403  	if physHugePageSize&(physHugePageSize-1) != 0 {
   404  		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
   405  		throw("bad system huge page size")
   406  	}
   407  	if physHugePageSize > maxPhysHugePageSize {
   408  		// physHugePageSize is greater than the maximum supported huge page size.
   409  		// Don't throw here, as in the other cases, since a system configured
   410  		// in this way isn't wrong; we just don't have the code to support it.
   411  		// Instead, silently set the huge page size to zero.
   412  		physHugePageSize = 0
   413  	}
   414  	if physHugePageSize != 0 {
   415  		// Since physHugePageSize is a power of 2, it suffices to increase
   416  		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
   417  		for 1<<physHugePageShift != physHugePageSize {
   418  			physHugePageShift++
   419  		}
   420  	}
   421  	if pagesPerArena%pagesPerSpanRoot != 0 {
   422  		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
   423  		throw("bad pagesPerSpanRoot")
   424  	}
   425  	if pagesPerArena%pagesPerReclaimerChunk != 0 {
   426  		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
   427  		throw("bad pagesPerReclaimerChunk")
   428  	}
   429  	// Check that the minimum size (exclusive) for a malloc header is also
   430  	// a size class boundary. This is important to making sure checks align
   431  	// across different parts of the runtime.
   432  	//
   433  	// While we're here, also check to make sure all these size classes'
   434  	// span sizes are one page. Some code relies on this.
   435  	minSizeForMallocHeaderIsSizeClass := false
   436  	sizeClassesUpToMinSizeForMallocHeaderAreOnePage := true
   437  	for i := 0; i < len(gc.SizeClassToSize); i++ {
   438  		if gc.SizeClassToNPages[i] > 1 {
   439  			sizeClassesUpToMinSizeForMallocHeaderAreOnePage = false
   440  		}
   441  		if gc.MinSizeForMallocHeader == uintptr(gc.SizeClassToSize[i]) {
   442  			minSizeForMallocHeaderIsSizeClass = true
   443  			break
   444  		}
   445  	}
   446  	if !minSizeForMallocHeaderIsSizeClass {
   447  		throw("min size of malloc header is not a size class boundary")
   448  	}
   449  	if !sizeClassesUpToMinSizeForMallocHeaderAreOnePage {
   450  		throw("expected all size classes up to min size for malloc header to fit in one-page spans")
   451  	}
   452  	// Check that the pointer bitmap for all small sizes without a malloc header
   453  	// fits in a word.
   454  	if gc.MinSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
   455  		throw("max pointer/scan bitmap size for headerless objects is too large")
   456  	}
   457  
   458  	if minTagBits > tagBits {
   459  		throw("tagBits too small")
   460  	}
   461  
   462  	// Initialize the heap.
   463  	mheap_.init()
   464  	mcache0 = allocmcache()
   465  	lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
   466  	lockInit(&profInsertLock, lockRankProfInsert)
   467  	lockInit(&profBlockLock, lockRankProfBlock)
   468  	lockInit(&profMemActiveLock, lockRankProfMemActive)
   469  	for i := range profMemFutureLock {
   470  		lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
   471  	}
   472  	lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
   473  
   474  	// Create initial arena growth hints.
   475  	if isSbrkPlatform {
   476  		// Don't generate hints on sbrk platforms. We can
   477  		// only grow the break sequentially.
   478  	} else if goarch.PtrSize == 8 {
   479  		// On a 64-bit machine, we pick the following hints
   480  		// because:
   481  		//
   482  		// 1. Starting from the middle of the address space
   483  		// makes it easier to grow out a contiguous range
   484  		// without running into some other mapping.
   485  		//
   486  		// 2. This makes Go heap addresses more easily
   487  		// recognizable when debugging.
   488  		//
   489  		// 3. Stack scanning in gccgo is still conservative,
   490  		// so it's important that addresses be distinguishable
   491  		// from other data.
   492  		//
   493  		// Starting at 0x00c0 means that the valid memory addresses
   494  		// will begin with 0x00c0, 0x00c1, ...
   495  		// In little-endian, that's c0 00, c1 00, ... None of those are valid
   496  		// UTF-8 sequences, and they are otherwise as far away from
   497  		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
   498  		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
   499  		// on OS X during thread allocations.  0x00c0 causes conflicts with
   500  		// AddressSanitizer which reserves all memory up to 0x0100.
   501  		// These choices reduce the odds of a conservative garbage collector
   502  		// not collecting memory because some non-pointer block of memory
   503  		// had a bit pattern that matched a memory address.
   504  		//
   505  		// However, on arm64, we ignore all this advice above and slam the
   506  		// allocation at 0x40 << 32 because when using 4k pages with 3-level
   507  		// translation buffers, the user address space is limited to 39 bits.
   508  		// On ios/arm64, the address space is even smaller.
   509  		//
   510  		// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
   511  		// processes.
   512  		//
   513  		// Space mapped for user arenas comes immediately after the range
   514  		// originally reserved for the regular heap when race mode is not
   515  		// enabled because user arena chunks can never be used for regular heap
   516  		// allocations and we want to avoid fragmenting the address space.
   517  		//
   518  		// In race mode we have no choice but to just use the same hints because
   519  		// the race detector requires that the heap be mapped contiguously.
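        		//
        		// As a concrete illustration, the default case below produces hints
        		// 0x00c000000000, 0x01c000000000, 0x02c000000000, and so on. Since the
        		// loop counts down and pushes each hint onto the front of its list, the
        		// first hint tried for the regular heap is 0x00c000000000, which is why
        		// Go heap addresses commonly begin with 0x00c0.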
   520  		for i := 0x7f; i >= 0; i-- {
   521  			var p uintptr
   522  			switch {
   523  			case raceenabled:
   524  				// The TSAN runtime requires the heap
   525  				// to be in the range [0x00c000000000,
   526  				// 0x00e000000000).
   527  				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
   528  				if p >= uintptrMask&0x00e000000000 {
   529  					continue
   530  				}
   531  			case GOARCH == "arm64" && GOOS == "ios":
   532  				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
   533  			case GOARCH == "arm64":
   534  				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
   535  			case GOOS == "aix":
   536  				if i == 0 {
   537  					// We don't use addresses directly after 0x0A00000000000000
   538  					// to avoid collisions with other mmaps done by non-Go programs.
   539  					continue
   540  				}
   541  				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
   542  			default:
   543  				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
   544  			}
   545  			// Switch to generating hints for user arenas if we've gone
   546  			// through about half the hints. In race mode, take only about
   547  			// a quarter; we don't have very much space to work with.
   548  			hintList := &mheap_.arenaHints
   549  			if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) {
   550  				hintList = &mheap_.userArena.arenaHints
   551  			}
   552  			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   553  			hint.addr = p
   554  			hint.next, *hintList = *hintList, hint
   555  		}
   556  	} else {
   557  		// On a 32-bit machine, we're much more concerned
   558  		// about keeping the usable heap contiguous.
   559  		// Hence:
   560  		//
   561  		// 1. We reserve space for all heapArenas up front so
   562  		// they don't get interleaved with the heap. They're
   563  		// ~258MB, so this isn't too bad. (We could reserve a
   564  		// smaller amount of space up front if this is a
   565  		// problem.)
   566  		//
   567  		// 2. We hint the heap to start right above the end of
   568  		// the binary so we have the best chance of keeping it
   569  		// contiguous.
   570  		//
   571  		// 3. We try to stake out a reasonably large initial
   572  		// heap reservation.
   573  
   574  		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
   575  		meta := uintptr(sysReserve(nil, arenaMetaSize, "heap reservation"))
   576  		if meta != 0 {
   577  			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
   578  		}
   579  
   580  		// We want to start the arena low, but if we're linked
   581  		// against C code, it's possible global constructors
   582  		// have called malloc and adjusted the process' brk.
   583  		// Query the brk so we can avoid trying to map the
   584  		// region over it (which will cause the kernel to put
   585  		// the region somewhere else, likely at a high
   586  		// address).
   587  		procBrk := sbrk0()
   588  
   589  		// If we ask for the end of the data segment but the
   590  		// operating system requires a little more space
   591  		// before we can start allocating, it will give out a
   592  		// slightly higher pointer. Except QEMU, which is
   593  		// buggy, as usual: it won't adjust the pointer
   594  		// upward. So adjust it upward a little bit ourselves:
   595  		// 1/4 MB to get away from the running binary image.
   596  		p := firstmoduledata.end
   597  		if p < procBrk {
   598  			p = procBrk
   599  		}
   600  		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
   601  			p = mheap_.heapArenaAlloc.end
   602  		}
   603  		p = alignUp(p+(256<<10), heapArenaBytes)
   604  		// Because we're worried about fragmentation on
   605  		// 32-bit, we try to make a large initial reservation.
   606  		arenaSizes := []uintptr{
   607  			512 << 20,
   608  			256 << 20,
   609  			128 << 20,
   610  		}
   611  		for _, arenaSize := range arenaSizes {
   612  			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes, "heap reservation")
   613  			if a != nil {
   614  				mheap_.arena.init(uintptr(a), size, false)
   615  				p = mheap_.arena.end // For hint below
   616  				break
   617  			}
   618  		}
   619  		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   620  		hint.addr = p
   621  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   622  
   623  		// Place the hint for user arenas just after the large reservation.
   624  		//
   625  		// While this potentially competes with the hint above, in practice we probably
   626  		// aren't going to be getting this far anyway on 32-bit platforms.
   627  		userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   628  		userArenaHint.addr = p
   629  		userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint
   630  	}
   631  	// Initialize the memory limit here because the allocator is going to look at it
   632  	// but we haven't called gcinit yet and we're definitely going to allocate memory before then.
   633  	gcController.memoryLimit.Store(math.MaxInt64)
   634  }
   635  
   636  // sysAlloc allocates heap arena space for at least n bytes. The
   637  // returned pointer is always heapArenaBytes-aligned and backed by
   638  // h.arenas metadata. The returned size is always a multiple of
   639  // heapArenaBytes. sysAlloc returns nil on failure.
   640  // There is no corresponding free function.
   641  //
   642  // hintList is a list of hint addresses for where to allocate new
   643  // heap arenas. It must be non-nil.
   644  //
   645  // sysAlloc returns a memory region in the Reserved state. This region must
   646  // be transitioned to Prepared and then Ready before use.
   647  //
   648  // arenaList is the list the arena should be added to.
   649  //
   650  // h must be locked.
   651  func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx) (v unsafe.Pointer, size uintptr) {
   652  	assertLockHeld(&h.lock)
   653  
   654  	n = alignUp(n, heapArenaBytes)
   655  
   656  	if hintList == &h.arenaHints {
   657  		// First, try the arena pre-reservation.
   658  		// Newly-used mappings are considered released.
   659  		//
   660  		// Only do this if we're using the regular heap arena hints.
   661  		// This behavior is only for the heap.
   662  		v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased, "heap")
   663  		if v != nil {
   664  			size = n
   665  			goto mapped
   666  		}
   667  	}
   668  
   669  	// Try to grow the heap at a hint address.
   670  	for *hintList != nil {
   671  		hint := *hintList
   672  		p := hint.addr
   673  		if hint.down {
   674  			p -= n
   675  		}
   676  		if p+n < p {
   677  			// We can't use this, so don't ask.
   678  			v = nil
   679  		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
   680  			// Outside addressable heap. Can't use.
   681  			v = nil
   682  		} else {
   683  			v = sysReserve(unsafe.Pointer(p), n, "heap reservation")
   684  		}
   685  		if p == uintptr(v) {
   686  			// Success. Update the hint.
   687  			if !hint.down {
   688  				p += n
   689  			}
   690  			hint.addr = p
   691  			size = n
   692  			break
   693  		}
   694  		// Failed. Discard this hint and try the next.
   695  		//
   696  		// TODO: This would be cleaner if sysReserve could be
   697  		// told to only return the requested address. In
   698  		// particular, this is already how Windows behaves, so
   699  		// it would simplify things there.
   700  		if v != nil {
   701  			sysFreeOS(v, n)
   702  		}
   703  		*hintList = hint.next
   704  		h.arenaHintAlloc.free(unsafe.Pointer(hint))
   705  	}
   706  
   707  	if size == 0 {
   708  		if raceenabled {
   709  			// The race detector assumes the heap lives in
   710  			// [0x00c000000000, 0x00e000000000), but we
   711  			// just ran out of hints in this region. Give
   712  			// a nice failure.
   713  			throw("too many address space collisions for -race mode")
   714  		}
   715  
   716  		// All of the hints failed, so we'll take any
   717  		// (sufficiently aligned) address the kernel will give
   718  		// us.
   719  		v, size = sysReserveAligned(nil, n, heapArenaBytes, "heap")
   720  		if v == nil {
   721  			return nil, 0
   722  		}
   723  
   724  		// Create new hints for extending this region.
   725  		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
   726  		hint.addr, hint.down = uintptr(v), true
   727  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   728  		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
   729  		hint.addr = uintptr(v) + size
   730  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   731  	}
   732  
   733  	// Check for bad pointers or pointers we can't use.
   734  	{
   735  		var bad string
   736  		p := uintptr(v)
   737  		if p+size < p {
   738  			bad = "region exceeds uintptr range"
   739  		} else if arenaIndex(p) >= 1<<arenaBits {
   740  			bad = "base outside usable address space"
   741  		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
   742  			bad = "end outside usable address space"
   743  		}
   744  		if bad != "" {
   745  			// This should be impossible on most architectures,
   746  			// but it would be really confusing to debug.
   747  			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
   748  			throw("memory reservation exceeds address space limit")
   749  		}
   750  	}
   751  
   752  	if uintptr(v)&(heapArenaBytes-1) != 0 {
   753  		throw("misrounded allocation in sysAlloc")
   754  	}
   755  
   756  mapped:
   757  	if valgrindenabled {
   758  		valgrindCreateMempool(v)
   759  		valgrindMakeMemNoAccess(v, size)
   760  	}
   761  
   762  	// Create arena metadata.
   763  	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
   764  		l2 := h.arenas[ri.l1()]
   765  		if l2 == nil {
   766  			// Allocate an L2 arena map.
   767  			//
   768  			// Use sysAllocOS instead of sysAlloc or persistentalloc because there's no
   769  			// statistic we can comfortably account for this space in. With this structure,
   770  			// we rely on demand paging to avoid large overheads, but tracking which memory
   771  			// is paged in is too expensive. Trying to account for the whole region means
   772  			// that it will appear like an enormous memory overhead in statistics, even though
   773  			// it is not.
   774  			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2), "heap index"))
   775  			if l2 == nil {
   776  				throw("out of memory allocating heap arena map")
   777  			}
   778  			if h.arenasHugePages {
   779  				sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
   780  			} else {
   781  				sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
   782  			}
   783  			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
   784  		}
   785  
   786  		if l2[ri.l2()] != nil {
   787  			throw("arena already initialized")
   788  		}
   789  		var r *heapArena
   790  		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys, "heap metadata"))
   791  		if r == nil {
   792  			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
   793  			if r == nil {
   794  				throw("out of memory allocating heap arena metadata")
   795  			}
   796  		}
   797  
   798  		// Register the arena in allArenas if requested.
   799  		if len((*arenaList)) == cap((*arenaList)) {
   800  			size := 2 * uintptr(cap((*arenaList))) * goarch.PtrSize
   801  			if size == 0 {
   802  				size = physPageSize
   803  			}
   804  			newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
   805  			if newArray == nil {
   806  				throw("out of memory allocating allArenas")
   807  			}
   808  			oldSlice := (*arenaList)
   809  			*(*notInHeapSlice)(unsafe.Pointer(&(*arenaList))) = notInHeapSlice{newArray, len((*arenaList)), int(size / goarch.PtrSize)}
   810  			copy((*arenaList), oldSlice)
   811  			// Do not free the old backing array because
   812  			// there may be concurrent readers. Since we
   813  			// double the array each time, this can lead
   814  			// to at most 2x waste.
   815  		}
   816  		(*arenaList) = (*arenaList)[:len((*arenaList))+1]
   817  		(*arenaList)[len((*arenaList))-1] = ri
   818  
   819  		// Store atomically just in case an object from the
   820  		// new heap arena becomes visible before the heap lock
   821  		// is released (which shouldn't happen, but there's
   822  		// little downside to this).
   823  		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
   824  	}
   825  
   826  	// Tell the race detector about the new heap memory.
   827  	if raceenabled {
   828  		racemapshadow(v, size)
   829  	}
   830  
   831  	return
   832  }
   833  
   834  // sysReserveAligned is like sysReserve, but the returned pointer is
   835  // aligned to align bytes. It may reserve either n or n+align bytes,
   836  // so it returns the size that was reserved.
   837  func sysReserveAligned(v unsafe.Pointer, size, align uintptr, vmaName string) (unsafe.Pointer, uintptr) {
   838  	if isSbrkPlatform {
   839  		if v != nil {
   840  			throw("unexpected heap arena hint on sbrk platform")
   841  		}
   842  		return sysReserveAlignedSbrk(size, align)
   843  	}
   844  	// Since the alignment is rather large in uses of this
   845  	// function, we're not likely to get it by chance, so we ask
   846  	// for a larger region and remove the parts we don't need.
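        	// For example (illustrative numbers): with align = 64MB, a reservation
        	// of size+align that comes back at an unaligned p is trimmed in the
        	// default case below to [alignUp(p, align), alignUp(p, align)+size),
        	// and the leading and trailing slack is returned with sysFreeOS.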
   847  	retries := 0
   848  retry:
   849  	p := uintptr(sysReserve(v, size+align, vmaName))
   850  	switch {
   851  	case p == 0:
   852  		return nil, 0
   853  	case p&(align-1) == 0:
   854  		return unsafe.Pointer(p), size + align
   855  	case GOOS == "windows":
   856  		// On Windows we can't release pieces of a
   857  		// reservation, so we release the whole thing and
   858  		// re-reserve the aligned sub-region. This may race,
   859  		// so we may have to try again.
   860  		sysFreeOS(unsafe.Pointer(p), size+align)
   861  		p = alignUp(p, align)
   862  		p2 := sysReserve(unsafe.Pointer(p), size, vmaName)
   863  		if p != uintptr(p2) {
   864  			// Must have raced. Try again.
   865  			sysFreeOS(p2, size)
   866  			if retries++; retries == 100 {
   867  				throw("failed to allocate aligned heap memory; too many retries")
   868  			}
   869  			goto retry
   870  		}
   871  		// Success.
   872  		return p2, size
   873  	default:
   874  		// Trim off the unaligned parts.
   875  		pAligned := alignUp(p, align)
   876  		sysFreeOS(unsafe.Pointer(p), pAligned-p)
   877  		end := pAligned + size
   878  		endLen := (p + size + align) - end
   879  		if endLen > 0 {
   880  			sysFreeOS(unsafe.Pointer(end), endLen)
   881  		}
   882  		return unsafe.Pointer(pAligned), size
   883  	}
   884  }
   885  
   886  // enableMetadataHugePages enables huge pages for various sources of heap metadata.
   887  //
   888  // A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
   889  // time, but may take time proportional to the size of the mapped heap beyond that.
   890  //
   891  // This function is idempotent.
   892  //
   893  // The heap lock must not be held over this operation, since it will briefly acquire
   894  // the heap lock.
   895  //
   896  // Must be called on the system stack because it acquires the heap lock.
   897  //
   898  //go:systemstack
   899  func (h *mheap) enableMetadataHugePages() {
   900  	// Enable huge pages for page structure.
   901  	h.pages.enableChunkHugePages()
   902  
   903  	// Grab the lock and set arenasHugePages if it's not.
   904  	//
   905  	// Once arenasHugePages is set, all new L2 entries will be eligible for
   906  	// huge pages. We'll set all the old entries after we release the lock.
   907  	lock(&h.lock)
   908  	if h.arenasHugePages {
   909  		unlock(&h.lock)
   910  		return
   911  	}
   912  	h.arenasHugePages = true
   913  	unlock(&h.lock)
   914  
   915  	// N.B. The arenas L1 map is quite small on all platforms, so it's fine to
   916  	// just iterate over the whole thing.
   917  	for i := range h.arenas {
   918  		l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i])))
   919  		if l2 == nil {
   920  			continue
   921  		}
   922  		sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
   923  	}
   924  }
   925  
   926  // base address for all 0-byte allocations
   927  var zerobase uintptr
   928  
   929  // nextFreeFast returns the next free object if one is quickly available.
   930  // Otherwise it returns 0.
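        // (s.allocCache caches a 64-bit window of the span's allocation bitmap
        // starting at s.freeindex, stored inverted so that set bits mark free
        // slots and a trailing-zeros count finds the next one.)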
   931  func nextFreeFast(s *mspan) gclinkptr {
   932  	theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache?
   933  	if theBit < 64 {
   934  		result := s.freeindex + uint16(theBit)
   935  		if result < s.nelems {
   936  			freeidx := result + 1
   937  			if freeidx%64 == 0 && freeidx != s.nelems {
   938  				return 0
   939  			}
   940  			s.allocCache >>= uint(theBit + 1)
   941  			s.freeindex = freeidx
   942  			s.allocCount++
   943  			return gclinkptr(uintptr(result)*s.elemsize + s.base())
   944  		}
   945  	}
   946  	return 0
   947  }
   948  
   949  // nextFree returns the next free object from the cached span if one is available.
   950  // Otherwise it refills the cache with a span with an available object and
   951  // returns that object along with a flag indicating that this was a
   952  // heavyweight allocation. If it is a heavyweight allocation, the caller must
   953  // determine whether a new GC cycle needs to be started or, if the GC is active,
   954  // whether this goroutine needs to assist the GC.
   955  //
   956  // Must run in a non-preemptible context since otherwise the owner of
   957  // c could change.
   958  func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger bool) {
   959  	s = c.alloc[spc]
   960  	checkGCTrigger = false
   961  	freeIndex := s.nextFreeIndex()
   962  	if freeIndex == s.nelems {
   963  		// The span is full.
   964  		if s.allocCount != s.nelems {
   965  			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   966  			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
   967  		}
   968  		c.refill(spc)
   969  		checkGCTrigger = true
   970  		s = c.alloc[spc]
   971  
   972  		freeIndex = s.nextFreeIndex()
   973  	}
   974  
   975  	if freeIndex >= s.nelems {
   976  		throw("freeIndex is not valid")
   977  	}
   978  
   979  	v = gclinkptr(uintptr(freeIndex)*s.elemsize + s.base())
   980  	s.allocCount++
   981  	if s.allocCount > s.nelems {
   982  		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   983  		throw("s.allocCount > s.nelems")
   984  	}
   985  	return
   986  }
   987  
   988  // doubleCheckMalloc enables a bunch of extra checks to malloc to double-check
   989  // that various invariants are upheld.
   990  //
   991  // We might consider turning these on by default; many of them previously were.
   992  // They account for a few % of mallocgc's cost though, which does matter somewhat
   993  // at scale.
   994  const doubleCheckMalloc = false
   995  
   996  // Allocate an object of size bytes.
   997  // Small objects are allocated from the per-P cache's free lists.
   998  // Large objects (> 32 kB) are allocated straight from the heap.
   999  //
  1000  // mallocgc should be an internal detail,
  1001  // but widely used packages access it using linkname.
  1002  // Notable members of the hall of shame include:
  1003  //   - github.com/bytedance/gopkg
  1004  //   - github.com/bytedance/sonic
  1005  //   - github.com/cloudwego/frugal
  1006  //   - github.com/cockroachdb/cockroach
  1007  //   - github.com/cockroachdb/pebble
  1008  //   - github.com/ugorji/go/codec
  1009  //
  1010  // Do not remove or change the type signature.
  1011  // See go.dev/issue/67401.
  1012  //
  1013  //go:linkname mallocgc
  1014  func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  1015  	if doubleCheckMalloc {
  1016  		if gcphase == _GCmarktermination {
  1017  			throw("mallocgc called with gcphase == _GCmarktermination")
  1018  		}
  1019  	}
  1020  
  1021  	// Short-circuit zero-sized allocation requests.
  1022  	if size == 0 {
  1023  		return unsafe.Pointer(&zerobase)
  1024  	}
  1025  
  1026  	// It's possible for any malloc to trigger sweeping, which may in
  1027  	// turn queue finalizers. Record this dynamic lock edge.
  1028  	// N.B. Compiled away if lockrank experiment is not enabled.
  1029  	lockRankMayQueueFinalizer()
  1030  
  1031  	// Pre-malloc debug hooks.
  1032  	if debug.malloc {
  1033  		if x := preMallocgcDebug(size, typ); x != nil {
  1034  			return x
  1035  		}
  1036  	}
  1037  
  1038  	// For ASAN, we allocate extra memory, called the "redzone," around each allocation.
  1039  	// These "redzones" are marked as unaddressable.
  1040  	var asanRZ uintptr
  1041  	if asanenabled {
  1042  		asanRZ = redZoneSize(size)
  1043  		size += asanRZ
  1044  	}
  1045  
  1046  	// Assist the GC if needed.
  1047  	if gcBlackenEnabled != 0 {
  1048  		deductAssistCredit(size)
  1049  	}
  1050  
  1051  	// Actually do the allocation.
  1052  	var x unsafe.Pointer
  1053  	var elemsize uintptr
  1054  	if size <= maxSmallSize-gc.MallocHeaderSize {
  1055  		if typ == nil || !typ.Pointers() {
  1056  			if size < maxTinySize {
  1057  				x, elemsize = mallocgcTiny(size, typ)
  1058  			} else {
  1059  				x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
  1060  			}
  1061  		} else {
  1062  			if !needzero {
  1063  				throw("objects with pointers must be zeroed")
  1064  			}
  1065  			if heapBitsInSpan(size) {
  1066  				x, elemsize = mallocgcSmallScanNoHeader(size, typ)
  1067  			} else {
  1068  				x, elemsize = mallocgcSmallScanHeader(size, typ)
  1069  			}
  1070  		}
  1071  	} else {
  1072  		x, elemsize = mallocgcLarge(size, typ, needzero)
  1073  	}
  1074  
  1075  	// Notify sanitizers, if enabled.
  1076  	if raceenabled {
  1077  		racemalloc(x, size-asanRZ)
  1078  	}
  1079  	if msanenabled {
  1080  		msanmalloc(x, size-asanRZ)
  1081  	}
  1082  	if asanenabled {
  1083  		// Poison the space between the end of the requested size of x
  1084  		// and the end of the slot. Unpoison the requested allocation.
  1085  		frag := elemsize - size
  1086  		if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) && size <= maxSmallSize-gc.MallocHeaderSize {
  1087  			frag -= gc.MallocHeaderSize
  1088  		}
  1089  		asanpoison(unsafe.Add(x, size-asanRZ), asanRZ)
  1090  		asanunpoison(x, size-asanRZ)
  1091  	}
  1092  	if valgrindenabled {
  1093  		valgrindMalloc(x, size-asanRZ)
  1094  	}
  1095  
  1096  	// Adjust our GC assist debt to account for internal fragmentation.
  1097  	if gcBlackenEnabled != 0 && elemsize != 0 {
  1098  		if assistG := getg().m.curg; assistG != nil {
  1099  			assistG.gcAssistBytes -= int64(elemsize - size)
  1100  		}
  1101  	}
  1102  
  1103  	// Post-malloc debug hooks.
  1104  	if debug.malloc {
  1105  		postMallocgcDebug(x, elemsize, typ)
  1106  	}
  1107  	return x
  1108  }
  1109  
  1110  func mallocgcTiny(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
  1111  	// Set mp.mallocing to keep from being preempted by GC.
  1112  	mp := acquirem()
  1113  	if doubleCheckMalloc {
  1114  		if mp.mallocing != 0 {
  1115  			throw("malloc deadlock")
  1116  		}
  1117  		if mp.gsignal == getg() {
  1118  			throw("malloc during signal")
  1119  		}
  1120  		if typ != nil && typ.Pointers() {
  1121  			throw("expected noscan for tiny alloc")
  1122  		}
  1123  	}
  1124  	mp.mallocing = 1
  1125  
  1126  	// Tiny allocator.
  1127  	//
  1128  	// Tiny allocator combines several tiny allocation requests
  1129  	// into a single memory block. The resulting memory block
  1130  	// is freed when all subobjects are unreachable. The subobjects
  1131  	// must be noscan (don't have pointers), this ensures that
  1132  	// the amount of potentially wasted memory is bounded.
  1133  	//
  1134  	// The size of the memory block used for combining (maxTinySize) is tunable.
  1135  	// The current setting is 16 bytes, which gives at most 2x worst-case memory
  1136  	// wastage (when all but one subobject are unreachable).
  1137  	// 8 bytes would result in no wastage at all, but provides fewer
  1138  	// opportunities for combining.
  1139  	// 32 bytes provides more opportunities for combining,
  1140  	// but can lead to 4x worst-case wastage.
  1141  	// The best-case saving is 8x regardless of block size.
  1142  	//
  1143  	// Objects obtained from the tiny allocator must not be freed explicitly.
  1144  	// So when an object will be freed explicitly, we ensure that
  1145  	// its size >= maxTinySize.
  1146  	//
  1147  	// SetFinalizer has a special case for objects potentially coming
  1148  	// from the tiny allocator; in such a case it allows setting finalizers
  1149  	// for an inner byte of a memory block.
  1150  	//
  1151  	// The main targets of the tiny allocator are small strings and
  1152  	// standalone escaping variables. On a json benchmark
  1153  	// the allocator reduces the number of allocations by ~12% and
  1154  	// reduces heap size by ~20%.
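        	//
        	// As an illustrative example (not code from this file), two consecutive
        	// escaping 8-byte noscan allocations, e.g.
        	//
        	//	p := new([8]byte) // may start a fresh 16-byte tiny block
        	//	q := new([8]byte) // then fits in the second half of that block
        	//
        	// typically end up sharing one 16-byte block, which is freed only once
        	// both become unreachable.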
  1155  	c := getMCache(mp)
  1156  	off := c.tinyoffset
  1157  	// Align tiny pointer for required (conservative) alignment.
  1158  	if size&7 == 0 {
  1159  		off = alignUp(off, 8)
  1160  	} else if goarch.PtrSize == 4 && size == 12 {
  1161  		// Conservatively align 12-byte objects to 8 bytes on 32-bit
  1162  		// systems so that objects whose first field is a 64-bit
  1163  		// value are aligned to 8 bytes and do not cause a fault on
  1164  		// atomic access. See issue 37262.
  1165  		// TODO(mknyszek): Remove this workaround if/when issue 36606
  1166  		// is resolved.
  1167  		off = alignUp(off, 8)
  1168  	} else if size&3 == 0 {
  1169  		off = alignUp(off, 4)
  1170  	} else if size&1 == 0 {
  1171  		off = alignUp(off, 2)
  1172  	}
  1173  	if off+size <= maxTinySize && c.tiny != 0 {
  1174  		// The object fits into existing tiny block.
  1175  		x := unsafe.Pointer(c.tiny + off)
  1176  		c.tinyoffset = off + size
  1177  		c.tinyAllocs++
  1178  		mp.mallocing = 0
  1179  		releasem(mp)
  1180  		return x, 0
  1181  	}
  1182  	// Allocate a new maxTinySize block.
  1183  	checkGCTrigger := false
  1184  	span := c.alloc[tinySpanClass]
  1185  	v := nextFreeFast(span)
  1186  	if v == 0 {
  1187  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  1188  	}
  1189  	x := unsafe.Pointer(v)
  1190  	(*[2]uint64)(x)[0] = 0 // Always zero
  1191  	(*[2]uint64)(x)[1] = 0
  1192  	// See if we need to replace the existing tiny block with the new one
  1193  	// based on amount of remaining free space.
  1194  	if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
  1195  		// Note: disabled when race detector is on, see comment near end of this function.
  1196  		c.tiny = uintptr(x)
  1197  		c.tinyoffset = size
  1198  	}
  1199  
  1200  	// Ensure that the stores above that initialize x to
  1201  	// type-safe memory and set the heap bits occur before
  1202  	// the caller can make x observable to the garbage
  1203  	// collector. Otherwise, on weakly ordered machines,
  1204  	// the garbage collector could follow a pointer to x,
  1205  	// but see uninitialized memory or stale heap bits.
  1206  	publicationBarrier()
  1207  
  1208  	if writeBarrier.enabled {
  1209  		// Allocate black during GC.
  1210  		// All slots hold nil so no scanning is needed.
  1211  		// This may be racing with GC so do it atomically if there can be
  1212  		// a race marking the bit.
  1213  		gcmarknewobject(span, uintptr(x))
  1214  	} else {
  1215  		// Track the last free index before the mark phase. This field
  1216  		// is only used by the garbage collector. During the mark phase
  1217  		// this is used by the conservative scanner to filter out objects
  1218  		// that are both free and recently-allocated. It's safe to do that
  1219  		// because we allocate-black if the GC is enabled. The conservative
  1220  		// scanner produces pointers out of thin air, so without additional
  1221  		// synchronization it might otherwise observe a partially-initialized
  1222  		// object, which could crash the program.
  1223  		span.freeIndexForScan = span.freeindex
  1224  	}
  1225  
  1226  	// Note cache c only valid while m acquired; see #47302
  1227  	//
  1228  	// N.B. Use the full size because that matches how the GC
  1229  	// will update the mem profile on the "free" side.
  1230  	//
  1231  	// TODO(mknyszek): We should really count the header as part
  1232  	// of gc_sys or something. The code below just pretends it is
  1233  	// internal fragmentation and matches the GC's accounting by
  1234  	// using the whole allocation slot.
  1235  	c.nextSample -= int64(span.elemsize)
  1236  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  1237  		profilealloc(mp, x, span.elemsize)
  1238  	}
  1239  	mp.mallocing = 0
  1240  	releasem(mp)
  1241  
  1242  	if checkGCTrigger {
  1243  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1244  			gcStart(t)
  1245  		}
  1246  	}
  1247  
  1248  	if raceenabled {
  1249  		// Pad tinysize allocations so they are aligned with the end
  1250  		// of the tinyalloc region. This ensures that any arithmetic
  1251  		// that goes off the top end of the object will be detectable
  1252  		// by checkptr (issue 38872).
  1253  		// Note that we disable tinyalloc when raceenabled for this to work.
  1254  		// TODO: This padding is only performed when the race detector
  1255  		// is enabled. It would be nice to enable it if any package
  1256  		// was compiled with checkptr, but there's no easy way to
  1257  		// detect that (especially at compile time).
  1258  		// TODO: enable this padding for all allocations, not just
  1259  		// tinyalloc ones. It's tricky because of pointer maps.
  1260  		// Maybe just all noscan objects?
  1261  		x = add(x, span.elemsize-size)
  1262  	}
  1263  	return x, span.elemsize
  1264  }
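
        // A rough illustration of the tiny-block replacement above: if the old
        // tiny block is nearly full (say c.tinyoffset == 14, leaving 2 bytes)
        // and a 4-byte request was just served from a fresh slot returned by
        // nextFree, then size (4) < c.tinyoffset (14), so the fresh slot is
        // adopted as the new tiny block and later sub-16-byte allocations are
        // carved from it. This is a sketch of the intent, assuming the usual
        // 16-byte tiny block size.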
  1265  
  1266  func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
  1267  	// Set mp.mallocing to keep from being preempted by GC.
  1268  	mp := acquirem()
  1269  	if doubleCheckMalloc {
  1270  		if mp.mallocing != 0 {
  1271  			throw("malloc deadlock")
  1272  		}
  1273  		if mp.gsignal == getg() {
  1274  			throw("malloc during signal")
  1275  		}
  1276  		if typ != nil && typ.Pointers() {
  1277  			throw("expected noscan type for noscan alloc")
  1278  		}
  1279  	}
  1280  	mp.mallocing = 1
  1281  
  1282  	checkGCTrigger := false
  1283  	c := getMCache(mp)
  1284  	var sizeclass uint8
  1285  	if size <= gc.SmallSizeMax-8 {
  1286  		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
  1287  	} else {
  1288  		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
  1289  	}
  1290  	size = uintptr(gc.SizeClassToSize[sizeclass])
  1291  	spc := makeSpanClass(sizeclass, true)
  1292  	span := c.alloc[spc]
  1293  	v := nextFreeFast(span)
  1294  	if v == 0 {
  1295  		v, span, checkGCTrigger = c.nextFree(spc)
  1296  	}
  1297  	x := unsafe.Pointer(v)
  1298  	if needzero && span.needzero != 0 {
  1299  		memclrNoHeapPointers(x, size)
  1300  	}
  1301  
  1302  	// Ensure that the stores above that initialize x to
  1303  	// type-safe memory and set the heap bits occur before
  1304  	// the caller can make x observable to the garbage
  1305  	// collector. Otherwise, on weakly ordered machines,
  1306  	// the garbage collector could follow a pointer to x,
  1307  	// but see uninitialized memory or stale heap bits.
  1308  	publicationBarrier()
  1309  
  1310  	if writeBarrier.enabled {
  1311  		// Allocate black during GC.
  1312  		// All slots hold nil so no scanning is needed.
  1313  		// This may be racing with GC so do it atomically if there can be
  1314  		// a race marking the bit.
  1315  		gcmarknewobject(span, uintptr(x))
  1316  	} else {
  1317  		// Track the last free index before the mark phase. This field
  1318  		// is only used by the garbage collector. During the mark phase
  1319  		// this is used by the conservative scanner to filter out objects
  1320  		// that are both free and recently-allocated. It's safe to do that
  1321  		// because we allocate-black if the GC is enabled. The conservative
  1322  		// scanner produces pointers out of thin air, so without additional
  1323  		// synchronization it might otherwise observe a partially-initialized
  1324  		// object, which could crash the program.
  1325  		span.freeIndexForScan = span.freeindex
  1326  	}
  1327  
  1328  	// Note cache c only valid while m acquired; see #47302
  1329  	//
  1330  	// N.B. Use the full size because that matches how the GC
  1331  	// will update the mem profile on the "free" side.
  1332  	//
  1333  	// TODO(mknyszek): We should really count the header as part
  1334  	// of gc_sys or something. The code below just pretends it is
  1335  	// internal fragmentation and matches the GC's accounting by
  1336  	// using the whole allocation slot.
  1337  	c.nextSample -= int64(size)
  1338  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  1339  		profilealloc(mp, x, size)
  1340  	}
  1341  	mp.mallocing = 0
  1342  	releasem(mp)
  1343  
  1344  	if checkGCTrigger {
  1345  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1346  			gcStart(t)
  1347  		}
  1348  	}
  1349  	return x, size
  1350  }
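
        // As a rough example of the rounding in mallocgcSmallNoscan (assuming
        // the current size class tables): a 40-byte noscan request maps via
        // gc.SizeToSizeClass8 to the 48-byte class, so both the slot and the
        // reported size are 48 bytes; the 8 extra bytes are internal
        // fragmentation, and the profiler is charged for the full slot to
        // match the GC's accounting on the free side.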
  1351  
  1352  func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
  1353  	// Set mp.mallocing to keep from being preempted by GC.
  1354  	mp := acquirem()
  1355  	if doubleCheckMalloc {
  1356  		if mp.mallocing != 0 {
  1357  			throw("malloc deadlock")
  1358  		}
  1359  		if mp.gsignal == getg() {
  1360  			throw("malloc during signal")
  1361  		}
  1362  		if typ == nil || !typ.Pointers() {
  1363  			throw("noscan allocated in scan-only path")
  1364  		}
  1365  		if !heapBitsInSpan(size) {
  1366  			throw("heap bits not in span for non-header-only path")
  1367  		}
  1368  	}
  1369  	mp.mallocing = 1
  1370  
  1371  	checkGCTrigger := false
  1372  	c := getMCache(mp)
  1373  	sizeclass := gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
  1374  	spc := makeSpanClass(sizeclass, false)
  1375  	span := c.alloc[spc]
  1376  	v := nextFreeFast(span)
  1377  	if v == 0 {
  1378  		v, span, checkGCTrigger = c.nextFree(spc)
  1379  	}
  1380  	x := unsafe.Pointer(v)
  1381  	if span.needzero != 0 {
  1382  		memclrNoHeapPointers(x, size)
  1383  	}
  1384  	if goarch.PtrSize == 8 && sizeclass == 1 {
  1385  		// initHeapBits already set the pointer bits for the 8-byte sizeclass
  1386  		// on 64-bit platforms.
  1387  		c.scanAlloc += 8
  1388  	} else {
  1389  		c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
  1390  	}
  1391  	size = uintptr(gc.SizeClassToSize[sizeclass])
  1392  
  1393  	// Ensure that the stores above that initialize x to
  1394  	// type-safe memory and set the heap bits occur before
  1395  	// the caller can make x observable to the garbage
  1396  	// collector. Otherwise, on weakly ordered machines,
  1397  	// the garbage collector could follow a pointer to x,
  1398  	// but see uninitialized memory or stale heap bits.
  1399  	publicationBarrier()
  1400  
  1401  	if writeBarrier.enabled {
  1402  		// Allocate black during GC.
  1403  		// All slots hold nil so no scanning is needed.
  1404  		// This may be racing with GC so do it atomically if there can be
  1405  		// a race marking the bit.
  1406  		gcmarknewobject(span, uintptr(x))
  1407  	} else {
  1408  		// Track the last free index before the mark phase. This field
  1409  		// is only used by the garbage collector. During the mark phase
  1410  		// this is used by the conservative scanner to filter out objects
  1411  		// that are both free and recently-allocated. It's safe to do that
  1412  		// because we allocate-black if the GC is enabled. The conservative
  1413  		// scanner produces pointers out of thin air, so without additional
  1414  		// synchronization it might otherwise observe a partially-initialized
  1415  		// object, which could crash the program.
  1416  		span.freeIndexForScan = span.freeindex
  1417  	}
  1418  
  1419  	// Note cache c only valid while m acquired; see #47302
  1420  	//
  1421  	// N.B. Use the full size because that matches how the GC
  1422  	// will update the mem profile on the "free" side.
  1423  	//
  1424  	// TODO(mknyszek): We should really count the header as part
  1425  	// of gc_sys or something. The code below just pretends it is
  1426  	// internal fragmentation and matches the GC's accounting by
  1427  	// using the whole allocation slot.
  1428  	c.nextSample -= int64(size)
  1429  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  1430  		profilealloc(mp, x, size)
  1431  	}
  1432  	mp.mallocing = 0
  1433  	releasem(mp)
  1434  
  1435  	if checkGCTrigger {
  1436  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1437  			gcStart(t)
  1438  		}
  1439  	}
  1440  	return x, size
  1441  }
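
        // In mallocgcSmallScanNoHeader above, objects are small enough that
        // heapBitsInSpan reports true (at most 512 bytes on 64-bit under the
        // current layout, an assumption of this note), so the pointer/scalar
        // bitmap lives in the span itself rather than in a per-object header;
        // heapSetTypeNoHeader writes those bits and its return value feeds
        // c.scanAlloc, the number of newly allocated bytes the GC must scan.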
  1442  
  1443  func mallocgcSmallScanHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
  1444  	// Set mp.mallocing to keep from being preempted by GC.
  1445  	mp := acquirem()
  1446  	if doubleCheckMalloc {
  1447  		if mp.mallocing != 0 {
  1448  			throw("malloc deadlock")
  1449  		}
  1450  		if mp.gsignal == getg() {
  1451  			throw("malloc during signal")
  1452  		}
  1453  		if typ == nil || !typ.Pointers() {
  1454  			throw("noscan allocated in scan-only path")
  1455  		}
  1456  		if heapBitsInSpan(size) {
  1457  			throw("heap bits in span for header-only path")
  1458  		}
  1459  	}
  1460  	mp.mallocing = 1
  1461  
  1462  	checkGCTrigger := false
  1463  	c := getMCache(mp)
  1464  	size += gc.MallocHeaderSize
  1465  	var sizeclass uint8
  1466  	if size <= gc.SmallSizeMax-8 {
  1467  		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
  1468  	} else {
  1469  		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
  1470  	}
  1471  	size = uintptr(gc.SizeClassToSize[sizeclass])
  1472  	spc := makeSpanClass(sizeclass, false)
  1473  	span := c.alloc[spc]
  1474  	v := nextFreeFast(span)
  1475  	if v == 0 {
  1476  		v, span, checkGCTrigger = c.nextFree(spc)
  1477  	}
  1478  	x := unsafe.Pointer(v)
  1479  	if span.needzero != 0 {
  1480  		memclrNoHeapPointers(x, size)
  1481  	}
  1482  	header := (**_type)(x)
  1483  	x = add(x, gc.MallocHeaderSize)
  1484  	c.scanAlloc += heapSetTypeSmallHeader(uintptr(x), size-gc.MallocHeaderSize, typ, header, span)
  1485  
  1486  	// Ensure that the stores above that initialize x to
  1487  	// type-safe memory and set the heap bits occur before
  1488  	// the caller can make x observable to the garbage
  1489  	// collector. Otherwise, on weakly ordered machines,
  1490  	// the garbage collector could follow a pointer to x,
  1491  	// but see uninitialized memory or stale heap bits.
  1492  	publicationBarrier()
  1493  
  1494  	if writeBarrier.enabled {
  1495  		// Allocate black during GC.
  1496  		// All slots hold nil so no scanning is needed.
  1497  		// This may be racing with GC so do it atomically if there can be
  1498  		// a race marking the bit.
  1499  		gcmarknewobject(span, uintptr(x))
  1500  	} else {
  1501  		// Track the last free index before the mark phase. This field
  1502  		// is only used by the garbage collector. During the mark phase
  1503  		// this is used by the conservative scanner to filter out objects
  1504  		// that are both free and recently-allocated. It's safe to do that
  1505  		// because we allocate-black if the GC is enabled. The conservative
  1506  		// scanner produces pointers out of thin air, so without additional
  1507  		// synchronization it might otherwise observe a partially-initialized
  1508  		// object, which could crash the program.
  1509  		span.freeIndexForScan = span.freeindex
  1510  	}
  1511  
  1512  	// Note cache c only valid while m acquired; see #47302
  1513  	//
  1514  	// N.B. Use the full size because that matches how the GC
  1515  	// will update the mem profile on the "free" side.
  1516  	//
  1517  	// TODO(mknyszek): We should really count the header as part
  1518  	// of gc_sys or something. The code below just pretends it is
  1519  	// internal fragmentation and matches the GC's accounting by
  1520  	// using the whole allocation slot.
  1521  	c.nextSample -= int64(size)
  1522  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  1523  		profilealloc(mp, x, size)
  1524  	}
  1525  	mp.mallocing = 0
  1526  	releasem(mp)
  1527  
  1528  	if checkGCTrigger {
  1529  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1530  			gcStart(t)
  1531  		}
  1532  	}
  1533  	return x, size
  1534  }
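
        // A sketch of the header path in mallocgcSmallScanHeader: for, say, a
        // 600-byte struct containing pointers, gc.MallocHeaderSize (8 bytes)
        // is added, the 608-byte total is rounded up to its size class, the
        // *_type pointer is stored in the first word of the slot, and the
        // caller receives the address just past that header. The 600-byte
        // figure is only an example chosen to be above the in-span heap bits
        // limit.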
  1535  
  1536  func mallocgcLarge(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
  1537  	// Set mp.mallocing to keep from being preempted by GC.
  1538  	mp := acquirem()
  1539  	if doubleCheckMalloc {
  1540  		if mp.mallocing != 0 {
  1541  			throw("malloc deadlock")
  1542  		}
  1543  		if mp.gsignal == getg() {
  1544  			throw("malloc during signal")
  1545  		}
  1546  	}
  1547  	mp.mallocing = 1
  1548  
  1549  	c := getMCache(mp)
  1550  	// For large allocations, keep track of zeroed state so that
  1551  	// bulk zeroing can happen later in a preemptible context.
  1552  	span := c.allocLarge(size, typ == nil || !typ.Pointers())
  1553  	span.freeindex = 1
  1554  	span.allocCount = 1
  1555  	span.largeType = nil // Tell the GC not to look at this yet.
  1556  	size = span.elemsize
  1557  	x := unsafe.Pointer(span.base())
  1558  
  1559  	// Ensure that the store above that sets largeType to
  1560  	// nil happens before the caller can make x observable
  1561  	// to the garbage collector.
  1562  	//
  1563  	// Otherwise, on weakly ordered machines, the garbage
  1564  	// collector could follow a pointer to x, but see a stale
  1565  	// largeType value.
  1566  	publicationBarrier()
  1567  
  1568  	if writeBarrier.enabled {
  1569  		// Allocate black during GC.
  1570  		// All slots hold nil so no scanning is needed.
  1571  		// This may be racing with GC so do it atomically if there can be
  1572  		// a race marking the bit.
  1573  		gcmarknewobject(span, uintptr(x))
  1574  	} else {
  1575  		// Track the last free index before the mark phase. This field
  1576  		// is only used by the garbage collector. During the mark phase
  1577  		// this is used by the conservative scanner to filter out objects
  1578  		// that are both free and recently-allocated. It's safe to do that
  1579  		// because we allocate-black if the GC is enabled. The conservative
  1580  		// scanner produces pointers out of thin air, so without additional
  1581  		// synchronization it might otherwise observe a partially-initialized
  1582  		// object, which could crash the program.
  1583  		span.freeIndexForScan = span.freeindex
  1584  	}
  1585  
  1586  	// Note cache c only valid while m acquired; see #47302
  1587  	//
  1588  	// N.B. Use the full size because that matches how the GC
  1589  	// will update the mem profile on the "free" side.
  1590  	//
  1591  	// TODO(mknyszek): We should really count the header as part
  1592  	// of gc_sys or something. The code below just pretends it is
  1593  	// internal fragmentation and matches the GC's accounting by
  1594  	// using the whole allocation slot.
  1595  	c.nextSample -= int64(size)
  1596  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  1597  		profilealloc(mp, x, size)
  1598  	}
  1599  	mp.mallocing = 0
  1600  	releasem(mp)
  1601  
  1602  	// Check to see if we need to trigger the GC.
  1603  	if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1604  		gcStart(t)
  1605  	}
  1606  
  1607  	// Objects can be zeroed late in a context where preemption can occur.
  1608  	//
  1609  	// x will keep the memory alive.
  1610  	if needzero && span.needzero != 0 {
  1611  		// N.B. size is span.elemsize here, i.e. the full allocation slot.
  1612  		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
  1613  	}
  1614  
  1615  	// Set the type and run the publication barrier while non-preemptible. We need to make
  1616  	// sure that between heapSetTypeLarge and publicationBarrier we cannot get preempted,
  1617  	// otherwise, on weakly ordered machines, the GC could observe largeType set while
  1618  	// the memory is still non-zeroed.
  1619  	//
  1620  	// The GC can also potentially observe non-zeroed memory if conservative scanning spuriously
  1621  	// observes a partially-allocated object, see the freeIndexForScan update above. This case is
  1622  	// handled by synchronization inside heapSetTypeLarge.
  1623  	mp = acquirem()
  1624  	if typ != nil && typ.Pointers() {
  1625  		// Finish storing the type information, now that we're certain the memory is zeroed.
  1626  		getMCache(mp).scanAlloc += heapSetTypeLarge(uintptr(x), size, typ, span)
  1627  	}
  1628  	// Publish the object again, now with zeroed memory and initialized type information.
  1629  	//
  1630  	// Even if we didn't update any type information, this is necessary to ensure that, for example,
  1631  	// x written to a global without any synchronization still results in other goroutines observing
  1632  	// zeroed memory.
  1633  	publicationBarrier()
  1634  	releasem(mp)
  1635  	return x, size
  1636  }
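
        // As an illustration of the large path: a 40 KiB allocation bypasses
        // the mcache free lists entirely; allocLarge returns a dedicated span
        // whose elemsize is the request rounded up to whole 8 KiB pages
        // (40 KiB is exactly 5 pages), zeroing is deferred until after the
        // span is published so it can happen preemptibly, and largeType is
        // only filled in once the memory is known to be zeroed.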
  1637  
  1638  func preMallocgcDebug(size uintptr, typ *_type) unsafe.Pointer {
  1639  	if debug.sbrk != 0 {
  1640  		align := uintptr(16)
  1641  		if typ != nil {
  1642  			// TODO(austin): This should be just
  1643  			//   align = uintptr(typ.align)
  1644  			// but that's only 4 on 32-bit platforms,
  1645  			// even if there's a uint64 field in typ (see #599).
  1646  			// This causes 64-bit atomic accesses to panic.
  1647  			// Hence, we use stricter alignment that matches
  1648  			// the normal allocator better.
  1649  			if size&7 == 0 {
  1650  				align = 8
  1651  			} else if size&3 == 0 {
  1652  				align = 4
  1653  			} else if size&1 == 0 {
  1654  				align = 2
  1655  			} else {
  1656  				align = 1
  1657  			}
  1658  		}
  1659  		return persistentalloc(size, align, &memstats.other_sys)
  1660  	}
  1661  	if inittrace.active && inittrace.id == getg().goid {
  1662  		// Init functions are executed sequentially in a single goroutine.
  1663  		inittrace.allocs += 1
  1664  	}
  1665  	return nil
  1666  }
  1667  
  1668  func postMallocgcDebug(x unsafe.Pointer, elemsize uintptr, typ *_type) {
  1669  	if inittrace.active && inittrace.id == getg().goid {
  1670  		// Init functions are executed sequentially in a single goroutine.
  1671  		inittrace.bytes += uint64(elemsize)
  1672  	}
  1673  
  1674  	if traceAllocFreeEnabled() {
  1675  		trace := traceAcquire()
  1676  		if trace.ok() {
  1677  			trace.HeapObjectAlloc(uintptr(x), typ)
  1678  			traceRelease(trace)
  1679  		}
  1680  	}
  1681  
  1682  	// N.B. elemsize == 0 indicates a tiny allocation, since no new slot was
  1683  	// allocated to fulfill this call to mallocgc. This means checkfinalizer
  1684  	// will only flag an error if there is actually any risk. If an allocation
  1685  	// has the tiny block to itself, it will not get flagged, because we won't
  1686  	// mark the block as a tiny block.
  1687  	if debug.checkfinalizers != 0 && elemsize == 0 {
  1688  		setTinyBlockContext(unsafe.Pointer(alignDown(uintptr(x), maxTinySize)))
  1689  	}
  1690  }
  1691  
  1692  // deductAssistCredit reduces the current G's assist credit
  1693  // by size bytes, and assists the GC if necessary.
  1694  //
  1695  // Caller must be preemptible.
  1696  //
  1697  // The credit is charged to the current user G (the m's curg, if any).
  1698  func deductAssistCredit(size uintptr) {
  1699  	// Charge the current user G for this allocation.
  1700  	assistG := getg()
  1701  	if assistG.m.curg != nil {
  1702  		assistG = assistG.m.curg
  1703  	}
  1704  	// Charge the allocation against the G. We'll account
  1705  	// for internal fragmentation at the end of mallocgc.
  1706  	assistG.gcAssistBytes -= int64(size)
  1707  
  1708  	if assistG.gcAssistBytes < 0 {
  1709  		// This G is in debt. Assist the GC to correct
  1710  		// this before allocating. This must happen
  1711  		// before disabling preemption.
  1712  		gcAssistAlloc(assistG)
  1713  	}
  1714  }
  1715  
  1716  // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
  1717  // on chunks of the buffer to be zeroed, with opportunities for preemption
  1718  // along the way.  memclrNoHeapPointers contains no safepoints and also
  1719  // cannot be preemptively scheduled, so this provides a still-efficient
  1720  // block clear that can also be preempted on a reasonable granularity.
  1721  //
  1722  // Use this with care; if the data being cleared is tagged to contain
  1723  // pointers, this allows the GC to run before it is all cleared.
  1724  func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
  1725  	v := uintptr(x)
  1726  	// The chunk size comes from benchmarking: 128k is too small, 512k is too large.
  1727  	const chunkBytes = 256 * 1024
  1728  	vsize := v + size
  1729  	for voff := v; voff < vsize; voff = voff + chunkBytes {
  1730  		if getg().preempt {
  1731  			// may hold locks, e.g., profiling
  1732  			goschedguarded()
  1733  		}
  1734  		// clear min(remaining, chunkBytes) bytes
  1735  		n := vsize - voff
  1736  		if n > chunkBytes {
  1737  			n = chunkBytes
  1738  		}
  1739  		memclrNoHeapPointers(unsafe.Pointer(voff), n)
  1740  	}
  1741  }
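
        // For example, clearing a 1 MiB large object runs as four 256 KiB
        // memclrNoHeapPointers calls, with a getg().preempt check before each
        // chunk, so a pending preemption waits for at most one chunk's worth
        // of clearing.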
  1742  
  1743  // newobject is the implementation of the new builtin.
  1744  // The compiler (both frontend and SSA backend) knows the signature
  1745  // of this function.
  1746  func newobject(typ *_type) unsafe.Pointer {
  1747  	return mallocgc(typ.Size_, typ, true)
  1748  }
  1749  
  1750  //go:linkname maps_newobject internal/runtime/maps.newobject
  1751  func maps_newobject(typ *_type) unsafe.Pointer {
  1752  	return newobject(typ)
  1753  }
  1754  
  1755  // reflect_unsafe_New is meant for package reflect,
  1756  // but widely used packages access it using linkname.
  1757  // Notable members of the hall of shame include:
  1758  //   - gitee.com/quant1x/gox
  1759  //   - github.com/goccy/json
  1760  //   - github.com/modern-go/reflect2
  1761  //   - github.com/v2pro/plz
  1762  //
  1763  // Do not remove or change the type signature.
  1764  // See go.dev/issue/67401.
  1765  //
  1766  //go:linkname reflect_unsafe_New reflect.unsafe_New
  1767  func reflect_unsafe_New(typ *_type) unsafe.Pointer {
  1768  	return mallocgc(typ.Size_, typ, true)
  1769  }
  1770  
  1771  //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
  1772  func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
  1773  	return mallocgc(typ.Size_, typ, true)
  1774  }
  1775  
  1776  // newarray allocates an array of n elements of type typ.
  1777  //
  1778  // newarray should be an internal detail,
  1779  // but widely used packages access it using linkname.
  1780  // Notable members of the hall of shame include:
  1781  //   - github.com/RomiChan/protobuf
  1782  //   - github.com/segmentio/encoding
  1783  //   - github.com/ugorji/go/codec
  1784  //
  1785  // Do not remove or change the type signature.
  1786  // See go.dev/issue/67401.
  1787  //
  1788  //go:linkname newarray
  1789  func newarray(typ *_type, n int) unsafe.Pointer {
  1790  	if n == 1 {
  1791  		return mallocgc(typ.Size_, typ, true)
  1792  	}
  1793  	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
  1794  	if overflow || mem > maxAlloc || n < 0 {
  1795  		panic(plainError("runtime: allocation size out of range"))
  1796  	}
  1797  	return mallocgc(mem, typ, true)
  1798  }
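
        // A hedged usage sketch for newarray: with a 24-byte element type and
        // n = 1000, a single 24000-byte object is allocated; math.MulUintptr
        // reports overflow only if typ.Size_*n wraps the address space, and
        // the explicit mem > maxAlloc and n < 0 checks cover the remaining
        // out-of-range cases before mallocgc runs.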
  1799  
  1800  // reflect_unsafe_NewArray is meant for package reflect,
  1801  // but widely used packages access it using linkname.
  1802  // Notable members of the hall of shame include:
  1803  //   - gitee.com/quant1x/gox
  1804  //   - github.com/bytedance/sonic
  1805  //   - github.com/goccy/json
  1806  //   - github.com/modern-go/reflect2
  1807  //   - github.com/segmentio/encoding
  1808  //   - github.com/segmentio/kafka-go
  1809  //   - github.com/v2pro/plz
  1810  //
  1811  // Do not remove or change the type signature.
  1812  // See go.dev/issue/67401.
  1813  //
  1814  //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
  1815  func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
  1816  	return newarray(typ, n)
  1817  }
  1818  
  1819  //go:linkname maps_newarray internal/runtime/maps.newarray
  1820  func maps_newarray(typ *_type, n int) unsafe.Pointer {
  1821  	return newarray(typ, n)
  1822  }
  1823  
  1824  // profilealloc resets the current mcache's nextSample counter and
  1825  // records a memory profile sample.
  1826  //
  1827  // The caller must be non-preemptible and have a P.
  1828  func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
  1829  	c := getMCache(mp)
  1830  	if c == nil {
  1831  		throw("profilealloc called without a P or outside bootstrapping")
  1832  	}
  1833  	c.memProfRate = MemProfileRate
  1834  	c.nextSample = nextSample()
  1835  	mProf_Malloc(mp, x, size)
  1836  }
  1837  
  1838  // nextSample returns the next sampling point for heap profiling. The goal is
  1839  // to sample allocations on average every MemProfileRate bytes, but with a
  1840  // completely random distribution over the allocation timeline; this
  1841  // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
  1842  // processes, the distance between two samples follows the exponential
  1843  // distribution (exp(MemProfileRate)), so the best return value is a random
  1844  // distribution with mean MemProfileRate, so the best return value is a random
  1845  func nextSample() int64 {
  1846  	if MemProfileRate == 0 {
  1847  		// Basically never sample.
  1848  		return math.MaxInt64
  1849  	}
  1850  	if MemProfileRate == 1 {
  1851  		// Sample immediately.
  1852  		return 0
  1853  	}
  1854  	return int64(fastexprand(MemProfileRate))
  1855  }
  1856  
  1857  // fastexprand returns a random number from an exponential distribution with
  1858  // the specified mean.
  1859  func fastexprand(mean int) int32 {
  1860  	// Avoid overflow. Maximum possible step is
  1861  // -ln(1/(1<<randomBitCount)) * mean, approximately 18 * mean.
  1862  	switch {
  1863  	case mean > 0x7000000:
  1864  		mean = 0x7000000
  1865  	case mean == 0:
  1866  		return 0
  1867  	}
  1868  
  1869  	// Take a random sample of the exponential distribution exp(-mean*x).
  1870  	// The probability distribution function is mean*exp(-mean*x), so the CDF is
  1871  	// p = 1 - exp(-mean*x), so
  1872  	// q = 1 - p == exp(-mean*x)
  1873  	// log_e(q) = -mean*x
  1874  	// -log_e(q)/mean = x
  1875  	// x = -log_e(q) * mean
  1876  	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
  1877  	const randomBitCount = 26
  1878  	q := cheaprandn(1<<randomBitCount) + 1
  1879  	qlog := fastlog2(float64(q)) - randomBitCount
  1880  	if qlog > 0 {
  1881  		qlog = 0
  1882  	}
  1883  	const minusLog2 = -0.6931471805599453 // -ln(2)
  1884  	return int32(qlog*(minusLog2*float64(mean))) + 1
  1885  }
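
        // Working the algebra above through one draw: q is uniform in
        // [1, 1<<26], so qlog = log2(q) - 26 is log2 of a uniform (0, 1]
        // variable, and multiplying by -ln(2)*mean yields -ln(q/2^26)*mean,
        // an exponential sample with the requested mean. For example, with
        // mean = 512*1024 (the default MemProfileRate) and q = 1<<25, the
        // function returns about 363,000 bytes until the next sample.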
  1886  
  1887  type persistentAlloc struct {
  1888  	base *notInHeap
  1889  	off  uintptr
  1890  }
  1891  
  1892  var globalAlloc struct {
  1893  	mutex
  1894  	persistentAlloc
  1895  }
  1896  
  1897  // persistentChunkSize is the number of bytes we allocate when we grow
  1898  // a persistentAlloc.
  1899  const persistentChunkSize = 256 << 10
  1900  
  1901  // persistentChunks is a list of all the persistent chunks we have
  1902  // allocated. The list is maintained through the first word in the
  1903  // persistent chunk. This is updated atomically.
  1904  var persistentChunks *notInHeap
  1905  
  1906  // Wrapper around sysAlloc that can allocate small chunks.
  1907  // There is no associated free operation.
  1908  // Intended for things like function/type/debug-related persistent data.
  1909  // If align is 0, uses default align (currently 8).
  1910  // The returned memory will be zeroed.
  1911  // sysStat must be non-nil.
  1912  //
  1913  // Consider marking persistentalloc'd types not in heap by embedding
  1914  // internal/runtime/sys.NotInHeap.
  1915  //
  1916  // nosplit because it is used during write barriers and must not be preempted.
  1917  //
  1918  //go:nosplit
  1919  func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
  1920  	var p *notInHeap
  1921  	systemstack(func() {
  1922  		p = persistentalloc1(size, align, sysStat)
  1923  	})
  1924  	return unsafe.Pointer(p)
  1925  }
  1926  
  1927  // Must run on system stack because stack growth can (re)invoke it.
  1928  // See issue 9174.
  1929  //
  1930  //go:systemstack
  1931  func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
  1932  	const (
  1933  		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
  1934  	)
  1935  
  1936  	if size == 0 {
  1937  		throw("persistentalloc: size == 0")
  1938  	}
  1939  	if align != 0 {
  1940  		if align&(align-1) != 0 {
  1941  			throw("persistentalloc: align is not a power of 2")
  1942  		}
  1943  		if align > pageSize {
  1944  			throw("persistentalloc: align is too large")
  1945  		}
  1946  	} else {
  1947  		align = 8
  1948  	}
  1949  
  1950  	if size >= maxBlock {
  1951  		return (*notInHeap)(sysAlloc(size, sysStat, "immortal metadata"))
  1952  	}
  1953  
  1954  	mp := acquirem()
  1955  	var persistent *persistentAlloc
  1956  	if mp != nil && mp.p != 0 {
  1957  		persistent = &mp.p.ptr().palloc
  1958  	} else {
  1959  		lock(&globalAlloc.mutex)
  1960  		persistent = &globalAlloc.persistentAlloc
  1961  	}
  1962  	persistent.off = alignUp(persistent.off, align)
  1963  	if persistent.off+size > persistentChunkSize || persistent.base == nil {
  1964  		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys, "immortal metadata"))
  1965  		if persistent.base == nil {
  1966  			if persistent == &globalAlloc.persistentAlloc {
  1967  				unlock(&globalAlloc.mutex)
  1968  			}
  1969  			throw("runtime: cannot allocate memory")
  1970  		}
  1971  
  1972  		// Add the new chunk to the persistentChunks list.
  1973  		for {
  1974  			chunks := uintptr(unsafe.Pointer(persistentChunks))
  1975  			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
  1976  			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
  1977  				break
  1978  			}
  1979  		}
  1980  		persistent.off = alignUp(goarch.PtrSize, align)
  1981  	}
  1982  	p := persistent.base.add(persistent.off)
  1983  	persistent.off += size
  1984  	releasem(mp)
  1985  	if persistent == &globalAlloc.persistentAlloc {
  1986  		unlock(&globalAlloc.mutex)
  1987  	}
  1988  
  1989  	if sysStat != &memstats.other_sys {
  1990  		sysStat.add(int64(size))
  1991  		memstats.other_sys.add(-int64(size))
  1992  	}
  1993  	return p
  1994  }
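
        // A minimal usage sketch (the struct name is a placeholder, not a real
        // runtime type):
        //
        //	p := persistentalloc(unsafe.Sizeof(exampleMeta{}), 0, &memstats.other_sys)
        //
        // bump-allocates from the current P's 256 KiB palloc chunk with the
        // default 8-byte alignment, falling back to globalAlloc (under its
        // mutex) when there is no P; requests of 64 KiB or more skip the
        // chunks and go straight to sysAlloc. None of this memory is ever
        // freed.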
  1995  
  1996  // inPersistentAlloc reports whether p points to memory allocated by
  1997  // persistentalloc. This must be nosplit because it is called by the
  1998  // cgo checker code, which is called by the write barrier code.
  1999  //
  2000  //go:nosplit
  2001  func inPersistentAlloc(p uintptr) bool {
  2002  	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
  2003  	for chunk != 0 {
  2004  		if p >= chunk && p < chunk+persistentChunkSize {
  2005  			return true
  2006  		}
  2007  		chunk = *(*uintptr)(unsafe.Pointer(chunk))
  2008  	}
  2009  	return false
  2010  }
  2011  
  2012  // linearAlloc is a simple linear allocator that pre-reserves a region
  2013  // of memory and then optionally maps that region into the Ready state
  2014  // as needed.
  2015  //
  2016  // The caller is responsible for locking.
  2017  type linearAlloc struct {
  2018  	next   uintptr // next free byte
  2019  	mapped uintptr // one byte past end of mapped space
  2020  	end    uintptr // end of reserved space
  2021  
  2022  	mapMemory bool // transition memory from Reserved to Ready if true
  2023  }
  2024  
  2025  func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
  2026  	if base+size < base {
  2027  		// Chop off the last byte. The runtime isn't prepared
  2028  		// to deal with situations where the bounds could overflow.
  2029  		// Leave that memory reserved, though, so we don't map it
  2030  		// later.
  2031  		size -= 1
  2032  	}
  2033  	l.next, l.mapped = base, base
  2034  	l.end = base + size
  2035  	l.mapMemory = mapMemory
  2036  }
  2037  
  2038  func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
  2039  	p := alignUp(l.next, align)
  2040  	if p+size > l.end {
  2041  		return nil
  2042  	}
  2043  	l.next = p + size
  2044  	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
  2045  		if l.mapMemory {
  2046  			// Transition from Reserved to Prepared to Ready.
  2047  			n := pEnd - l.mapped
  2048  			sysMap(unsafe.Pointer(l.mapped), n, sysStat, vmaName)
  2049  			sysUsed(unsafe.Pointer(l.mapped), n, n)
  2050  		}
  2051  		l.mapped = pEnd
  2052  	}
  2053  	return unsafe.Pointer(p)
  2054  }
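
        // A hedged usage sketch (base, sysStat, and the VMA name are
        // illustrative, and base is assumed page-aligned):
        //
        //	var l linearAlloc
        //	l.init(base, 64<<20, true)
        //	p := l.alloc(8192, 8, sysStat, "example region")
        //
        // leaves l.next at base+8192 and, because mapMemory is true, maps the
        // pages up to the next physical-page boundary into the Ready state via
        // sysMap/sysUsed; a request that would push next past l.end returns
        // nil rather than growing the reservation.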
  2055  
  2056  // notInHeap is off-heap memory allocated by a lower-level allocator
  2057  // like sysAlloc or persistentAlloc.
  2058  //
  2059  // In general, it's better to use real types which embed
  2060  // internal/runtime/sys.NotInHeap, but this serves as a generic type
  2061  // for situations where that isn't possible (like in the allocators).
  2062  //
  2063  // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
  2064  type notInHeap struct{ _ sys.NotInHeap }
  2065  
  2066  func (p *notInHeap) add(bytes uintptr) *notInHeap {
  2067  	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
  2068  }
  2069  
  2070  // redZoneSize computes the size of the redzone for a given allocation.
  2071  // Refer to the implementation of the compiler-rt.
  2072  // The thresholds mirror the compiler-rt (ASan) implementation.
  2073  	switch {
  2074  	case userSize <= (64 - 16):
  2075  		return 16 << 0
  2076  	case userSize <= (128 - 32):
  2077  		return 16 << 1
  2078  	case userSize <= (512 - 64):
  2079  		return 16 << 2
  2080  	case userSize <= (4096 - 128):
  2081  		return 16 << 3
  2082  	case userSize <= (1<<14)-256:
  2083  		return 16 << 4
  2084  	case userSize <= (1<<15)-512:
  2085  		return 16 << 5
  2086  	case userSize <= (1<<16)-1024:
  2087  		return 16 << 6
  2088  	default:
  2089  		return 16 << 7
  2090  	}
  2091  }
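
        // Worked example: a 100-byte allocation fails the first two cases
        // (100 > 48 and 100 > 96) but satisfies userSize <= 512-64, so it
        // gets a 64-byte redzone; anything larger than 65536-1024 bytes is
        // capped at a 2048-byte redzone (16 << 7).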
  2092  
