Source file src/runtime/mpagealloc.go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page allocator.
//
// The page allocator manages mapped pages (defined by pageSize, NOT
// physPageSize) for allocation and re-use. It is embedded into mheap.
//
// Pages are managed using a bitmap that is sharded into chunks.
// In the bitmap, 1 means in-use, and 0 means free. The bitmap spans the
// process's address space. Chunks are managed in a sparse-array-style structure
// similar to mheap.arenas, since the bitmap may be large on some systems.
//
// The bitmap is efficiently searched by using a radix tree in combination
// with fast bit-wise intrinsics. Allocation is performed using an address-ordered
// first-fit approach.
//
// Each entry in the radix tree is a summary that describes three properties of
// a particular region of the address space: the number of contiguous free pages
// at the start and end of the region it represents, and the maximum number of
// contiguous free pages found anywhere in that region.
//
// Each level of the radix tree is stored as one contiguous array, which represents
// a different granularity of subdivision of the process's address space. Thus, this
// radix tree is actually implicit in these large arrays, as opposed to having explicit
// dynamically-allocated pointer-based node structures. Naturally, these arrays may be
// quite large for systems with large address spaces, so in these cases they are mapped
// into memory as needed. Each leaf summary of the tree corresponds to one bitmap chunk.
//
// The root level (referred to as L0 and index 0 in pageAlloc.summary) has each
// summary represent the largest section of address space (16 GiB on 64-bit systems),
// with each subsequent level representing successively smaller subsections until we
// reach the finest granularity at the leaves, a chunk.
//
// More specifically, each summary in each level (except for leaf summaries)
// represents some number of entries in the following level. For example, each
// summary in the root level may represent a 16 GiB region of address space,
// and in the next level there could be 8 corresponding entries which represent 2
// GiB subsections of that 16 GiB region, each of which could correspond to 8
// entries in the next level which each represent 256 MiB regions, and so on.
//
// Thus, this design only scales to heaps of a bounded size, but it can always
// be extended to larger heaps by simply adding levels to the radix tree, which
// mostly costs additional virtual address space. The choice of managing large
// arrays also means that a large amount of virtual address space may be
// reserved by the runtime.

package runtime

import (
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"unsafe"
)

const (
	// The size of a bitmap chunk, i.e. the number of bits (that is, pages) to consider
	// in the bitmap at once.
	pallocChunkPages    = 1 << logPallocChunkPages
	pallocChunkBytes    = pallocChunkPages * pageSize
	logPallocChunkPages = 9
	logPallocChunkBytes = logPallocChunkPages + gc.PageShift

	// The number of radix bits for each level.
	//
	// The value of 3 is chosen such that the block of summaries we need to scan at
	// each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is
	// close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree
	// levels perfectly into the 21-bit pallocBits summary field at the root level.
	//
	// The following equation explains how each of the constants relate:
	// summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits
	//
	// summaryLevels is an architecture-dependent value defined in mpagealloc_*.go.
	summaryLevelBits = 3
	summaryL0Bits    = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits

	// pallocChunksL2Bits is the number of bits of the chunk index number
	// covered by the second level of the chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this change.
	pallocChunksL2Bits  = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
	pallocChunksL1Shift = pallocChunksL2Bits

	vmaNamePageAllocIndex = "page alloc index"
)
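
// Plugging concrete values into the equation above makes the geometry easier
// to see. The numbers below assume a 48-bit address space with
// summaryLevels = 5 (as defined in mpagealloc_64bit.go) and 8 KiB pages; they
// are illustrative, not new definitions:
//
//	logPallocChunkBytes = 9 + 13 = 22        // 4 MiB chunks
//	summaryL0Bits       = 48 - 22 - 4*3 = 14 // 1<<14 root summaries
//
// so each root summary covers 1<<(48-14) bytes = 16 GiB of address space, and
// each of the 4 subsequent levels divides that by 1<<3: 2 GiB, 256 MiB,
// 32 MiB, and finally 4 MiB, the size of one chunk.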

// maxSearchAddr returns the maximum searchAddr value, which indicates
// that the heap has no free space.
//
// This function exists just to make it clear that this is the maximum address
// for the page allocator's search space. See maxOffAddr for details.
//
// It's a function (rather than a variable) because it needs to be
// usable before package runtime's dynamic initialization is complete.
// See #51913 for details.
func maxSearchAddr() offAddr { return maxOffAddr }

// Global chunk index.
//
// Represents an index into the leaf level of the radix tree.
// Similar to arenaIndex, except instead of arenas, it divides the address
// space into chunks.
type chunkIdx uint

// chunkIndex returns the global index of the palloc chunk containing the
// pointer p.
func chunkIndex(p uintptr) chunkIdx {
	return chunkIdx((p - arenaBaseOffset) / pallocChunkBytes)
}

// chunkBase returns the base address of the palloc chunk at index ci.
func chunkBase(ci chunkIdx) uintptr {
	return uintptr(ci)*pallocChunkBytes + arenaBaseOffset
}

// chunkPageIndex computes the index of the page that contains p,
// relative to the chunk which contains p.
func chunkPageIndex(p uintptr) uint {
	return uint(p % pallocChunkBytes / pageSize)
}
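
// As a concrete sketch (not new API): because arenaBaseOffset is aligned to
// pallocChunkBytes on supported platforms, these three helpers are consistent
// with one another for any heap address p:
//
//	ci := chunkIndex(p)     // which chunk p falls in
//	base := chunkBase(ci)   // first address of that chunk
//	pi := chunkPageIndex(p) // page number of p within that chunk
//
// and base + uintptr(pi)*pageSize is the base address of p's page.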

// l1 returns the index into the first level of (*pageAlloc).chunks.
func (i chunkIdx) l1() uint {
	if pallocChunksL1Bits == 0 {
		// Let the compiler optimize this away if there's no
		// L1 map.
		return 0
	} else {
		return uint(i) >> pallocChunksL1Shift
	}
}

// l2 returns the index into the second level of (*pageAlloc).chunks.
func (i chunkIdx) l2() uint {
	if pallocChunksL1Bits == 0 {
		return uint(i)
	} else {
		return uint(i) & (1<<pallocChunksL2Bits - 1)
	}
}
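
// For example, on a 48-bit platform (pallocChunksL1Bits = 13,
// pallocChunksL2Bits = 13), a chunk index splits into a 13-bit L1 index in
// the high bits and a 13-bit L2 index in the low bits, so a lookup walks the
// sparse array like this (pa is an illustrative *pageAlloc):
//
//	ci := chunkIndex(addr)
//	data := &pa.chunks[ci.l1()][ci.l2()] // what pa.chunkOf(ci) returns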

// offAddrToLevelIndex converts an address in the offset address space
// to the index into summary[level] containing addr.
func offAddrToLevelIndex(level int, addr offAddr) int {
	return int((addr.a - arenaBaseOffset) >> levelShift[level])
}

// levelIndexToOffAddr converts an index into summary[level] into
// the corresponding address in the offset address space.
func levelIndexToOffAddr(level, idx int) offAddr {
	return offAddr{(uintptr(idx) << levelShift[level]) + arenaBaseOffset}
}

// addrsToSummaryRange converts base and limit pointers into a range
// of entries for the given summary level.
//
// The returned range is inclusive on the lower bound and exclusive on
// the upper bound.
func addrsToSummaryRange(level int, base, limit uintptr) (lo int, hi int) {
	// This is slightly more nuanced than just a shift for the exclusive
	// upper-bound. Note that the exclusive upper bound may be within a
	// summary at this level, meaning if we just do the obvious computation
	// hi will end up being an inclusive upper bound. Unfortunately, just
	// adding 1 to that is too broad since we might be on the very edge
	// of a summary's max page count boundary for this level
	// (1 << levelLogPages[level]). So, make limit an inclusive upper bound
	// then shift, then add 1, so we get an exclusive upper bound at the end.
	lo = int((base - arenaBaseOffset) >> levelShift[level])
	hi = int(((limit-1)-arenaBaseOffset)>>levelShift[level]) + 1
	return
}

// blockAlignSummaryRange aligns indices into the given level to that
// level's block width (1 << levelBits[level]). It assumes lo is inclusive
// and hi is exclusive, and so aligns them down and up respectively.
func blockAlignSummaryRange(level int, lo, hi int) (int, int) {
	e := uintptr(1) << levelBits[level]
	return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e))
}
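
// A minimal sketch of how these helpers are meant to compose (update and the
// platform-specific sysGrow routines do the real versions of this): find
// every level-l summary index overlapping [base, limit), optionally widening
// to whole blocks when an entire block must be visited:
//
//	lo, hi := addrsToSummaryRange(l, base, limit)
//	lo, hi = blockAlignSummaryRange(l, lo, hi)
//	for i := lo; i < hi; i++ {
//		// ... inspect or recompute p.summary[l][i] ...
//	}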

type pageAlloc struct {
	// Radix tree of summaries.
	//
	// Each slice's cap represents the whole memory reservation.
	// Each slice's len reflects the allocator's maximum known
	// mapped heap address for that level.
	//
	// The backing store of each summary level is reserved in init
	// and may or may not be committed in grow (small address spaces
	// may commit all the memory in init).
	//
	// The purpose of keeping len <= cap is to enforce bounds checks
	// on the top end of the slice so that instead of an unknown
	// runtime segmentation fault, we get a much friendlier out-of-bounds
	// error.
	//
	// To iterate over a summary level, use inUse to determine which ranges
	// are currently available. Otherwise one might try to access
	// memory that is only Reserved, which may result in a hard fault.
	//
	// We may still get segmentation faults < len since some of that
	// memory may not be committed yet.
	summary [summaryLevels][]pallocSum

	// chunks is a slice of bitmap chunks.
	//
	// The total size of chunks is quite large on most 64-bit platforms
	// (O(GiB) or more) if flattened, so rather than making one large mapping
	// (which has problems on some platforms, even when PROT_NONE) we use a
	// two-level sparse array approach similar to the arena index in mheap.
	//
	// To find the chunk containing a memory address `a`, do:
	//   chunkOf(chunkIndex(a))
	//
	// Below is a table describing the configuration for chunks for various
	// heapAddrBits supported by the runtime.
	//
	// heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size
	// ------------------------------------------------
	// 32           | 0       | 10      | 128 KiB
	// 33 (iOS)     | 0       | 11      | 256 KiB
	// 48           | 13      | 13      | 1 MiB
	//
	// There's no reason to use the L1 part of chunks on 32-bit: the
	// address space is small, so the L2 is small. For platforms with a
	// 48-bit address space, we pick the L1 such that the L2 is 1 MiB
	// in size, which keeps granularity low without making the impact
	// on BSS too high (note the L1 is stored directly in pageAlloc).
	//
	// To iterate over the bitmap, use inUse to determine which ranges
	// are currently available. Otherwise one might iterate over unused
	// ranges.
	//
	// Protected by mheapLock.
	//
	// TODO(mknyszek): Consider changing the definition of the bitmap
	// such that 1 means free and 0 means in-use so that summaries and
	// the bitmaps align better on zero-values.
	chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData

	// The address to start an allocation search with. It must never
	// point to any memory that is not contained in inUse, i.e.
	// inUse.contains(searchAddr.addr()) must always be true. The one
	// exception to this rule is that it may take on the value of
	// maxOffAddr to indicate that the heap is exhausted.
	//
	// We guarantee that all valid heap addresses below this value
	// are allocated and not worth searching.
	searchAddr offAddr

	// start and end represent the chunk indices
	// which pageAlloc knows about. It assumes
	// chunks in the range [start, end) are
	// currently ready to use.
	start, end chunkIdx

	// inUse is a slice of ranges of address space which are
	// known by the page allocator to be currently in-use (passed
	// to grow).
	//
	// We care much more about having a contiguous heap in these cases
	// and take additional measures to ensure that, so in nearly all
	// cases this should have just 1 element.
	//
	// All access is protected by the mheapLock.
	inUse addrRanges

	// scav stores the scavenger state.
	scav struct {
		// index is an efficient index of chunks that have pages available to
		// scavenge.
		index scavengeIndex

		// releasedBg is the amount of memory released in the background this
		// scavenge cycle.
		releasedBg atomic.Uintptr

		// releasedEager is the amount of memory released eagerly this scavenge
		// cycle.
		releasedEager atomic.Uintptr
	}

	// mheap_.lock. This level of indirection makes it possible
	// to test pageAlloc independently of the runtime allocator.
	mheapLock *mutex

	// sysStat is the runtime memstat to update when new system
	// memory is committed by the pageAlloc for allocation metadata.
	sysStat *sysMemStat

	// summaryMappedReady is the number of bytes mapped in the Ready state
	// in the summary structure. Used only for testing currently.
	//
	// Protected by mheapLock.
	summaryMappedReady uintptr

	// chunkHugePages indicates whether page bitmap chunks should be backed
	// by huge pages.
	chunkHugePages bool

	// Whether or not this struct is being used in tests.
	test bool
}

func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat, test bool) {
	if levelLogPages[0] > logMaxPackedValue {
		// We can't represent 1<<levelLogPages[0] pages, the maximum number
		// of pages we need to represent at the root level, in a summary, which
		// is a big problem. Throw.
		print("runtime: root level max pages = ", 1<<levelLogPages[0], "\n")
		print("runtime: summary max pages = ", maxPackedValue, "\n")
		throw("root level max pages doesn't fit in summary")
	}
	p.sysStat = sysStat

	// Initialize p.inUse.
	p.inUse.init(sysStat)

	// System-dependent initialization.
	p.sysInit(test)

	// Start with the searchAddr in a state indicating there's no free memory.
	p.searchAddr = maxSearchAddr()

	// Set the mheapLock.
	p.mheapLock = mheapLock

	// Initialize the scavenge index.
	p.summaryMappedReady += p.scav.index.init(test, sysStat)

	// Set if we're in a test.
	p.test = test
}

// tryChunkOf returns the bitmap data for the given chunk.
//
// Returns nil if the chunk data has not been mapped.
func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
	l2 := p.chunks[ci.l1()]
	if l2 == nil {
		return nil
	}
	return &l2[ci.l2()]
}

// chunkOf returns the chunk at the given chunk index.
//
// The chunk index must be valid or this method may throw.
func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
	return &p.chunks[ci.l1()][ci.l2()]
}
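
// A usage sketch: callers that cannot be sure the chunk's L2 block has been
// mapped yet (for example, when walking an arbitrary address) should prefer
// tryChunkOf and check for nil, while hot paths that already know ci is
// valid use chunkOf directly:
//
//	if data := p.tryChunkOf(ci); data != nil {
//		// safe to inspect *data
//	}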

// grow sets up the metadata for the address range [base, base+size).
// It may allocate metadata, in which case *p.sysStat will be updated.
//
// p.mheapLock must be held.
func (p *pageAlloc) grow(base, size uintptr) {
	assertLockHeld(p.mheapLock)

	// Round up to chunks, since we can't deal with increments smaller
	// than chunks. Also, sysGrow expects aligned values.
	limit := alignUp(base+size, pallocChunkBytes)
	base = alignDown(base, pallocChunkBytes)

	// Grow the summary levels in a system-dependent manner.
	// We just update a bunch of additional metadata here.
	p.sysGrow(base, limit)

	// Grow the scavenge index.
	p.summaryMappedReady += p.scav.index.grow(base, limit, p.sysStat)

	// Update p.start and p.end.
	// If no growth happened yet, start == 0. This is generally
	// safe since the zero page is unmapped.
	firstGrowth := p.start == 0
	start, end := chunkIndex(base), chunkIndex(limit)
	if firstGrowth || start < p.start {
		p.start = start
	}
	if end > p.end {
		p.end = end
	}
	// Note that [base, limit) will never overlap with any existing
	// range inUse because grow only ever adds never-used memory
	// regions to the page allocator.
	p.inUse.add(makeAddrRange(base, limit))

	// A grow operation is a lot like a free operation, so if our
	// chunk ends up below p.searchAddr, update p.searchAddr to the
	// new address, just like in free.
	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}

	// Add entries into chunks, which is sparse, if needed. Then,
	// initialize the bitmap.
	//
	// Newly-grown memory is always considered scavenged.
	// Set all the bits in the scavenged bitmaps high.
	for c := chunkIndex(base); c < chunkIndex(limit); c++ {
		if p.chunks[c.l1()] == nil {
			// Create the necessary l2 entry.
			const l2Size = unsafe.Sizeof(*p.chunks[0])
			r := sysAlloc(l2Size, p.sysStat, vmaNamePageAllocIndex)
			if r == nil {
				throw("pageAlloc: out of memory")
			}
			if !p.test {
				// Make the chunk mapping eligible or ineligible
				// for huge pages, depending on what our current
				// state is.
				if p.chunkHugePages {
					sysHugePage(r, l2Size)
				} else {
					sysNoHugePage(r, l2Size)
				}
			}
			// Store the new chunk block but avoid a write barrier.
			// grow is used in call chains that disallow write barriers.
			*(*uintptr)(unsafe.Pointer(&p.chunks[c.l1()])) = uintptr(r)
		}
		p.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
	}

	// Update summaries accordingly. The grow acts like a free, so
	// we need to ensure this newly-free memory is visible in the
	// summaries.
	p.update(base, size/pageSize, true, false)
}

// enableChunkHugePages enables huge pages for the chunk bitmap mappings (disabled by default).
//
// This function is idempotent.
//
// A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
// time, but may take time proportional to the size of the mapped heap beyond that.
//
// The heap lock must not be held over this operation, since it will briefly acquire
// the heap lock.
//
// Must be called on the system stack because it acquires the heap lock.
//
//go:systemstack
func (p *pageAlloc) enableChunkHugePages() {
	// Grab the heap lock to turn on huge pages for new chunks and clone the current
	// heap address space ranges.
	//
	// After the lock is released, we can be sure that bitmaps for any new chunks may
	// be backed with huge pages, and we have the address space for the rest of the
	// chunks. At the end of this function, all chunk metadata should be backed by huge
	// pages.
	lock(&mheap_.lock)
	if p.chunkHugePages {
		unlock(&mheap_.lock)
		return
	}
	p.chunkHugePages = true
	var inUse addrRanges
	inUse.sysStat = p.sysStat
	p.inUse.cloneInto(&inUse)
	unlock(&mheap_.lock)

	// This might seem like a lot of work, but all these loops are for generality.
	//
	// For a 1 GiB contiguous heap, a 48-bit address space, 13 L1 bits, a palloc chunk size
	// of 4 MiB, and adherence to the default set of heap address hints, this will result in
	// exactly 1 call to sysHugePage.
	//
	// Iterate over the clone taken above, not p.inUse, since the heap lock
	// is no longer held; the loop bound is inclusive so the L1 block
	// containing the last chunk of each range is included.
	for _, r := range inUse.ranges {
		for i := chunkIndex(r.base.addr()).l1(); i <= chunkIndex(r.limit.addr()-1).l1(); i++ {
			// N.B. We can assume that p.chunks[i] is non-nil and in a mapped part of p.chunks
			// because it's derived from inUse, which never shrinks.
			sysHugePage(unsafe.Pointer(p.chunks[i]), unsafe.Sizeof(*p.chunks[0]))
		}
	}
}

// update updates heap metadata. It must be called each time the bitmap
// is updated.
//
// If contig is true, update does some optimizations assuming that there was
// a contiguous allocation or free between addr and addr+npages. alloc indicates
// whether the operation performed was an allocation or a free.
//
// p.mheapLock must be held.
func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
	assertLockHeld(p.mheapLock)

	// base, limit, start, and end are inclusive.
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)

	// Handle updating the lowest level first.
	if sc == ec {
		// Fast path: the allocation doesn't span more than one chunk,
		// so update this one and if the summary didn't change, return.
		x := p.summary[len(p.summary)-1][sc]
		y := p.chunkOf(sc).summarize()
		if x == y {
			return
		}
		p.summary[len(p.summary)-1][sc] = y
	} else if contig {
		// Slow contiguous path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		summary := p.summary[len(p.summary)-1]

		// Update the summary for chunk sc.
		summary[sc] = p.chunkOf(sc).summarize()

		// Update the summaries for chunks in between, which are
		// either totally allocated or freed.
		whole := p.summary[len(p.summary)-1][sc+1 : ec]
		if alloc {
			clear(whole)
		} else {
			for i := range whole {
				whole[i] = freeChunkSum
			}
		}

		// Update the summary for chunk ec.
		summary[ec] = p.chunkOf(ec).summarize()
	} else {
		// Slow general path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		//
		// We can't assume a contiguous allocation happened, so walk over
		// every chunk in the range and manually recompute the summary.
		summary := p.summary[len(p.summary)-1]
		for c := sc; c <= ec; c++ {
			summary[c] = p.chunkOf(c).summarize()
		}
	}

	// Walk up the radix tree and update the summaries appropriately.
	changed := true
	for l := len(p.summary) - 2; l >= 0 && changed; l-- {
		// Update summaries at level l from summaries at level l+1.
		changed = false

		// "Constants" for the previous level which we
		// need to compute the summary from that level.
		logEntriesPerBlock := levelBits[l+1]
		logMaxPages := levelLogPages[l+1]

		// lo and hi describe all the parts of the level we need to look at.
		lo, hi := addrsToSummaryRange(l, base, limit+1)

		// Iterate over each block, updating the corresponding summary in the less-granular level.
		for i := lo; i < hi; i++ {
			children := p.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
			sum := mergeSummaries(children, logMaxPages)
			old := p.summary[l][i]
			if old != sum {
				changed = true
				p.summary[l][i] = sum
			}
		}
	}
}
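
// To make the upward propagation concrete: with summaryLevelBits = 3, the
// level-l summary at index i is recomputed from its 8 children at level l+1,
// exactly as the loop body above does (an illustrative restatement):
//
//	children := p.summary[l+1][i<<3 : (i+1)<<3]
//	p.summary[l][i] = mergeSummaries(children, levelLogPages[l+1])
//
// The walk stops early once a whole level's summaries stop changing, since
// a parent can only change if one of its children did.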

// allocRange marks the range of memory [base, base+npages*pageSize) as
// allocated. It also updates the summaries to reflect the newly-updated
// bitmap.
//
// Returns the amount of scavenged memory in bytes present in the
// allocated range.
//
// p.mheapLock must be held.
func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
	assertLockHeld(p.mheapLock)

	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit)

	scav := uint(0)
	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		chunk := p.chunkOf(sc)
		scav += chunk.scavenged.popcntRange(si, ei+1-si)
		chunk.allocRange(si, ei+1-si)
		p.scav.index.alloc(sc, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		chunk := p.chunkOf(sc)
		scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
		chunk.allocRange(si, pallocChunkPages-si)
		p.scav.index.alloc(sc, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			chunk := p.chunkOf(c)
			scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
			chunk.allocAll()
			p.scav.index.alloc(c, pallocChunkPages)
		}
		chunk = p.chunkOf(ec)
		scav += chunk.scavenged.popcntRange(0, ei+1)
		chunk.allocRange(0, ei+1)
		p.scav.index.alloc(ec, ei+1)
	}
	p.update(base, npages, true, true)
	return uintptr(scav) * pageSize
}

// findMappedAddr returns the smallest mapped offAddr that is
// >= addr. That is, if addr refers to mapped memory, then it is
// returned. If addr is higher than any mapped region, then
// it returns maxOffAddr.
//
// p.mheapLock must be held.
func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr {
	assertLockHeld(p.mheapLock)

	// If we're not in a test, validate first by checking mheap_.arenas.
	// This is a fast path which is only safe to use outside of testing.
	ai := arenaIndex(addr.addr())
	if p.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
		vAddr, ok := p.inUse.findAddrGreaterEqual(addr.addr())
		if ok {
			return offAddr{vAddr}
		} else {
			// The candidate search address is greater than any
			// known address, which means we definitely have no
			// free memory left.
			return maxOffAddr
		}
	}
	return addr
}

// find searches for the first (address-ordered) contiguous free region of
// npages in size and returns a base address for that region.
//
// It uses p.searchAddr to prune its search and assumes that no palloc chunks
// below chunkIndex(p.searchAddr) contain any free memory at all.
//
// find also computes and returns a candidate p.searchAddr, which may or
// may not prune more of the address space than p.searchAddr already does.
// This candidate is always a valid p.searchAddr.
//
// find represents the slow path and the full radix tree search.
//
// Returns a base address of 0 on failure, in which case the candidate
// searchAddr returned is invalid and must be ignored.
//
// p.mheapLock must be held.
func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
	assertLockHeld(p.mheapLock)

	// Search algorithm.
	//
	// This algorithm walks each level l of the radix tree from the root level
	// to the leaf level. It iterates over at most 1 << levelBits[l] of entries
	// in a given level in the radix tree, and uses the summary information to
	// find either:
	//  1) That a given subtree contains a large enough contiguous region, at
	//     which point it continues iterating on the next level, or
	//  2) That there are enough contiguous boundary-crossing bits to satisfy
	//     the allocation, at which point it knows exactly where to start
	//     allocating from.
	//
	// i tracks the index into the current level l's structure for the
	// contiguous 1 << levelBits[l] entries we're actually interested in.
	//
	// NOTE: Technically this search could allocate a region which crosses
	// the arenaBaseOffset boundary, which when arenaBaseOffset != 0, is
	// a discontinuity. However, the only way this could happen is if the
	// page at the zero address is mapped, and this is impossible on
	// every system we support where arenaBaseOffset != 0. So, the
	// discontinuity is already encoded in the fact that the OS will never
	// map the zero page for us, and this function doesn't try to handle
	// this case in any way.

	// i is the beginning of the block of entries we're searching at the
	// current level.
	i := 0

	// firstFree is the region of address space in which we are certain to
	// find the first free page in the heap. base and bound are the inclusive
	// bounds of this window, and both are addresses in the linearized, contiguous
	// view of the address space (with arenaBaseOffset pre-added). At each level,
	// this window is narrowed as we find the memory region containing the
	// first free page of memory. To begin with, the range reflects the
	// full process address space.
	//
	// firstFree is updated by calling foundFree each time free space in the
	// heap is discovered.
	//
	// At the end of the search, base.addr() is the best new
	// searchAddr we could deduce in this search.
	firstFree := struct {
		base, bound offAddr
	}{
		base:  minOffAddr,
		bound: maxOffAddr,
	}
	// foundFree takes the given address range [addr, addr+size) and
	// updates firstFree if it is a narrower range. The input range must
	// either be fully contained within firstFree or not overlap with it
	// at all.
	//
	// This way, we'll record the first summary we find with any free
	// pages on the root level and narrow that down if we descend into
	// that summary. But as soon as we need to iterate beyond that summary
	// in a level to find a large enough range, we'll stop narrowing.
	foundFree := func(addr offAddr, size uintptr) {
		if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
			// This range fits within the current firstFree window, so narrow
			// down the firstFree window to the base and bound of this range.
			firstFree.base = addr
			firstFree.bound = addr.add(size - 1)
		} else if !(addr.add(size-1).lessThan(firstFree.base) || firstFree.bound.lessThan(addr)) {
			// This range only partially overlaps with the firstFree range,
			// so throw.
			print("runtime: addr = ", hex(addr.addr()), ", size = ", size, "\n")
			print("runtime: base = ", hex(firstFree.base.addr()), ", bound = ", hex(firstFree.bound.addr()), "\n")
			throw("range partially overlaps")
		}
	}

	// lastSum is the summary which we saw on the previous level that made us
	// move on to the next level. Used to print additional information in the
	// case of a catastrophic failure.
	// lastSumIdx is that summary's index in the previous level.
	lastSum := packPallocSum(0, 0, 0)
	lastSumIdx := -1

nextLevel:
	for l := 0; l < len(p.summary); l++ {
		// For the root level, entriesPerBlock is the whole level.
		entriesPerBlock := 1 << levelBits[l]
		logMaxPages := levelLogPages[l]

		// We've moved into a new level, so let's update i to our new
		// starting index. This is a no-op for level 0.
		i <<= levelBits[l]

		// Slice out the block of entries we care about.
		entries := p.summary[l][i : i+entriesPerBlock]

		// Determine j0, the first index we should start iterating from.
		// The searchAddr may help us eliminate iterations if we followed the
		// searchAddr on the previous level or we're on the root level, in which
		// case the searchAddr should be the same as i after levelShift.
		j0 := 0
		if searchIdx := offAddrToLevelIndex(l, p.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
			j0 = searchIdx & (entriesPerBlock - 1)
		}

		// Run over the level entries looking for
		// a contiguous run of at least npages either
		// within an entry or across entries.
		//
		// base contains the page index (relative to
		// the first entry's first page) of the currently
		// considered run of consecutive pages.
		//
		// size contains the size of the currently considered
		// run of consecutive pages.
		var base, size uint
		for j := j0; j < len(entries); j++ {
			sum := entries[j]
			if sum == 0 {
				// A fully allocated entry breaks any streak and
				// should be skipped altogether.
				size = 0
				continue
			}

			// We've encountered a non-zero summary which means
			// free memory, so update firstFree.
			foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)

			s := sum.start()
			if size+s >= uint(npages) {
				// If size == 0 we don't have a run yet,
				// which means base isn't valid. So, set
				// base to the first page in this block.
				if size == 0 {
					base = uint(j) << logMaxPages
				}
				// We hit npages; we're done!
				size += s
				break
			}
			if sum.max() >= uint(npages) {
				// The entry itself contains npages contiguous
				// free pages, so continue on the next level
				// to find that run.
				i += j
				lastSumIdx = i
				lastSum = sum
				continue nextLevel
			}
			if size == 0 || s < 1<<logMaxPages {
				// We either don't have a current run started, or this entry
				// isn't totally free (meaning we can't continue the current
				// one), so try to begin a new run by setting size and base
				// based on sum.end.
				size = sum.end()
				base = uint(j+1)<<logMaxPages - size
				continue
			}
			// The entry is completely free, so continue the run.
			size += 1 << logMaxPages
		}
		if size >= uint(npages) {
			// We found a sufficiently large run of free pages straddling
			// some boundary, so compute the address and return it.
			addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
			return addr, p.findMappedAddr(firstFree.base)
		}
		if l == 0 {
			// We're at level zero, so that means we've exhausted our search.
			return 0, maxSearchAddr()
		}

		// We're not at level zero, and we exhausted the level we were looking in.
		// This means that either our calculations were wrong or the level above
		// lied to us. In either case, dump some useful state and throw.
		print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
		print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
		print("runtime: p.searchAddr = ", hex(p.searchAddr.addr()), ", i = ", i, "\n")
		print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
		for j := 0; j < len(entries); j++ {
			sum := entries[j]
			print("runtime: summary[", l, "][", i+j, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
		}
		throw("bad summary data")
	}

	// Since we've gotten to this point, that means we haven't found a
	// sufficiently-sized free region straddling some boundary (chunk or larger).
	// This means the last summary we inspected must have had a large enough "max"
	// value, so look inside the chunk to find a suitable run.
	//
	// After iterating over all levels, i must contain a chunk index which
	// is what the final level represents.
	ci := chunkIdx(i)
	j, searchIdx := p.chunkOf(ci).find(npages, 0)
	if j == ^uint(0) {
		// We couldn't find any space in this chunk despite the summaries telling
		// us it should be there. There's likely a bug, so dump some state and throw.
		sum := p.summary[len(p.summary)-1][i]
		print("runtime: summary[", len(p.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
		print("runtime: npages = ", npages, "\n")
		throw("bad summary data")
	}

	// Compute the address at which the free space starts.
	addr := chunkBase(ci) + uintptr(j)*pageSize

	// Since we actually searched the chunk, we may have
	// found an even narrower free window.
	searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
	foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
	return addr, p.findMappedAddr(firstFree.base)
}

// alloc allocates npages worth of memory from the page heap, returning the base
// address for the allocation and the amount of scavenged memory in bytes
// contained in the region [base address, base address + npages*pageSize).
//
// Returns a 0 base address on failure, in which case other returned values
// should be ignored.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
	assertLockHeld(p.mheapLock)

	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(p.searchAddr.addr()) >= p.end {
		return 0, 0
	}

	// If npages has a chance of fitting in the chunk where the searchAddr is,
	// search it directly.
	searchAddr := minOffAddr
	if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) {
		// npages is guaranteed to be no greater than pallocChunkPages here.
		i := chunkIndex(p.searchAddr.addr())
		if max := p.summary[len(p.summary)-1][i].max(); max >= uint(npages) {
			j, searchIdx := p.chunkOf(i).find(npages, chunkPageIndex(p.searchAddr.addr()))
			if j == ^uint(0) {
				print("runtime: max = ", max, ", npages = ", npages, "\n")
				print("runtime: searchIdx = ", chunkPageIndex(p.searchAddr.addr()), ", p.searchAddr = ", hex(p.searchAddr.addr()), "\n")
				throw("bad summary data")
			}
			addr = chunkBase(i) + uintptr(j)*pageSize
			searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
			goto Found
		}
	}
	// We failed to use a searchAddr for one reason or another, so try
	// the slow path.
	addr, searchAddr = p.find(npages)
	if addr == 0 {
		if npages == 1 {
			// We failed to find a single free page, the smallest unit
			// of allocation. This means we know the heap is completely
			// exhausted. Otherwise, the heap still might have free
			// space in it, just not enough contiguous space to
			// accommodate npages.
			p.searchAddr = maxSearchAddr()
		}
		return 0, 0
	}
Found:
	// Go ahead and actually mark the bits now that we have an address.
	scav = p.allocRange(addr, npages)

	// If we found a higher searchAddr, we know that all the
	// heap memory before that searchAddr in an offset address space is
	// allocated, so bump p.searchAddr up to the new one.
	if p.searchAddr.lessThan(searchAddr) {
		p.searchAddr = searchAddr
	}
	return addr, scav
}

// free returns npages worth of memory starting at base back to the page heap.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) free(base, npages uintptr) {
	assertLockHeld(p.mheapLock)

	// If we're freeing pages below the p.searchAddr, update searchAddr.
	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}
	limit := base + npages*pageSize - 1
	if npages == 1 {
		// Fast path: we're clearing a single bit, and we know exactly
		// where it is, so mark it directly.
		i := chunkIndex(base)
		pi := chunkPageIndex(base)
		p.chunkOf(i).free1(pi)
		p.scav.index.free(i, pi, 1)
	} else {
		// Slow path: we're clearing more bits so we may need to iterate.
		sc, ec := chunkIndex(base), chunkIndex(limit)
		si, ei := chunkPageIndex(base), chunkPageIndex(limit)

		if sc == ec {
			// The range doesn't cross any chunk boundaries.
			p.chunkOf(sc).free(si, ei+1-si)
			p.scav.index.free(sc, si, ei+1-si)
		} else {
			// The range crosses at least one chunk boundary.
			p.chunkOf(sc).free(si, pallocChunkPages-si)
			p.scav.index.free(sc, si, pallocChunkPages-si)
			for c := sc + 1; c < ec; c++ {
				p.chunkOf(c).freeAll()
				p.scav.index.free(c, 0, pallocChunkPages)
			}
			p.chunkOf(ec).free(0, ei+1)
			p.scav.index.free(ec, 0, ei+1)
		}
	}
	p.update(base, npages, true, false)
}
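
// A hedged end-to-end sketch of driving the allocator in a test setting,
// loosely in the spirit of this package's own tests (the real scaffolding in
// mpagealloc_test.go differs; base here stands for some chunk-aligned
// address the test has mapped):
//
//	var p pageAlloc
//	var stat sysMemStat
//	p.init(new(mutex), &stat, true) // test mode
//	lock(p.mheapLock)
//	p.grow(base, 4*pallocChunkBytes) // hand it some address space
//	addr, scav := p.alloc(16)        // first-fit run of 16 pages
//	p.free(addr, 16)                 // give the pages back
//	unlock(p.mheapLock)
//	_ = scav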

const (
	pallocSumBytes = unsafe.Sizeof(pallocSum(0))

	// maxPackedValue is the maximum value that any of the three fields in
	// the pallocSum may take on.
	maxPackedValue    = 1 << logMaxPackedValue
	logMaxPackedValue = logPallocChunkPages + (summaryLevels-1)*summaryLevelBits

	freeChunkSum = pallocSum(uint64(pallocChunkPages) |
		uint64(pallocChunkPages<<logMaxPackedValue) |
		uint64(pallocChunkPages<<(2*logMaxPackedValue)))
)
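
// freeChunkSum is just the precomputed packed form of a completely free
// chunk; with pallocChunkPages = 512 (well below maxPackedValue), it is
// equivalent to the following (illustrative) call:
//
//	packPallocSum(pallocChunkPages, pallocChunkPages, pallocChunkPages)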

// pallocSum is a packed summary type which packs three numbers: start, max,
// and end into a single 8-byte value. Each of these values is a summary of
// a bitmap and is thus a count; each may have a maximum value of
// 2^21 - 1, or all three may be equal to 2^21. The latter case is represented
// by just setting the 64th bit.
type pallocSum uint64

// packPallocSum takes a start, max, and end value and produces a pallocSum.
func packPallocSum(start, max, end uint) pallocSum {
	if max == maxPackedValue {
		return pallocSum(uint64(1 << 63))
	}
	return pallocSum((uint64(start) & (maxPackedValue - 1)) |
		((uint64(max) & (maxPackedValue - 1)) << logMaxPackedValue) |
		((uint64(end) & (maxPackedValue - 1)) << (2 * logMaxPackedValue)))
}

// start extracts the start value from a packed sum.
func (p pallocSum) start() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1))
}

// max extracts the max value from a packed sum.
func (p pallocSum) max() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1))
}

// end extracts the end value from a packed sum.
func (p pallocSum) end() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
}

// unpack unpacks all three values from the summary.
func (p pallocSum) unpack() (uint, uint, uint) {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue, maxPackedValue, maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1)),
		uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1)),
		uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
}
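
// For instance, a chunk whose first 3 pages are free, whose longest free run
// is 10 pages, and whose last 4 pages are free round-trips like so (an
// illustrative check, not runtime code):
//
//	s := packPallocSum(3, 10, 4)
//	start, most, end := s.unpack() // start == 3, most == 10, end == 4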

// mergeSummaries merges consecutive summaries, each of which may represent
// at most 1 << logMaxPagesPerSum pages, into one.
func mergeSummaries(sums []pallocSum, logMaxPagesPerSum uint) pallocSum {
	// Merge the summaries in sums into one.
	//
	// We do this by keeping a running summary representing the merged
	// summaries of sums[:i] in start, most, and end.
	start, most, end := sums[0].unpack()
	for i := 1; i < len(sums); i++ {
		// Merge in sums[i].
		si, mi, ei := sums[i].unpack()

		// Merge in sums[i].start only if the running summary is
		// completely free, otherwise this summary's start
		// plays no role in the combined sum.
		if start == uint(i)<<logMaxPagesPerSum {
			start += si
		}

		// Recompute the max value of the running sum by looking
		// across the boundary between the running sum and sums[i]
		// and at the max sums[i], taking the greatest of those two
		// and the max of the running sum.
		most = max(most, end+si, mi)

		// Merge in end by checking if this new summary is totally
		// free. If it is, then we want to extend the running sum's
		// end by the new summary. If not, then we have some alloc'd
		// pages in there and we just want to take the end value in
		// sums[i].
		if ei == 1<<logMaxPagesPerSum {
			end += 1 << logMaxPagesPerSum
		} else {
			end = ei
		}
	}
	return packPallocSum(start, most, end)
}

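// A worked example (hedged, with made-up values): merging two 512-page
// summaries sums[0] = (0, 100, 200) and sums[1] = (50, 80, 30). The merged
// start stays 0 because sums[0] is not completely free; the merged max is
// max(100, 200+50, 80) = 250, the run straddling the boundary; and the
// merged end is 30, taken from sums[1] since it is not completely free.
// The result is packPallocSum(0, 250, 30).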
