Source file src/runtime/malloc.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of in-use pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan's
//	   pages are returned to the mheap and the mspan is now dead.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// If mspan.needzero is false, then free object slots in the mspan are
// already zeroed. Otherwise if needzero is true, objects are zeroed as
// they are allocated. There are various benefits to delaying zeroing
// this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.

// Virtual memory layout
//
// The heap consists of a set of arenas, which are 64MB on 64-bit and
// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
// aligned to the arena size.
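//
// For example (an illustrative note, not part of the original comment):
// with 64 MB (1<<26 byte) arenas on a 64-bit platform, clearing the low
// 26 bits of a heap address p, i.e. p &^ (heapArenaBytes-1), yields the
// base of the arena containing p, and that arena covers exactly the next
// heapArenaBytes bytes of address space.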
//
// Each arena has an associated heapArena object that stores the
// metadata for that arena: the heap bitmap for all words in the arena
// and the span map for all pages in the arena. heapArena objects are
// themselves allocated off-heap.
//
// Since arenas are aligned, the address space can be viewed as a
// series of arena frames. The arena map (mheap_.arenas) maps from
// arena frame number to *heapArena, or nil for parts of the address
// space not backed by the Go heap. The arena map is structured as a
// two-level array consisting of a "L1" arena map and many "L2" arena
// maps; however, since arenas are large, on many architectures, the
// arena map consists of a single, large L2 map.
//
// The arena map covers the entire possible address space, allowing
// the Go heap to use any part of the address space. The allocator
// attempts to keep arenas contiguous so that large spans (and hence
// large objects) can cross arenas.

package runtime

import (
	"internal/goarch"
	"internal/goexperiment"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

const (
	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = gc.MaxSmallSize
	pageSize      = 1 << gc.PageShift
	pageMask      = pageSize - 1

	// Unused. Left for viewcore.
	_PageSize              = pageSize
	minSizeForMallocHeader = gc.MinSizeForMallocHeader
	mallocHeaderSize       = gc.MallocHeaderSize

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = gc.TinySize
	_TinySizeClass = int8(gc.TinySizeClass)

	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//
	//	OS               | FixedStack | NumStackOrders
	//	-----------------+------------+---------------
	//	linux/darwin/bsd | 2KB        | 4
	//	windows/32       | 4KB        | 3
	//	windows/64       | 8KB        | 2
	//	plan9            | 4KB        | 3
	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9

	// heapAddrBits is the number of bits in a heap address. On
	// amd64, addresses are sign-extended beyond heapAddrBits. On
	// other arches, they are zero-extended.
	//
	// On most 64-bit platforms, we limit this to 48 bits based on a
	// combination of hardware and OS limitations.
	//
	// amd64 hardware limits addresses to 48 bits, sign-extended
	// to 64 bits. Addresses where the top 16 bits are not either
	// all 0 or all 1 are "non-canonical" and invalid. Because of
	// these "negative" addresses, we offset addresses by 1<<47
	// (arenaBaseOffset) on amd64 before computing indexes into
	// the heap arenas index.
	// In 2017, amd64 hardware added
	// support for 57 bit addresses; however, currently only Linux
	// supports this extension and the kernel will never choose an
	// address above 1<<47 unless mmap is called with a hint
	// address above 1<<47 (which we never do).
	//
	// arm64 hardware (as of ARMv8) limits user addresses to 48
	// bits, in the range [0, 1<<48).
	//
	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
	// in hardware. On Linux, Go leans on stricter OS limits. Based
	// on Linux's processor.h, the user address space is limited as
	// follows on 64-bit architectures:
	//
	//	Architecture  Name              Maximum Value (exclusive)
	//	---------------------------------------------------------------------
	//	amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
	//	arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
	//	ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
	//	mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
	//	s390x         TASK_SIZE         1<<64            (64 bit addresses)
	//
	// These limits may increase over time, but are currently at
	// most 48 bits except on s390x. On all architectures, Linux
	// starts placing mmap'd regions at addresses that are
	// significantly below 48 bits, so even if it's possible to
	// exceed Go's 48 bit limit, it's extremely unlikely in
	// practice.
	//
	// On 32-bit platforms, we accept the full 32-bit address
	// space because doing so is cheap.
	// mips32 only has access to the low 2GB of virtual memory, so
	// we further limit it to 31 bits.
	//
	// On ios/arm64, although 64-bit pointers are presumably
	// available, pointers are truncated to 33 bits in iOS <14.
	// Furthermore, only the top 4 GiB of the address space are
	// actually available to the application. In iOS >=14, more
	// of the address space is available, and the OS can now
	// provide addresses outside of those 33 bits. Pick 40 bits
	// as a reasonable balance between address space usage by the
	// page allocator, and flexibility for what mmap'd regions
	// we'll accept for the heap. We can't just move to the full
	// 48 bits because this uses too much address space for older
	// iOS versions.
	// TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
	// to a 48-bit address space like every other arm64 platform.
	//
	// WebAssembly currently has a limit of 4GB linear memory.
	heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64
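
	// For reference (an illustrative note, not part of the original comment),
	// evaluating the expression above per platform gives:
	//
	//	linux/amd64:  1*1*1*48        = 48
	//	ios/arm64:    40*1            = 40
	//	wasm:         (1-1+1)*(32-0)  = 32
	//	linux/mips:   (1-0+0)*(32-1)  = 31
	//
	// matching the limits described in the comment above.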

	// maxAlloc is the maximum size of an allocation. On 64-bit,
	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
	// 32-bit, however, this is one less than 1<<32 because the
	// number of bytes in the address space doesn't actually fit
	// in a uintptr.
	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1

	// The number of bits in a heap address, the size of heap
	// arenas, and the L1 and L2 arena map sizes are related by
	//
	//	(1 << addr bits) = arena size * L1 entries * L2 entries
	//
	// Currently, we balance these as follows:
	//
	//	Platform        Addr bits  Arena size  L1 entries  L2 entries
	//	--------------  ---------  ----------  ----------  -----------
	//	*/64-bit        48         64MB        1           4M (32MB)
	//	windows/64-bit  48         4MB         64          1M (8MB)
	//	ios/arm64       40         4MB         1           256K (2MB)
	//	*/32-bit        32         4MB         1           1024 (4KB)
	//	*/mips(le)      31         4MB         1           512 (2KB)
	//	wasm            32         512KB       1           8192 (64KB)

	// heapArenaBytes is the size of a heap arena. The heap
	// consists of mappings of size heapArenaBytes, aligned to
	// heapArenaBytes. The initial heap mapping is one arena.
	//
	// This is currently 64MB on 64-bit non-Windows, 4MB on
	// 32-bit and on Windows, and 512KB on Wasm. We use smaller
	// arenas on Windows because all committed memory is charged
	// to the process, even if it's not touched. Hence, for
	// processes with small heaps, the mapped arena space needs
	// to be commensurate. This is particularly important with
	// the race detector, since it significantly amplifies the
	// cost of committed memory. We use smaller arenas on Wasm
	// because some Wasm programs have very small heaps, and
	// everything in the Wasm linear memory is charged.
	heapArenaBytes = 1 << logHeapArenaBytes

	heapArenaWords = heapArenaBytes / goarch.PtrSize

	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
	// prefer using heapArenaBytes where possible (we need the
	// constant to compute some other constants).
	logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (9+10)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64

	// heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
	heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)

	pagesPerArena = heapArenaBytes / pageSize

	// arenaL1Bits is the number of bits of the arena number
	// covered by the first level arena map.
	//
	// This number should be small, since the first level arena
	// map requires PtrSize*(1<<arenaL1Bits) of space in the
	// binary's BSS. It can be zero, in which case the first level
	// index is effectively unused. There is a performance benefit
	// to this, since the generated code can be more efficient,
	// but it comes at the cost of having a large L2 mapping.
	//
	// We use the L1 map on 64-bit Windows because the arena size
	// is small, but the address space is still 48 bits, and
	// there's a high cost to having a large L2.
	arenaL1Bits = 6 * (_64bit * goos.IsWindows)

	// arenaL2Bits is the number of bits of the arena number
	// covered by the second level arena index.
	//
	// The size of each arena map allocation is proportional to
	// 1<<arenaL2Bits, so it's important that this not be too
	// large. 48 bits leads to 32MB arena index allocations, which
	// is about the practical threshold.
	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
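
	// As an illustrative check (not from the original comment), these constants
	// satisfy heapAddrBits = logHeapArenaBytes + arenaL1Bits + arenaL2Bits:
	// on linux/amd64 that is 48 = 26 + 0 + 22 (a single 4M-entry L2 map), and
	// on windows/amd64 it is 48 = 22 + 6 + 20 (64 L1 entries of 1M-entry L2 maps),
	// matching the table above.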

	// arenaL1Shift is the number of bits to shift an arena frame
	// number by to compute an index into the first level arena map.
	arenaL1Shift = arenaL2Bits

	// arenaBits is the total bits in a combined arena map index.
	// This is split between the index into the L1 arena map and
	// the L2 arena map.
	arenaBits = arenaL1Bits + arenaL2Bits

	// arenaBaseOffset is the pointer value that corresponds to
	// index 0 in the heap arena map.
	//
	// On amd64, the address space is 48 bits, sign extended to 64
	// bits. This offset lets us handle "negative" addresses (or
	// high addresses if viewed as unsigned).
	//
	// On aix/ppc64, this offset allows keeping heapAddrBits at 48.
	// Otherwise, it would have to be 60 in order to handle mmap addresses
	// (in range 0x0a00000000000000 - 0x0afffffffffffff), but in that
	// case the memory reserved in (s *pageAlloc).init for chunks
	// would cause significant slowdowns.
	//
	// On other platforms, the user address space is contiguous
	// and starts at 0, so no offset is necessary.
	arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
	// A typed version of this constant that will make it into DWARF (for viewcore).
	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096

	// minHeapForMetadataHugePages sets a threshold on when certain kinds of
	// heap metadata, currently the arenas map L2 entries and page alloc bitmap
	// mappings, are allowed to be backed by huge pages. If the heap goal ever
	// exceeds this threshold, then huge pages are enabled.
	//
	// These numbers are chosen with the assumption that huge pages are on the
	// order of a few MiB in size.
	//
	// The kinds of metadata this applies to have very low overhead compared
	// to the address space used, but their constant overhead for small heaps
	// would be very high if they were backed by huge pages (e.g. a few MiB makes
	// a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB
	// heap). The benefit of huge pages is also not worth it for small heaps,
	// because only a very, very small part of the metadata is used for small heaps.
	//
	// N.B. If the heap goal exceeds the threshold and then shrinks to a very small
	// size again, huge pages will still be enabled for this mapping. The reason is
	// that there's no point in disabling them unless we're also returning the
	// physical memory for these metadata mappings back to the OS. That would be
	// quite complex to do in general, as the heap is likely fragmented after a
	// reduction in heap size.
	minHeapForMetadataHugePages = 1 << 30

	// randomizeHeapBase indicates if the heap base address should be randomized.
	// See comment in mallocinit for how the randomization is performed.
	randomizeHeapBase = goexperiment.RandomizedHeapBase64 && goarch.PtrSize == 8 && !isSbrkPlatform && !raceenabled && !msanenabled && !asanenabled

	// randHeapBasePrefixMask is used to extract the top byte of the randomized
	// heap base address.
	randHeapBasePrefixMask = ^uintptr(0xff << (heapAddrBits - 8))
)

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// physHugePageSize is the size in bytes of the OS's default physical huge
// page size whose allocation is opaque to the application. It is assumed
// and verified to be a power of two.
//
// If set, this must be set by the OS init code (typically in osinit) before
// mallocinit. However, setting it at all is optional, and leaving the default
// value is always safe (though potentially less efficient).
//
// Since physHugePageSize is always assumed to be a power of two,
// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
// The purpose of physHugePageShift is to avoid doing divisions in
// performance critical functions.
var (
	physHugePageSize  uintptr
	physHugePageShift uint
)

var (
	// heapRandSeed is a random value that is populated in mallocinit if
	// randomizeHeapBase is set. It is used in mallocinit, and mheap.grow, to
	// randomize the base heap address.
	heapRandSeed              uintptr
	heapRandSeedBitsRemaining int
)

func nextHeapRandBits(bits int) uintptr {
	if bits > heapRandSeedBitsRemaining {
		throw("not enough heapRandSeed bits remaining")
	}
	r := heapRandSeed >> (64 - bits)
	heapRandSeed <<= bits
	heapRandSeedBitsRemaining -= bits
	return r
}

func mallocinit() {
	if gc.SizeClassToSize[tinySizeClass] != maxTinySize {
		throw("bad TinySizeClass")
	}

	if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 {
		// heapBits expects modular arithmetic on bitmap
		// addresses to work.
		throw("heapArenaBitmapWords not a power of 2")
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize > maxPhysPageSize {
		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}
	if physHugePageSize&(physHugePageSize-1) != 0 {
		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
		throw("bad system huge page size")
	}
	if physHugePageSize > maxPhysHugePageSize {
		// physHugePageSize is greater than the maximum supported huge page size.
		// Don't throw here, like in the other cases, since a system configured
		// in this way isn't wrong; we just don't have the code to support it.
		// Instead, silently set the huge page size to zero.
		physHugePageSize = 0
	}
	if physHugePageSize != 0 {
		// Since physHugePageSize is a power of 2, it suffices to increase
		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
		for 1<<physHugePageShift != physHugePageSize {
			physHugePageShift++
		}
	}
	if pagesPerArena%pagesPerSpanRoot != 0 {
		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
		throw("bad pagesPerSpanRoot")
	}
	if pagesPerArena%pagesPerReclaimerChunk != 0 {
		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
		throw("bad pagesPerReclaimerChunk")
	}
	// Check that the minimum size (exclusive) for a malloc header is also
	// a size class boundary. This is important for making sure checks align
	// across different parts of the runtime.
	//
	// While we're here, also check to make sure all these size classes'
	// span sizes are one page. Some code relies on this.
	minSizeForMallocHeaderIsSizeClass := false
	sizeClassesUpToMinSizeForMallocHeaderAreOnePage := true
	for i := 0; i < len(gc.SizeClassToSize); i++ {
		if gc.SizeClassToNPages[i] > 1 {
			sizeClassesUpToMinSizeForMallocHeaderAreOnePage = false
		}
		if gc.MinSizeForMallocHeader == uintptr(gc.SizeClassToSize[i]) {
			minSizeForMallocHeaderIsSizeClass = true
			break
		}
	}
	if !minSizeForMallocHeaderIsSizeClass {
		throw("min size of malloc header is not a size class boundary")
	}
	if !sizeClassesUpToMinSizeForMallocHeaderAreOnePage {
		throw("expected all size classes up to min size for malloc header to fit in one-page spans")
	}
	// Check that the pointer bitmap for all small sizes without a malloc header
	// fits in a word.
	if gc.MinSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
		throw("max pointer/scan bitmap size for headerless objects is too large")
	}

	if minTagBits > tagBits {
		throw("tagBits too small")
	}

	// Initialize the heap.
	mheap_.init()
	mcache0 = allocmcache()
	lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
	lockInit(&profInsertLock, lockRankProfInsert)
	lockInit(&profBlockLock, lockRankProfBlock)
	lockInit(&profMemActiveLock, lockRankProfMemActive)
	for i := range profMemFutureLock {
		lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
	}
	lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)

	// Create initial arena growth hints.
	if isSbrkPlatform {
		// Don't generate hints on sbrk platforms. We can
		// only grow the break sequentially.
	} else if goarch.PtrSize == 8 {
		// On a 64-bit machine, we pick the following hints
		// because:
		//
		// 1. Starting from the middle of the address space
		// makes it easier to grow out a contiguous range
		// without running into some other mapping.
		//
		// 2. This makes Go heap addresses more easily
		// recognizable when debugging.
		//
		// 3. Stack scanning in gccgo is still conservative,
		// so it's important that addresses be distinguishable
		// from other data.
		//
		// Starting at 0x00c0 means that the valid memory addresses
		// will begin 0x00c0, 0x00c1, ...
		// In little-endian, that's c0 00, c1 00, ... None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices reduce the odds of a conservative garbage collector
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On ios/arm64, the address space is even smaller.
		//
		// On AIX, mmaps start at 0x0A00000000000000 for 64-bit
		// processes.
		//
		// Space mapped for user arenas comes immediately after the range
		// originally reserved for the regular heap when race mode is not
		// enabled because user arena chunks can never be used for regular heap
		// allocations and we want to avoid fragmenting the address space.
		//
		// In race mode we have no choice but to just use the same hints because
		// the race detector requires that the heap be mapped contiguously.
		//
		// If randomizeHeapBase is set, we attempt to randomize the base address
		// as much as possible. We do this by generating a random uint64 via
		// bootstrapRand and using its bits to randomize portions of the base
		// address as follows:
		//   * We first generate a random heapArenaBytes-aligned address that we use for
		//     generating the hints.
		//   * On the first call to mheap.grow, we then generate a random PallocChunkBytes-
		//     aligned offset into the mmap'd heap region, which we use as the base for
		//     the heap region.
		//   * We then select a page offset in that PallocChunkBytes region to start the
		//     heap at, and mark all the pages up to that offset as allocated.
		//
		// Our final randomized "heap base address" becomes the first byte of
		// the first available page returned by the page allocator. This results
		// in an address with at least heapAddrBits-gc.PageShift-2-(1*goarch.IsAmd64)
		// bits of entropy.

		var randHeapBase uintptr
		var randHeapBasePrefix byte
		// heapAddrBits is 48 on most platforms, but we only use 47 of those
		// bits in order to provide a good amount of room for the heap to grow
		// contiguously. On amd64, there are 48 bits, but the top bit is sign
		// extended, so we throw away another bit, just to be safe.
		randHeapAddrBits := heapAddrBits - 1 - (goarch.IsAmd64 * 1)
		if randomizeHeapBase {
			// Generate a random value, and take the bottom heapAddrBits-logHeapArenaBytes
			// bits, using them as the top bits for randHeapBase.
			heapRandSeed, heapRandSeedBitsRemaining = uintptr(bootstrapRand()), 64

			topBits := (randHeapAddrBits - logHeapArenaBytes)
			randHeapBase = nextHeapRandBits(topBits) << (randHeapAddrBits - topBits)
			randHeapBase = alignUp(randHeapBase, heapArenaBytes)
			randHeapBasePrefix = byte(randHeapBase >> (randHeapAddrBits - 8))
		}

		var vmaSize int
		if GOARCH == "riscv64" {
			// Identify which memory layout is in use based on the system
			// stack address, knowing that the bottom half of virtual memory
			// is user space. This should result in 39, 48 or 57. It may be
			// possible to use RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS at some
			// point in the future - for now use the system stack address.
			vmaSize = sys.Len64(uint64(getg().m.g0.stack.hi)) + 1
			if raceenabled && vmaSize != 39 && vmaSize != 48 {
				println("vma size = ", vmaSize)
				throw("riscv64 vma size is unknown and race mode is enabled")
			}
		}

		for i := 0x7f; i >= 0; i-- {
			var p uintptr
			switch {
			case raceenabled && GOARCH == "riscv64" && vmaSize == 39:
				p = uintptr(i)<<28 | uintptrMask&(0x0013<<28)
				if p >= uintptrMask&0x000f00000000 {
					continue
				}
			case raceenabled:
				// The TSAN runtime requires the heap
				// to be in the range [0x00c000000000,
				// 0x00e000000000).
				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
				if p >= uintptrMask&0x00e000000000 {
					continue
				}
			case randomizeHeapBase:
				prefix := uintptr(randHeapBasePrefix+byte(i)) << (randHeapAddrBits - 8)
				p = prefix | (randHeapBase & randHeapBasePrefixMask)
			case GOARCH == "arm64" && GOOS == "ios":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			case GOARCH == "riscv64" && vmaSize == 39:
				p = uintptr(i)<<32 | uintptrMask&(0x0013<<28)
			case GOOS == "aix":
				if i == 0 {
					// We don't use addresses directly after 0x0A00000000000000
					// to avoid collisions with other mmaps done by non-Go programs.
					continue
				}
				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			// Switch to generating hints for user arenas if we've gone
			// through about half the hints. In race mode, take only about
			// a quarter; we don't have very much space to work with.
			hintList := &mheap_.arenaHints
			if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) {
				hintList = &mheap_.userArena.arenaHints
			}
			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
			hint.addr = p
			hint.next, *hintList = *hintList, hint
		}
	} else {
		// On a 32-bit machine, we're much more concerned
		// about keeping the usable heap contiguous.
		// Hence:
		//
		// 1. We reserve space for all heapArenas up front so
		// they don't get interleaved with the heap. They're
		// ~258MB, so this isn't too bad. (We could reserve a
		// smaller amount of space up front if this is a
		// problem.)
		//
		// 2. We hint the heap to start right above the end of
		// the binary so we have the best chance of keeping it
		// contiguous.
		//
		// 3. We try to stake out a reasonably large initial
		// heap reservation.

		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
		meta := uintptr(sysReserve(nil, arenaMetaSize, "heap reservation"))
		if meta != 0 {
			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
		}

		// We want to start the arena low, but if we're linked
		// against C code, it's possible global constructors
		// have called malloc and adjusted the process' brk.
		// Query the brk so we can avoid trying to map the
		// region over it (which will cause the kernel to put
		// the region somewhere else, likely at a high
		// address).
		procBrk := sbrk0()

		// If we ask for the end of the data segment but the
		// operating system requires a little more space
		// before we can start allocating, it will give out a
		// slightly higher pointer. Except QEMU, which is
		// buggy, as usual: it won't adjust the pointer
		// upward. So adjust it upward a little bit ourselves:
		// 1/4 MB to get away from the running binary image.
		p := firstmoduledata.end
		if p < procBrk {
			p = procBrk
		}
		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
			p = mheap_.heapArenaAlloc.end
		}
		p = alignUp(p+(256<<10), heapArenaBytes)
		// Because we're worried about fragmentation on
		// 32-bit, we try to make a large initial reservation.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}
		for _, arenaSize := range arenaSizes {
			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes, "heap reservation")
			if a != nil {
				mheap_.arena.init(uintptr(a), size, false)
				p = mheap_.arena.end // For hint below
				break
			}
		}
		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		hint.addr = p
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint

		// Place the hint for user arenas just after the large reservation.
		//
		// While this potentially competes with the hint above, in practice we probably
		// aren't going to be getting this far anyway on 32-bit platforms.
		userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		userArenaHint.addr = p
		userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint
	}
	// Initialize the memory limit here because the allocator is going to look at it
	// but we haven't called gcinit yet and we're definitely going to allocate memory before then.
	gcController.memoryLimit.Store(math.MaxInt64)
}

// sysAlloc allocates heap arena space for at least n bytes. The
// returned pointer is always heapArenaBytes-aligned and backed by
// h.arenas metadata. The returned size is always a multiple of
// heapArenaBytes. sysAlloc returns nil on failure.
// There is no corresponding free function.
//
// hintList is a list of hint addresses for where to allocate new
// heap arenas. It must be non-nil.
//
// sysAlloc returns a memory region in the Reserved state. This region must
// be transitioned to Prepared and then Ready before use.
//
// arenaList is the list the arena should be added to.
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx) (v unsafe.Pointer, size uintptr) {
	assertLockHeld(&h.lock)

	n = alignUp(n, heapArenaBytes)

	if hintList == &h.arenaHints {
		// First, try the arena pre-reservation.
		// Newly-used mappings are considered released.
		//
		// Only do this if we're using the regular heap arena hints.
		// This behavior is only for the heap.
		v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased, "heap")
		if v != nil {
			size = n
			goto mapped
		}
	}

	// Try to grow the heap at a hint address.
	for *hintList != nil {
		hint := *hintList
		p := hint.addr
		if hint.down {
			p -= n
		}
		if p+n < p {
			// We can't use this, so don't ask.
			v = nil
		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
			// Outside addressable heap. Can't use.
			v = nil
		} else {
			v = sysReserve(unsafe.Pointer(p), n, "heap reservation")
		}
		if p == uintptr(v) {
			// Success. Update the hint.
			if !hint.down {
				p += n
			}
			hint.addr = p
			size = n
			break
		}
		// Failed. Discard this hint and try the next.
		//
		// TODO: This would be cleaner if sysReserve could be
		// told to only return the requested address. In
		// particular, this is already how Windows behaves, so
		// it would simplify things there.
		if v != nil {
			sysFreeOS(v, n)
		}
		*hintList = hint.next
		h.arenaHintAlloc.free(unsafe.Pointer(hint))
	}

	if size == 0 {
		if raceenabled {
			// The race detector assumes the heap lives in
			// [0x00c000000000, 0x00e000000000), but we
			// just ran out of hints in this region. Give
			// a nice failure.
			throw("too many address space collisions for -race mode")
		}

		// All of the hints failed, so we'll take any
		// (sufficiently aligned) address the kernel will give
		// us.
		v, size = sysReserveAligned(nil, n, heapArenaBytes, "heap")
		if v == nil {
			return nil, 0
		}

		// Create new hints for extending this region.
		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr, hint.down = uintptr(v), true
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr = uintptr(v) + size
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}

	// Check for bad pointers or pointers we can't use.
	{
		var bad string
		p := uintptr(v)
		if p+size < p {
			bad = "region exceeds uintptr range"
		} else if arenaIndex(p) >= 1<<arenaBits {
			bad = "base outside usable address space"
		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
			bad = "end outside usable address space"
		}
		if bad != "" {
			// This should be impossible on most architectures,
			// but it would be really confusing to debug.
			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
			throw("memory reservation exceeds address space limit")
		}
	}

	if uintptr(v)&(heapArenaBytes-1) != 0 {
		throw("misrounded allocation in sysAlloc")
	}

mapped:
	if valgrindenabled {
		valgrindCreateMempool(v)
		valgrindMakeMemNoAccess(v, size)
	}

	// Create arena metadata.
	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
		l2 := h.arenas[ri.l1()]
		if l2 == nil {
			// Allocate an L2 arena map.
			//
			// Use sysAllocOS instead of sysAlloc or persistentalloc because there's no
			// statistic we can comfortably account for this space in. With this structure,
			// we rely on demand paging to avoid large overheads, but tracking which memory
			// is paged in is too expensive. Trying to account for the whole region means
			// that it will appear like an enormous memory overhead in statistics, even though
			// it is not.
			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2), "heap index"))
			if l2 == nil {
				throw("out of memory allocating heap arena map")
			}
			if h.arenasHugePages {
				sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
			} else {
				sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
			}
			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
		}

		if l2[ri.l2()] != nil {
			throw("arena already initialized")
		}
		var r *heapArena
		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys, "heap metadata"))
		if r == nil {
			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
			if r == nil {
				throw("out of memory allocating heap arena metadata")
			}
		}

		// Register the arena in allArenas if requested.
		if len((*arenaList)) == cap((*arenaList)) {
			size := 2 * uintptr(cap((*arenaList))) * goarch.PtrSize
			if size == 0 {
				size = physPageSize
			}
			newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
			if newArray == nil {
				throw("out of memory allocating allArenas")
			}
			oldSlice := (*arenaList)
			*(*notInHeapSlice)(unsafe.Pointer(&(*arenaList))) = notInHeapSlice{newArray, len((*arenaList)), int(size / goarch.PtrSize)}
			copy((*arenaList), oldSlice)
			// Do not free the old backing array because
			// there may be concurrent readers. Since we
			// double the array each time, this can lead
			// to at most 2x waste.
		}
		(*arenaList) = (*arenaList)[:len((*arenaList))+1]
		(*arenaList)[len((*arenaList))-1] = ri

		// Store atomically just in case an object from the
		// new heap arena becomes visible before the heap lock
		// is released (which shouldn't happen, but there's
		// little downside to this).
		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
	}

	// Tell the race detector about the new heap memory.
	if raceenabled {
		racemapshadow(v, size)
	}

	return
}

// sysReserveAligned is like sysReserve, but the returned pointer is
// aligned to align bytes. It may reserve either n or n+align bytes,
// so it returns the size that was reserved.
func sysReserveAligned(v unsafe.Pointer, size, align uintptr, vmaName string) (unsafe.Pointer, uintptr) {
	if isSbrkPlatform {
		if v != nil {
			throw("unexpected heap arena hint on sbrk platform")
		}
		return sysReserveAlignedSbrk(size, align)
	}
	// Since the alignment is rather large in uses of this
	// function, we're not likely to get it by chance, so we ask
	// for a larger region and remove the parts we don't need.
	retries := 0
retry:
	p := uintptr(sysReserve(v, size+align, vmaName))
	switch {
	case p == 0:
		return nil, 0
	case p&(align-1) == 0:
		return unsafe.Pointer(p), size + align
	case GOOS == "windows":
		// On Windows we can't release pieces of a
		// reservation, so we release the whole thing and
		// re-reserve the aligned sub-region. This may race,
		// so we may have to try again.
		sysFreeOS(unsafe.Pointer(p), size+align)
		p = alignUp(p, align)
		p2 := sysReserve(unsafe.Pointer(p), size, vmaName)
		if p != uintptr(p2) {
			// Must have raced. Try again.
			sysFreeOS(p2, size)
			if retries++; retries == 100 {
				throw("failed to allocate aligned heap memory; too many retries")
			}
			goto retry
		}
		// Success.
		return p2, size
	default:
		// Trim off the unaligned parts.
		pAligned := alignUp(p, align)
		sysFreeOS(unsafe.Pointer(p), pAligned-p)
		end := pAligned + size
		endLen := (p + size + align) - end
		if endLen > 0 {
			sysFreeOS(unsafe.Pointer(end), endLen)
		}
		return unsafe.Pointer(pAligned), size
	}
}

// enableMetadataHugePages enables huge pages for various sources of heap metadata.
//
// A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
// time, but may take time proportional to the size of the mapped heap beyond that.
//
// This function is idempotent.
//
// The heap lock must not be held over this operation, since it will briefly acquire
// the heap lock.
//
// Must be called on the system stack because it acquires the heap lock.
//
//go:systemstack
func (h *mheap) enableMetadataHugePages() {
	// Enable huge pages for page structure.
	h.pages.enableChunkHugePages()

	// Grab the lock and set arenasHugePages if it's not already set.
	//
	// Once arenasHugePages is set, all new L2 entries will be eligible for
	// huge pages. We'll set all the old entries after we release the lock.
	lock(&h.lock)
	if h.arenasHugePages {
		unlock(&h.lock)
		return
	}
	h.arenasHugePages = true
	unlock(&h.lock)

	// N.B. The arenas L1 map is quite small on all platforms, so it's fine to
	// just iterate over the whole thing.
	for i := range h.arenas {
		l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i])))
		if l2 == nil {
			continue
		}
		sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
	}
}

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uint16(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= uint(theBit + 1)
			s.freeindex = freeidx
			s.allocCount++
			return gclinkptr(uintptr(result)*s.elemsize + s.base())
		}
	}
	return 0
}

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavy
// weight allocation. If it is a heavy weight allocation the caller must
// determine whether a new GC cycle needs to be started or if the GC is active
// whether this goroutine needs to assist the GC.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger bool) {
	s = c.alloc[spc]
	checkGCTrigger = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if s.allocCount != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		c.refill(spc)
		checkGCTrigger = true
		s = c.alloc[spc]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(uintptr(freeIndex)*s.elemsize + s.base())
	s.allocCount++
	if s.allocCount > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}

// doubleCheckMalloc enables a bunch of extra checks to malloc to double-check
// that various invariants are upheld.
//
// We might consider turning these on by default; many of them previously were.
// They account for a few % of mallocgc's cost though, which does matter somewhat
// at scale. (When testing changes to malloc, consider enabling this, and also
// some function-local 'doubleCheck' consts such as in mbitmap.go currently.)
const doubleCheckMalloc = false

// sizeSpecializedMallocEnabled is the set of conditions where we enable the size-specialized
// mallocgc implementation: the experiment must be enabled, and none of the sanitizers should
// be enabled. The tables used to select the size-specialized malloc function do not compile
// properly on plan9, so size-specialized malloc is also disabled on plan9.
const sizeSpecializedMallocEnabled = goexperiment.SizeSpecializedMalloc && GOOS != "plan9" && !asanenabled && !raceenabled && !msanenabled && !valgrindenabled

// runtimeFreegcEnabled is the set of conditions where we enable the runtime.freegc
// implementation and the corresponding allocation-related changes: the experiment must be
// enabled, and none of the memory sanitizers should be enabled. We allow the race detector,
// in contrast to sizeSpecializedMallocEnabled.
// TODO(thepudds): it would be nice to check Valgrind integration, though there are some hints
// there might not be any canned tests in tree for Go's integration with Valgrind.
const runtimeFreegcEnabled = goexperiment.RuntimeFreegc && !asanenabled && !msanenabled && !valgrindenabled

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
//
// mallocgc should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/gopkg
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/frugal
//   - github.com/cockroachdb/cockroach
//   - github.com/cockroachdb/pebble
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mallocgc
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Short-circuit zero-sized allocation requests.
	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if sizeSpecializedMallocEnabled && heapBitsInSpan(size) {
		if typ == nil || !typ.Pointers() {
			return mallocNoScanTable[size](size, typ, needzero)
		} else {
			if !needzero {
				throw("objects with pointers must be zeroed")
			}
			return mallocScanTable[size](size, typ, needzero)
		}
	}

	// It's possible for any malloc to trigger sweeping, which may in
	// turn queue finalizers. Record this dynamic lock edge.
	// N.B. Compiled away if lockrank experiment is not enabled.
	lockRankMayQueueFinalizer()

	// Pre-malloc debug hooks.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// For ASAN, we allocate extra memory around each allocation called the "redzone."
	// These "redzones" are marked as unaddressable.
	var asanRZ uintptr
	if asanenabled {
		asanRZ = redZoneSize(size)
		size += asanRZ
	}

	// Assist the GC if needed. (On the reuse path, we currently compensate for this;
	// changes here might require changes there.)
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Actually do the allocation.
	var x unsafe.Pointer
	var elemsize uintptr
	if sizeSpecializedMallocEnabled {
		// heapBitsInSpan(size) is false here; that case was handled above.
		if size <= maxSmallSize-gc.MallocHeaderSize {
			if typ == nil || !typ.Pointers() {
				x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
			} else {
				if !needzero {
					throw("objects with pointers must be zeroed")
				}
				x, elemsize = mallocgcSmallScanHeader(size, typ)
			}
		} else {
			x, elemsize = mallocgcLarge(size, typ, needzero)
		}
	} else {
		if size <= maxSmallSize-gc.MallocHeaderSize {
			if typ == nil || !typ.Pointers() {
				if size < maxTinySize {
					x, elemsize = mallocgcTiny(size, typ)
				} else {
					x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
				}
			} else {
				if !needzero {
					throw("objects with pointers must be zeroed")
				}
				if heapBitsInSpan(size) {
					x, elemsize = mallocgcSmallScanNoHeader(size, typ)
				} else {
					x, elemsize = mallocgcSmallScanHeader(size, typ)
				}
			}
		} else {
			x, elemsize = mallocgcLarge(size, typ, needzero)
		}
	}

	// Notify sanitizers, if enabled.
	if raceenabled {
		racemalloc(x, size-asanRZ)
	}
	if msanenabled {
		msanmalloc(x, size-asanRZ)
	}
	if asanenabled {
		// Poison the space between the end of the requested size of x
		// and the end of the slot. Unpoison the requested allocation.
		frag := elemsize - size
		if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) && size <= maxSmallSize-gc.MallocHeaderSize {
			frag -= gc.MallocHeaderSize
		}
		asanpoison(unsafe.Add(x, size-asanRZ), asanRZ)
		asanunpoison(x, size-asanRZ)
	}
	if valgrindenabled {
		valgrindMalloc(x, size-asanRZ)
	}

	// Adjust our GC assist debt to account for internal fragmentation.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	// Post-malloc debug hooks.
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

func mallocgcTiny(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
		if typ != nil && typ.Pointers() {
			throw("expected noscan for tiny alloc")
		}
	}
	mp.mallocing = 1

	// Tiny allocator.
	//
	// Tiny allocator combines several tiny allocation requests
	// into a single memory block. The resulting memory block
	// is freed when all subobjects are unreachable. The subobjects
	// must be noscan (don't have pointers); this ensures that
	// the amount of potentially wasted memory is bounded.
	//
	// Size of the memory block used for combining (maxTinySize) is tunable.
	// Current setting is 16 bytes, which relates to 2x worst case memory
	// wastage (when all but one subobjects are unreachable).
	// 8 bytes would result in no wastage at all, but provides fewer
	// opportunities for combining.
	// 32 bytes provides more opportunities for combining,
	// but can lead to 4x worst case wastage.
	// The best case saving is 8x regardless of block size.
	//
	// Objects obtained from the tiny allocator must not be freed explicitly.
	// So when an object will be freed explicitly, we ensure that
	// its size >= maxTinySize.
	//
	// SetFinalizer has a special case for objects potentially coming
	// from the tiny allocator; in such a case it allows setting finalizers
	// for an inner byte of a memory block.
	//
	// The main targets of the tiny allocator are small strings and
	// standalone escaping variables. On a json benchmark
	// the allocator reduces the number of allocations by ~12% and
	// reduces heap size by ~20%.
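	//
	// As an illustrative example (not from the original comment): three
	// consecutive noscan allocations of 5, 4, and 4 bytes can share a single
	// 16-byte tiny block, given the alignment rules below: the first lands at
	// offset 0, the second is aligned up to offset 8, and the third goes at
	// offset 12, exactly filling the block.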
	c := getMCache(mp)
	off := c.tinyoffset
	// Align tiny pointer for required (conservative) alignment.
	if size&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && size == 12 {
		// Conservatively align 12-byte objects to 8 bytes on 32-bit
		// systems so that objects whose first field is a 64-bit
		// value are aligned to 8 bytes and do not cause a fault on
		// atomic access. See issue 37262.
		// TODO(mknyszek): Remove this workaround if/when issue 36606
		// is resolved.
		off = alignUp(off, 8)
	} else if size&3 == 0 {
		off = alignUp(off, 4)
	} else if size&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+size <= maxTinySize && c.tiny != 0 {
		// The object fits into existing tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + size
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		return x, 0
	}
	// Allocate a new maxTinySize block.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]
	v := nextFreeFast(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	(*[2]uint64)(x)[0] = 0 // Always zero
	(*[2]uint64)(x)[1] = 0
	// See if we need to replace the existing tiny block with the new one
	// based on amount of remaining free space.
	if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
		// Note: disabled when race detector is on, see comment near end of this function.
		c.tiny = uintptr(x)
		c.tinyoffset = size
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B.
	// Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(span.elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, span.elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Pad tinysize allocations so they are aligned with the end
		// of the tinyalloc region. This ensures that any arithmetic
		// that goes off the top end of the object will be detectable
		// by checkptr (issue 38872).
		// Note that we disable tinyalloc when raceenabled for this to work.
		// TODO: This padding is only performed when the race detector
		// is enabled. It would be nice to enable it if any package
		// was compiled with checkptr, but there's no easy way to
		// detect that (especially at compile time).
		// TODO: enable this padding for all allocations, not just
		// tinyalloc ones. It's tricky because of pointer maps.
		// Maybe just all noscan objects?
		x = add(x, span.elemsize-size)
	}
	return x, span.elemsize
}

func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
		if typ != nil && typ.Pointers() {
			throw("expected noscan type for noscan alloc")
		}
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	var sizeclass uint8
	if size <= gc.SmallSizeMax-8 {
		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
	} else {
		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
	}
	size = uintptr(gc.SizeClassToSize[sizeclass])
	spc := makeSpanClass(sizeclass, true)
	span := c.alloc[spc]

	// First, check for a reusable object.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
		// We have a reusable object, use it.
		x := mallocgcSmallNoscanReuse(c, span, spc, size, needzero)
		mp.mallocing = 0
		releasem(mp)
		return x, size
	}

	v := nextFreeFast(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, size)
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
1458 gcmarknewobject(span, uintptr(x)) 1459 } else { 1460 // Track the last free index before the mark phase. This field 1461 // is only used by the garbage collector. During the mark phase 1462 // this is used by the conservative scanner to filter out objects 1463 // that are both free and recently-allocated. It's safe to do that 1464 // because we allocate-black if the GC is enabled. The conservative 1465 // scanner produces pointers out of thin air, so without additional 1466 // synchronization it might otherwise observe a partially-initialized 1467 // object, which could crash the program. 1468 span.freeIndexForScan = span.freeindex 1469 } 1470 1471 // Note cache c only valid while m acquired; see #47302 1472 // 1473 // N.B. Use the full size because that matches how the GC 1474 // will update the mem profile on the "free" side. 1475 // 1476 // TODO(mknyszek): We should really count the header as part 1477 // of gc_sys or something. The code below just pretends it is 1478 // internal fragmentation and matches the GC's accounting by 1479 // using the whole allocation slot. 1480 c.nextSample -= int64(size) 1481 if c.nextSample < 0 || MemProfileRate != c.memProfRate { 1482 profilealloc(mp, x, size) 1483 } 1484 mp.mallocing = 0 1485 releasem(mp) 1486 1487 if checkGCTrigger { 1488 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1489 gcStart(t) 1490 } 1491 } 1492 return x, size 1493 } 1494 1495 // mallocgcSmallNoscanReuse returns a previously freed noscan object after preparing it for reuse. 1496 // It must only be called if hasReusableNoscan returned true. 1497 func mallocgcSmallNoscanReuse(c *mcache, span *mspan, spc spanClass, size uintptr, needzero bool) unsafe.Pointer { 1498 // TODO(thepudds): could nextFreeFast, nextFree and nextReusable return unsafe.Pointer? 1499 // Maybe doesn't matter. gclinkptr might be for historical reasons. 1500 v, span := c.nextReusableNoScan(span, spc) 1501 x := unsafe.Pointer(v) 1502 1503 // Compensate for the GC assist credit deducted in mallocgc (before calling us and 1504 // after we return) because this is not a newly allocated object. We use the full slot 1505 // size (elemsize) here because that's what mallocgc deducts overall. Note we only 1506 // adjust this when gcBlackenEnabled is true, which follows mallocgc behavior. 1507 // TODO(thepudds): a follow-up CL adds a more specific test of our assist credit 1508 // handling, including for validating internal fragmentation handling. 1509 if gcBlackenEnabled != 0 { 1510 addAssistCredit(size) 1511 } 1512 1513 // This is a previously used object, so only check needzero (and not span.needzero) 1514 // for clearing. 1515 if needzero { 1516 memclrNoHeapPointers(x, size) 1517 } 1518 1519 // See publicationBarrier comment in mallocgcSmallNoscan. 1520 publicationBarrier() 1521 1522 // Finish and return. Note that we do not update span.freeIndexForScan, profiling info, 1523 // nor do we check gcTrigger. 1524 // TODO(thepudds): the current approach is viable for a GOEXPERIMENT, but 1525 // means we do not profile reused heap objects. Ultimately, we will need a better 1526 // approach for profiling, or at least ensure we are not introducing bias in the 1527 // profiled allocations. 1528 // TODO(thepudds): related, we probably want to adjust how allocs and frees are counted 1529 // in the existing stats. Currently, reused objects are not counted as allocs nor 1530 // frees, but instead roughly appear as if the original heap object lived on. 
We 1531 // probably will also want some additional runtime/metrics, and generally think about 1532 // user-facing observability & diagnostics, though all this likely can wait for an 1533 // official proposal. 1534 if writeBarrier.enabled { 1535 // Allocate black during GC. 1536 // All slots hold nil so no scanning is needed. 1537 // This may be racing with GC so do it atomically if there can be 1538 // a race marking the bit. 1539 gcmarknewobject(span, uintptr(x)) 1540 } 1541 return x 1542 } 1543 1544 func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) { 1545 // Set mp.mallocing to keep from being preempted by GC. 1546 mp := acquirem() 1547 if doubleCheckMalloc { 1548 if mp.mallocing != 0 { 1549 throw("malloc deadlock") 1550 } 1551 if mp.gsignal == getg() { 1552 throw("malloc during signal") 1553 } 1554 if typ == nil || !typ.Pointers() { 1555 throw("noscan allocated in scan-only path") 1556 } 1557 if !heapBitsInSpan(size) { 1558 throw("heap bits in not in span for non-header-only path") 1559 } 1560 } 1561 mp.mallocing = 1 1562 1563 checkGCTrigger := false 1564 c := getMCache(mp) 1565 sizeclass := gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 1566 spc := makeSpanClass(sizeclass, false) 1567 span := c.alloc[spc] 1568 v := nextFreeFast(span) 1569 if v == 0 { 1570 v, span, checkGCTrigger = c.nextFree(spc) 1571 } 1572 x := unsafe.Pointer(v) 1573 if span.needzero != 0 { 1574 memclrNoHeapPointers(x, size) 1575 } 1576 if goarch.PtrSize == 8 && sizeclass == 1 { 1577 // initHeapBits already set the pointer bits for the 8-byte sizeclass 1578 // on 64-bit platforms. 1579 c.scanAlloc += 8 1580 } else { 1581 c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span) 1582 } 1583 size = uintptr(gc.SizeClassToSize[sizeclass]) 1584 1585 // Ensure that the stores above that initialize x to 1586 // type-safe memory and set the heap bits occur before 1587 // the caller can make x observable to the garbage 1588 // collector. Otherwise, on weakly ordered machines, 1589 // the garbage collector could follow a pointer to x, 1590 // but see uninitialized memory or stale heap bits. 1591 publicationBarrier() 1592 1593 if writeBarrier.enabled { 1594 // Allocate black during GC. 1595 // All slots hold nil so no scanning is needed. 1596 // This may be racing with GC so do it atomically if there can be 1597 // a race marking the bit. 1598 gcmarknewobject(span, uintptr(x)) 1599 } else { 1600 // Track the last free index before the mark phase. This field 1601 // is only used by the garbage collector. During the mark phase 1602 // this is used by the conservative scanner to filter out objects 1603 // that are both free and recently-allocated. It's safe to do that 1604 // because we allocate-black if the GC is enabled. The conservative 1605 // scanner produces pointers out of thin air, so without additional 1606 // synchronization it might otherwise observe a partially-initialized 1607 // object, which could crash the program. 1608 span.freeIndexForScan = span.freeindex 1609 } 1610 1611 // Note cache c only valid while m acquired; see #47302 1612 // 1613 // N.B. Use the full size because that matches how the GC 1614 // will update the mem profile on the "free" side. 1615 // 1616 // TODO(mknyszek): We should really count the header as part 1617 // of gc_sys or something. The code below just pretends it is 1618 // internal fragmentation and matches the GC's accounting by 1619 // using the whole allocation slot. 
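// How the sampling below works: nextSample is a byte countdown drawn from an
// exponential distribution by nextSample (defined later in this file), with
// mean MemProfileRate (512 KiB by default). Each allocation subtracts its full
// slot size, and once the countdown goes negative, profilealloc records a
// sample and resets the countdown, so on average one allocation is sampled per
// MemProfileRate bytes allocated.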
1620 c.nextSample -= int64(size) 1621 if c.nextSample < 0 || MemProfileRate != c.memProfRate { 1622 profilealloc(mp, x, size) 1623 } 1624 mp.mallocing = 0 1625 releasem(mp) 1626 1627 if checkGCTrigger { 1628 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 1629 gcStart(t) 1630 } 1631 } 1632 return x, size 1633 } 1634 1635 func mallocgcSmallScanHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) { 1636 // Set mp.mallocing to keep from being preempted by GC. 1637 mp := acquirem() 1638 if doubleCheckMalloc { 1639 if mp.mallocing != 0 { 1640 throw("malloc deadlock") 1641 } 1642 if mp.gsignal == getg() { 1643 throw("malloc during signal") 1644 } 1645 if typ == nil || !typ.Pointers() { 1646 throw("noscan allocated in scan-only path") 1647 } 1648 if heapBitsInSpan(size) { 1649 throw("heap bits in span for header-only path") 1650 } 1651 } 1652 mp.mallocing = 1 1653 1654 checkGCTrigger := false 1655 c := getMCache(mp) 1656 size += gc.MallocHeaderSize 1657 var sizeclass uint8 1658 if size <= gc.SmallSizeMax-8 { 1659 sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 1660 } else { 1661 sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)] 1662 } 1663 size = uintptr(gc.SizeClassToSize[sizeclass]) 1664 spc := makeSpanClass(sizeclass, false) 1665 span := c.alloc[spc] 1666 v := nextFreeFast(span) 1667 if v == 0 { 1668 v, span, checkGCTrigger = c.nextFree(spc) 1669 } 1670 x := unsafe.Pointer(v) 1671 if span.needzero != 0 { 1672 memclrNoHeapPointers(x, size) 1673 } 1674 header := (**_type)(x) 1675 x = add(x, gc.MallocHeaderSize) 1676 c.scanAlloc += heapSetTypeSmallHeader(uintptr(x), size-gc.MallocHeaderSize, typ, header, span) 1677 1678 // Ensure that the stores above that initialize x to 1679 // type-safe memory and set the heap bits occur before 1680 // the caller can make x observable to the garbage 1681 // collector. Otherwise, on weakly ordered machines, 1682 // the garbage collector could follow a pointer to x, 1683 // but see uninitialized memory or stale heap bits. 1684 publicationBarrier() 1685 1686 if writeBarrier.enabled { 1687 // Allocate black during GC. 1688 // All slots hold nil so no scanning is needed. 1689 // This may be racing with GC so do it atomically if there can be 1690 // a race marking the bit. 1691 gcmarknewobject(span, uintptr(x)) 1692 } else { 1693 // Track the last free index before the mark phase. This field 1694 // is only used by the garbage collector. During the mark phase 1695 // this is used by the conservative scanner to filter out objects 1696 // that are both free and recently-allocated. It's safe to do that 1697 // because we allocate-black if the GC is enabled. The conservative 1698 // scanner produces pointers out of thin air, so without additional 1699 // synchronization it might otherwise observe a partially-initialized 1700 // object, which could crash the program. 1701 span.freeIndexForScan = span.freeindex 1702 } 1703 1704 // Note cache c only valid while m acquired; see #47302 1705 // 1706 // N.B. Use the full size because that matches how the GC 1707 // will update the mem profile on the "free" side. 1708 // 1709 // TODO(mknyszek): We should really count the header as part 1710 // of gc_sys or something. The code below just pretends it is 1711 // internal fragmentation and matches the GC's accounting by 1712 // using the whole allocation slot. 
1713 c.nextSample -= int64(size)
1714 if c.nextSample < 0 || MemProfileRate != c.memProfRate {
1715 profilealloc(mp, x, size)
1716 }
1717 mp.mallocing = 0
1718 releasem(mp)
1719
1720 if checkGCTrigger {
1721 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
1722 gcStart(t)
1723 }
1724 }
1725 return x, size
1726 }
1727
1728 func mallocgcLarge(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
1729 // Set mp.mallocing to keep from being preempted by GC.
1730 mp := acquirem()
1731 if doubleCheckMalloc {
1732 if mp.mallocing != 0 {
1733 throw("malloc deadlock")
1734 }
1735 if mp.gsignal == getg() {
1736 throw("malloc during signal")
1737 }
1738 }
1739 mp.mallocing = 1
1740
1741 c := getMCache(mp)
1742 // For large allocations, keep track of the zeroed state so that
1743 // bulk zeroing can happen later in a preemptible context.
1744 span := c.allocLarge(size, typ == nil || !typ.Pointers())
1745 span.freeindex = 1
1746 span.allocCount = 1
1747 span.largeType = nil // Tell the GC not to look at this yet.
1748 size = span.elemsize
1749 x := unsafe.Pointer(span.base())
1750
1751 // Ensure that the store above that sets largeType to
1752 // nil happens before the caller can make x observable
1753 // to the garbage collector.
1754 //
1755 // Otherwise, on weakly ordered machines, the garbage
1756 // collector could follow a pointer to x, but see a stale
1757 // largeType value.
1758 publicationBarrier()
1759
1760 if writeBarrier.enabled {
1761 // Allocate black during GC.
1762 // All slots hold nil so no scanning is needed.
1763 // This may be racing with GC so do it atomically if there can be
1764 // a race marking the bit.
1765 gcmarknewobject(span, uintptr(x))
1766 } else {
1767 // Track the last free index before the mark phase. This field
1768 // is only used by the garbage collector. During the mark phase
1769 // this is used by the conservative scanner to filter out objects
1770 // that are both free and recently-allocated. It's safe to do that
1771 // because we allocate-black if the GC is enabled. The conservative
1772 // scanner produces pointers out of thin air, so without additional
1773 // synchronization it might otherwise observe a partially-initialized
1774 // object, which could crash the program.
1775 span.freeIndexForScan = span.freeindex
1776 }
1777
1778 // Note cache c only valid while m acquired; see #47302
1779 //
1780 // N.B. Use the full size because that matches how the GC
1781 // will update the mem profile on the "free" side.
1782 //
1783 // TODO(mknyszek): We should really count the header as part
1784 // of gc_sys or something. The code below just pretends it is
1785 // internal fragmentation and matches the GC's accounting by
1786 // using the whole allocation slot.
1787 c.nextSample -= int64(size)
1788 if c.nextSample < 0 || MemProfileRate != c.memProfRate {
1789 profilealloc(mp, x, size)
1790 }
1791 mp.mallocing = 0
1792 releasem(mp)
1793
1794 // Check to see if we need to trigger the GC.
1795 if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
1796 gcStart(t)
1797 }
1798
1799 // Objects can be zeroed late in a context where preemption can occur.
1800 //
1801 // x will keep the memory alive.
1802 if needzero && span.needzero != 0 {
1803 // N.B. size == fullSize always in this case.
1804 memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
1805 }
1806
1807 // Set the type and run the publication barrier while non-preemptible.
We need to make 1808 // sure that between heapSetTypeLarge and publicationBarrier we cannot get preempted, 1809 // otherwise the GC could potentially observe non-zeroed memory but largeType set on weak 1810 // memory architectures. 1811 // 1812 // The GC can also potentially observe non-zeroed memory if conservative scanning spuriously 1813 // observes a partially-allocated object, see the freeIndexForScan update above. This case is 1814 // handled by synchronization inside heapSetTypeLarge. 1815 mp = acquirem() 1816 if typ != nil && typ.Pointers() { 1817 // Finish storing the type information, now that we're certain the memory is zeroed. 1818 getMCache(mp).scanAlloc += heapSetTypeLarge(uintptr(x), size, typ, span) 1819 } 1820 // Publish the object again, now with zeroed memory and initialized type information. 1821 // 1822 // Even if we didn't update any type information, this is necessary to ensure that, for example, 1823 // x written to a global without any synchronization still results in other goroutines observing 1824 // zeroed memory. 1825 publicationBarrier() 1826 releasem(mp) 1827 return x, size 1828 } 1829 1830 func preMallocgcDebug(size uintptr, typ *_type) unsafe.Pointer { 1831 if debug.sbrk != 0 { 1832 align := uintptr(16) 1833 if typ != nil { 1834 // TODO(austin): This should be just 1835 // align = uintptr(typ.align) 1836 // but that's only 4 on 32-bit platforms, 1837 // even if there's a uint64 field in typ (see #599). 1838 // This causes 64-bit atomic accesses to panic. 1839 // Hence, we use stricter alignment that matches 1840 // the normal allocator better. 1841 if size&7 == 0 { 1842 align = 8 1843 } else if size&3 == 0 { 1844 align = 4 1845 } else if size&1 == 0 { 1846 align = 2 1847 } else { 1848 align = 1 1849 } 1850 } 1851 return persistentalloc(size, align, &memstats.other_sys) 1852 } 1853 if inittrace.active && inittrace.id == getg().goid { 1854 // Init functions are executed sequentially in a single goroutine. 1855 inittrace.allocs += 1 1856 } 1857 return nil 1858 } 1859 1860 func postMallocgcDebug(x unsafe.Pointer, elemsize uintptr, typ *_type) { 1861 if inittrace.active && inittrace.id == getg().goid { 1862 // Init functions are executed sequentially in a single goroutine. 1863 inittrace.bytes += uint64(elemsize) 1864 } 1865 1866 if traceAllocFreeEnabled() { 1867 trace := traceAcquire() 1868 if trace.ok() { 1869 trace.HeapObjectAlloc(uintptr(x), typ) 1870 traceRelease(trace) 1871 } 1872 } 1873 1874 // N.B. elemsize == 0 indicates a tiny allocation, since no new slot was 1875 // allocated to fulfill this call to mallocgc. This means checkfinalizer 1876 // will only flag an error if there is actually any risk. If an allocation 1877 // has the tiny block to itself, it will not get flagged, because we won't 1878 // mark the block as a tiny block. 1879 if debug.checkfinalizers != 0 && elemsize == 0 { 1880 setTinyBlockContext(unsafe.Pointer(alignDown(uintptr(x), maxTinySize))) 1881 } 1882 } 1883 1884 // deductAssistCredit reduces the current G's assist credit 1885 // by size bytes, and assists the GC if necessary. 1886 // 1887 // Caller must be preemptible. 1888 func deductAssistCredit(size uintptr) { 1889 // Charge the current user G for this allocation. 1890 assistG := getg() 1891 if assistG.m.curg != nil { 1892 assistG = assistG.m.curg 1893 } 1894 // Charge the allocation against the G. We'll account 1895 // for internal fragmentation at the end of mallocgc. 
1896 assistG.gcAssistBytes -= int64(size) 1897 1898 if assistG.gcAssistBytes < 0 { 1899 // This G is in debt. Assist the GC to correct 1900 // this before allocating. This must happen 1901 // before disabling preemption. 1902 gcAssistAlloc(assistG) 1903 } 1904 } 1905 1906 // addAssistCredit is like deductAssistCredit, 1907 // but adds credit rather than removes, 1908 // and never calls gcAssistAlloc. 1909 func addAssistCredit(size uintptr) { 1910 // Credit the current user G. 1911 assistG := getg() 1912 if assistG.m.curg != nil { // TODO(thepudds): do we need to do this? 1913 assistG = assistG.m.curg 1914 } 1915 // Credit the size against the G. 1916 assistG.gcAssistBytes += int64(size) 1917 } 1918 1919 const ( 1920 // doubleCheckReusable enables some additional invariant checks for the 1921 // runtime.freegc and reusable objects. Note that some of these checks alter timing, 1922 // and it is good to test changes with and without this enabled. 1923 doubleCheckReusable = false 1924 1925 // debugReusableLog enables some printlns for runtime.freegc and reusable objects. 1926 debugReusableLog = false 1927 ) 1928 1929 // freegc records that a heap object is reusable and available for 1930 // immediate reuse in a subsequent mallocgc allocation, without 1931 // needing to wait for the GC cycle to progress. 1932 // 1933 // The information is recorded in a free list stored in the 1934 // current P's mcache. The caller must pass in the user size 1935 // and whether the object has pointers, which allows a faster free 1936 // operation. 1937 // 1938 // freegc must be called by the effective owner of ptr who knows 1939 // the pointer is logically dead, with no possible aliases that might 1940 // be used past that moment. In other words, ptr must be the 1941 // last and only pointer to its referent. 1942 // 1943 // The intended caller is the compiler. 1944 // 1945 // Note: please do not send changes that attempt to add freegc calls 1946 // to the standard library. 1947 // 1948 // ptr must point to a heap object or into the current g's stack, 1949 // in which case freegc is a no-op. In particular, ptr must not point 1950 // to memory in the data or bss sections, which is partially enforced. 1951 // For objects with a malloc header, ptr should point mallocHeaderSize bytes 1952 // past the base; otherwise, ptr should point to the base of the heap object. 1953 // In other words, ptr should be the same pointer that was returned by mallocgc. 1954 // 1955 // In addition, the caller must know that ptr's object has no specials, such 1956 // as might have been created by a call to SetFinalizer or AddCleanup. 1957 // (Internally, the runtime deals appropriately with internally-created 1958 // specials, such as specials for memory profiling). 1959 // 1960 // If the size of ptr's object is less than 16 bytes or greater than 1961 // 32KiB - gc.MallocHeaderSize bytes, freegc is currently a no-op. It must only 1962 // be called in alloc-safe places. It currently throws if noscan is false 1963 // (support for which is implemented in a later CL in our stack). 1964 // 1965 // Note that freegc accepts an unsafe.Pointer and hence keeps the pointer 1966 // alive. It therefore could be a pessimization in some cases (such 1967 // as a long-lived function) if the caller does not call freegc before 1968 // or roughly when the liveness analysis of the compiler 1969 // would otherwise have determined ptr's object is reclaimable by the GC. 
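The size gate described above can be made concrete with a small standalone sketch. The program below is an illustrative example only, not the runtime's own reusableSize: the 16-byte and 32 KiB limits are the ones quoted in the documentation above, while the 8-byte header size is an assumption standing in for gc.MallocHeaderSize.

package main

import "fmt"

// Limits described in the freegc documentation: objects smaller than the
// 16-byte tiny-allocator size, or larger than the 32 KiB maximum small size
// minus the malloc header, are not reusable, so freegc is a no-op for them.
// The 8-byte header value is an assumption in this sketch.
const (
	tinySize         = 16
	maxSmallSize     = 32 << 10
	mallocHeaderSize = 8
)

// reusable mirrors the gate applied by freegc.
func reusable(size uintptr) bool {
	return size >= tinySize && size <= maxSmallSize-mallocHeaderSize
}

func main() {
	for _, n := range []uintptr{8, 16, 4096, 32760, 32761, 1 << 20} {
		fmt.Printf("size %7d reusable=%v\n", n, reusable(n))
	}
}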
1970 func freegc(ptr unsafe.Pointer, size uintptr, noscan bool) bool { 1971 if !runtimeFreegcEnabled || !reusableSize(size) { 1972 return false 1973 } 1974 if sizeSpecializedMallocEnabled && !noscan { 1975 // TODO(thepudds): temporarily disable freegc with SizeSpecializedMalloc for pointer types 1976 // until we finish integrating. 1977 return false 1978 } 1979 1980 if ptr == nil { 1981 throw("freegc nil") 1982 } 1983 1984 // Set mp.mallocing to keep from being preempted by GC. 1985 // Otherwise, the GC could flush our mcache or otherwise cause problems. 1986 mp := acquirem() 1987 if mp.mallocing != 0 { 1988 throw("freegc deadlock") 1989 } 1990 if mp.gsignal == getg() { 1991 throw("freegc during signal") 1992 } 1993 mp.mallocing = 1 1994 1995 if mp.curg.stack.lo <= uintptr(ptr) && uintptr(ptr) < mp.curg.stack.hi { 1996 // This points into our stack, so free is a no-op. 1997 mp.mallocing = 0 1998 releasem(mp) 1999 return false 2000 } 2001 2002 if doubleCheckReusable { 2003 // TODO(thepudds): we could enforce no free on globals in bss or data. Maybe by 2004 // checking span via spanOf or spanOfHeap, or maybe walk from firstmoduledata 2005 // like isGoPointerWithoutSpan, or activeModules, or something. If so, we might 2006 // be able to delay checking until reuse (e.g., check span just before reusing, 2007 // though currently we don't always need to lookup a span on reuse). If we think 2008 // no usage patterns could result in globals, maybe enforcement for globals could 2009 // be behind -d=checkptr=1 or similar. The compiler can have knowledge of where 2010 // a variable is allocated, but stdlib does not, although there are certain 2011 // usage patterns that cannot result in a global. 2012 // TODO(thepudds): separately, consider a local debugReusableMcacheOnly here 2013 // to ignore freed objects if not in mspan in mcache, maybe when freeing and reading, 2014 // by checking something like s.base() <= uintptr(v) && uintptr(v) < s.limit. Or 2015 // maybe a GODEBUG or compiler debug flag. 2016 span := spanOf(uintptr(ptr)) 2017 if span == nil { 2018 throw("nextReusable: nil span for pointer in free list") 2019 } 2020 if state := span.state.get(); state != mSpanInUse { 2021 throw("nextReusable: span is not in use") 2022 } 2023 } 2024 2025 if debug.clobberfree != 0 { 2026 clobberfree(ptr, size) 2027 } 2028 2029 // We first check if p is still in our per-P cache. 2030 // Get our per-P cache for small objects. 2031 c := getMCache(mp) 2032 if c == nil { 2033 throw("freegc called without a P or outside bootstrapping") 2034 } 2035 2036 v := uintptr(ptr) 2037 if !noscan && !heapBitsInSpan(size) { 2038 // mallocgcSmallScanHeader expects to get the base address of the object back 2039 // from the findReusable funcs (as well as from nextFreeFast and nextFree), and 2040 // not mallocHeaderSize bytes into a object, so adjust that here. 2041 v -= mallocHeaderSize 2042 2043 // The size class lookup wants size to be adjusted by mallocHeaderSize. 2044 size += mallocHeaderSize 2045 } 2046 2047 // TODO(thepudds): should verify (behind doubleCheckReusable constant) that our calculated 2048 // sizeclass here matches what's in span found via spanOf(ptr) or findObject(ptr). 
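// For example, with the current size class tables a 24-byte noscan object maps
// to the 24-byte class and a 33-byte one rounds up to the 48-byte class. The
// lookup below mirrors the one done at allocation time in mallocgcSmallNoscan,
// so the freed pointer is pushed onto the reusable list for the same span
// class it was allocated from.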
2049 var sizeclass uint8 2050 if size <= gc.SmallSizeMax-8 { 2051 sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 2052 } else { 2053 sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)] 2054 } 2055 2056 spc := makeSpanClass(sizeclass, noscan) 2057 s := c.alloc[spc] 2058 2059 if debugReusableLog { 2060 if s.base() <= uintptr(v) && uintptr(v) < s.limit { 2061 println("freegc [in mcache]:", hex(uintptr(v)), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled) 2062 } else { 2063 println("freegc [NOT in mcache]:", hex(uintptr(v)), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled) 2064 } 2065 } 2066 2067 if noscan { 2068 c.addReusableNoscan(spc, uintptr(v)) 2069 } else { 2070 // TODO(thepudds): implemented in later CL in our stack. 2071 throw("freegc called for object with pointers, not yet implemented") 2072 } 2073 2074 // For stats, for now we leave allocCount alone, roughly pretending to the rest 2075 // of the system that this potential reuse never happened. 2076 2077 mp.mallocing = 0 2078 releasem(mp) 2079 2080 return true 2081 } 2082 2083 // nextReusableNoScan returns the next reusable object for a noscan span, 2084 // or 0 if no reusable object is found. 2085 func (c *mcache) nextReusableNoScan(s *mspan, spc spanClass) (gclinkptr, *mspan) { 2086 if !runtimeFreegcEnabled { 2087 return 0, s 2088 } 2089 2090 // Pop a reusable pointer from the free list for this span class. 2091 v := c.reusableNoscan[spc] 2092 if v == 0 { 2093 return 0, s 2094 } 2095 c.reusableNoscan[spc] = v.ptr().next 2096 2097 if debugReusableLog { 2098 println("reusing from ptr free list:", hex(v), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled) 2099 } 2100 if doubleCheckReusable { 2101 doubleCheckNextReusable(v) // debug only sanity check 2102 } 2103 2104 // For noscan spans, we only need the span if the write barrier is enabled (so that our caller 2105 // can call gcmarknewobject to allocate black). If the write barrier is enabled, we can skip 2106 // looking up the span when the pointer is in a span in the mcache. 2107 if !writeBarrier.enabled { 2108 return v, nil 2109 } 2110 if s.base() <= uintptr(v) && uintptr(v) < s.limit { 2111 // Return the original span. 2112 return v, s 2113 } 2114 2115 // We must find and return the span. 2116 span := spanOf(uintptr(v)) 2117 if span == nil { 2118 // TODO(thepudds): construct a test that triggers this throw. 2119 throw("nextReusableNoScan: nil span for pointer in reusable object free list") 2120 } 2121 2122 return v, span 2123 } 2124 2125 // doubleCheckNextReusable checks some invariants. 2126 // TODO(thepudds): will probably delete some of this. Can mostly be ignored for review. 2127 func doubleCheckNextReusable(v gclinkptr) { 2128 // TODO(thepudds): should probably take the spanClass as well to confirm expected 2129 // sizeclass match. 
2130 _, span, objIndex := findObject(uintptr(v), 0, 0) 2131 if span == nil { 2132 throw("nextReusable: nil span for pointer in free list") 2133 } 2134 if state := span.state.get(); state != mSpanInUse { 2135 throw("nextReusable: span is not in use") 2136 } 2137 if uintptr(v) < span.base() || uintptr(v) >= span.limit { 2138 throw("nextReusable: span is not in range") 2139 } 2140 if span.objBase(uintptr(v)) != uintptr(v) { 2141 print("nextReusable: v=", hex(v), " base=", hex(span.objBase(uintptr(v))), "\n") 2142 throw("nextReusable: v is non-base-address for object found on pointer free list") 2143 } 2144 if span.isFree(objIndex) { 2145 throw("nextReusable: pointer on free list is free") 2146 } 2147 2148 const debugReusableEnsureSwept = false 2149 if debugReusableEnsureSwept { 2150 // Currently disabled. 2151 // Note: ensureSwept here alters behavior (not just an invariant check). 2152 span.ensureSwept() 2153 if span.isFree(objIndex) { 2154 throw("nextReusable: pointer on free list is free after ensureSwept") 2155 } 2156 } 2157 } 2158 2159 // reusableSize reports if size is a currently supported size for a reusable object. 2160 func reusableSize(size uintptr) bool { 2161 if size < maxTinySize || size > maxSmallSize-mallocHeaderSize { 2162 return false 2163 } 2164 return true 2165 } 2166 2167 // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers 2168 // on chunks of the buffer to be zeroed, with opportunities for preemption 2169 // along the way. memclrNoHeapPointers contains no safepoints and also 2170 // cannot be preemptively scheduled, so this provides a still-efficient 2171 // block copy that can also be preempted on a reasonable granularity. 2172 // 2173 // Use this with care; if the data being cleared is tagged to contain 2174 // pointers, this allows the GC to run before it is all cleared. 2175 func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) { 2176 v := uintptr(x) 2177 // got this from benchmarking. 128k is too small, 512k is too large. 2178 const chunkBytes = 256 * 1024 2179 vsize := v + size 2180 for voff := v; voff < vsize; voff = voff + chunkBytes { 2181 if getg().preempt { 2182 // may hold locks, e.g., profiling 2183 goschedguarded() 2184 } 2185 // clear min(avail, lump) bytes 2186 n := vsize - voff 2187 if n > chunkBytes { 2188 n = chunkBytes 2189 } 2190 memclrNoHeapPointers(unsafe.Pointer(voff), n) 2191 } 2192 } 2193 2194 // implementation of new builtin 2195 // compiler (both frontend and SSA backend) knows the signature 2196 // of this function. 2197 func newobject(typ *_type) unsafe.Pointer { 2198 return mallocgc(typ.Size_, typ, true) 2199 } 2200 2201 //go:linkname maps_newobject internal/runtime/maps.newobject 2202 func maps_newobject(typ *_type) unsafe.Pointer { 2203 return newobject(typ) 2204 } 2205 2206 // reflect_unsafe_New is meant for package reflect, 2207 // but widely used packages access it using linkname. 2208 // Notable members of the hall of shame include: 2209 // - gitee.com/quant1x/gox 2210 // - github.com/goccy/json 2211 // - github.com/modern-go/reflect2 2212 // - github.com/v2pro/plz 2213 // 2214 // Do not remove or change the type signature. 2215 // See go.dev/issue/67401. 
2216 // 2217 //go:linkname reflect_unsafe_New reflect.unsafe_New 2218 func reflect_unsafe_New(typ *_type) unsafe.Pointer { 2219 return mallocgc(typ.Size_, typ, true) 2220 } 2221 2222 //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New 2223 func reflectlite_unsafe_New(typ *_type) unsafe.Pointer { 2224 return mallocgc(typ.Size_, typ, true) 2225 } 2226 2227 // newarray allocates an array of n elements of type typ. 2228 // 2229 // newarray should be an internal detail, 2230 // but widely used packages access it using linkname. 2231 // Notable members of the hall of shame include: 2232 // - github.com/RomiChan/protobuf 2233 // - github.com/segmentio/encoding 2234 // - github.com/ugorji/go/codec 2235 // 2236 // Do not remove or change the type signature. 2237 // See go.dev/issue/67401. 2238 // 2239 //go:linkname newarray 2240 func newarray(typ *_type, n int) unsafe.Pointer { 2241 if n == 1 { 2242 return mallocgc(typ.Size_, typ, true) 2243 } 2244 mem, overflow := math.MulUintptr(typ.Size_, uintptr(n)) 2245 if overflow || mem > maxAlloc || n < 0 { 2246 panic(plainError("runtime: allocation size out of range")) 2247 } 2248 return mallocgc(mem, typ, true) 2249 } 2250 2251 // reflect_unsafe_NewArray is meant for package reflect, 2252 // but widely used packages access it using linkname. 2253 // Notable members of the hall of shame include: 2254 // - gitee.com/quant1x/gox 2255 // - github.com/bytedance/sonic 2256 // - github.com/goccy/json 2257 // - github.com/modern-go/reflect2 2258 // - github.com/segmentio/encoding 2259 // - github.com/segmentio/kafka-go 2260 // - github.com/v2pro/plz 2261 // 2262 // Do not remove or change the type signature. 2263 // See go.dev/issue/67401. 2264 // 2265 //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray 2266 func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer { 2267 return newarray(typ, n) 2268 } 2269 2270 //go:linkname maps_newarray internal/runtime/maps.newarray 2271 func maps_newarray(typ *_type, n int) unsafe.Pointer { 2272 return newarray(typ, n) 2273 } 2274 2275 // profilealloc resets the current mcache's nextSample counter and 2276 // records a memory profile sample. 2277 // 2278 // The caller must be non-preemptible and have a P. 2279 func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { 2280 c := getMCache(mp) 2281 if c == nil { 2282 throw("profilealloc called without a P or outside bootstrapping") 2283 } 2284 c.memProfRate = MemProfileRate 2285 c.nextSample = nextSample() 2286 mProf_Malloc(mp, x, size) 2287 } 2288 2289 // nextSample returns the next sampling point for heap profiling. The goal is 2290 // to sample allocations on average every MemProfileRate bytes, but with a 2291 // completely random distribution over the allocation timeline; this 2292 // corresponds to a Poisson process with parameter MemProfileRate. In Poisson 2293 // processes, the distance between two samples follows the exponential 2294 // distribution (exp(MemProfileRate)), so the best return value is a random 2295 // number taken from an exponential distribution whose mean is MemProfileRate. 2296 func nextSample() int64 { 2297 if MemProfileRate == 0 { 2298 // Basically never sample. 2299 return math.MaxInt64 2300 } 2301 if MemProfileRate == 1 { 2302 // Sample immediately. 2303 return 0 2304 } 2305 return int64(fastexprand(MemProfileRate)) 2306 } 2307 2308 // fastexprand returns a random number from an exponential distribution with 2309 // the specified mean. 2310 func fastexprand(mean int) int32 { 2311 // Avoid overflow. 
Maximum possible step is 2312 // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean. 2313 switch { 2314 case mean > 0x7000000: 2315 mean = 0x7000000 2316 case mean == 0: 2317 return 0 2318 } 2319 2320 // Take a random sample of the exponential distribution exp(-mean*x). 2321 // The probability distribution function is mean*exp(-mean*x), so the CDF is 2322 // p = 1 - exp(-mean*x), so 2323 // q = 1 - p == exp(-mean*x) 2324 // log_e(q) = -mean*x 2325 // -log_e(q)/mean = x 2326 // x = -log_e(q) * mean 2327 // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency 2328 const randomBitCount = 26 2329 q := cheaprandn(1<<randomBitCount) + 1 2330 qlog := fastlog2(float64(q)) - randomBitCount 2331 if qlog > 0 { 2332 qlog = 0 2333 } 2334 const minusLog2 = -0.6931471805599453 // -ln(2) 2335 return int32(qlog*(minusLog2*float64(mean))) + 1 2336 } 2337 2338 type persistentAlloc struct { 2339 base *notInHeap 2340 off uintptr 2341 } 2342 2343 var globalAlloc struct { 2344 mutex 2345 persistentAlloc 2346 } 2347 2348 // persistentChunkSize is the number of bytes we allocate when we grow 2349 // a persistentAlloc. 2350 const persistentChunkSize = 256 << 10 2351 2352 // persistentChunks is a list of all the persistent chunks we have 2353 // allocated. The list is maintained through the first word in the 2354 // persistent chunk. This is updated atomically. 2355 var persistentChunks *notInHeap 2356 2357 // Wrapper around sysAlloc that can allocate small chunks. 2358 // There is no associated free operation. 2359 // Intended for things like function/type/debug-related persistent data. 2360 // If align is 0, uses default align (currently 8). 2361 // The returned memory will be zeroed. 2362 // sysStat must be non-nil. 2363 // 2364 // Consider marking persistentalloc'd types not in heap by embedding 2365 // internal/runtime/sys.NotInHeap. 2366 // 2367 // nosplit because it is used during write barriers and must not be preempted. 2368 // 2369 //go:nosplit 2370 func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer { 2371 var p *notInHeap 2372 systemstack(func() { 2373 p = persistentalloc1(size, align, sysStat) 2374 }) 2375 return unsafe.Pointer(p) 2376 } 2377 2378 // Must run on system stack because stack growth can (re)invoke it. 2379 // See issue 9174. 
2380 // 2381 //go:systemstack 2382 func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap { 2383 const ( 2384 maxBlock = 64 << 10 // VM reservation granularity is 64K on windows 2385 ) 2386 2387 if size == 0 { 2388 throw("persistentalloc: size == 0") 2389 } 2390 if align != 0 { 2391 if align&(align-1) != 0 { 2392 throw("persistentalloc: align is not a power of 2") 2393 } 2394 if align > pageSize { 2395 throw("persistentalloc: align is too large") 2396 } 2397 } else { 2398 align = 8 2399 } 2400 2401 if size >= maxBlock { 2402 return (*notInHeap)(sysAlloc(size, sysStat, "immortal metadata")) 2403 } 2404 2405 mp := acquirem() 2406 var persistent *persistentAlloc 2407 if mp != nil && mp.p != 0 { 2408 persistent = &mp.p.ptr().palloc 2409 } else { 2410 lock(&globalAlloc.mutex) 2411 persistent = &globalAlloc.persistentAlloc 2412 } 2413 persistent.off = alignUp(persistent.off, align) 2414 if persistent.off+size > persistentChunkSize || persistent.base == nil { 2415 persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys, "immortal metadata")) 2416 if persistent.base == nil { 2417 if persistent == &globalAlloc.persistentAlloc { 2418 unlock(&globalAlloc.mutex) 2419 } 2420 throw("runtime: cannot allocate memory") 2421 } 2422 2423 // Add the new chunk to the persistentChunks list. 2424 for { 2425 chunks := uintptr(unsafe.Pointer(persistentChunks)) 2426 *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks 2427 if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) { 2428 break 2429 } 2430 } 2431 persistent.off = alignUp(goarch.PtrSize, align) 2432 } 2433 p := persistent.base.add(persistent.off) 2434 persistent.off += size 2435 releasem(mp) 2436 if persistent == &globalAlloc.persistentAlloc { 2437 unlock(&globalAlloc.mutex) 2438 } 2439 2440 if sysStat != &memstats.other_sys { 2441 sysStat.add(int64(size)) 2442 memstats.other_sys.add(-int64(size)) 2443 } 2444 return p 2445 } 2446 2447 // inPersistentAlloc reports whether p points to memory allocated by 2448 // persistentalloc. This must be nosplit because it is called by the 2449 // cgo checker code, which is called by the write barrier code. 2450 // 2451 //go:nosplit 2452 func inPersistentAlloc(p uintptr) bool { 2453 chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks))) 2454 for chunk != 0 { 2455 if p >= chunk && p < chunk+persistentChunkSize { 2456 return true 2457 } 2458 chunk = *(*uintptr)(unsafe.Pointer(chunk)) 2459 } 2460 return false 2461 } 2462 2463 // linearAlloc is a simple linear allocator that pre-reserves a region 2464 // of memory and then optionally maps that region into the Ready state 2465 // as needed. 2466 // 2467 // The caller is responsible for locking. 2468 type linearAlloc struct { 2469 next uintptr // next free byte 2470 mapped uintptr // one byte past end of mapped space 2471 end uintptr // end of reserved space 2472 2473 mapMemory bool // transition memory from Reserved to Ready if true 2474 } 2475 2476 func (l *linearAlloc) init(base, size uintptr, mapMemory bool) { 2477 if base+size < base { 2478 // Chop off the last byte. The runtime isn't prepared 2479 // to deal with situations where the bounds could overflow. 2480 // Leave that memory reserved, though, so we don't map it 2481 // later. 
2482 size -= 1 2483 } 2484 l.next, l.mapped = base, base 2485 l.end = base + size 2486 l.mapMemory = mapMemory 2487 } 2488 2489 func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer { 2490 p := alignUp(l.next, align) 2491 if p+size > l.end { 2492 return nil 2493 } 2494 l.next = p + size 2495 if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped { 2496 if l.mapMemory { 2497 // Transition from Reserved to Prepared to Ready. 2498 n := pEnd - l.mapped 2499 sysMap(unsafe.Pointer(l.mapped), n, sysStat, vmaName) 2500 sysUsed(unsafe.Pointer(l.mapped), n, n) 2501 } 2502 l.mapped = pEnd 2503 } 2504 return unsafe.Pointer(p) 2505 } 2506 2507 // notInHeap is off-heap memory allocated by a lower-level allocator 2508 // like sysAlloc or persistentAlloc. 2509 // 2510 // In general, it's better to use real types which embed 2511 // internal/runtime/sys.NotInHeap, but this serves as a generic type 2512 // for situations where that isn't possible (like in the allocators). 2513 // 2514 // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc? 2515 type notInHeap struct{ _ sys.NotInHeap } 2516 2517 func (p *notInHeap) add(bytes uintptr) *notInHeap { 2518 return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes)) 2519 } 2520 2521 // redZoneSize computes the size of the redzone for a given allocation. 2522 // Refer to the implementation of the compiler-rt. 2523 func redZoneSize(userSize uintptr) uintptr { 2524 switch { 2525 case userSize <= (64 - 16): 2526 return 16 << 0 2527 case userSize <= (128 - 32): 2528 return 16 << 1 2529 case userSize <= (512 - 64): 2530 return 16 << 2 2531 case userSize <= (4096 - 128): 2532 return 16 << 3 2533 case userSize <= (1<<14)-256: 2534 return 16 << 4 2535 case userSize <= (1<<15)-512: 2536 return 16 << 5 2537 case userSize <= (1<<16)-1024: 2538 return 16 << 6 2539 default: 2540 return 16 << 7 2541 } 2542 } 2543
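As a quick check of the mapping above, the standalone sketch below copies the same thresholds (it is an illustrative example, not the runtime's redZoneSize) and prints the redzone chosen for a few user sizes.

package main

import "fmt"

// redZone copies the thresholds from redZoneSize above: the redzone doubles
// each time the user size crosses the next threshold, from 16 bytes up to a
// 2048-byte cap.
func redZone(userSize uintptr) uintptr {
	switch {
	case userSize <= 64-16:
		return 16
	case userSize <= 128-32:
		return 32
	case userSize <= 512-64:
		return 64
	case userSize <= 4096-128:
		return 128
	case userSize <= (1<<14)-256:
		return 256
	case userSize <= (1<<15)-512:
		return 512
	case userSize <= (1<<16)-1024:
		return 1024
	default:
		return 2048
	}
}

func main() {
	// Prints: 32 -> 16, 96 -> 32, 448 -> 64, 4000 -> 256, 64000 -> 1024, 1048576 -> 2048.
	for _, n := range []uintptr{32, 96, 448, 4000, 64000, 1 << 20} {
		fmt.Println(n, "->", redZone(n))
	}
}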
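Looking back at nextSample and fastexprand above, the inverse-CDF sampling they implement can be checked empirically outside the runtime. The sketch below is an illustrative stand-in, assuming math/rand and math.Log2 in place of the runtime's cheaprandn and fastlog2; over many draws the sample mean should land close to the requested mean, here the default MemProfileRate of 512 KiB.

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// exprand mirrors the construction in fastexprand: draw q uniformly from
// [1, 1<<26], treat q/2^26 as a uniform value in (0, 1], and invert the
// exponential CDF so the result has the requested mean.
func exprand(mean int) int64 {
	const randomBitCount = 26
	q := rand.Int63n(1<<randomBitCount) + 1
	qlog := math.Log2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	return int64(qlog*(-math.Ln2)*float64(mean)) + 1
}

func main() {
	const mean = 512 * 1024 // default MemProfileRate
	const n = 1_000_000
	var sum int64
	for i := 0; i < n; i++ {
		sum += exprand(mean)
	}
	fmt.Printf("empirical mean %d, target %d\n", sum/n, mean)
}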