Source file src/runtime/mheap.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.go for overview.

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goexperiment"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"unsafe"
)

const (
	// minPhysPageSize is a lower-bound on the physical page size. The
	// true physical page size may be larger than this. In contrast,
	// sys.PhysPageSize is an upper-bound on the physical page size.
	minPhysPageSize = 4096

	// maxPhysPageSize is the maximum page size the runtime supports.
	maxPhysPageSize = 512 << 10

	// maxPhysHugePageSize sets an upper-bound on the maximum huge page size
	// that the runtime supports.
	maxPhysHugePageSize = pallocChunkBytes

	// pagesPerReclaimerChunk indicates how many pages to scan from the
	// pageInUse bitmap at a time. Used by the page reclaimer.
	//
	// Higher values reduce contention on scanning indexes (such as
	// h.reclaimIndex), but increase the minimum latency of the
	// operation.
	//
	// The time required to scan this many pages can vary a lot depending
	// on how many spans are actually freed. Experimentally, it can
	// scan for pages at ~300 GB/ms on a 2.6GHz Core i7, but can only
	// free spans at ~32 MB/ms. Using 512 pages bounds this at
	// roughly 100µs.
	//
	// Must be a multiple of the pageInUse bitmap element size and
	// must also evenly divide pagesPerArena.
	pagesPerReclaimerChunk = min(512, pagesPerArena)

	// physPageAlignedStacks indicates whether stack allocations must be
	// physical page aligned. This is a requirement for MAP_STACK on
	// OpenBSD.
	physPageAlignedStacks = GOOS == "openbsd"
)
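
// As a rough, illustrative bound (not a measured guarantee): with the
// usual 8 KiB runtime page size, one reclaimer chunk of 512 pages covers
// 4 MiB of spans. At the ~32 MB/ms span-free rate quoted above, that is
// about 4 MiB / (32 MB/ms) ≈ 0.13 ms in the worst case where every
// scanned page is freed, which is where the "roughly 100µs" figure for
// pagesPerReclaimerChunk comes from; the ~300 GB/ms scan rate itself is
// negligible by comparison.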

// Main malloc heap.
// The heap uses pageAlloc to manage free and scavenged pages,
// but all the other global data is here too.
//
// mheap must not be heap-allocated because it contains mSpanLists,
// which must not be heap-allocated.
type mheap struct {
	_ sys.NotInHeap

	// lock must only be acquired on the system stack, otherwise a g
	// could self-deadlock if its stack grows with the lock held.
	lock mutex

	pages pageAlloc // page allocation data structure

	sweepgen uint32 // sweep generation, see comment in mspan; written during STW

	// allspans is a slice of all mspans ever created. Each mspan
	// appears exactly once.
	//
	// The memory for allspans is manually managed and can be
	// reallocated and move as the heap grows.
	//
	// In general, allspans is protected by mheap_.lock, which
	// prevents concurrent access as well as freeing the backing
	// store. Accesses during STW might not hold the lock, but
	// must ensure that allocation cannot happen around the
	// access (since that may free the backing store).
	allspans []*mspan // all spans out there

	// Proportional sweep
	//
	// These parameters represent a linear function from gcController.heapLive
	// to page sweep count. The proportional sweep system works to
	// stay in the black by keeping the current page sweep count
	// above this line at the current gcController.heapLive.
	//
	// The line has slope sweepPagesPerByte and passes through a
	// basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
	// any given time, the system is at (gcController.heapLive,
	// pagesSwept) in this space.
	//
	// It is important that the line pass through a point we
	// control rather than simply starting at a 0,0 origin
	// because that lets us adjust sweep pacing at any time while
	// accounting for current progress. If we could only adjust
	// the slope, it would create a discontinuity in debt if any
	// progress has already been made.
	pagesInUse         atomic.Uintptr // pages of spans in stats mSpanInUse
	pagesSwept         atomic.Uint64  // pages swept this cycle
	pagesSweptBasis    atomic.Uint64  // pagesSwept to use as the origin of the sweep ratio
	sweepHeapLiveBasis uint64         // value of gcController.heapLive to use as the origin of sweep ratio; written with lock, read without
	sweepPagesPerByte  float64        // proportional sweep ratio; written with lock, read without

	// Page reclaimer state

	// reclaimIndex is the page index in heapArenas of the next page to
	// reclaim. Specifically, it refers to page (i %
	// pagesPerArena) of arena heapArenas[i / pagesPerArena].
	//
	// If this is >= 1<<63, the page reclaimer is done scanning
	// the page marks.
	reclaimIndex atomic.Uint64

	// reclaimCredit is spare credit for extra pages swept. Since
	// the page reclaimer works in large chunks, it may reclaim
	// more than requested. Any spare pages released go to this
	// credit pool.
	reclaimCredit atomic.Uintptr

	_ cpu.CacheLinePad // prevents false-sharing between arenas and preceding variables

	// arenas is the heap arena map. It points to the metadata for
	// the heap for every arena frame of the entire usable virtual
	// address space.
	//
	// Use arenaIndex to compute indexes into this array.
	//
	// For regions of the address space that are not backed by the
	// Go heap, the arena map contains nil.
	//
	// Modifications are protected by mheap_.lock. Reads can be
	// performed without locking; however, a given entry can
	// transition from nil to non-nil at any time when the lock
	// isn't held. (Entries never transition back to nil.)
	//
	// In general, this is a two-level mapping consisting of an L1
	// map and possibly many L2 maps. This saves space when there
	// are a huge number of arena frames. However, on many
	// platforms (even 64-bit), arenaL1Bits is 0, making this
	// effectively a single-level map. In this case, arenas[0]
	// will never be nil.
	arenas [1 << arenaL1Bits]*[1 << arenaL2Bits]*heapArena

	// arenasHugePages indicates whether arenas' L2 entries are eligible
	// to be backed by huge pages.
	arenasHugePages bool

	// heapArenaAlloc is pre-reserved space for allocating heapArena
	// objects. This is only used on 32-bit, where we pre-reserve
	// this space to avoid interleaving it with the heap itself.
	heapArenaAlloc linearAlloc

	// arenaHints is a list of addresses at which to attempt to
	// add more heap arenas. This is initially populated with a
	// set of general hint addresses, and grown with the bounds of
	// actual heap arena ranges.
	arenaHints *arenaHint

	// arena is a pre-reserved space for allocating heap arenas
	// (the actual arenas). This is only used on 32-bit.
	arena linearAlloc

	// heapArenas is the arenaIndex of every arena mapped for the heap.
	// This can be used to iterate through the heap address space.
	//
	// Access is protected by mheap_.lock. However, since this is
	// append-only and old backing arrays are never freed, it is
	// safe to acquire mheap_.lock, copy the slice header, and
	// then release mheap_.lock.
	heapArenas []arenaIdx

	// userArenaArenas is the arenaIndex of every arena mapped for
	// user arenas.
	//
	// Access is protected by mheap_.lock. However, since this is
	// append-only and old backing arrays are never freed, it is
	// safe to acquire mheap_.lock, copy the slice header, and
	// then release mheap_.lock.
	userArenaArenas []arenaIdx

	// sweepArenas is a snapshot of heapArenas taken at the
	// beginning of the sweep cycle. This can be read safely by
	// simply blocking GC (by disabling preemption).
	sweepArenas []arenaIdx

	// markArenas is a snapshot of heapArenas taken at the beginning
	// of the mark cycle. Because heapArenas is append-only, neither
	// this slice nor its contents will change during the mark, so
	// it can be read safely.
	markArenas []arenaIdx

	// curArena is the arena that the heap is currently growing
	// into. This should always be physPageSize-aligned.
	curArena struct {
		base, end uintptr
	}

	// central free lists for small size classes.
	// the padding makes sure that the mcentrals are
	// spaced CacheLinePadSize bytes apart, so that each mcentral.lock
	// gets its own cache line.
	// central is indexed by spanClass.
	central [numSpanClasses]struct {
		mcentral mcentral
		pad      [(cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
	}

	spanalloc                  fixalloc // allocator for spans
	spanSPMCAlloc              fixalloc // allocator for spanSPMC, protected by work.spanSPMCs.lock
	cachealloc                 fixalloc // allocator for mcache
	specialfinalizeralloc      fixalloc // allocator for specialfinalizer
	specialCleanupAlloc        fixalloc // allocator for specialCleanup
	specialCheckFinalizerAlloc fixalloc // allocator for specialCheckFinalizer
	specialTinyBlockAlloc      fixalloc // allocator for specialTinyBlock
	specialprofilealloc        fixalloc // allocator for specialprofile
	specialReachableAlloc      fixalloc // allocator for specialReachable
	specialPinCounterAlloc     fixalloc // allocator for specialPinCounter
	specialWeakHandleAlloc     fixalloc // allocator for specialWeakHandle
	specialBubbleAlloc         fixalloc // allocator for specialBubble
	specialSecretAlloc         fixalloc // allocator for specialSecret
	speciallock                mutex    // lock for special record allocators.
	arenaHintAlloc             fixalloc // allocator for arenaHints

	// User arena state.
	//
	// Protected by mheap_.lock.
	userArena struct {
		// arenaHints is a list of addresses at which to attempt to
		// add more heap arenas for user arena chunks. This is initially
		// populated with a set of general hint addresses, and grown with
		// the bounds of actual heap arena ranges.
		arenaHints *arenaHint

		// quarantineList is a list of user arena spans that have been set to fault, but
		// are waiting for all pointers into them to go away. Sweeping handles
		// identifying when this is true, and moves the span to the ready list.
		quarantineList mSpanList

		// readyList is a list of empty user arena spans that are ready for reuse.
		readyList mSpanList
	}

	// cleanupID is a counter which is incremented each time a cleanup special is added
	// to a span.
	// It's used to create globally unique identifiers for individual cleanups.
	// cleanupID is protected by mheap_.speciallock. It must only be incremented while holding
	// the lock. ID 0 is reserved. Users should increment first, then read the value.
	cleanupID uint64

	_ cpu.CacheLinePad

	immortalWeakHandles immortalWeakHandleMap

	unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
}

var mheap_ mheap

// A heapArena stores metadata for a heap arena. heapArenas are stored
// outside of the Go heap and accessed via the mheap_.arenas index.
type heapArena struct {
	_ sys.NotInHeap

	// spans maps from virtual address page ID within this arena to *mspan.
	// For allocated spans, their pages map to the span itself.
	// For free spans, only the lowest and highest pages map to the span itself.
	// Internal pages map to an arbitrary span.
	// For pages that have never been allocated, spans entries are nil.
	//
	// Modifications are protected by mheap.lock. Reads can be
	// performed without locking, but ONLY from indexes that are
	// known to contain in-use or stack spans. This means there
	// must not be a safe-point between establishing that an
	// address is live and looking it up in the spans array.
	spans [pagesPerArena]*mspan

	// pageInUse is a bitmap that indicates which spans are in
	// state mSpanInUse. This bitmap is indexed by page number,
	// but only the bit corresponding to the first page in each
	// span is used.
	//
	// Reads and writes are atomic.
	pageInUse [pagesPerArena / 8]uint8

	// pageMarks is a bitmap that indicates which spans have any
	// marked objects on them. Like pageInUse, only the bit
	// corresponding to the first page in each span is used.
	//
	// Writes are done atomically during marking. Reads are
	// non-atomic and lock-free since they only occur during
	// sweeping (and hence never race with writes).
	//
	// This is used to quickly find whole spans that can be freed.
	//
	// TODO(austin): It would be nice if this was uint64 for
	// faster scanning, but we don't have 64-bit atomic bit
	// operations.
	pageMarks [pagesPerArena / 8]uint8

	// pageSpecials is a bitmap that indicates which spans have
	// specials (finalizers or other). Like pageInUse, only the bit
	// corresponding to the first page in each span is used.
	//
	// Writes are done atomically whenever a special is added to
	// a span and whenever the last special is removed from a span.
	// Reads are done atomically to find spans containing specials
	// during marking.
	pageSpecials [pagesPerArena / 8]uint8

	// pageUseSpanInlineMarkBits is a bitmap where each bit corresponds
	// to a span, as only spans one page in size can have inline mark bits.
	// The bit indicates that the span has a spanInlineMarkBits struct
	// stored directly at the top end of the span's memory.
	pageUseSpanInlineMarkBits [pagesPerArena / 8]uint8

	// checkmarks stores the debug.gccheckmark state. It is only
	// used if debug.gccheckmark > 0 or debug.checkfinalizers > 0.
	checkmarks *checkmarksMap

	// zeroedBase marks the first byte of the first page in this
	// arena which hasn't been used yet and is therefore already
	// zero. zeroedBase is relative to the arena base.
	// Increases monotonically until it hits heapArenaBytes.
	//
	// This field is sufficient to determine if an allocation
	// needs to be zeroed because the page allocator follows an
	// address-ordered first-fit policy.
	//
	// Read atomically and written with an atomic CAS.
	zeroedBase uintptr
}

// arenaHint is a hint for where to grow the heap arenas. See
// mheap_.arenaHints.
type arenaHint struct {
	_    sys.NotInHeap
	addr uintptr
	down bool
	next *arenaHint
}

// An mspan is a run of pages.
//
// When a mspan is in the heap free treap, state == mSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
// If the mspan is in the heap scav treap, then in addition to the
// above scavenged == true. scavenged == false in all other cases.
//
// When a mspan is allocated, state == mSpanInUse or mSpanManual
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

// Every mspan is in one doubly-linked list, either in the mheap's
// busy list or one of the mcentral's span lists.

// An mspan representing actual memory has state mSpanInUse,
// mSpanManual, or mSpanFree. Transitions between these states are
// constrained as follows:
//
// - A span may transition from free to in-use or manual during any GC
// phase.
//
// - During sweeping (gcphase == _GCoff), a span may transition from
// in-use to free (as a result of sweeping) or manual to free (as a
// result of stacks being freed).
//
// - During GC (gcphase != _GCoff), a span *must not* transition from
// manual or in-use to free. Because concurrent GC may read a pointer
// and then look up its span, the span state must be monotonic.
//
// Setting mspan.state to mSpanInUse or mSpanManual must be done
// atomically and only after all other span fields are valid.
// Likewise, if inspecting a span is contingent on it being
// mSpanInUse, the state should be loaded atomically and checked
// before depending on other fields. This allows the garbage collector
// to safely deal with potentially invalid pointers, since resolving
// such pointers may race with a span being allocated.
type mSpanState uint8

const (
	mSpanDead   mSpanState = iota
	mSpanInUse             // allocated for garbage collected heap
	mSpanManual            // allocated for manual management (e.g., stack allocator)
)

// mSpanStateNames are the names of the span states, indexed by
// mSpanState.
var mSpanStateNames = []string{
	"mSpanDead",
	"mSpanInUse",
	"mSpanManual",
}

// mSpanStateBox holds an atomic.Uint8 to provide atomic operations on
// an mSpanState. This is a separate type to disallow accidental comparison
// or assignment with mSpanState.
type mSpanStateBox struct {
	s atomic.Uint8
}

// It is nosplit to match get, below.

//go:nosplit
func (b *mSpanStateBox) set(s mSpanState) {
	b.s.Store(uint8(s))
}

// It is nosplit because it's called indirectly by typedmemclr,
// which must not be preempted.

//go:nosplit
func (b *mSpanStateBox) get() mSpanState {
	return mSpanState(b.s.Load())
}
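
// For illustration only (not a runtime API): code that starts from an
// untrusted pointer is expected to follow the publication protocol
// described above, loading the span state atomically and checking it
// before trusting any other span field. A minimal sketch:
//
//	s := spanOf(p)
//	if s == nil || s.state.get() != mSpanInUse {
//		// Not a live heap span; other fields may not be initialized.
//		return
//	}
//	// s.base(), s.elemsize, s.spanclass, etc. are now safe to read.
//
// spanOfHeap below packages this state check together with a bounds
// check against [s.base(), s.limit).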

type mspan struct {
	_    sys.NotInHeap
	next *mspan     // next span in list, or nil if none
	prev *mspan     // previous span in list, or nil if none
	list *mSpanList // For debugging.

	startAddr uintptr // address of first byte of span aka s.base()
	npages    uintptr // number of pages in span

	manualFreeList gclinkptr // list of free objects in mSpanManual spans

	// freeindex is the slot index between 0 and nelems at which to begin scanning
	// for the next free object in this span.
	// Each allocation scans allocBits starting at freeindex until it encounters a 0
	// indicating a free object. freeindex is then adjusted so that subsequent scans begin
	// just past the newly discovered free object.
	//
	// If freeindex == nelems, this span has no free objects, though it might have reusable objects.
	//
	// allocBits is a bitmap of objects in this span.
	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
	// then object n is free;
	// otherwise, object n is allocated. Bits starting at nelems are
	// undefined and should never be referenced.
	//
	// Object n starts at address n*elemsize + (start << pageShift).
	freeindex uint16
	// TODO: Look up nelems from sizeclass and remove this field if it
	// helps performance.
	nelems uint16 // number of objects in the span.
	// freeIndexForScan is like freeindex, except that freeindex is
	// used by the allocator whereas freeIndexForScan is used by the
	// GC scanner. They are two fields so that the GC sees the object
	// is allocated only when the object and the heap bits are
	// initialized (see also the assignment of freeIndexForScan in
	// mallocgc, and issue 54596).
	freeIndexForScan uint16

	// Cache of the allocBits at freeindex. allocCache is shifted
	// such that the lowest bit corresponds to the bit freeindex.
	// allocCache holds the complement of allocBits, thus allowing
	// ctz (count trailing zero) to use it directly.
	// allocCache may contain bits beyond s.nelems; the caller must ignore
	// these.
	allocCache uint64

	// allocBits and gcmarkBits hold pointers to a span's mark and
	// allocation bits. The pointers are 8 byte aligned.
	// There are three arenas where this data is held.
	// free: Dirty arenas that are no longer accessed
	//       and can be reused.
	// next: Holds information to be used in the next GC cycle.
	// current: Information being used during this GC cycle.
	// previous: Information being used during the last GC cycle.
	// A new GC cycle starts with the call to finishsweep_m.
	// finishsweep_m moves the previous arena to the free arena,
	// the current arena to the previous arena, and
	// the next arena to the current arena.
	// The next arena is populated as the spans request
	// memory to hold gcmarkBits for the next GC cycle as well
	// as allocBits for newly allocated spans.
	//
	// The pointer arithmetic is done "by hand" instead of using
	// arrays to avoid bounds checks along critical performance
	// paths.
	// The sweep will free the old allocBits and set allocBits to the
	// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
	// out memory.
	allocBits  *gcBits
	gcmarkBits *gcBits
	pinnerBits *gcBits // bitmap for pinned objects; accessed atomically

	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// if sweepgen == h->sweepgen + 1, the span was cached before sweep began and is still cached, and needs sweeping
	// if sweepgen == h->sweepgen + 3, the span was swept and then cached and is still cached
	// h->sweepgen is incremented by 2 after every GC

	sweepgen              uint32
	divMul                uint32        // for divide by elemsize
	allocCount            uint16        // number of allocated objects
	spanclass             spanClass     // size class and noscan (uint8)
	state                 mSpanStateBox // mSpanInUse etc; accessed atomically (get/set methods)
	needzero              uint8         // needs to be zeroed before allocation
	isUserArenaChunk      bool          // whether or not this span represents a user arena
	allocCountBeforeCache uint16        // a copy of allocCount that is stored just before this span is cached
	elemsize              uintptr       // computed from sizeclass or from npages
	limit                 uintptr       // end of data in span
	speciallock           mutex         // guards specials list and changes to pinnerBits
	specials              *special      // linked list of special records sorted by offset.
	userArenaChunkFree    addrRange     // interval for managing chunk allocation
	largeType             *_type        // malloc header for large objects.
}

func (s *mspan) base() uintptr {
	return s.startAddr
}

func (s *mspan) layout() (size, n, total uintptr) {
	total = s.npages << gc.PageShift
	size = s.elemsize
	if size > 0 {
		n = total / size
	}
	return
}

// recordspan adds a newly allocated span to h.allspans.
//
// This only happens the first time a span is allocated from
// mheap.spanalloc (it is not called when a span is reused).
//
// Write barriers are disallowed here because it can be called from
// gcWork when allocating new workbufs. However, because it's an
// indirect call from the fixalloc initializer, the compiler can't see
// this.
//
// The heap lock must be held.
//
//go:nowritebarrierrec
func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	h := (*mheap)(vh)
	s := (*mspan)(p)

	assertLockHeld(&h.lock)

	if len(h.allspans) >= cap(h.allspans) {
		n := 64 * 1024 / goarch.PtrSize
		if n < cap(h.allspans)*3/2 {
			n = cap(h.allspans) * 3 / 2
		}
		var new []*mspan
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys, "allspans array")
		if sp.array == nil {
			throw("runtime: cannot allocate memory")
		}
		sp.len = len(h.allspans)
		sp.cap = n
		if len(h.allspans) > 0 {
			copy(new, h.allspans)
		}
		oldAllspans := h.allspans
		*(*notInHeapSlice)(unsafe.Pointer(&h.allspans)) = *(*notInHeapSlice)(unsafe.Pointer(&new))
		if len(oldAllspans) != 0 {
			sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
		}
	}
	h.allspans = h.allspans[:len(h.allspans)+1]
	h.allspans[len(h.allspans)-1] = s
}
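
// A rough illustration of the growth policy above (assuming a 64-bit
// platform, where goarch.PtrSize == 8): the first time allspans
// overflows, n = 64*1024/8 = 8192 entries, i.e. 64 KiB of backing
// store. After that the capacity grows by 1.5x per reallocation, so the
// number of sysAlloc/sysFree calls stays logarithmic in the peak number
// of spans ever recorded.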

// A spanClass represents the size class and noscan-ness of a span.
//
// Each size class has a noscan spanClass and a scan spanClass. The
// noscan spanClass contains only noscan objects, which do not contain
// pointers and thus do not need to be scanned by the garbage
// collector.
type spanClass uint8

const (
	numSpanClasses = gc.NumSizeClasses << 1
	tinySpanClass  = spanClass(tinySizeClass<<1 | 1)
)

func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
}

//go:nosplit
func (sc spanClass) sizeclass() int8 {
	return int8(sc >> 1)
}

//go:nosplit
func (sc spanClass) noscan() bool {
	return sc&1 != 0
}

// arenaIndex returns the index into mheap_.arenas of the arena
// containing metadata for p. This index combines an index into the
// L1 map and an index into the L2 map and should be used as
// mheap_.arenas[ai.l1()][ai.l2()].
//
// If p is outside the range of valid heap addresses, either l1() or
// l2() will be out of bounds.
//
// It is nosplit because it's called by spanOf and several other
// nosplit functions.
//
//go:nosplit
func arenaIndex(p uintptr) arenaIdx {
	return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
}

// arenaBase returns the low address of the region covered by heap
// arena i.
func arenaBase(i arenaIdx) uintptr {
	return uintptr(i)*heapArenaBytes + arenaBaseOffset
}

type arenaIdx uint

// l1 returns the "l1" portion of an arenaIdx.
//
// Marked nosplit because it's called by spanOf and other nosplit
// functions.
//
//go:nosplit
func (i arenaIdx) l1() uint {
	if arenaL1Bits == 0 {
		// Let the compiler optimize this away if there's no
		// L1 map.
		return 0
	} else {
		return uint(i) >> arenaL1Shift
	}
}

// l2 returns the "l2" portion of an arenaIdx.
//
// Marked nosplit because it's called by spanOf and other nosplit
// functions.
//
//go:nosplit
func (i arenaIdx) l2() uint {
	if arenaL1Bits == 0 {
		return uint(i)
	} else {
		return uint(i) & (1<<arenaL2Bits - 1)
	}
}

// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into mSpanManual spans.
// Non-preemptible because it is used by write barriers.
//
//go:nowritebarrier
//go:nosplit
func inheap(b uintptr) bool {
	return spanOfHeap(b) != nil
}

// inHeapOrStack is a variant of inheap that returns true for pointers
// into any allocated heap span.
//
//go:nowritebarrier
//go:nosplit
func inHeapOrStack(b uintptr) bool {
	s := spanOf(b)
	if s == nil || b < s.base() {
		return false
	}
	switch s.state.get() {
	case mSpanInUse, mSpanManual:
		return b < s.limit
	default:
		return false
	}
}
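
// For illustration only; the exact constants below are platform-dependent
// assumptions of this sketch, not definitions in this file. On a typical
// 64-bit linux setup (heapArenaBytes = 64 MiB, arenaL1Bits = 0), looking
// up the metadata for an address p is one division plus two array
// indexes:
//
//	ai := arenaIndex(p)                   // (p - arenaBaseOffset) / heapArenaBytes
//	ha := mheap_.arenas[ai.l1()][ai.l2()] // l1() is constant 0 when there is no L1
//	s := ha.spans[(p/pageSize)%pagesPerArena]
//
// On platforms with a non-zero arenaL1Bits, the same arena index is
// simply split into a high L1 part and a low L2 part.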

// spanOf returns the span of p. If p does not point into the heap
// arena or no span has ever contained p, spanOf returns nil.
//
// If p does not point to allocated memory, this may return a non-nil
// span that does *not* contain p. If this is a possibility, the
// caller should either call spanOfHeap or check the span bounds
// explicitly.
//
// Must be nosplit because it has callers that are nosplit.
//
//go:nosplit
func spanOf(p uintptr) *mspan {
	// This function looks big, but we use a lot of constant
	// folding around arenaL1Bits to get it under the inlining
	// budget. Also, many of the checks here are safety checks
	// that Go needs to do anyway, so the generated code is quite
	// short.
	ri := arenaIndex(p)
	if arenaL1Bits == 0 {
		// If there's no L1, then ri.l1() can't be out of bounds but ri.l2() can.
		if ri.l2() >= uint(len(mheap_.arenas[0])) {
			return nil
		}
	} else {
		// If there's an L1, then ri.l1() can be out of bounds but ri.l2() can't.
		if ri.l1() >= uint(len(mheap_.arenas)) {
			return nil
		}
	}
	l2 := mheap_.arenas[ri.l1()]
	if arenaL1Bits != 0 && l2 == nil { // Should never happen if there's no L1.
		return nil
	}
	ha := l2[ri.l2()]
	if ha == nil {
		return nil
	}
	return ha.spans[(p/pageSize)%pagesPerArena]
}

// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into an allocated heap arena.
//
// Must be nosplit because it has callers that are nosplit.
//
//go:nosplit
func spanOfUnchecked(p uintptr) *mspan {
	ai := arenaIndex(p)
	return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
}

// spanOfHeap is like spanOf, but returns nil if p does not point to a
// heap object.
//
// Must be nosplit because it has callers that are nosplit.
//
//go:nosplit
func spanOfHeap(p uintptr) *mspan {
	s := spanOf(p)
	// s is nil if it's never been allocated. Otherwise, we check
	// its state first because we don't trust this pointer, so we
	// have to synchronize with span initialization. Then, it's
	// still possible we picked up a stale span pointer, so we
	// have to check the span's bounds.
	if s == nil || s.state.get() != mSpanInUse || p < s.base() || p >= s.limit {
		return nil
	}
	return s
}

// pageIndexOf returns the arena, page index, and page mask for pointer p.
// The caller must ensure p is in the heap.
func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8) {
	ai := arenaIndex(p)
	arena = mheap_.arenas[ai.l1()][ai.l2()]
	pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse))
	pageMask = byte(1 << ((p / pageSize) % 8))
	return
}

// heapArenaOf returns the heap arena for p, if one exists.
func heapArenaOf(p uintptr) *heapArena {
	ri := arenaIndex(p)
	if arenaL1Bits == 0 {
		// If there's no L1, then ri.l1() can't be out of bounds but ri.l2() can.
		if ri.l2() >= uint(len(mheap_.arenas[0])) {
			return nil
		}
	} else {
		// If there's an L1, then ri.l1() can be out of bounds but ri.l2() can't.
		if ri.l1() >= uint(len(mheap_.arenas)) {
			return nil
		}
	}
	l2 := mheap_.arenas[ri.l1()]
	if arenaL1Bits != 0 && l2 == nil { // Should never happen if there's no L1.
		return nil
	}
	return l2[ri.l2()]
}
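
// A small worked example for pageIndexOf (illustrative numbers only):
// if p lies in the page whose index within its arena is 10, then
// pageIdx == 10/8 == 1 (modulo the bitmap length) and
// pageMask == 1<<(10%8) == 0x4, so
// atomic.Or8(&arena.pageInUse[pageIdx], pageMask) sets exactly the bit
// for that page. This is the same bit that the page reclaimer later
// reads from pageInUse and pageMarks in reclaimChunk.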

// Initialize the heap.
func (h *mheap) init() {
	lockInit(&h.lock, lockRankMheap)
	lockInit(&h.speciallock, lockRankMheapSpecial)

	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.spanSPMCAlloc.init(unsafe.Sizeof(spanSPMC{}), nil, nil, &memstats.gcMiscSys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialCleanupAlloc.init(unsafe.Sizeof(specialCleanup{}), nil, nil, &memstats.other_sys)
	h.specialCheckFinalizerAlloc.init(unsafe.Sizeof(specialCheckFinalizer{}), nil, nil, &memstats.other_sys)
	h.specialTinyBlockAlloc.init(unsafe.Sizeof(specialTinyBlock{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
	h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
	h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys)
	h.specialSecretAlloc.init(unsafe.Sizeof(specialSecret{}), nil, nil, &memstats.other_sys)
	h.specialWeakHandleAlloc.init(unsafe.Sizeof(specialWeakHandle{}), nil, nil, &memstats.gcMiscSys)
	h.specialBubbleAlloc.init(unsafe.Sizeof(specialBubble{}), nil, nil, &memstats.other_sys)
	h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)

	// Don't zero mspan allocations. Background sweeping can
	// inspect a span concurrently with allocating it, so it's
	// important that the span's sweepgen survive across freeing
	// and re-allocating a span to prevent background sweeping
	// from improperly cas'ing it from 0.
	//
	// This is safe because mspan contains no heap pointers.
	h.spanalloc.zero = false

	// h->mapcache needs no init

	for i := range h.central {
		h.central[i].mcentral.init(spanClass(i))
	}

	h.pages.init(&h.lock, &memstats.gcMiscSys, false)

	xRegInitAlloc()
}

// reclaim sweeps and reclaims at least npage pages into the heap.
// It is called before allocating npage pages to keep growth in check.
//
// reclaim implements the page-reclaimer half of the sweeper.
//
// h.lock must NOT be held.
func (h *mheap) reclaim(npage uintptr) {
	// TODO(austin): Half of the time spent freeing spans is in
	// locking/unlocking the heap (even with low contention). We
	// could make the slow path here several times faster by
	// batching heap frees.

	// Bail early if there's no more reclaim work.
	if h.reclaimIndex.Load() >= 1<<63 {
		return
	}

	// Disable preemption so the GC can't start while we're
	// sweeping, so we can read h.sweepArenas, and so
	// traceGCSweepStart/Done pair on the P.
	mp := acquirem()

	trace := traceAcquire()
	if trace.ok() {
		trace.GCSweepStart()
		traceRelease(trace)
	}

	arenas := h.sweepArenas
	locked := false
	for npage > 0 {
		// Pull from accumulated credit first.
		if credit := h.reclaimCredit.Load(); credit > 0 {
			take := credit
			if take > npage {
				// Take only what we need.
				take = npage
			}
			if h.reclaimCredit.CompareAndSwap(credit, credit-take) {
				npage -= take
			}
			continue
		}

		// Claim a chunk of work.
		idx := uintptr(h.reclaimIndex.Add(pagesPerReclaimerChunk) - pagesPerReclaimerChunk)
		if idx/pagesPerArena >= uintptr(len(arenas)) {
			// Page reclaiming is done.
			h.reclaimIndex.Store(1 << 63)
			break
		}

		if !locked {
			// Lock the heap for reclaimChunk.
			lock(&h.lock)
			locked = true
		}

		// Scan this chunk.
		nfound := h.reclaimChunk(arenas, idx, pagesPerReclaimerChunk)
		if nfound <= npage {
			npage -= nfound
		} else {
			// Put spare pages toward global credit.
			h.reclaimCredit.Add(nfound - npage)
			npage = 0
		}
	}
	if locked {
		unlock(&h.lock)
	}

	trace = traceAcquire()
	if trace.ok() {
		trace.GCSweepDone()
		traceRelease(trace)
	}
	releasem(mp)
}

// reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n).
// It returns the number of pages returned to the heap.
//
// h.lock must be held and the caller must be non-preemptible. Note: h.lock may be
// temporarily unlocked and re-locked in order to do sweeping or if tracing is
// enabled.
func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
	// The heap lock must be held because this accesses the
	// heapArena.spans arrays using potentially non-live pointers.
	// In particular, if a span were freed and merged concurrently
	// with this probing heapArena.spans, it would be possible to
	// observe arbitrary, stale span pointers.
	assertLockHeld(&h.lock)

	n0 := n
	var nFreed uintptr
	sl := sweep.active.begin()
	if !sl.valid {
		return 0
	}
	for n > 0 {
		ai := arenas[pageIdx/pagesPerArena]
		ha := h.arenas[ai.l1()][ai.l2()]

		// Get a chunk of the bitmap to work on.
		arenaPage := uint(pageIdx % pagesPerArena)
		inUse := ha.pageInUse[arenaPage/8:]
		marked := ha.pageMarks[arenaPage/8:]
		if uintptr(len(inUse)) > n/8 {
			inUse = inUse[:n/8]
			marked = marked[:n/8]
		}

		// Scan this bitmap chunk for spans that are in-use
		// but have no marked objects on them.
		for i := range inUse {
			inUseUnmarked := atomic.Load8(&inUse[i]) &^ marked[i]
			if inUseUnmarked == 0 {
				continue
			}

			for j := uint(0); j < 8; j++ {
				if inUseUnmarked&(1<<j) != 0 {
					s := ha.spans[arenaPage+uint(i)*8+j]
					if s, ok := sl.tryAcquire(s); ok {
						npages := s.npages
						unlock(&h.lock)
						if s.sweep(false) {
							nFreed += npages
						}
						lock(&h.lock)
						// Reload inUse. It's possible nearby
						// spans were freed when we dropped the
						// lock and we don't want to get stale
						// pointers from the spans array.
						inUseUnmarked = atomic.Load8(&inUse[i]) &^ marked[i]
					}
				}
			}
		}

		// Advance.
		pageIdx += uintptr(len(inUse) * 8)
		n -= uintptr(len(inUse) * 8)
	}
	sweep.active.end(sl)
	trace := traceAcquire()
	if trace.ok() {
		unlock(&h.lock)
		// Account for pages scanned but not reclaimed.
		trace.GCSweepSpan((n0 - nFreed) * pageSize)
		traceRelease(trace)
		lock(&h.lock)
	}

	assertLockHeld(&h.lock) // Must be locked on return.
	return nFreed
}

// spanAllocType represents the type of allocation to make, or
// the type of allocation to be freed.
type spanAllocType uint8

const (
	spanAllocHeap    spanAllocType = iota // heap span
	spanAllocStack                        // stack span
	spanAllocWorkBuf                      // work buf span
)

// manual returns true if the span allocation is manually managed.
func (s spanAllocType) manual() bool {
	return s != spanAllocHeap
}

// alloc allocates a new span of npage pages from the GC'd heap.
//
// spanclass indicates the span's size class and scannability.
//
// Returns a span that has been fully initialized. span.needzero indicates
// whether the span has been zeroed. Note that it may not be.
func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
	var s *mspan
	systemstack(func() {
		// To prevent excessive heap growth, before allocating n pages
		// we need to sweep and reclaim at least n pages.
		if !isSweepDone() {
			h.reclaim(npages)
		}
		s = h.allocSpan(npages, spanAllocHeap, spanclass)
	})
	return s
}

// allocManual allocates a manually-managed span of npage pages.
// allocManual returns nil if allocation fails.
//
// allocManual adds the bytes used to *stat, which should be a
// memstats in-use field. Unlike allocations in the GC'd heap, the
// allocation does *not* count toward heapInUse.
//
// The memory backing the returned span may not be zeroed if
// span.needzero is set.
//
// allocManual must be called on the system stack because it may
// acquire the heap lock via allocSpan. See mheap for details.
//
// If new code is written to call allocManual, do NOT use an
// existing spanAllocType value and instead declare a new one.
//
//go:systemstack
func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan {
	if !typ.manual() {
		throw("manual span allocation called with non-manually-managed type")
	}
	return h.allocSpan(npages, typ, 0)
}

// setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
// is s.
func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
	p := base / pageSize
	ai := arenaIndex(base)
	ha := h.arenas[ai.l1()][ai.l2()]
	for n := uintptr(0); n < npage; n++ {
		i := (p + n) % pagesPerArena
		if i == 0 {
			ai = arenaIndex(base + n*pageSize)
			ha = h.arenas[ai.l1()][ai.l2()]
		}
		ha.spans[i] = s
	}
}

// allocNeedsZero checks if the region of address space [base, base+npage*pageSize),
// assumed to be allocated, needs to be zeroed, updating heap arena metadata for
// future allocations.
//
// This must be called each time pages are allocated from the heap, even if the page
// allocator can otherwise prove the memory it's allocating is already zero because
// they're fresh from the operating system. It updates heapArena metadata that is
// critical for future page allocations.
//
// There are no locking constraints on this method.
func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
	for npage > 0 {
		ai := arenaIndex(base)
		ha := h.arenas[ai.l1()][ai.l2()]

		zeroedBase := atomic.Loaduintptr(&ha.zeroedBase)
		arenaBase := base % heapArenaBytes
		if arenaBase < zeroedBase {
			// We extended into the non-zeroed part of the
			// arena, so this region needs to be zeroed before use.
			//
			// zeroedBase is monotonically increasing, so if we see this now then
			// we can be sure we need to zero this memory region.
			//
			// We still need to update zeroedBase for this arena, and
			// potentially more arenas.
			needZero = true
		}
		// We may observe arenaBase > zeroedBase if we're racing with one or more
		// allocations which are acquiring memory directly before us in the address
		// space. But, because we know no one else is acquiring *this* memory, it's
		// still safe to not zero.

		// Compute how far into the arena we extend, capped
		// at heapArenaBytes.
		arenaLimit := arenaBase + npage*pageSize
		if arenaLimit > heapArenaBytes {
			arenaLimit = heapArenaBytes
		}
		// Increase ha.zeroedBase so it's >= arenaLimit.
		// We may be racing with other updates.
		for arenaLimit > zeroedBase {
			if atomic.Casuintptr(&ha.zeroedBase, zeroedBase, arenaLimit) {
				break
			}
			zeroedBase = atomic.Loaduintptr(&ha.zeroedBase)
			// Double check basic conditions of zeroedBase.
			if zeroedBase <= arenaLimit && zeroedBase > arenaBase {
				// The zeroedBase moved into the space we were trying to
				// claim. That's very bad, and indicates someone allocated
				// the same region we did.
				throw("potentially overlapping in-use allocations detected")
			}
		}

		// Move base forward and subtract from npage to move into
		// the next arena, or finish.
		base += arenaLimit - arenaBase
		npage -= (arenaLimit - arenaBase) / pageSize
	}
	return
}
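
// A small worked example of the protocol above (illustrative numbers
// only): suppose ha.zeroedBase == 3*pageSize and we allocate two pages
// starting at arenaBase == 2*pageSize. Since arenaBase < zeroedBase, the
// first page has been handed out before, so needZero is reported true.
// The CAS loop then raises zeroedBase to arenaLimit == 4*pageSize so
// later allocations above us know their memory has been claimed. If
// instead arenaBase >= zeroedBase, the whole region has never been used
// and is still zero, so needZero stays false.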

// tryAllocMSpan attempts to allocate an mspan object from
// the P-local cache, but may fail.
//
// h.lock need not be held.
//
// The caller must ensure that its P won't change underneath
// it during this function. Currently we enforce this by requiring
// that the function run on the system stack, because that's
// the only place it is used now. In the future, this requirement
// may be relaxed if its use is necessary elsewhere.
//
//go:systemstack
func (h *mheap) tryAllocMSpan() *mspan {
	pp := getg().m.p.ptr()
	// If we don't have a p or the cache is empty, we can't do
	// anything here.
	if pp == nil || pp.mspancache.len == 0 {
		return nil
	}
	// Pull off the last entry in the cache.
	s := pp.mspancache.buf[pp.mspancache.len-1]
	pp.mspancache.len--
	return s
}

// allocMSpanLocked allocates an mspan object.
//
// h.lock must be held.
//
// allocMSpanLocked must be called on the system stack because
// its caller holds the heap lock. See mheap for details.
// Running on the system stack also ensures that we won't
// switch Ps during this function. See tryAllocMSpan for details.
//
//go:systemstack
func (h *mheap) allocMSpanLocked() *mspan {
	assertLockHeld(&h.lock)

	pp := getg().m.p.ptr()
	if pp == nil {
		// We don't have a p so just do the normal thing.
		return (*mspan)(h.spanalloc.alloc())
	}
	// Refill the cache if necessary.
	if pp.mspancache.len == 0 {
		const refillCount = len(pp.mspancache.buf) / 2
		for i := 0; i < refillCount; i++ {
			pp.mspancache.buf[i] = (*mspan)(h.spanalloc.alloc())
		}
		pp.mspancache.len = refillCount
	}
	// Pull off the last entry in the cache.
	s := pp.mspancache.buf[pp.mspancache.len-1]
	pp.mspancache.len--
	return s
}

// freeMSpanLocked frees an mspan object.
//
// h.lock must be held.
//
// freeMSpanLocked must be called on the system stack because
// its caller holds the heap lock. See mheap for details.
// Running on the system stack also ensures that we won't
// switch Ps during this function. See tryAllocMSpan for details.
//
//go:systemstack
func (h *mheap) freeMSpanLocked(s *mspan) {
	assertLockHeld(&h.lock)

	pp := getg().m.p.ptr()
	// First try to free the mspan directly to the cache.
	if pp != nil && pp.mspancache.len < len(pp.mspancache.buf) {
		pp.mspancache.buf[pp.mspancache.len] = s
		pp.mspancache.len++
		return
	}
	// Failing that (or if we don't have a p), just free it to
	// the heap.
	h.spanalloc.free(unsafe.Pointer(s))
}

// allocSpan allocates an mspan which owns npages worth of memory.
//
// If typ.manual() == false, allocSpan allocates a heap span of class spanclass
// and updates heap accounting. If manual == true, allocSpan allocates a
// manually-managed span (spanclass is ignored), and the caller is
// responsible for any accounting related to its use of the span. Either
// way, allocSpan will atomically add the bytes in the newly allocated
// span to *sysStat.
//
// The returned span is fully initialized.
//
// h.lock must not be held.
//
// allocSpan must be called on the system stack both because it acquires
// the heap lock and because it must block GC transitions.
//
//go:systemstack
func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
	// Function-global state.
	gp := getg()
	base, scav := uintptr(0), uintptr(0)
	growth := uintptr(0)

	// On some platforms we need to provide physical page aligned stack
	// allocations. Where the page size is less than the physical page
	// size, we already manage to do this by default.
	needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize

	// If the allocation is small enough, try the page cache!
	// The page cache does not support aligned allocations, so we cannot use
	// it if we need to provide a physical page aligned stack allocation.
	pp := gp.m.p.ptr()
	if !needPhysPageAlign && pp != nil && npages < pageCachePages/4 {
		c := &pp.pcache

		// If the cache is empty, refill it.
		if c.empty() {
			lock(&h.lock)
			*c = h.pages.allocToCache()
			unlock(&h.lock)
		}

		// Try to allocate from the cache.
		base, scav = c.alloc(npages)
		if base != 0 {
			s = h.tryAllocMSpan()
			if s != nil {
				goto HaveSpan
			}
			// We have a base but no mspan, so we need
			// to lock the heap.
		}
	}

	// For one reason or another, we couldn't get the
	// whole job done without the heap lock.
	lock(&h.lock)

	if needPhysPageAlign {
		// Overallocate by a physical page to allow for later alignment.
		extraPages := physPageSize / pageSize

		// Find a big enough region first, but then only allocate the
		// aligned portion. We can't just allocate and then free the
		// edges because we need to account for scavenged memory, and
		// that's difficult with alloc.
		//
		// Note that we skip updates to searchAddr here. It's OK if
		// it's stale and higher than normal; it'll operate correctly,
		// just come with a performance cost.
		base, _ = h.pages.find(npages + extraPages)
		if base == 0 {
			var ok bool
			growth, ok = h.grow(npages + extraPages)
			if !ok {
				unlock(&h.lock)
				return nil
			}
			base, _ = h.pages.find(npages + extraPages)
			if base == 0 {
				throw("grew heap, but no adequate free space found")
			}
		}
		base = alignUp(base, physPageSize)
		scav = h.pages.allocRange(base, npages)
	}

	if base == 0 {
		// Try to acquire a base address.
		base, scav = h.pages.alloc(npages)
		if base == 0 {
			var ok bool
			growth, ok = h.grow(npages)
			if !ok {
				unlock(&h.lock)
				return nil
			}
			base, scav = h.pages.alloc(npages)
			if base == 0 {
				throw("grew heap, but no adequate free space found")
			}
		}
	}
	if s == nil {
		// We failed to get an mspan earlier, so grab
		// one now that we have the heap lock.
		s = h.allocMSpanLocked()
	}
	unlock(&h.lock)

HaveSpan:
	// Decide if we need to scavenge in response to what we just allocated.
	// Specifically, we track the maximum amount of memory to scavenge of all
	// the alternatives below, assuming that the maximum satisfies *all*
	// conditions we check (e.g. if we need to scavenge X to satisfy the
	// memory limit and Y to satisfy heap-growth scavenging, and Y > X, then
	// it's fine to pick Y, because the memory limit is still satisfied).
	//
	// It's fine to do this after allocating because we expect any scavenged
	// pages not to get touched until we return. Simultaneously, it's important
	// to do this before calling sysUsed because that may commit address space.
	bytesToScavenge := uintptr(0)
	forceScavenge := false
	if limit := gcController.memoryLimit.Load(); !gcCPULimiter.limiting() {
		// Assist with scavenging to maintain the memory limit by the amount
		// that we expect to page in.
		inuse := gcController.mappedReady.Load()
		// Be careful about overflow, especially with uintptrs. Even on 32-bit platforms
		// someone can set a really big memory limit that isn't math.MaxInt64.
		if uint64(scav)+inuse > uint64(limit) {
			bytesToScavenge = uintptr(uint64(scav) + inuse - uint64(limit))
			forceScavenge = true
		}
	}
	if goal := scavenge.gcPercentGoal.Load(); goal != ^uint64(0) && growth > 0 {
		// We just caused a heap growth, so scavenge down what will soon be used.
		// By scavenging inline we deal with the failure to allocate out of
		// memory fragments by scavenging the memory fragments that are least
		// likely to be re-used.
		//
		// Only bother with this because we're not using a memory limit. We don't
		// care about heap growths as long as we're under the memory limit, and the
		// previous check for scavenging already handles that.
		if retained := heapRetained(); retained+uint64(growth) > goal {
			// The scavenging algorithm requires the heap lock to be dropped so it
			// can acquire it only sparingly. This is a potentially expensive operation
			// so it frees up other goroutines to allocate in the meanwhile. In fact,
			// they can make use of the growth we just created.
			todo := growth
			if overage := uintptr(retained + uint64(growth) - goal); todo > overage {
				todo = overage
			}
			if todo > bytesToScavenge {
				bytesToScavenge = todo
			}
		}
	}
	// There are a few very limited circumstances where we won't have a P here.
	// It's OK to simply skip scavenging in these cases. Something else will notice
	// and pick up the tab.
	var now int64
	if pp != nil && bytesToScavenge > 0 {
		// Measure how long we spent scavenging and add that measurement to the assist
		// time so we can track it for the GC CPU limiter.
		//
		// Limiter event tracking might be disabled if we end up here
		// while on a mark worker.
		start := nanotime()
		track := pp.limiterEvent.start(limiterEventScavengeAssist, start)

		// Scavenge, but back out if the limiter turns on.
		released := h.pages.scavenge(bytesToScavenge, func() bool {
			return gcCPULimiter.limiting()
		}, forceScavenge)

		mheap_.pages.scav.releasedEager.Add(released)

		// Finish up accounting.
		now = nanotime()
		if track {
			pp.limiterEvent.stop(limiterEventScavengeAssist, now)
		}
		scavenge.assistTime.Add(now - start)
	}

	// Initialize the span.
	h.initSpan(s, typ, spanclass, base, npages, scav)

	if valgrindenabled {
		valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize)
	}

	// Commit and account for any scavenged memory that the span now owns.
	nbytes := npages * pageSize
	if scav != 0 {
		// sysUsed all the pages that are actually available
		// in the span since some of them might be scavenged.
		sysUsed(unsafe.Pointer(base), nbytes, scav)
		gcController.heapReleased.add(-int64(scav))
	}
	// Update stats.
	gcController.heapFree.add(-int64(nbytes - scav))
	if typ == spanAllocHeap {
		gcController.heapInUse.add(int64(nbytes))
	}
	// Update consistent stats.
	stats := memstats.heapStats.acquire()
	atomic.Xaddint64(&stats.committed, int64(scav))
	atomic.Xaddint64(&stats.released, -int64(scav))
	switch typ {
	case spanAllocHeap:
		atomic.Xaddint64(&stats.inHeap, int64(nbytes))
	case spanAllocStack:
		atomic.Xaddint64(&stats.inStacks, int64(nbytes))
	case spanAllocWorkBuf:
		atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes))
	}
	memstats.heapStats.release()

	// Trace the span alloc.
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.SpanAlloc(s)
			traceRelease(trace)
		}
	}
	return s
}

// initSpan initializes a blank span s which will represent the range
// [base, base+npages*pageSize). typ is the type of span being allocated.
func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages, scav uintptr) {
	// At this point, both s != nil and base != 0, and the heap
	// lock is no longer held. Initialize the span.
	s.init(base, npages)
	// Always call allocNeedsZero to update the arena's zeroedBase watermark
	// and determine if the memory is considered dirty.
	needZero := h.allocNeedsZero(base, npages)
	// If these pages were scavenged (returned to the OS), the kernel guarantees
	// they will be zero-filled on next use (fault-in), so we can treat them as
	// already zeroed and skip explicit clearing.
	if (needZeroAfterSysUnused() || scav != npages*pageSize) && needZero {
		s.needzero = 1
	}
	nbytes := npages * pageSize
	if typ.manual() {
		s.manualFreeList = 0
		s.nelems = 0
		s.state.set(mSpanManual)
	} else {
		// We must set span properties before the span is published anywhere
		// since we're not holding the heap lock.
		s.spanclass = spanclass
		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
			s.elemsize = nbytes
			s.nelems = 1
			s.divMul = 0
		} else {
			s.elemsize = uintptr(gc.SizeClassToSize[sizeclass])
			if goexperiment.GreenTeaGC {
				var reserve uintptr
				if gcUsesSpanInlineMarkBits(s.elemsize) {
					// Reserve space for the inline mark bits.
					reserve += unsafe.Sizeof(spanInlineMarkBits{})
				}
				if heapBitsInSpan(s.elemsize) && !s.spanclass.noscan() {
					// Reserve space for the pointer/scan bitmap at the end.
					reserve += nbytes / goarch.PtrSize / 8
				}
				s.nelems = uint16((nbytes - reserve) / s.elemsize)
			} else {
				if !s.spanclass.noscan() && heapBitsInSpan(s.elemsize) {
					// Reserve space for the pointer/scan bitmap at the end.
					s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize)
				} else {
					s.nelems = uint16(nbytes / s.elemsize)
				}
			}
			s.divMul = gc.SizeClassToDivMagic[sizeclass]
		}

		// Initialize mark and allocation structures.
		s.freeindex = 0
		s.freeIndexForScan = 0
		s.allocCache = ^uint64(0) // all 1s indicating all free.
		s.gcmarkBits = newMarkBits(uintptr(s.nelems))
		s.allocBits = newAllocBits(uintptr(s.nelems))

		// Adjust s.limit down to the object-containing part of the span.
		s.limit = s.base() + s.elemsize*uintptr(s.nelems)

		// It's safe to access h.sweepgen without the heap lock because it's
		// only ever updated with the world stopped and we run on the
		// systemstack which blocks a STW transition.
		atomic.Store(&s.sweepgen, h.sweepgen)

		// Now that the span is filled in, set its state. This
		// is a publication barrier for the other fields in
		// the span. While valid pointers into this span
		// should never be visible until the span is returned,
		// if the garbage collector finds an invalid pointer,
		// access to the span may race with initialization of
		// the span. We resolve this race by atomically
		// setting the state after the span is fully
		// initialized, and atomically checking the state in
		// any situation where a pointer is suspect.
		s.state.set(mSpanInUse)
	}

	// Publish the span in various locations.

	// This is safe to call without the lock held because the slots
	// related to this span will only ever be read or modified by
	// this thread until pointers into the span are published (and
	// we execute a publication barrier at the end of this function
	// before that happens) or pageInUse is updated.
	h.setSpans(s.base(), npages, s)

	if !typ.manual() {
		// Mark in-use span in arena page bitmap.
		//
		// This publishes the span to the page sweeper, so
		// it's imperative that the span be completely initialized
		// prior to this line.
		arena, pageIdx, pageMask := pageIndexOf(s.base())
		atomic.Or8(&arena.pageInUse[pageIdx], pageMask)

		// Mark packed span.
1536 if gcUsesSpanInlineMarkBits(s.elemsize) { 1537 atomic.Or8(&arena.pageUseSpanInlineMarkBits[pageIdx], pageMask) 1538 } 1539 1540 // Update related page sweeper stats. 1541 h.pagesInUse.Add(npages) 1542 } 1543 1544 // Make sure the newly allocated span will be observed 1545 // by the GC before pointers into the span are published. 1546 publicationBarrier() 1547 } 1548 1549 // Try to add at least npage pages of memory to the heap, 1550 // returning how much the heap grew by and whether it worked. 1551 // 1552 // h.lock must be held. 1553 func (h *mheap) grow(npage uintptr) (uintptr, bool) { 1554 assertLockHeld(&h.lock) 1555 1556 firstGrow := h.curArena.base == 0 1557 1558 // We must grow the heap in whole palloc chunks. 1559 // We call sysMap below but note that because we 1560 // round up to pallocChunkPages which is on the order 1561 // of MiB (generally at least the huge page size) we 1562 // won't be calling it too much. 1563 ask := alignUp(npage, pallocChunkPages) * pageSize 1564 1565 totalGrowth := uintptr(0) 1566 // This may overflow because ask could be very large 1567 // and is otherwise unrelated to h.curArena.base. 1568 end := h.curArena.base + ask 1569 nBase := alignUp(end, physPageSize) 1570 if nBase > h.curArena.end || /* overflow */ end < h.curArena.base { 1571 // Not enough room in the current arena. Allocate more 1572 // arena space. This may not be contiguous with the 1573 // current arena, so we have to request the full ask. 1574 av, asize := h.sysAlloc(ask, &h.arenaHints, &h.heapArenas) 1575 if av == nil { 1576 inUse := gcController.heapFree.load() + gcController.heapReleased.load() + gcController.heapInUse.load() 1577 print("runtime: out of memory: cannot allocate ", ask, "-byte block (", inUse, " in use)\n") 1578 return 0, false 1579 } 1580 1581 if uintptr(av) == h.curArena.end { 1582 // The new space is contiguous with the old 1583 // space, so just extend the current space. 1584 h.curArena.end = uintptr(av) + asize 1585 } else { 1586 // The new space is discontiguous. Track what 1587 // remains of the current space and switch to 1588 // the new space. This should be rare. 1589 if size := h.curArena.end - h.curArena.base; size != 0 { 1590 // Transition this space from Reserved to Prepared and mark it 1591 // as released since we'll be able to start using it after updating 1592 // the page allocator and releasing the lock at any time. 1593 sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased, "heap") 1594 // Update stats. 1595 stats := memstats.heapStats.acquire() 1596 atomic.Xaddint64(&stats.released, int64(size)) 1597 memstats.heapStats.release() 1598 // Update the page allocator's structures to make this 1599 // space ready for allocation. 1600 h.pages.grow(h.curArena.base, size) 1601 totalGrowth += size 1602 } 1603 // Switch to the new space. 1604 h.curArena.base = uintptr(av) 1605 h.curArena.end = uintptr(av) + asize 1606 1607 if firstGrow && randomizeHeapBase { 1608 // The top heapAddrBits-logHeapArenaBytes bits are randomized; we now 1609 // want to randomize the next 1610 // logHeapArenaBytes-log2(pallocChunkBytes) bits, making sure 1611 // h.curArena.base is aligned to pallocChunkBytes. 1612 bits := logHeapArenaBytes - logPallocChunkBytes 1613 offset := nextHeapRandBits(bits) 1614 h.curArena.base = alignDown(h.curArena.base|(offset<<logPallocChunkBytes), pallocChunkBytes) 1615 } 1616 } 1617 1618 // Recalculate nBase.
1619 // We know this won't overflow, because sysAlloc returned 1620 // a valid region starting at h.curArena.base which is at 1621 // least ask bytes in size. 1622 nBase = alignUp(h.curArena.base+ask, physPageSize) 1623 } 1624 1625 // Grow into the current arena. 1626 v := h.curArena.base 1627 h.curArena.base = nBase 1628 1629 // Transition the space we're going to use from Reserved to Prepared. 1630 // 1631 // The allocation is always aligned to the heap arena 1632 // size which is always > physPageSize, so it's safe to 1633 // just add directly to heapReleased. 1634 sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased, "heap") 1635 1636 // The memory just allocated counts as both released 1637 // and idle, even though it's not yet backed by spans. 1638 stats := memstats.heapStats.acquire() 1639 atomic.Xaddint64(&stats.released, int64(nBase-v)) 1640 memstats.heapStats.release() 1641 1642 // Update the page allocator's structures to make this 1643 // space ready for allocation. 1644 h.pages.grow(v, nBase-v) 1645 totalGrowth += nBase - v 1646 1647 if firstGrow && randomizeHeapBase { 1648 // The top heapAddrBits-log2(pallocChunkBytes) bits are now randomized; 1649 // we finally want to randomize the next 1650 // log2(pallocChunkBytes)-log2(pageSize) bits, while maintaining 1651 // alignment to pageSize. We do this by calculating a random number of 1652 // pages into the current arena, and marking them as allocated. The 1653 // address of the next available page becomes our fully randomized base 1654 // heap address. 1655 randOffset := nextHeapRandBits(logPallocChunkBytes) 1656 randNumPages := alignDown(randOffset, pageSize) / pageSize 1657 if randNumPages != 0 { 1658 h.pages.markRandomPaddingPages(v, randNumPages) 1659 } 1660 } 1661 1662 return totalGrowth, true 1663 } 1664 1665 // Free the span back into the heap. 1666 func (h *mheap) freeSpan(s *mspan) { 1667 systemstack(func() { 1668 // Trace the span free. 1669 if traceAllocFreeEnabled() { 1670 trace := traceAcquire() 1671 if trace.ok() { 1672 trace.SpanFree(s) 1673 traceRelease(trace) 1674 } 1675 } 1676 1677 lock(&h.lock) 1678 if msanenabled { 1679 // Tell msan that this entire span is no longer in use. 1680 base := unsafe.Pointer(s.base()) 1681 bytes := s.npages << gc.PageShift 1682 msanfree(base, bytes) 1683 } 1684 if asanenabled { 1685 // Tell asan that this entire span is no longer in use. 1686 base := unsafe.Pointer(s.base()) 1687 bytes := s.npages << gc.PageShift 1688 asanpoison(base, bytes) 1689 } 1690 if valgrindenabled { 1691 base := s.base() 1692 valgrindMempoolFree(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base)) 1693 } 1694 h.freeSpanLocked(s, spanAllocHeap) 1695 unlock(&h.lock) 1696 }) 1697 } 1698 1699 // freeManual frees a manually-managed span returned by allocManual. 1700 // typ must be the same as the spanAllocType passed to the allocManual that 1701 // allocated s. 1702 // 1703 // This must only be called when gcphase == _GCoff. See mSpanState for 1704 // an explanation. 1705 // 1706 // freeManual must be called on the system stack because it acquires 1707 // the heap lock. See mheap for details. 1708 // 1709 //go:systemstack 1710 func (h *mheap) freeManual(s *mspan, typ spanAllocType) { 1711 // Trace the span free.
1712 if traceAllocFreeEnabled() { 1713 trace := traceAcquire() 1714 if trace.ok() { 1715 trace.SpanFree(s) 1716 traceRelease(trace) 1717 } 1718 } 1719 1720 s.needzero = 1 1721 lock(&h.lock) 1722 if valgrindenabled { 1723 base := s.base() 1724 valgrindMempoolFree(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base)) 1725 } 1726 h.freeSpanLocked(s, typ) 1727 unlock(&h.lock) 1728 } 1729 1730 func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) { 1731 assertLockHeld(&h.lock) 1732 1733 switch s.state.get() { 1734 case mSpanManual: 1735 if s.allocCount != 0 { 1736 throw("mheap.freeSpanLocked - invalid stack free") 1737 } 1738 case mSpanInUse: 1739 if s.isUserArenaChunk { 1740 throw("mheap.freeSpanLocked - invalid free of user arena chunk") 1741 } 1742 if s.allocCount != 0 || s.sweepgen != h.sweepgen { 1743 print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n") 1744 throw("mheap.freeSpanLocked - invalid free") 1745 } 1746 h.pagesInUse.Add(-s.npages) 1747 1748 // Clear in-use bit in arena page bitmap. 1749 arena, pageIdx, pageMask := pageIndexOf(s.base()) 1750 atomic.And8(&arena.pageInUse[pageIdx], ^pageMask) 1751 1752 // Clear small heap span bit if necessary. 1753 if gcUsesSpanInlineMarkBits(s.elemsize) { 1754 atomic.And8(&arena.pageUseSpanInlineMarkBits[pageIdx], ^pageMask) 1755 } 1756 default: 1757 throw("mheap.freeSpanLocked - invalid span state") 1758 } 1759 1760 // Update stats. 1761 // 1762 // Mirrors the code in allocSpan. 1763 nbytes := s.npages * pageSize 1764 gcController.heapFree.add(int64(nbytes)) 1765 if typ == spanAllocHeap { 1766 gcController.heapInUse.add(-int64(nbytes)) 1767 } 1768 // Update consistent stats. 1769 stats := memstats.heapStats.acquire() 1770 switch typ { 1771 case spanAllocHeap: 1772 atomic.Xaddint64(&stats.inHeap, -int64(nbytes)) 1773 case spanAllocStack: 1774 atomic.Xaddint64(&stats.inStacks, -int64(nbytes)) 1775 case spanAllocWorkBuf: 1776 atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes)) 1777 } 1778 memstats.heapStats.release() 1779 1780 // Mark the space as free. 1781 h.pages.free(s.base(), s.npages) 1782 1783 // Free the span structure. We no longer have a use for it. 1784 s.state.set(mSpanDead) 1785 h.freeMSpanLocked(s) 1786 } 1787 1788 // scavengeAll acquires the heap lock (blocking any additional 1789 // manipulation of the page allocator) and iterates over the whole 1790 // heap, scavenging every free page available. 1791 // 1792 // Must run on the system stack because it acquires the heap lock. 1793 // 1794 //go:systemstack 1795 func (h *mheap) scavengeAll() { 1796 // Disallow malloc or panic while holding the heap lock. We do 1797 // this here because this is a non-mallocgc entry-point to 1798 // the mheap API. 1799 gp := getg() 1800 gp.m.mallocing++ 1801 1802 // Force scavenge everything. 1803 released := h.pages.scavenge(^uintptr(0), nil, true) 1804 1805 gp.m.mallocing-- 1806 1807 if debug.scavtrace > 0 { 1808 printScavTrace(0, released, true) 1809 } 1810 } 1811 1812 //go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory 1813 func runtime_debug_freeOSMemory() { 1814 GC() 1815 systemstack(func() { mheap_.scavengeAll() }) 1816 } 1817 1818 // Initialize a new span with the given start and npages. 1819 func (span *mspan) init(base uintptr, npages uintptr) { 1820 // span is *not* zeroed. 
1821 span.next = nil 1822 span.prev = nil 1823 span.list = nil 1824 span.startAddr = base 1825 span.npages = npages 1826 span.limit = base + npages*gc.PageSize // see go.dev/issue/74288; adjusted later for heap spans 1827 span.allocCount = 0 1828 span.spanclass = 0 1829 span.elemsize = 0 1830 span.speciallock.key = 0 1831 span.specials = nil 1832 span.needzero = 0 1833 span.freeindex = 0 1834 span.freeIndexForScan = 0 1835 span.allocBits = nil 1836 span.gcmarkBits = nil 1837 span.pinnerBits = nil 1838 span.state.set(mSpanDead) 1839 lockInit(&span.speciallock, lockRankMspanSpecial) 1840 } 1841 1842 func (span *mspan) inList() bool { 1843 return span.list != nil 1844 } 1845 1846 // mSpanList heads a linked list of spans. 1847 type mSpanList struct { 1848 _ sys.NotInHeap 1849 first *mspan // first span in list, or nil if none 1850 last *mspan // last span in list, or nil if none 1851 } 1852 1853 // Initialize an empty doubly-linked list. 1854 func (list *mSpanList) init() { 1855 list.first = nil 1856 list.last = nil 1857 } 1858 1859 func (list *mSpanList) remove(span *mspan) { 1860 if span.list != list { 1861 print("runtime: failed mSpanList.remove span.npages=", span.npages, 1862 " span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n") 1863 throw("mSpanList.remove") 1864 } 1865 if list.first == span { 1866 list.first = span.next 1867 } else { 1868 span.prev.next = span.next 1869 } 1870 if list.last == span { 1871 list.last = span.prev 1872 } else { 1873 span.next.prev = span.prev 1874 } 1875 span.next = nil 1876 span.prev = nil 1877 span.list = nil 1878 } 1879 1880 func (list *mSpanList) isEmpty() bool { 1881 return list.first == nil 1882 } 1883 1884 func (list *mSpanList) insert(span *mspan) { 1885 if span.next != nil || span.prev != nil || span.list != nil { 1886 println("runtime: failed mSpanList.insert", span, span.next, span.prev, span.list) 1887 throw("mSpanList.insert") 1888 } 1889 span.next = list.first 1890 if list.first != nil { 1891 // The list contains at least one span; link it in. 1892 // The last span in the list doesn't change. 1893 list.first.prev = span 1894 } else { 1895 // The list contains no spans, so this is also the last span. 1896 list.last = span 1897 } 1898 list.first = span 1899 span.list = list 1900 } 1901 1902 func (list *mSpanList) insertBack(span *mspan) { 1903 if span.next != nil || span.prev != nil || span.list != nil { 1904 println("runtime: failed mSpanList.insertBack", span, span.next, span.prev, span.list) 1905 throw("mSpanList.insertBack") 1906 } 1907 span.prev = list.last 1908 if list.last != nil { 1909 // The list contains at least one span. 1910 list.last.next = span 1911 } else { 1912 // The list contains no spans, so this is also the first span. 1913 list.first = span 1914 } 1915 list.last = span 1916 span.list = list 1917 } 1918 1919 // takeAll removes all spans from other and inserts them at the front 1920 // of list. 1921 func (list *mSpanList) takeAll(other *mSpanList) { 1922 if other.isEmpty() { 1923 return 1924 } 1925 1926 // Reparent everything in other to list. 1927 for s := other.first; s != nil; s = s.next { 1928 s.list = list 1929 } 1930 1931 // Concatenate the lists. 1932 if list.isEmpty() { 1933 *list = *other 1934 } else { 1935 // Neither list is empty. Put other before list. 
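// For example, with list = [A, B] and other = [X, Y], the splice below
// produces [X, Y, A, B]: Y.next becomes A, A.prev becomes Y, and list.first
// becomes X while list.last remains B.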
1936 other.last.next = list.first 1937 list.first.prev = other.last 1938 list.first = other.first 1939 } 1940 1941 other.first, other.last = nil, nil 1942 } 1943 1944 const ( 1945 // _KindSpecialTinyBlock indicates that a given allocation is a tiny block. 1946 // Ordered before KindSpecialFinalizer and KindSpecialCleanup so that it 1947 // always appears first in the specials list. 1948 // Used only if debug.checkfinalizers != 0. 1949 _KindSpecialTinyBlock = 1 1950 // _KindSpecialFinalizer is for tracking finalizers. 1951 _KindSpecialFinalizer = 2 1952 // _KindSpecialWeakHandle is used for creating weak pointers. 1953 _KindSpecialWeakHandle = 3 1954 // _KindSpecialProfile is for memory profiling. 1955 _KindSpecialProfile = 4 1956 // _KindSpecialReachable is a special used for tracking 1957 // reachability during testing. 1958 _KindSpecialReachable = 5 1959 // _KindSpecialPinCounter is a special used for objects that are pinned 1960 // multiple times 1961 _KindSpecialPinCounter = 6 1962 // _KindSpecialCleanup is for tracking cleanups. 1963 _KindSpecialCleanup = 7 1964 // _KindSpecialCheckFinalizer adds additional context to a finalizer or cleanup. 1965 // Used only if debug.checkfinalizers != 0. 1966 _KindSpecialCheckFinalizer = 8 1967 // _KindSpecialBubble is used to associate objects with synctest bubbles. 1968 _KindSpecialBubble = 9 1969 // _KindSpecialSecret is a special used to mark an object 1970 // as needing zeroing immediately upon freeing. 1971 _KindSpecialSecret = 10 1972 ) 1973 1974 type special struct { 1975 _ sys.NotInHeap 1976 next *special // linked list in span 1977 offset uintptr // span offset of object 1978 kind byte // kind of special 1979 } 1980 1981 // spanHasSpecials marks a span as having specials in the arena bitmap. 1982 func spanHasSpecials(s *mspan) { 1983 arenaPage := (s.base() / pageSize) % pagesPerArena 1984 ai := arenaIndex(s.base()) 1985 ha := mheap_.arenas[ai.l1()][ai.l2()] 1986 atomic.Or8(&ha.pageSpecials[arenaPage/8], uint8(1)<<(arenaPage%8)) 1987 } 1988 1989 // spanHasNoSpecials marks a span as having no specials in the arena bitmap. 1990 func spanHasNoSpecials(s *mspan) { 1991 arenaPage := (s.base() / pageSize) % pagesPerArena 1992 ai := arenaIndex(s.base()) 1993 ha := mheap_.arenas[ai.l1()][ai.l2()] 1994 atomic.And8(&ha.pageSpecials[arenaPage/8], ^(uint8(1) << (arenaPage % 8))) 1995 } 1996 1997 // addspecial adds the special record s to the list of special records for 1998 // the object p. All fields of s should be filled in except for 1999 // offset & next, which this routine will fill in. 2000 // Returns true if the special was successfully added, false otherwise. 2001 // (The add will fail only if a record with the same p and s->kind 2002 // already exists unless force is set to true.) 2003 func addspecial(p unsafe.Pointer, s *special, force bool) bool { 2004 span := spanOfHeap(uintptr(p)) 2005 if span == nil { 2006 throw("addspecial on invalid pointer") 2007 } 2008 2009 // Ensure that the span is swept. 2010 // Sweeping accesses the specials list w/o locks, so we have 2011 // to synchronize with it. And it's just much safer. 2012 mp := acquirem() 2013 span.ensureSwept() 2014 2015 offset := uintptr(p) - span.base() 2016 kind := s.kind 2017 2018 lock(&span.speciallock) 2019 2020 // Find splice point, check for existing record. 2021 iter, exists := span.specialFindSplicePoint(offset, kind) 2022 if !exists || force { 2023 // Splice in record, fill in offset. 
2024 s.offset = offset 2025 s.next = *iter 2026 *iter = s 2027 spanHasSpecials(span) 2028 } 2029 2030 unlock(&span.speciallock) 2031 releasem(mp) 2032 // We're converting p to a uintptr and looking it up, and we 2033 // don't want it to die and get swept while we're doing so. 2034 KeepAlive(p) 2035 return !exists || force // already exists or addition was forced 2036 } 2037 2038 // Removes the Special record of the given kind for the object p. 2039 // Returns the record if the record existed, nil otherwise. 2040 // The caller must FixAlloc_Free the result. 2041 func removespecial(p unsafe.Pointer, kind uint8) *special { 2042 span := spanOfHeap(uintptr(p)) 2043 if span == nil { 2044 throw("removespecial on invalid pointer") 2045 } 2046 2047 // Ensure that the span is swept. 2048 // Sweeping accesses the specials list w/o locks, so we have 2049 // to synchronize with it. And it's just much safer. 2050 mp := acquirem() 2051 span.ensureSwept() 2052 2053 offset := uintptr(p) - span.base() 2054 2055 var result *special 2056 lock(&span.speciallock) 2057 2058 iter, exists := span.specialFindSplicePoint(offset, kind) 2059 if exists { 2060 s := *iter 2061 *iter = s.next 2062 result = s 2063 } 2064 if span.specials == nil { 2065 spanHasNoSpecials(span) 2066 } 2067 unlock(&span.speciallock) 2068 releasem(mp) 2069 return result 2070 } 2071 2072 // Find a splice point in the sorted list and check for an already existing 2073 // record. Returns a pointer to the next-reference in the list predecessor. 2074 // Returns true, if the referenced item is an exact match. 2075 func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special, bool) { 2076 // Find splice point, check for existing record. 2077 iter := &span.specials 2078 found := false 2079 for { 2080 s := *iter 2081 if s == nil { 2082 break 2083 } 2084 if offset == s.offset && kind == s.kind { 2085 found = true 2086 break 2087 } 2088 if offset < s.offset || (offset == s.offset && kind < s.kind) { 2089 break 2090 } 2091 iter = &s.next 2092 } 2093 return iter, found 2094 } 2095 2096 // The described object has a finalizer set for it. 2097 // 2098 // specialfinalizer is allocated from non-GC'd memory, so any heap 2099 // pointers must be specially handled. 2100 type specialfinalizer struct { 2101 _ sys.NotInHeap 2102 special special 2103 fn *funcval // May be a heap pointer. 2104 nret uintptr 2105 fint *_type // May be a heap pointer, but always live. 2106 ot *ptrtype // May be a heap pointer, but always live. 2107 } 2108 2109 // Adds a finalizer to the object p. Returns true if it succeeded. 2110 func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool { 2111 lock(&mheap_.speciallock) 2112 s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc()) 2113 unlock(&mheap_.speciallock) 2114 s.special.kind = _KindSpecialFinalizer 2115 s.fn = f 2116 s.nret = nret 2117 s.fint = fint 2118 s.ot = ot 2119 if addspecial(p, &s.special, false) { 2120 // This is responsible for maintaining the same 2121 // GC-related invariants as markrootSpans in any 2122 // situation where it's possible that markrootSpans 2123 // has already run but mark termination hasn't yet. 2124 if gcphase != _GCoff { 2125 base, span, _ := findObject(uintptr(p), 0, 0) 2126 mp := acquirem() 2127 gcw := &mp.p.ptr().gcw 2128 // Mark everything reachable from the object 2129 // so it's retained for the finalizer. 
2130 if !span.spanclass.noscan() { 2131 scanObject(base, gcw) 2132 } 2133 // Mark the finalizer itself, since the 2134 // special isn't part of the GC'd heap. 2135 scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil) 2136 releasem(mp) 2137 } 2138 return true 2139 } 2140 2141 // There was an old finalizer 2142 lock(&mheap_.speciallock) 2143 mheap_.specialfinalizeralloc.free(unsafe.Pointer(s)) 2144 unlock(&mheap_.speciallock) 2145 return false 2146 } 2147 2148 // Removes the finalizer (if any) from the object p. 2149 func removefinalizer(p unsafe.Pointer) { 2150 s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer))) 2151 if s == nil { 2152 return // there wasn't a finalizer to remove 2153 } 2154 lock(&mheap_.speciallock) 2155 mheap_.specialfinalizeralloc.free(unsafe.Pointer(s)) 2156 unlock(&mheap_.speciallock) 2157 } 2158 2159 // The described object has a cleanup set for it. 2160 type specialCleanup struct { 2161 _ sys.NotInHeap 2162 special special 2163 cleanup cleanupFn 2164 // Globally unique ID for the cleanup, obtained from mheap_.cleanupID. 2165 id uint64 2166 } 2167 2168 // addCleanup attaches a cleanup function to the object. Multiple 2169 // cleanups are allowed on an object, and even the same pointer. 2170 // A cleanup id is returned which can be used to uniquely identify 2171 // the cleanup. 2172 func addCleanup(p unsafe.Pointer, c cleanupFn) uint64 { 2173 // TODO(mknyszek): Consider pooling specialCleanups on the P 2174 // so we don't have to take the lock every time. Just locking 2175 // is a considerable part of the cost of AddCleanup. This 2176 // would also require reserving some cleanup IDs on the P. 2177 lock(&mheap_.speciallock) 2178 s := (*specialCleanup)(mheap_.specialCleanupAlloc.alloc()) 2179 mheap_.cleanupID++ // Increment first. ID 0 is reserved. 2180 id := mheap_.cleanupID 2181 unlock(&mheap_.speciallock) 2182 s.special.kind = _KindSpecialCleanup 2183 s.cleanup = c 2184 s.id = id 2185 2186 mp := acquirem() 2187 addspecial(p, &s.special, true) 2188 // This is responsible for maintaining the same 2189 // GC-related invariants as markrootSpans in any 2190 // situation where it's possible that markrootSpans 2191 // has already run but mark termination hasn't yet. 2192 if gcphase != _GCoff { 2193 // Mark the cleanup itself, since the 2194 // special isn't part of the GC'd heap. 2195 gcScanCleanup(s, &mp.p.ptr().gcw) 2196 } 2197 releasem(mp) 2198 // Keep c and its referents alive. There's a window in this function 2199 // where it's only reachable via the special while the special hasn't 2200 // been added to the specials list yet. This is similar to a bug 2201 // discovered for weak handles, see #70455. 2202 KeepAlive(c) 2203 return id 2204 } 2205 2206 // Always paired with a specialCleanup or specialfinalizer, adds context. 2207 type specialCheckFinalizer struct { 2208 _ sys.NotInHeap 2209 special special 2210 cleanupID uint64 // Needed to disambiguate cleanups. 2211 createPC uintptr 2212 funcPC uintptr 2213 ptrType *_type 2214 } 2215 2216 // setFinalizerContext adds a specialCheckFinalizer to ptr. ptr must already have a 2217 // finalizer special attached. 2218 func setFinalizerContext(ptr unsafe.Pointer, ptrType *_type, createPC, funcPC uintptr) { 2219 setCleanupContext(ptr, ptrType, createPC, funcPC, 0) 2220 } 2221 2222 // setCleanupContext adds a specialCheckFinalizer to ptr. ptr must already have a 2223 // finalizer or cleanup special attached. Pass 0 for the cleanupID to indicate 2224 // a finalizer. 
2225 func setCleanupContext(ptr unsafe.Pointer, ptrType *_type, createPC, funcPC uintptr, cleanupID uint64) { 2226 lock(&mheap_.speciallock) 2227 s := (*specialCheckFinalizer)(mheap_.specialCheckFinalizerAlloc.alloc()) 2228 unlock(&mheap_.speciallock) 2229 s.special.kind = _KindSpecialCheckFinalizer 2230 s.cleanupID = cleanupID 2231 s.createPC = createPC 2232 s.funcPC = funcPC 2233 s.ptrType = ptrType 2234 2235 mp := acquirem() 2236 addspecial(ptr, &s.special, true) 2237 releasem(mp) 2238 KeepAlive(ptr) 2239 } 2240 2241 func getCleanupContext(ptr uintptr, cleanupID uint64) *specialCheckFinalizer { 2242 assertWorldStopped() 2243 2244 span := spanOfHeap(ptr) 2245 if span == nil { 2246 return nil 2247 } 2248 var found *specialCheckFinalizer 2249 offset := ptr - span.base() 2250 iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCheckFinalizer) 2251 if exists { 2252 for { 2253 s := *iter 2254 if s == nil { 2255 // Reached the end of the linked list. Stop searching at this point. 2256 break 2257 } 2258 if offset == s.offset && _KindSpecialCheckFinalizer == s.kind && 2259 (*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID { 2260 // The special is a cleanup and contains a matching cleanup id. 2261 *iter = s.next 2262 found = (*specialCheckFinalizer)(unsafe.Pointer(s)) 2263 break 2264 } 2265 if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) { 2266 // The special is outside the region specified for that kind of 2267 // special. The specials are sorted by kind. 2268 break 2269 } 2270 // Try the next special. 2271 iter = &s.next 2272 } 2273 } 2274 return found 2275 } 2276 2277 // clearFinalizerContext removes the specialCheckFinalizer for the given pointer, if any. 2278 func clearFinalizerContext(ptr uintptr) { 2279 clearCleanupContext(ptr, 0) 2280 } 2281 2282 // clearCleanupContext removes the specialCheckFinalizer for the given pointer and cleanup ID, if any. 2283 func clearCleanupContext(ptr uintptr, cleanupID uint64) { 2284 // The following block removes the specialCheckFinalizer record for the object ptr. 2285 span := spanOfHeap(ptr) 2286 if span == nil { 2287 return 2288 } 2289 // Ensure that the span is swept. 2290 // Sweeping accesses the specials list w/o locks, so we have 2291 // to synchronize with it. And it's just much safer. 2292 mp := acquirem() 2293 span.ensureSwept() 2294 2295 offset := ptr - span.base() 2296 2297 var found *special 2298 lock(&span.speciallock) 2299 2300 iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCheckFinalizer) 2301 if exists { 2302 for { 2303 s := *iter 2304 if s == nil { 2305 // Reached the end of the linked list. Stop searching at this point. 2306 break 2307 } 2308 if offset == s.offset && _KindSpecialCheckFinalizer == s.kind && 2309 (*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID { 2310 // The special is a cleanup and contains a matching cleanup id. 2311 *iter = s.next 2312 found = s 2313 break 2314 } 2315 if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) { 2316 // The special is outside the region specified for that kind of 2317 // special. The specials are sorted by kind. 2318 break 2319 } 2320 // Try the next special.
2321 iter = &s.next 2322 } 2323 } 2324 if span.specials == nil { 2325 spanHasNoSpecials(span) 2326 } 2327 unlock(&span.speciallock) 2328 releasem(mp) 2329 2330 if found == nil { 2331 return 2332 } 2333 lock(&mheap_.speciallock) 2334 mheap_.specialCheckFinalizerAlloc.free(unsafe.Pointer(found)) 2335 unlock(&mheap_.speciallock) 2336 } 2337 2338 // Indicates that an allocation is a tiny block. 2339 // Used only if debug.checkfinalizers != 0. 2340 type specialTinyBlock struct { 2341 _ sys.NotInHeap 2342 special special 2343 } 2344 2345 // setTinyBlockContext marks an allocation as a tiny block to diagnostics like 2346 // checkfinalizer. 2347 // 2348 // A tiny block is only marked if it actually contains more than one distinct 2349 // value, since we're using this for debugging. 2350 func setTinyBlockContext(ptr unsafe.Pointer) { 2351 lock(&mheap_.speciallock) 2352 s := (*specialTinyBlock)(mheap_.specialTinyBlockAlloc.alloc()) 2353 unlock(&mheap_.speciallock) 2354 s.special.kind = _KindSpecialTinyBlock 2355 2356 mp := acquirem() 2357 addspecial(ptr, &s.special, false) 2358 releasem(mp) 2359 KeepAlive(ptr) 2360 } 2361 2362 // inTinyBlock returns whether ptr is in a tiny alloc block, at one point grouped 2363 // with other distinct values. 2364 func inTinyBlock(ptr uintptr) bool { 2365 assertWorldStopped() 2366 2367 ptr = alignDown(ptr, maxTinySize) 2368 span := spanOfHeap(ptr) 2369 if span == nil { 2370 return false 2371 } 2372 offset := ptr - span.base() 2373 _, exists := span.specialFindSplicePoint(offset, _KindSpecialTinyBlock) 2374 return exists 2375 } 2376 2377 // The described object has a weak pointer. 2378 // 2379 // Weak pointers in the GC have the following invariants: 2380 // 2381 // - Strong-to-weak conversions must ensure the strong pointer 2382 // remains live until the weak handle is installed. This ensures 2383 // that creating a weak pointer cannot fail. 2384 // 2385 // - Weak-to-strong conversions require the weakly-referenced 2386 // object to be swept before the conversion may proceed. This 2387 // ensures that weak-to-strong conversions cannot resurrect 2388 // dead objects by sweeping them before that happens. 2389 // 2390 // - Weak handles are unique and canonical for each byte offset into 2391 // an object that a strong pointer may point to, until an object 2392 // becomes unreachable. 2393 // 2394 // - Weak handles contain nil as soon as an object becomes unreachable 2395 // the first time, before a finalizer makes it reachable again. New 2396 // weak handles created after resurrection are newly unique. 2397 // 2398 // specialWeakHandle is allocated from non-GC'd memory, so any heap 2399 // pointers must be specially handled. 2400 type specialWeakHandle struct { 2401 _ sys.NotInHeap 2402 special special 2403 // handle is a reference to the actual weak pointer. 2404 // It is always heap-allocated and must be explicitly kept 2405 // live so long as this special exists. 2406 handle *atomic.Uintptr 2407 } 2408 2409 //go:linkname internal_weak_runtime_registerWeakPointer weak.runtime_registerWeakPointer 2410 func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer { 2411 return unsafe.Pointer(getOrAddWeakHandle(p)) 2412 } 2413 2414 //go:linkname internal_weak_runtime_makeStrongFromWeak weak.runtime_makeStrongFromWeak 2415 func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { 2416 handle := (*atomic.Uintptr)(u) 2417 2418 // Prevent preemption. 
We want to make sure that another GC cycle can't start 2419 // and that work.strongFromWeak.block can't change out from under us. 2420 mp := acquirem() 2421 2422 // Yield to the GC if necessary. 2423 if work.strongFromWeak.block { 2424 releasem(mp) 2425 2426 // Try to park and wait for mark termination. 2427 // N.B. gcParkStrongFromWeak calls acquirem before returning. 2428 mp = gcParkStrongFromWeak() 2429 } 2430 2431 p := handle.Load() 2432 if p == 0 { 2433 releasem(mp) 2434 return nil 2435 } 2436 // Be careful. p may or may not refer to valid memory anymore, as it could've been 2437 // swept and released already. It's always safe to ensure a span is swept, though, 2438 // even if it's just some random span. 2439 span := spanOfHeap(p) 2440 if span == nil { 2441 // If it's immortal, then just return the pointer. 2442 // 2443 // Stay non-preemptible so the GC can't see us convert this potentially 2444 // completely bogus value to an unsafe.Pointer. 2445 if isGoPointerWithoutSpan(unsafe.Pointer(p)) { 2446 releasem(mp) 2447 return unsafe.Pointer(p) 2448 } 2449 // It's heap-allocated, so the span probably just got swept and released. 2450 releasem(mp) 2451 return nil 2452 } 2453 // Ensure the span is swept. 2454 span.ensureSwept() 2455 2456 // Now we can trust whatever we get from handle, so make a strong pointer. 2457 // 2458 // Even if we just swept some random span that doesn't contain this object, because 2459 // this object is long dead and its memory has since been reused, we'll just observe nil. 2460 ptr := unsafe.Pointer(handle.Load()) 2461 2462 // This is responsible for maintaining the same GC-related 2463 // invariants as the Yuasa part of the write barrier. During 2464 // the mark phase, it's possible that we just created the only 2465 // valid pointer to the object pointed to by ptr. If it's only 2466 // ever referenced from our stack, and our stack is blackened 2467 // already, we could fail to mark it. So, mark it now. 2468 if gcphase != _GCoff { 2469 shade(uintptr(ptr)) 2470 } 2471 releasem(mp) 2472 2473 // Explicitly keep ptr alive. This seems unnecessary since we return ptr, 2474 // but let's be explicit since it's important we keep ptr alive across the 2475 // call to shade. 2476 KeepAlive(ptr) 2477 return ptr 2478 } 2479 2480 // gcParkStrongFromWeak puts the current goroutine on the weak->strong queue and parks. 2481 func gcParkStrongFromWeak() *m { 2482 // Prevent preemption as we check strongFromWeak, so it can't change out from under us. 2483 mp := acquirem() 2484 2485 for work.strongFromWeak.block { 2486 lock(&work.strongFromWeak.lock) 2487 releasem(mp) // N.B. Holding the lock prevents preemption. 2488 2489 // Queue ourselves up. 2490 work.strongFromWeak.q.pushBack(getg()) 2491 2492 // Park. 2493 goparkunlock(&work.strongFromWeak.lock, waitReasonGCWeakToStrongWait, traceBlockGCWeakToStrongWait, 2) 2494 2495 // Re-acquire the current M since we're going to check the condition again. 2496 mp = acquirem() 2497 2498 // Re-check condition. We may have awoken in the next GC's mark termination phase. 2499 } 2500 return mp 2501 } 2502 2503 // gcWakeAllStrongFromWeak wakes all currently blocked weak->strong 2504 // conversions. This is used at the end of a GC cycle. 2505 // 2506 // work.strongFromWeak.block must be false to prevent woken goroutines 2507 // from immediately going back to sleep. 
2508 func gcWakeAllStrongFromWeak() { 2509 lock(&work.strongFromWeak.lock) 2510 list := work.strongFromWeak.q.popList() 2511 injectglist(&list) 2512 unlock(&work.strongFromWeak.lock) 2513 } 2514 2515 // Retrieves or creates a weak pointer handle for the object p. 2516 func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { 2517 if debug.sbrk != 0 { 2518 // debug.sbrk never frees memory, so the handle will never go nil. However, we do still 2519 // need a weak handle that's specific to p. Use the immortal weak handle map. 2520 // Keep p alive across the call to getOrAdd defensively, though it doesn't 2521 // really matter in this particular case. 2522 handle := mheap_.immortalWeakHandles.getOrAdd(uintptr(p)) 2523 KeepAlive(p) 2524 return handle 2525 } 2526 2527 // First try to retrieve without allocating. 2528 if handle := getWeakHandle(p); handle != nil { 2529 // Keep p alive for the duration of the function to ensure 2530 // that it cannot die while we're trying to do this. 2531 KeepAlive(p) 2532 return handle 2533 } 2534 2535 lock(&mheap_.speciallock) 2536 s := (*specialWeakHandle)(mheap_.specialWeakHandleAlloc.alloc()) 2537 unlock(&mheap_.speciallock) 2538 2539 // N.B. Pad the weak handle to ensure it doesn't share a tiny 2540 // block with any other allocations. Sharing a block can lead to leaks, such 2541 // as in go.dev/issue/76007. As an alternative, we could consider 2542 // using the currently-unused 8-byte noscan size class. 2543 type weakHandleBox struct { 2544 h atomic.Uintptr 2545 _ [maxTinySize - unsafe.Sizeof(atomic.Uintptr{})]byte 2546 } 2547 handle := &(new(weakHandleBox).h) 2548 s.special.kind = _KindSpecialWeakHandle 2549 s.handle = handle 2550 handle.Store(uintptr(p)) 2551 if addspecial(p, &s.special, false) { 2552 // This is responsible for maintaining the same 2553 // GC-related invariants as markrootSpans in any 2554 // situation where it's possible that markrootSpans 2555 // has already run but mark termination hasn't yet. 2556 if gcphase != _GCoff { 2557 mp := acquirem() 2558 gcw := &mp.p.ptr().gcw 2559 // Mark the weak handle itself, since the 2560 // special isn't part of the GC'd heap. 2561 scanblock(uintptr(unsafe.Pointer(&s.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil) 2562 releasem(mp) 2563 } 2564 2565 // Keep p alive for the duration of the function to ensure 2566 // that it cannot die while we're trying to do this. 2567 // 2568 // Same for handle, which is only stored in the special. 2569 // There's a window where it might die if we don't keep it 2570 // alive explicitly. Returning it here is probably good enough, 2571 // but let's be defensive and explicit. See #70455. 2572 KeepAlive(p) 2573 KeepAlive(handle) 2574 return handle 2575 } 2576 2577 // There was an existing handle. Free the special 2578 // and try again. We must succeed because we're explicitly 2579 // keeping p live until the end of this function. Either 2580 // we, or someone else, must have succeeded, because we can 2581 // only fail in the event of a race, and p will still be 2582 // valid no matter how much time we spend here. 2583 lock(&mheap_.speciallock) 2584 mheap_.specialWeakHandleAlloc.free(unsafe.Pointer(s)) 2585 unlock(&mheap_.speciallock) 2586 2587 handle = getWeakHandle(p) 2588 if handle == nil { 2589 throw("failed to get or create weak handle") 2590 } 2591 2592 // Keep p alive for the duration of the function to ensure 2593 // that it cannot die while we're trying to do this. 2594 // 2595 // Same for handle, just to be defensive.
2596 KeepAlive(p) 2597 KeepAlive(handle) 2598 return handle 2599 } 2600 2601 func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr { 2602 span := spanOfHeap(uintptr(p)) 2603 if span == nil { 2604 if isGoPointerWithoutSpan(p) { 2605 return mheap_.immortalWeakHandles.getOrAdd(uintptr(p)) 2606 } 2607 throw("getWeakHandle on invalid pointer") 2608 } 2609 2610 // Ensure that the span is swept. 2611 // Sweeping accesses the specials list w/o locks, so we have 2612 // to synchronize with it. And it's just much safer. 2613 mp := acquirem() 2614 span.ensureSwept() 2615 2616 offset := uintptr(p) - span.base() 2617 2618 lock(&span.speciallock) 2619 2620 // Find the existing record and return the handle if one exists. 2621 var handle *atomic.Uintptr 2622 iter, exists := span.specialFindSplicePoint(offset, _KindSpecialWeakHandle) 2623 if exists { 2624 handle = ((*specialWeakHandle)(unsafe.Pointer(*iter))).handle 2625 } 2626 unlock(&span.speciallock) 2627 releasem(mp) 2628 2629 // Keep p alive for the duration of the function to ensure 2630 // that it cannot die while we're trying to do this. 2631 KeepAlive(p) 2632 return handle 2633 } 2634 2635 type immortalWeakHandleMap struct { 2636 root atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap) 2637 } 2638 2639 // immortalWeakHandle is a node in a lock-free append-only hash-trie. 2640 // 2641 // Key features: 2642 // - 2-ary trie. Child nodes are indexed by the highest remaining bit of the hash of the address. 2643 // - New nodes are placed at the first empty level encountered. 2644 // - When the first child is added to a node, the existing value is not moved into a child. 2645 // This means that we must check the value at each level, not just at the leaf. 2646 // - No deletion or rebalancing. 2647 // - Intentionally devolves into a linked list on hash collisions (the hash bits will all 2648 // get shifted out during iteration, and new nodes will just be appended to the 0th child). 2649 type immortalWeakHandle struct { 2650 _ sys.NotInHeap 2651 2652 children [2]atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap) 2653 ptr uintptr // &ptr is the weak handle 2654 } 2655 2656 // handle returns a canonical weak handle. 2657 func (h *immortalWeakHandle) handle() *atomic.Uintptr { 2658 // N.B. Since we just need an *atomic.Uintptr that never changes, we can trivially 2659 // reference ptr to save on some memory in immortalWeakHandle and avoid extra atomics 2660 // in getOrAdd. 2661 return (*atomic.Uintptr)(unsafe.Pointer(&h.ptr)) 2662 } 2663 2664 // getOrAdd introduces p, which must be a pointer to immortal memory (for example, a linker-allocated 2665 // object) and returns a weak handle. The weak handle will never become nil. 2666 func (tab *immortalWeakHandleMap) getOrAdd(p uintptr) *atomic.Uintptr { 2667 var newNode *immortalWeakHandle 2668 m := &tab.root 2669 hash := memhash(abi.NoEscape(unsafe.Pointer(&p)), 0, goarch.PtrSize) 2670 hashIter := hash 2671 for { 2672 n := (*immortalWeakHandle)(m.Load()) 2673 if n == nil { 2674 // Try to insert a new map node. We may end up discarding 2675 // this node if we fail to insert because it turns out the 2676 // value is already in the map. 2677 // 2678 // The discard will only happen if two threads race on inserting 2679 // the same value. Both might create nodes, but only one will 2680 // succeed on insertion.
If two threads race to insert two 2681 // different values, then both nodes will *always* get inserted, 2682 // because the equality checking below will always fail. 2683 // 2684 // Performance note: contention on insertion is likely to be 2685 // higher for small maps, but since this data structure is 2686 // append-only, either the map stays small because there isn't 2687 // much activity, or the map gets big and races to insert on 2688 // the same node are much less likely. 2689 if newNode == nil { 2690 newNode = (*immortalWeakHandle)(persistentalloc(unsafe.Sizeof(immortalWeakHandle{}), goarch.PtrSize, &memstats.gcMiscSys)) 2691 newNode.ptr = p 2692 } 2693 if m.CompareAndSwapNoWB(nil, unsafe.Pointer(newNode)) { 2694 return newNode.handle() 2695 } 2696 // Reload n. Because pointers are only stored once, 2697 // we must have lost the race, and therefore n is not nil 2698 // anymore. 2699 n = (*immortalWeakHandle)(m.Load()) 2700 } 2701 if n.ptr == p { 2702 return n.handle() 2703 } 2704 m = &n.children[hashIter>>(8*goarch.PtrSize-1)] 2705 hashIter <<= 1 2706 } 2707 } 2708 2709 // The described object is being heap profiled. 2710 type specialprofile struct { 2711 _ sys.NotInHeap 2712 special special 2713 b *bucket 2714 } 2715 2716 // Set the heap profile bucket associated with addr to b. 2717 func setprofilebucket(p unsafe.Pointer, b *bucket) { 2718 lock(&mheap_.speciallock) 2719 s := (*specialprofile)(mheap_.specialprofilealloc.alloc()) 2720 unlock(&mheap_.speciallock) 2721 s.special.kind = _KindSpecialProfile 2722 s.b = b 2723 if !addspecial(p, &s.special, false) { 2724 throw("setprofilebucket: profile already set") 2725 } 2726 } 2727 2728 // specialReachable tracks whether an object is reachable on the next 2729 // GC cycle. This is used by testing. 2730 type specialReachable struct { 2731 special special 2732 done bool 2733 reachable bool 2734 } 2735 2736 // specialPinCounter tracks whether an object is pinned multiple times. 2737 type specialPinCounter struct { 2738 special special 2739 counter uintptr 2740 } 2741 2742 // specialSecret tracks whether we need to zero an object immediately 2743 // upon freeing. 2744 type specialSecret struct { 2745 _ sys.NotInHeap 2746 special special 2747 size uintptr 2748 } 2749 2750 // specialsIter helps iterate over specials lists. 2751 type specialsIter struct { 2752 pprev **special 2753 s *special 2754 } 2755 2756 func newSpecialsIter(span *mspan) specialsIter { 2757 return specialsIter{&span.specials, span.specials} 2758 } 2759 2760 func (i *specialsIter) valid() bool { 2761 return i.s != nil 2762 } 2763 2764 func (i *specialsIter) next() { 2765 i.pprev = &i.s.next 2766 i.s = *i.pprev 2767 } 2768 2769 // unlinkAndNext removes the current special from the list and moves 2770 // the iterator to the next special. It returns the unlinked special. 2771 func (i *specialsIter) unlinkAndNext() *special { 2772 cur := i.s 2773 i.s = cur.next 2774 *i.pprev = i.s 2775 return cur 2776 } 2777 2778 // freeSpecial performs any cleanup on special s and deallocates it. 2779 // s must already be unlinked from the specials list. 2780 // TODO(mknyszek): p and size together DO NOT represent a valid allocation. 2781 // size is the size of the allocation block in the span (mspan.elemsize), and p is 2782 // whatever pointer the special was attached to, which need not point to the 2783 // beginning of the block, though it may. 2784 // Consider passing the arguments differently to avoid giving the impression 2785 // that p and size together represent an address range. 
2786 func freeSpecial(s *special, p unsafe.Pointer, size uintptr) { 2787 switch s.kind { 2788 case _KindSpecialFinalizer: 2789 sf := (*specialfinalizer)(unsafe.Pointer(s)) 2790 queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot) 2791 lock(&mheap_.speciallock) 2792 mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf)) 2793 unlock(&mheap_.speciallock) 2794 case _KindSpecialWeakHandle: 2795 sw := (*specialWeakHandle)(unsafe.Pointer(s)) 2796 sw.handle.Store(0) 2797 lock(&mheap_.speciallock) 2798 mheap_.specialWeakHandleAlloc.free(unsafe.Pointer(s)) 2799 unlock(&mheap_.speciallock) 2800 case _KindSpecialProfile: 2801 sp := (*specialprofile)(unsafe.Pointer(s)) 2802 mProf_Free(sp.b, size) 2803 lock(&mheap_.speciallock) 2804 mheap_.specialprofilealloc.free(unsafe.Pointer(sp)) 2805 unlock(&mheap_.speciallock) 2806 case _KindSpecialReachable: 2807 sp := (*specialReachable)(unsafe.Pointer(s)) 2808 sp.done = true 2809 // The creator frees these. 2810 case _KindSpecialPinCounter: 2811 lock(&mheap_.speciallock) 2812 mheap_.specialPinCounterAlloc.free(unsafe.Pointer(s)) 2813 unlock(&mheap_.speciallock) 2814 case _KindSpecialCleanup: 2815 sc := (*specialCleanup)(unsafe.Pointer(s)) 2816 // Cleanups, unlike finalizers, do not resurrect the objects 2817 // they're attached to, so we only need to pass the cleanup 2818 // function, not the object. 2819 gcCleanups.enqueue(sc.cleanup) 2820 lock(&mheap_.speciallock) 2821 mheap_.specialCleanupAlloc.free(unsafe.Pointer(sc)) 2822 unlock(&mheap_.speciallock) 2823 case _KindSpecialCheckFinalizer: 2824 sc := (*specialCheckFinalizer)(unsafe.Pointer(s)) 2825 lock(&mheap_.speciallock) 2826 mheap_.specialCheckFinalizerAlloc.free(unsafe.Pointer(sc)) 2827 unlock(&mheap_.speciallock) 2828 case _KindSpecialTinyBlock: 2829 st := (*specialTinyBlock)(unsafe.Pointer(s)) 2830 lock(&mheap_.speciallock) 2831 mheap_.specialTinyBlockAlloc.free(unsafe.Pointer(st)) 2832 unlock(&mheap_.speciallock) 2833 case _KindSpecialBubble: 2834 st := (*specialBubble)(unsafe.Pointer(s)) 2835 lock(&mheap_.speciallock) 2836 mheap_.specialBubbleAlloc.free(unsafe.Pointer(st)) 2837 unlock(&mheap_.speciallock) 2838 case _KindSpecialSecret: 2839 ss := (*specialSecret)(unsafe.Pointer(s)) 2840 // p is the actual byte location that the special was 2841 // attached to, but the size argument is the span 2842 // element size. If we were to zero out using the size 2843 // argument, we'd trounce over adjacent memory in cases 2844 // where the allocation contains a header. Hence, we use 2845 // the user-visible size which we stash in the special itself. 2846 // 2847 // p always points to the beginning of the user-visible 2848 // allocation since the only way to attach a secret special 2849 // is via the allocation path. This isn't universal for 2850 // tiny allocs, but we avoid them in mallocgc anyway. 2851 memclrNoHeapPointers(p, ss.size) 2852 lock(&mheap_.speciallock) 2853 mheap_.specialSecretAlloc.free(unsafe.Pointer(s)) 2854 unlock(&mheap_.speciallock) 2855 default: 2856 throw("bad special kind") 2857 panic("not reached") 2858 } 2859 } 2860 2861 // gcBits is an alloc/mark bitmap. This is always used as gcBits.x. 2862 type gcBits struct { 2863 _ sys.NotInHeap 2864 x uint8 2865 } 2866 2867 // bytep returns a pointer to the n'th byte of b. 2868 func (b *gcBits) bytep(n uintptr) *uint8 { 2869 return addb(&b.x, n) 2870 } 2871 2872 // bitp returns a pointer to the byte containing bit n and a mask for 2873 // selecting that bit from *bytep. 
2874 func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) { 2875 return b.bytep(n / 8), 1 << (n % 8) 2876 } 2877 2878 const gcBitsChunkBytes = uintptr(64 << 10) 2879 const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{}) 2880 2881 type gcBitsHeader struct { 2882 free uintptr // free is the index into bits of the next free byte. 2883 next uintptr // *gcBits triggers recursive type bug. (issue 14620) 2884 } 2885 2886 type gcBitsArena struct { 2887 _ sys.NotInHeap 2888 // gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand. 2889 free uintptr // free is the index into bits of the next free byte; read/write atomically 2890 next *gcBitsArena 2891 bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits 2892 } 2893 2894 var gcBitsArenas struct { 2895 lock mutex 2896 free *gcBitsArena 2897 next *gcBitsArena // Read atomically. Write atomically under lock. 2898 current *gcBitsArena 2899 previous *gcBitsArena 2900 } 2901 2902 // tryAlloc allocates from b or returns nil if b does not have enough room. 2903 // This is safe to call concurrently. 2904 func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits { 2905 if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) { 2906 return nil 2907 } 2908 // Try to allocate from this block. 2909 end := atomic.Xadduintptr(&b.free, bytes) 2910 if end > uintptr(len(b.bits)) { 2911 return nil 2912 } 2913 // There was enough room. 2914 start := end - bytes 2915 return &b.bits[start] 2916 } 2917 2918 // newMarkBits returns a pointer to 8 byte aligned bytes 2919 // to be used for a span's mark bits. 2920 func newMarkBits(nelems uintptr) *gcBits { 2921 blocksNeeded := (nelems + 63) / 64 2922 bytesNeeded := blocksNeeded * 8 2923 2924 // Try directly allocating from the current head arena. 2925 head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next))) 2926 if p := head.tryAlloc(bytesNeeded); p != nil { 2927 return p 2928 } 2929 2930 // There's not enough room in the head arena. We may need to 2931 // allocate a new arena. 2932 lock(&gcBitsArenas.lock) 2933 // Try the head arena again, since it may have changed. Now 2934 // that we hold the lock, the list head can't change, but its 2935 // free position still can. 2936 if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil { 2937 unlock(&gcBitsArenas.lock) 2938 return p 2939 } 2940 2941 // Allocate a new arena. This may temporarily drop the lock. 2942 fresh := newArenaMayUnlock() 2943 // If newArenaMayUnlock dropped the lock, another thread may 2944 // have put a fresh arena on the "next" list. Try allocating 2945 // from next again. 2946 if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil { 2947 // Put fresh back on the free list. 2948 // TODO: Mark it "already zeroed" 2949 fresh.next = gcBitsArenas.free 2950 gcBitsArenas.free = fresh 2951 unlock(&gcBitsArenas.lock) 2952 return p 2953 } 2954 2955 // Allocate from the fresh arena. We haven't linked it in yet, so 2956 // this cannot race and is guaranteed to succeed. 2957 p := fresh.tryAlloc(bytesNeeded) 2958 if p == nil { 2959 throw("markBits overflow") 2960 } 2961 2962 // Add the fresh arena to the "next" list. 2963 fresh.next = gcBitsArenas.next 2964 atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh)) 2965 2966 unlock(&gcBitsArenas.lock) 2967 return p 2968 } 2969 2970 // newAllocBits returns a pointer to 8 byte aligned bytes 2971 // to be used for this span's alloc bits. 2972 // newAllocBits is used to provide newly initialized spans 2973 // allocation bits. 
For spans not being initialized the 2974 // mark bits are repurposed as allocation bits when 2975 // the span is swept. 2976 func newAllocBits(nelems uintptr) *gcBits { 2977 return newMarkBits(nelems) 2978 } 2979 2980 // nextMarkBitArenaEpoch establishes a new epoch for the arenas 2981 // holding the mark bits. The arenas are named relative to the 2982 // current GC cycle which is demarcated by the call to finishweep_m. 2983 // 2984 // All current spans have been swept. 2985 // During that sweep each span allocated room for its gcmarkBits in 2986 // gcBitsArenas.next block. gcBitsArenas.next becomes the gcBitsArenas.current 2987 // where the GC will mark objects and after each span is swept these bits 2988 // will be used to allocate objects. 2989 // gcBitsArenas.current becomes gcBitsArenas.previous where the span's 2990 // gcAllocBits live until all the spans have been swept during this GC cycle. 2991 // The span's sweep extinguishes all the references to gcBitsArenas.previous 2992 // by pointing gcAllocBits into the gcBitsArenas.current. 2993 // The gcBitsArenas.previous is released to the gcBitsArenas.free list. 2994 func nextMarkBitArenaEpoch() { 2995 lock(&gcBitsArenas.lock) 2996 if gcBitsArenas.previous != nil { 2997 if gcBitsArenas.free == nil { 2998 gcBitsArenas.free = gcBitsArenas.previous 2999 } else { 3000 // Find end of previous arenas. 3001 last := gcBitsArenas.previous 3002 for last = gcBitsArenas.previous; last.next != nil; last = last.next { 3003 } 3004 last.next = gcBitsArenas.free 3005 gcBitsArenas.free = gcBitsArenas.previous 3006 } 3007 } 3008 gcBitsArenas.previous = gcBitsArenas.current 3009 gcBitsArenas.current = gcBitsArenas.next 3010 atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed 3011 unlock(&gcBitsArenas.lock) 3012 } 3013 3014 // newArenaMayUnlock allocates and zeroes a gcBits arena. 3015 // The caller must hold gcBitsArena.lock. This may temporarily release it. 3016 func newArenaMayUnlock() *gcBitsArena { 3017 var result *gcBitsArena 3018 if gcBitsArenas.free == nil { 3019 unlock(&gcBitsArenas.lock) 3020 result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys, "gc bits")) 3021 if result == nil { 3022 throw("runtime: cannot allocate memory") 3023 } 3024 lock(&gcBitsArenas.lock) 3025 } else { 3026 result = gcBitsArenas.free 3027 gcBitsArenas.free = gcBitsArenas.free.next 3028 memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes) 3029 } 3030 result.next = nil 3031 // If result.bits is not 8 byte aligned adjust index so 3032 // that &result.bits[result.free] is 8 byte aligned. 3033 if unsafe.Offsetof(gcBitsArena{}.bits)&7 == 0 { 3034 result.free = 0 3035 } else { 3036 result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7) 3037 } 3038 return result 3039 } 3040
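// Illustrative sketch, not part of the runtime proper: how the byte/mask pair
// returned by gcBits.bitp is typically consumed. To test or set bit n of a
// gcBits bitmap b:
//
//	bytep, mask := b.bitp(n)
//	isSet := *bytep&mask != 0 // read bit n
//	*bytep |= mask            // set bit n (use atomic.Or8 on the byte when writers may race)
//
// newMarkBits sizes its allocations for exactly this layout: (nelems+63)/64
// 8-byte words, i.e. 8-byte-aligned groups of 64 bits.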