Source file src/runtime/malloc.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of in-use pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan's
//	   pages are returned to the mheap and the mspan is now dead.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// If mspan.needzero is false, then free object slots in the mspan are
// already zeroed. Otherwise if needzero is true, objects are zeroed as
// they are allocated. There are various benefits to delaying zeroing
// this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.

// Virtual memory layout
//
// The heap consists of a set of arenas, which are 64MB on 64-bit and
// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
// aligned to the arena size.
//
// Each arena has an associated heapArena object that stores the
// metadata for that arena: the heap bitmap for all words in the arena
// and the span map for all pages in the arena. heapArena objects are
// themselves allocated off-heap.
//
// Since arenas are aligned, the address space can be viewed as a
// series of arena frames. The arena map (mheap_.arenas) maps from
// arena frame number to *heapArena, or nil for parts of the address
// space not backed by the Go heap. The arena map is structured as a
// two-level array consisting of a "L1" arena map and many "L2" arena
// maps; however, since arenas are large, on many architectures, the
// arena map consists of a single, large L2 map.
//
// The arena map covers the entire possible address space, allowing
// the Go heap to use any part of the address space. The allocator
// attempts to keep arenas contiguous so that large spans (and hence
// large objects) can cross arenas.

package runtime

import (
	"internal/goarch"
	"internal/goexperiment"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

const (
	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = gc.MaxSmallSize
	pageSize      = 1 << gc.PageShift
	pageMask      = pageSize - 1

	// Unused. Left for viewcore.
	_PageSize = pageSize

	minSizeForMallocHeader = gc.MinSizeForMallocHeader
	mallocHeaderSize       = gc.MallocHeaderSize

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = int8(2)

	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//	OS               | FixedStack | NumStackOrders
	//	-----------------+------------+---------------
	//	linux/darwin/bsd | 2KB        | 4
	//	windows/32       | 4KB        | 3
	//	windows/64       | 8KB        | 2
	//	plan9            | 4KB        | 3
	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9

	// heapAddrBits is the number of bits in a heap address. On
	// amd64, addresses are sign-extended beyond heapAddrBits. On
	// other arches, they are zero-extended.
	//
	// On most 64-bit platforms, we limit this to 48 bits based on a
	// combination of hardware and OS limitations.
	//
	// amd64 hardware limits addresses to 48 bits, sign-extended
	// to 64 bits. Addresses where the top 16 bits are not either
	// all 0 or all 1 are "non-canonical" and invalid. Because of
	// these "negative" addresses, we offset addresses by 1<<47
	// (arenaBaseOffset) on amd64 before computing indexes into
	// the heap arenas index. In 2017, amd64 hardware added
	// support for 57 bit addresses; however, currently only Linux
	// supports this extension and the kernel will never choose an
	// address above 1<<47 unless mmap is called with a hint
	// address above 1<<47 (which we never do).
	//
	// arm64 hardware (as of ARMv8) limits user addresses to 48
	// bits, in the range [0, 1<<48).
	//
	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
	// in hardware. On Linux, Go leans on stricter OS limits. Based
	// on Linux's processor.h, the user address space is limited as
	// follows on 64-bit architectures:
	//
	//	Architecture  Name              Maximum Value (exclusive)
	//	---------------------------------------------------------------------
	//	amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
	//	arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
	//	ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
	//	mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
	//	s390x         TASK_SIZE         1<<64 (64 bit addresses)
	//
	// These limits may increase over time, but are currently at
	// most 48 bits except on s390x. On all architectures, Linux
	// starts placing mmap'd regions at addresses that are
	// significantly below 48 bits, so even if it's possible to
	// exceed Go's 48 bit limit, it's extremely unlikely in
	// practice.
	//
	// On 32-bit platforms, we accept the full 32-bit address
	// space because doing so is cheap.
	// mips32 only has access to the low 2GB of virtual memory, so
	// we further limit it to 31 bits.
	//
	// On ios/arm64, although 64-bit pointers are presumably
	// available, pointers are truncated to 33 bits in iOS <14.
	// Furthermore, only the top 4 GiB of the address space are
	// actually available to the application. In iOS >=14, more
	// of the address space is available, and the OS can now
	// provide addresses outside of those 33 bits. Pick 40 bits
	// as a reasonable balance between address space usage by the
	// page allocator, and flexibility for what mmap'd regions
	// we'll accept for the heap. We can't just move to the full
	// 48 bits because this uses too much address space for older
	// iOS versions.
	// TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
	// to a 48-bit address space like every other arm64 platform.
	//
	// WebAssembly currently has a limit of 4GB linear memory.
	heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64

	// maxAlloc is the maximum size of an allocation. On 64-bit,
	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
	// 32-bit, however, this is one less than 1<<32 because the
	// number of bytes in the address space doesn't actually fit
	// in a uintptr.
	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1

	// The number of bits in a heap address, the size of heap
	// arenas, and the L1 and L2 arena map sizes are related by
	//
	//	(1 << addr bits) = arena size * L1 entries * L2 entries
	//
	// Currently, we balance these as follows:
	//
	//	Platform        Addr bits  Arena size  L1 entries  L2 entries
	//	--------------  ---------  ----------  ----------  ----------
	//	*/64-bit           48        64MB          1        4M (32MB)
	//	windows/64-bit     48         4MB         64        1M (8MB)
	//	ios/arm64          40         4MB          1        256K (2MB)
	//	*/32-bit           32         4MB          1        1024 (4KB)
	//	*/mips(le)         31         4MB          1        512 (2KB)

	// heapArenaBytes is the size of a heap arena. The heap
	// consists of mappings of size heapArenaBytes, aligned to
	// heapArenaBytes. The initial heap mapping is one arena.
	//
	// This is currently 64MB on 64-bit non-Windows and 4MB on
	// 32-bit and on Windows. We use smaller arenas on Windows
	// because all committed memory is charged to the process,
	// even if it's not touched. Hence, for processes with small
	// heaps, the mapped arena space needs to be commensurate.
	// This is particularly important with the race detector,
	// since it significantly amplifies the cost of committed
	// memory.
	heapArenaBytes = 1 << logHeapArenaBytes

	heapArenaWords = heapArenaBytes / goarch.PtrSize

	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
	// prefer using heapArenaBytes where possible (we need the
	// constant to compute some other constants).
	logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64

	// heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
	heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)

	pagesPerArena = heapArenaBytes / pageSize

	// arenaL1Bits is the number of bits of the arena number
	// covered by the first level arena map.
	//
	// This number should be small, since the first level arena
	// map requires PtrSize*(1<<arenaL1Bits) of space in the
	// binary's BSS. It can be zero, in which case the first level
	// index is effectively unused. There is a performance benefit
	// to this, since the generated code can be more efficient,
	// but comes at the cost of having a large L2 mapping.
	//
	// We use the L1 map on 64-bit Windows because the arena size
	// is small, but the address space is still 48 bits, and
	// there's a high cost to having a large L2.
	arenaL1Bits = 6 * (_64bit * goos.IsWindows)

	// arenaL2Bits is the number of bits of the arena number
	// covered by the second level arena index.
	//
	// The size of each arena map allocation is proportional to
	// 1<<arenaL2Bits, so it's important that this not be too
	// large. 48 bits leads to 32MB arena index allocations, which
	// is about the practical threshold.
	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits

	// arenaL1Shift is the number of bits to shift an arena frame
	// number by to compute an index into the first level arena map.
	arenaL1Shift = arenaL2Bits

	// arenaBits is the total bits in a combined arena map index.
	// This is split between the index into the L1 arena map and
	// the L2 arena map.
	arenaBits = arenaL1Bits + arenaL2Bits

	// arenaBaseOffset is the pointer value that corresponds to
	// index 0 in the heap arena map.
	//
	// On amd64, the address space is 48 bits, sign extended to 64
	// bits. This offset lets us handle "negative" addresses (or
	// high addresses if viewed as unsigned).
	//
	// On aix/ppc64, this offset allows us to keep heapAddrBits at
	// 48. Otherwise, it would be 60 in order to handle mmap addresses
	// (in range 0x0a00000000000000 - 0x0afffffffffffff). But in that
	// case, the memory reserved in (s *pageAlloc).init for chunks
	// causes significant slowdowns.
	//
	// On other platforms, the user address space is contiguous
	// and starts at 0, so no offset is necessary.
	arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
	// A typed version of this constant that will make it into DWARF (for viewcore).
	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096

	// minHeapForMetadataHugePages sets a threshold on when certain kinds of
	// heap metadata, currently the arenas map L2 entries and page alloc bitmap
	// mappings, are allowed to be backed by huge pages. If the heap goal ever
	// exceeds this threshold, then huge pages are enabled.
	//
	// These numbers are chosen with the assumption that huge pages are on the
	// order of a few MiB in size.
	//
	// The kind of metadata this applies to has a very low overhead when compared
	// to address space used, but their constant overheads for small heaps would
	// be very high if they were to be backed by huge pages (e.g. a few MiB makes
	// a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB
	// heap). The benefit of huge pages is also not worth it for small heaps,
	// because only a very, very small part of the metadata is used for small heaps.
	//
	// N.B. If the heap goal exceeds the threshold then shrinks to a very small size
	// again, then huge pages will still be enabled for this mapping. The reason is that
	// there's no point unless we're also returning the physical memory for these
	// metadata mappings back to the OS. That would be quite complex to do in general
	// as the heap is likely fragmented after a reduction in heap size.
	minHeapForMetadataHugePages = 1 << 30

	// randomizeHeapBase indicates if the heap base address should be randomized.
	// See comment in mallocinit for how the randomization is performed.
	randomizeHeapBase = goexperiment.RandomizedHeapBase64 && goarch.PtrSize == 8 && !isSbrkPlatform

	// randHeapBasePrefixMask is used to extract the top byte of the randomized
	// heap base address.
	randHeapBasePrefixMask = ^uintptr(0xff << (heapAddrBits - 8))
)
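
// arenaIndexIllustration is an editor's illustrative sketch, not part of the
// original malloc.go; the function name is hypothetical. It spells out how a
// heap address maps onto the two-level arena map described above: offset the
// address by arenaBaseOffset, divide by the arena size to get an arena frame
// number, then split that frame number into an L1 index and an L2 index. The
// real helpers (arenaIndex, arenaIdx.l1, arenaIdx.l2) live in mheap.go.
func arenaIndexIllustration(p uintptr) (l1, l2 uintptr) {
	ri := (p - arenaBaseOffset) / heapArenaBytes // arena frame number
	l1 = ri >> arenaL1Shift                      // index into mheap_.arenas
	l2 = ri & (1<<arenaL2Bits - 1)               // index into the L2 map *mheap_.arenas[l1]
	return
}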

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// physHugePageSize is the size in bytes of the OS's default physical huge
// page size whose allocation is opaque to the application. It is assumed
// and verified to be a power of two.
//
// If set, this must be set by the OS init code (typically in osinit) before
// mallocinit. However, setting it at all is optional, and leaving the default
// value is always safe (though potentially less efficient).
//
// Since physHugePageSize is always assumed to be a power of two,
// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
// The purpose of physHugePageShift is to avoid doing divisions in
// performance critical functions.
var (
	physHugePageSize  uintptr
	physHugePageShift uint
)

var (
	// heapRandSeed is a random value that is populated in mallocinit if
	// randomizeHeapBase is set. It is used in mallocinit, and mheap.grow, to
	// randomize the base heap address.
	heapRandSeed              uintptr
	heapRandSeedBitsRemaining int
)

func nextHeapRandBits(bits int) uintptr {
	if bits > heapRandSeedBitsRemaining {
		throw("not enough heapRandSeed bits remaining")
	}
	r := heapRandSeed >> (64 - bits)
	heapRandSeed <<= bits
	heapRandSeedBitsRemaining -= bits
	return r
}

func mallocinit() {
	if gc.SizeClassToSize[tinySizeClass] != maxTinySize {
		throw("bad TinySizeClass")
	}

	if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 {
		// heapBits expects modular arithmetic on bitmap
		// addresses to work.
		throw("heapArenaBitmapWords not a power of 2")
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize > maxPhysPageSize {
		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}
	if physHugePageSize&(physHugePageSize-1) != 0 {
		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
		throw("bad system huge page size")
	}
	if physHugePageSize > maxPhysHugePageSize {
		// physHugePageSize is greater than the maximum supported huge page size.
		// Don't throw here, like in the other cases, since a system configured
		// in this way isn't wrong, we just don't have the code to support them.
		// Instead, silently set the huge page size to zero.
		physHugePageSize = 0
	}
	if physHugePageSize != 0 {
		// Since physHugePageSize is a power of 2, it suffices to increase
		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
		for 1<<physHugePageShift != physHugePageSize {
			physHugePageShift++
		}
	}
	if pagesPerArena%pagesPerSpanRoot != 0 {
		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
		throw("bad pagesPerSpanRoot")
	}
	if pagesPerArena%pagesPerReclaimerChunk != 0 {
		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
		throw("bad pagesPerReclaimerChunk")
	}
	// Check that the minimum size (exclusive) for a malloc header is also
	// a size class boundary. This is important to making sure checks align
	// across different parts of the runtime.
	//
	// While we're here, also check to make sure all these size classes'
	// span sizes are one page. Some code relies on this.
	minSizeForMallocHeaderIsSizeClass := false
	sizeClassesUpToMinSizeForMallocHeaderAreOnePage := true
	for i := 0; i < len(gc.SizeClassToSize); i++ {
		if gc.SizeClassToNPages[i] > 1 {
			sizeClassesUpToMinSizeForMallocHeaderAreOnePage = false
		}
		if gc.MinSizeForMallocHeader == uintptr(gc.SizeClassToSize[i]) {
			minSizeForMallocHeaderIsSizeClass = true
			break
		}
	}
	if !minSizeForMallocHeaderIsSizeClass {
		throw("min size of malloc header is not a size class boundary")
	}
	if !sizeClassesUpToMinSizeForMallocHeaderAreOnePage {
		throw("expected all size classes up to min size for malloc header to fit in one-page spans")
	}
	// Check that the pointer bitmap for all small sizes without a malloc header
	// fits in a word.
	if gc.MinSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
		throw("max pointer/scan bitmap size for headerless objects is too large")
	}

	if minTagBits > tagBits {
		throw("tagBits too small")
	}

	// Initialize the heap.
	mheap_.init()
	mcache0 = allocmcache()
	lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
	lockInit(&profInsertLock, lockRankProfInsert)
	lockInit(&profBlockLock, lockRankProfBlock)
	lockInit(&profMemActiveLock, lockRankProfMemActive)
	for i := range profMemFutureLock {
		lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
	}
	lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)

	// Create initial arena growth hints.
	if isSbrkPlatform {
		// Don't generate hints on sbrk platforms. We can
		// only grow the break sequentially.
	} else if goarch.PtrSize == 8 {
		// On a 64-bit machine, we pick the following hints
		// because:
		//
		// 1. Starting from the middle of the address space
		// makes it easier to grow out a contiguous range
		// without running in to some other mapping.
		//
		// 2. This makes Go heap addresses more easily
		// recognizable when debugging.
		//
		// 3. Stack scanning in gccgo is still conservative,
		// so it's important that addresses be distinguishable
		// from other data.
		//
		// Starting at 0x00c0 means that the valid memory addresses
		// will begin with 0x00c0, 0x00c1, ...
		// In little-endian, that's c0 00, c1 00, ... None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices reduce the odds of a conservative garbage collector
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On ios/arm64, the address space is even smaller.
		//
		// On AIX, mmaps for 64-bit processes start at 0x0A00000000000000.
		//
		// Space mapped for user arenas comes immediately after the range
		// originally reserved for the regular heap when race mode is not
		// enabled because user arena chunks can never be used for regular heap
		// allocations and we want to avoid fragmenting the address space.
		//
		// In race mode we have no choice but to just use the same hints because
		// the race detector requires that the heap be mapped contiguously.
		//
		// If randomizeHeapBase is set, we attempt to randomize the base address
		// as much as possible. We do this by generating a random uint64 via
		// bootstrapRand and using its bits to randomize portions of the base
		// address as follows:
		//  * We first generate a random heapArenaBytes aligned address that we use for
		//    generating the hints.
		//  * On the first call to mheap.grow, we then generate a random PallocChunkBytes
		//    aligned offset into the mmap'd heap region, which we use as the base for
		//    the heap region.
		//  * We then select a page offset in that PallocChunkBytes region to start the
		//    heap at, and mark all the pages up to that offset as allocated.
		//
		// Our final randomized "heap base address" becomes the first byte of
		// the first available page returned by the page allocator. This results
		// in an address with at least heapAddrBits-gc.PageShift-2-(1*goarch.IsAmd64)
		// bits of entropy.

		var randHeapBase uintptr
		var randHeapBasePrefix byte
		// heapAddrBits is 48 on most platforms, but we only use 47 of those
		// bits in order to provide a good amount of room for the heap to grow
		// contiguously. On amd64, there are 48 bits, but the top bit is sign
		// extended, so we throw away another bit, just to be safe.
		randHeapAddrBits := heapAddrBits - 1 - (goarch.IsAmd64 * 1)
		if randomizeHeapBase {
			// Generate a random value, and take the bottom heapAddrBits-logHeapArenaBytes
			// bits, using them as the top bits for randHeapBase.
			heapRandSeed, heapRandSeedBitsRemaining = uintptr(bootstrapRand()), 64

			topBits := (randHeapAddrBits - logHeapArenaBytes)
			randHeapBase = nextHeapRandBits(topBits) << (randHeapAddrBits - topBits)
			randHeapBase = alignUp(randHeapBase, heapArenaBytes)
			randHeapBasePrefix = byte(randHeapBase >> (randHeapAddrBits - 8))
		}

		for i := 0x7f; i >= 0; i-- {
			var p uintptr
			switch {
			case raceenabled:
				// The TSAN runtime requires the heap
				// to be in the range [0x00c000000000,
				// 0x00e000000000).
				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
				if p >= uintptrMask&0x00e000000000 {
					continue
				}
			case randomizeHeapBase:
				prefix := uintptr(randHeapBasePrefix+byte(i)) << (randHeapAddrBits - 8)
				p = prefix | (randHeapBase & randHeapBasePrefixMask)
			case GOARCH == "arm64" && GOOS == "ios":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			case GOOS == "aix":
				if i == 0 {
					// We don't use addresses directly after 0x0A00000000000000
					// to avoid collisions with other mmaps done by non-Go programs.
					continue
				}
				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			// Switch to generating hints for user arenas if we've gone
			// through about half the hints. In race mode, take only about
			// a quarter; we don't have very much space to work with.
			hintList := &mheap_.arenaHints
			if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) {
				hintList = &mheap_.userArena.arenaHints
			}
			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
			hint.addr = p
			hint.next, *hintList = *hintList, hint
		}
	} else {
		// On a 32-bit machine, we're much more concerned
		// about keeping the usable heap contiguous.
		// Hence:
		//
		// 1. We reserve space for all heapArenas up front so
		// they don't get interleaved with the heap. They're
		// ~258MB, so this isn't too bad. (We could reserve a
		// smaller amount of space up front if this is a
		// problem.)
		//
		// 2. We hint the heap to start right above the end of
		// the binary so we have the best chance of keeping it
		// contiguous.
		//
		// 3. We try to stake out a reasonably large initial
		// heap reservation.

		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
		meta := uintptr(sysReserve(nil, arenaMetaSize, "heap reservation"))
		if meta != 0 {
			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
		}

		// We want to start the arena low, but if we're linked
		// against C code, it's possible global constructors
		// have called malloc and adjusted the process' brk.
		// Query the brk so we can avoid trying to map the
		// region over it (which will cause the kernel to put
		// the region somewhere else, likely at a high
		// address).
		procBrk := sbrk0()

		// If we ask for the end of the data segment but the
		// operating system requires a little more space
		// before we can start allocating, it will give out a
		// slightly higher pointer. Except QEMU, which is
		// buggy, as usual: it won't adjust the pointer
		// upward. So adjust it upward a little bit ourselves:
		// 1/4 MB to get away from the running binary image.
		p := firstmoduledata.end
		if p < procBrk {
			p = procBrk
		}
		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
			p = mheap_.heapArenaAlloc.end
		}
		p = alignUp(p+(256<<10), heapArenaBytes)
		// Because we're worried about fragmentation on
		// 32-bit, we try to make a large initial reservation.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}
		for _, arenaSize := range arenaSizes {
			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes, "heap reservation")
			if a != nil {
				mheap_.arena.init(uintptr(a), size, false)
				p = mheap_.arena.end // For hint below
				break
			}
		}
		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		hint.addr = p
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint

		// Place the hint for user arenas just after the large reservation.
		//
		// While this potentially competes with the hint above, in practice we probably
		// aren't going to be getting this far anyway on 32-bit platforms.
		userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		userArenaHint.addr = p
		userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint
	}
	// Initialize the memory limit here because the allocator is going to look at it
	// but we haven't called gcinit yet and we're definitely going to allocate memory before then.
	gcController.memoryLimit.Store(math.MaxInt64)
}
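
// heapHintAddrsIllustration is an editor's illustrative sketch, not part of
// the original malloc.go; the function name is hypothetical. It spells out
// the default 64-bit hint pattern chosen in mallocinit above: hint i maps to
// i<<40 | 0x00c0<<32, so the first few hints are 0x00c000000000,
// 0x01c000000000, 0x02c000000000, ... which is why Go heap addresses
// commonly begin with 0x00c0, 0x01c0, and so on.
func heapHintAddrsIllustration() [4]uintptr {
	var addrs [4]uintptr
	for i := uintptr(0); i < uintptr(len(addrs)); i++ {
		addrs[i] = i<<40 | uintptrMask&(0x00c0<<32)
	}
	return addrs
}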

// sysAlloc allocates heap arena space for at least n bytes. The
// returned pointer is always heapArenaBytes-aligned and backed by
// h.arenas metadata. The returned size is always a multiple of
// heapArenaBytes. sysAlloc returns nil on failure.
// There is no corresponding free function.
//
// hintList is a list of hint addresses for where to allocate new
// heap arenas. It must be non-nil.
//
// sysAlloc returns a memory region in the Reserved state. This region must
// be transitioned to Prepared and then Ready before use.
//
// arenaList is the list the arena should be added to.
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx) (v unsafe.Pointer, size uintptr) {
	assertLockHeld(&h.lock)

	n = alignUp(n, heapArenaBytes)

	if hintList == &h.arenaHints {
		// First, try the arena pre-reservation.
		// Newly-used mappings are considered released.
		//
		// Only do this if we're using the regular heap arena hints.
		// This behavior is only for the heap.
		v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased, "heap")
		if v != nil {
			size = n
			goto mapped
		}
	}

	// Try to grow the heap at a hint address.
	for *hintList != nil {
		hint := *hintList
		p := hint.addr
		if hint.down {
			p -= n
		}
		if p+n < p {
			// We can't use this, so don't ask.
			v = nil
		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
			// Outside addressable heap. Can't use.
			v = nil
		} else {
			v = sysReserve(unsafe.Pointer(p), n, "heap reservation")
		}
		if p == uintptr(v) {
			// Success. Update the hint.
			if !hint.down {
				p += n
			}
			hint.addr = p
			size = n
			break
		}
		// Failed. Discard this hint and try the next.
		//
		// TODO: This would be cleaner if sysReserve could be
		// told to only return the requested address. In
		// particular, this is already how Windows behaves, so
		// it would simplify things there.
		if v != nil {
			sysFreeOS(v, n)
		}
		*hintList = hint.next
		h.arenaHintAlloc.free(unsafe.Pointer(hint))
	}

	if size == 0 {
		if raceenabled {
			// The race detector assumes the heap lives in
			// [0x00c000000000, 0x00e000000000), but we
			// just ran out of hints in this region. Give
			// a nice failure.
			throw("too many address space collisions for -race mode")
		}

		// All of the hints failed, so we'll take any
		// (sufficiently aligned) address the kernel will give
		// us.
		v, size = sysReserveAligned(nil, n, heapArenaBytes, "heap")
		if v == nil {
			return nil, 0
		}

		// Create new hints for extending this region.
		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr, hint.down = uintptr(v), true
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr = uintptr(v) + size
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}

	// Check for bad pointers or pointers we can't use.
	{
		var bad string
		p := uintptr(v)
		if p+size < p {
			bad = "region exceeds uintptr range"
		} else if arenaIndex(p) >= 1<<arenaBits {
			bad = "base outside usable address space"
		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
			bad = "end outside usable address space"
		}
		if bad != "" {
			// This should be impossible on most architectures,
			// but it would be really confusing to debug.
			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
			throw("memory reservation exceeds address space limit")
		}
	}

	if uintptr(v)&(heapArenaBytes-1) != 0 {
		throw("misrounded allocation in sysAlloc")
	}

mapped:
	if valgrindenabled {
		valgrindCreateMempool(v)
		valgrindMakeMemNoAccess(v, size)
	}

	// Create arena metadata.
	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
		l2 := h.arenas[ri.l1()]
		if l2 == nil {
			// Allocate an L2 arena map.
			//
			// Use sysAllocOS instead of sysAlloc or persistentalloc because there's no
			// statistic we can comfortably account for this space in. With this structure,
			// we rely on demand paging to avoid large overheads, but tracking which memory
			// is paged in is too expensive. Trying to account for the whole region means
			// that it will appear like an enormous memory overhead in statistics, even though
			// it is not.
			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2), "heap index"))
			if l2 == nil {
				throw("out of memory allocating heap arena map")
			}
			if h.arenasHugePages {
				sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
			} else {
				sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
			}
			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
		}

		if l2[ri.l2()] != nil {
			throw("arena already initialized")
		}
		var r *heapArena
		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys, "heap metadata"))
		if r == nil {
			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
			if r == nil {
				throw("out of memory allocating heap arena metadata")
			}
		}

		// Register the arena in allArenas if requested.
		if len((*arenaList)) == cap((*arenaList)) {
			size := 2 * uintptr(cap((*arenaList))) * goarch.PtrSize
			if size == 0 {
				size = physPageSize
			}
			newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
			if newArray == nil {
				throw("out of memory allocating allArenas")
			}
			oldSlice := (*arenaList)
			*(*notInHeapSlice)(unsafe.Pointer(&(*arenaList))) = notInHeapSlice{newArray, len((*arenaList)), int(size / goarch.PtrSize)}
			copy((*arenaList), oldSlice)
			// Do not free the old backing array because
			// there may be concurrent readers. Since we
			// double the array each time, this can lead
			// to at most 2x waste.
		}
		(*arenaList) = (*arenaList)[:len((*arenaList))+1]
		(*arenaList)[len((*arenaList))-1] = ri

		// Store atomically just in case an object from the
		// new heap arena becomes visible before the heap lock
		// is released (which shouldn't happen, but there's
		// little downside to this).
		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
	}

	// Tell the race detector about the new heap memory.
	if raceenabled {
		racemapshadow(v, size)
	}

	return
}

// sysReserveAligned is like sysReserve, but the returned pointer is
// aligned to align bytes. It may reserve either n or n+align bytes,
// so it returns the size that was reserved.
func sysReserveAligned(v unsafe.Pointer, size, align uintptr, vmaName string) (unsafe.Pointer, uintptr) {
	if isSbrkPlatform {
		if v != nil {
			throw("unexpected heap arena hint on sbrk platform")
		}
		return sysReserveAlignedSbrk(size, align)
	}
	// Since the alignment is rather large in uses of this
	// function, we're not likely to get it by chance, so we ask
	// for a larger region and remove the parts we don't need.
	retries := 0
retry:
	p := uintptr(sysReserve(v, size+align, vmaName))
	switch {
	case p == 0:
		return nil, 0
	case p&(align-1) == 0:
		return unsafe.Pointer(p), size + align
	case GOOS == "windows":
		// On Windows we can't release pieces of a
		// reservation, so we release the whole thing and
		// re-reserve the aligned sub-region. This may race,
		// so we may have to try again.
		sysFreeOS(unsafe.Pointer(p), size+align)
		p = alignUp(p, align)
		p2 := sysReserve(unsafe.Pointer(p), size, vmaName)
		if p != uintptr(p2) {
			// Must have raced. Try again.
			sysFreeOS(p2, size)
			if retries++; retries == 100 {
				throw("failed to allocate aligned heap memory; too many retries")
			}
			goto retry
		}
		// Success.
		return p2, size
	default:
		// Trim off the unaligned parts.
		pAligned := alignUp(p, align)
		sysFreeOS(unsafe.Pointer(p), pAligned-p)
		end := pAligned + size
		endLen := (p + size + align) - end
		if endLen > 0 {
			sysFreeOS(unsafe.Pointer(end), endLen)
		}
		return unsafe.Pointer(pAligned), size
	}
}

// enableMetadataHugePages enables huge pages for various sources of heap metadata.
//
// A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
// time, but may take time proportional to the size of the mapped heap beyond that.
//
// This function is idempotent.
//
// The heap lock must not be held over this operation, since it will briefly acquire
// the heap lock.
//
// Must be called on the system stack because it acquires the heap lock.
//
//go:systemstack
func (h *mheap) enableMetadataHugePages() {
	// Enable huge pages for page structure.
	h.pages.enableChunkHugePages()

	// Grab the lock and set arenasHugePages if it isn't already set.
	//
	// Once arenasHugePages is set, all new L2 entries will be eligible for
	// huge pages. We'll set all the old entries after we release the lock.
	lock(&h.lock)
	if h.arenasHugePages {
		unlock(&h.lock)
		return
	}
	h.arenasHugePages = true
	unlock(&h.lock)

	// N.B. The arenas L1 map is quite small on all platforms, so it's fine to
	// just iterate over the whole thing.
	for i := range h.arenas {
		l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i])))
		if l2 == nil {
			continue
		}
		sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
	}
}

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uint16(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= uint(theBit + 1)
			s.freeindex = freeidx
			s.allocCount++
			return gclinkptr(uintptr(result)*s.elemsize + s.base())
		}
	}
	return 0
}
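
// allocCacheIllustration is an editor's illustrative sketch, not part of the
// original malloc.go; the function name and values are hypothetical. It shows
// the bit trick nextFreeFast relies on: allocCache is a 64-bit window of the
// span's free bitmap starting at freeindex, with a set bit meaning "free", so
// the trailing zero count is the offset of the next free slot.
func allocCacheIllustration() {
	// Slots freeindex+0 and freeindex+1 are in use; freeindex+2 is free.
	var allocCache uint64 = 0b100
	theBit := sys.TrailingZeros64(allocCache) // 2: next free slot is freeindex+2
	// After handing out that slot, shift the window past it, mirroring
	// s.allocCache >>= uint(theBit + 1) in nextFreeFast above.
	allocCache >>= uint(theBit + 1)
	_ = allocCache // now 0: no free slots remain visible in this window
}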

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavyweight
// allocation. If it is a heavyweight allocation, the caller must determine
// whether a new GC cycle needs to be started or, if the GC is active, whether
// this goroutine needs to assist the GC.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger bool) {
	s = c.alloc[spc]
	checkGCTrigger = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if s.allocCount != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		c.refill(spc)
		checkGCTrigger = true
		s = c.alloc[spc]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(uintptr(freeIndex)*s.elemsize + s.base())
	s.allocCount++
	if s.allocCount > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}

// doubleCheckMalloc enables a bunch of extra checks to malloc to double-check
// that various invariants are upheld.
//
// We might consider turning these on by default; many of them previously were.
// They account for a few % of mallocgc's cost though, which does matter somewhat
// at scale.
const doubleCheckMalloc = false

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
//
// mallocgc should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/gopkg
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/frugal
//   - github.com/cockroachdb/cockroach
//   - github.com/cockroachdb/pebble
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mallocgc
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Short-circuit zero-sized allocation requests.
	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	// It's possible for any malloc to trigger sweeping, which may in
	// turn queue finalizers. Record this dynamic lock edge.
	// N.B. Compiled away if lockrank experiment is not enabled.
	lockRankMayQueueFinalizer()

	// Pre-malloc debug hooks.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// For ASAN, we allocate extra memory around each allocation called the "redzone."
	// These "redzones" are marked as unaddressable.
	var asanRZ uintptr
	if asanenabled {
		asanRZ = redZoneSize(size)
		size += asanRZ
	}

	// Assist the GC if needed.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Actually do the allocation.
	var x unsafe.Pointer
	var elemsize uintptr
	if size <= maxSmallSize-gc.MallocHeaderSize {
		if typ == nil || !typ.Pointers() {
			if size < maxTinySize {
				x, elemsize = mallocgcTiny(size, typ)
			} else {
				x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
			}
		} else {
			if !needzero {
				throw("objects with pointers must be zeroed")
			}
			if heapBitsInSpan(size) {
				x, elemsize = mallocgcSmallScanNoHeader(size, typ)
			} else {
				x, elemsize = mallocgcSmallScanHeader(size, typ)
			}
		}
	} else {
		x, elemsize = mallocgcLarge(size, typ, needzero)
	}

	// Notify sanitizers, if enabled.
	if raceenabled {
		racemalloc(x, size-asanRZ)
	}
	if msanenabled {
		msanmalloc(x, size-asanRZ)
	}
	if asanenabled {
		// Poison the space between the end of the requested size of x
		// and the end of the slot. Unpoison the requested allocation.
		frag := elemsize - size
		if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) && size <= maxSmallSize-gc.MallocHeaderSize {
			frag -= gc.MallocHeaderSize
		}
		asanpoison(unsafe.Add(x, size-asanRZ), asanRZ)
		asanunpoison(x, size-asanRZ)
	}
	if valgrindenabled {
		valgrindMalloc(x, size-asanRZ)
	}

	// Adjust our GC assist debt to account for internal fragmentation.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	// Post-malloc debug hooks.
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

func mallocgcTiny(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
		if typ != nil && typ.Pointers() {
			throw("expected noscan for tiny alloc")
		}
	}
	mp.mallocing = 1

	// Tiny allocator.
	//
	// Tiny allocator combines several tiny allocation requests
	// into a single memory block. The resulting memory block
	// is freed when all subobjects are unreachable. The subobjects
	// must be noscan (don't have pointers); this ensures that
	// the amount of potentially wasted memory is bounded.
	//
	// Size of the memory block used for combining (maxTinySize) is tunable.
	// Current setting is 16 bytes, which relates to 2x worst case memory
	// wastage (when all but one subobjects are unreachable).
	// 8 bytes would result in no wastage at all, but provides fewer
	// opportunities for combining.
	// 32 bytes provides more opportunities for combining,
	// but can lead to 4x worst case wastage.
	// The best case winning is 8x regardless of block size.
	//
	// Objects obtained from tiny allocator must not be freed explicitly.
	// So when an object will be freed explicitly, we ensure that
	// its size >= maxTinySize.
	//
	// SetFinalizer has a special case for objects potentially coming
	// from the tiny allocator; in such a case it allows setting finalizers
	// for an inner byte of a memory block.
	//
	// The main targets of tiny allocator are small strings and
	// standalone escaping variables. On a json benchmark
	// the allocator reduces the number of allocations by ~12% and
	// reduces heap size by ~20%.
	c := getMCache(mp)
	off := c.tinyoffset
	// Align tiny pointer for required (conservative) alignment.
	if size&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && size == 12 {
		// Conservatively align 12-byte objects to 8 bytes on 32-bit
		// systems so that objects whose first field is a 64-bit
		// value are aligned to 8 bytes and do not cause a fault on
		// atomic access. See issue 37262.
		// TODO(mknyszek): Remove this workaround if/when issue 36606
		// is resolved.
		off = alignUp(off, 8)
	} else if size&3 == 0 {
		off = alignUp(off, 4)
	} else if size&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+size <= maxTinySize && c.tiny != 0 {
		// The object fits into existing tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + size
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		return x, 0
	}
	// Allocate a new maxTinySize block.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]
	v := nextFreeFast(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	(*[2]uint64)(x)[0] = 0 // Always zero
	(*[2]uint64)(x)[1] = 0
	// See if we need to replace the existing tiny block with the new one
	// based on amount of remaining free space.
	if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
		// Note: disabled when race detector is on, see comment near end of this function.
		c.tiny = uintptr(x)
		c.tinyoffset = size
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(span.elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, span.elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Pad tinysize allocations so they are aligned with the end
		// of the tinyalloc region. This ensures that any arithmetic
		// that goes off the top end of the object will be detectable
		// by checkptr (issue 38872).
		// Note that we disable tinyalloc when raceenabled for this to work.
		// TODO: This padding is only performed when the race detector
		// is enabled. It would be nice to enable it if any package
		// was compiled with checkptr, but there's no easy way to
		// detect that (especially at compile time).
		// TODO: enable this padding for all allocations, not just
		// tinyalloc ones. It's tricky because of pointer maps.
		// Maybe just all noscan objects?
		x = add(x, span.elemsize-size)
	}
	return x, span.elemsize
}

func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
		if typ != nil && typ.Pointers() {
			throw("expected noscan type for noscan alloc")
		}
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	var sizeclass uint8
	if size <= gc.SmallSizeMax-8 {
		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
	} else {
		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
	}
	size = uintptr(gc.SizeClassToSize[sizeclass])
	spc := makeSpanClass(sizeclass, true)
	span := c.alloc[spc]
	v := nextFreeFast(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, size)
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(size)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, size)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	return x, size
}

func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
		if typ == nil || !typ.Pointers() {
			throw("noscan allocated in scan-only path")
		}
		if !heapBitsInSpan(size) {
			throw("heap bits in not in span for non-header-only path")
		}
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	sizeclass := gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
	spc := makeSpanClass(sizeclass, false)
	span := c.alloc[spc]
	v := nextFreeFast(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, size)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// initHeapBits already set the pointer bits for the 8-byte sizeclass
		// on 64-bit platforms.
		c.scanAlloc += 8
	} else {
		c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
	}
	size = uintptr(gc.SizeClassToSize[sizeclass])

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(size)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, size)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	return x, size
}

func mallocgcSmallScanHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
		if typ == nil || !typ.Pointers() {
			throw("noscan allocated in scan-only path")
		}
		if heapBitsInSpan(size) {
			throw("heap bits in span for header-only path")
		}
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	size += gc.MallocHeaderSize
	var sizeclass uint8
	if size <= gc.SmallSizeMax-8 {
		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
	} else {
		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
	}
	size = uintptr(gc.SizeClassToSize[sizeclass])
	spc := makeSpanClass(sizeclass, false)
	span := c.alloc[spc]
	v := nextFreeFast(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, size)
	}
	header := (**_type)(x)
	x = add(x, gc.MallocHeaderSize)
	c.scanAlloc += heapSetTypeSmallHeader(uintptr(x), size-gc.MallocHeaderSize, typ, header, span)

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(size)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, size)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	return x, size
}

func mallocgcLarge(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
	}
	mp.mallocing = 1

	c := getMCache(mp)
	// For large allocations, keep track of zeroed state so that
	// bulk zeroing can happen later in a preemptible context.
	span := c.allocLarge(size, typ == nil || !typ.Pointers())
	span.freeindex = 1
	span.allocCount = 1
	span.largeType = nil // Tell the GC not to look at this yet.
	size = span.elemsize
	x := unsafe.Pointer(span.base())

	// Ensure that the store above that sets largeType to
	// nil happens before the caller can make x observable
	// to the garbage collector.
	//
	// Otherwise, on weakly ordered machines, the garbage
	// collector could follow a pointer to x, but see a stale
	// largeType value.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
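	//
	// nextSample is a countdown (in bytes) to the next heap profile sample:
	// each allocation subtracts its slot size, and once the counter drops
	// below zero (or the cached profiling rate is stale), profilealloc
	// records a sample and draws a fresh, exponentially distributed countdown.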
	c.nextSample -= int64(size)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, size)
	}
	mp.mallocing = 0
	releasem(mp)

	// Check to see if we need to trigger the GC.
	if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
		gcStart(t)
	}

	// Objects can be zeroed late in a context where preemption can occur.
	//
	// x will keep the memory alive.
	if needzero && span.needzero != 0 {
		// N.B. size == fullSize always in this case.
		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
	}

	// Set the type and run the publication barrier while non-preemptible. We need to make
	// sure that between heapSetTypeLarge and publicationBarrier we cannot get preempted,
	// otherwise the GC could potentially observe non-zeroed memory but largeType set on weak
	// memory architectures.
	//
	// The GC can also potentially observe non-zeroed memory if conservative scanning spuriously
	// observes a partially-allocated object, see the freeIndexForScan update above. This case is
	// handled by synchronization inside heapSetTypeLarge.
	mp = acquirem()
	if typ != nil && typ.Pointers() {
		// Finish storing the type information, now that we're certain the memory is zeroed.
		getMCache(mp).scanAlloc += heapSetTypeLarge(uintptr(x), size, typ, span)
	}
	// Publish the object again, now with zeroed memory and initialized type information.
	//
	// Even if we didn't update any type information, this is necessary to ensure that, for example,
	// x written to a global without any synchronization still results in other goroutines observing
	// zeroed memory.
	publicationBarrier()
	releasem(mp)
	return x, size
}

func preMallocgcDebug(size uintptr, typ *_type) unsafe.Pointer {
	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			// TODO(austin): This should be just
			// align = uintptr(typ.align)
			// but that's only 4 on 32-bit platforms,
			// even if there's a uint64 field in typ (see #599).
			// This causes 64-bit atomic accesses to panic.
			// Hence, we use stricter alignment that matches
			// the normal allocator better.
			if size&7 == 0 {
				align = 8
			} else if size&3 == 0 {
				align = 4
			} else if size&1 == 0 {
				align = 2
			} else {
				align = 1
			}
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}
	if inittrace.active && inittrace.id == getg().goid {
		// Init functions are executed sequentially in a single goroutine.
		inittrace.allocs += 1
	}
	return nil
}

func postMallocgcDebug(x unsafe.Pointer, elemsize uintptr, typ *_type) {
	if inittrace.active && inittrace.id == getg().goid {
		// Init functions are executed sequentially in a single goroutine.
		inittrace.bytes += uint64(elemsize)
	}

	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.HeapObjectAlloc(uintptr(x), typ)
			traceRelease(trace)
		}
	}

	// N.B. elemsize == 0 indicates a tiny allocation, since no new slot was
	// allocated to fulfill this call to mallocgc. This means checkfinalizer
	// will only flag an error if there is actually any risk. If an allocation
	// has the tiny block to itself, it will not get flagged, because we won't
	// mark the block as a tiny block.
	if debug.checkfinalizers != 0 && elemsize == 0 {
		setTinyBlockContext(unsafe.Pointer(alignDown(uintptr(x), maxTinySize)))
	}
}

// deductAssistCredit reduces the current G's assist credit
// by size bytes, and assists the GC if necessary.
//
// Caller must be preemptible.
func deductAssistCredit(size uintptr) {
	// Charge the current user G for this allocation.
	assistG := getg()
	if assistG.m.curg != nil {
		assistG = assistG.m.curg
	}
	// Charge the allocation against the G. We'll account
	// for internal fragmentation at the end of mallocgc.
	assistG.gcAssistBytes -= int64(size)

	if assistG.gcAssistBytes < 0 {
		// This G is in debt. Assist the GC to correct
		// this before allocating. This must happen
		// before disabling preemption.
		gcAssistAlloc(assistG)
	}
}

// memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
// on chunks of the buffer to be zeroed, with opportunities for preemption
// along the way. memclrNoHeapPointers contains no safepoints and also
// cannot be preemptively scheduled, so this provides a still-efficient
// block clear that can also be preempted on a reasonable granularity.
//
// Use this with care; if the data being cleared is tagged to contain
// pointers, this allows the GC to run before it is all cleared.
func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
	v := uintptr(x)
	// Got this from benchmarking. 128k is too small, 512k is too large.
	const chunkBytes = 256 * 1024
	vsize := v + size
	for voff := v; voff < vsize; voff = voff + chunkBytes {
		if getg().preempt {
			// May hold locks, e.g., profiling.
			goschedguarded()
		}
		// Clear min(avail, chunkBytes) bytes.
		n := vsize - voff
		if n > chunkBytes {
			n = chunkBytes
		}
		memclrNoHeapPointers(unsafe.Pointer(voff), n)
	}
}

// newobject is the implementation of the new builtin.
// The compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}

//go:linkname maps_newobject internal/runtime/maps.newobject
func maps_newobject(typ *_type) unsafe.Pointer {
	return newobject(typ)
}

// reflect_unsafe_New is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/goccy/json
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}

//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}

// newarray allocates an array of n elements of type typ.
//
// newarray should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname newarray
func newarray(typ *_type, n int) unsafe.Pointer {
	if n == 1 {
		return mallocgc(typ.Size_, typ, true)
	}
	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
	if overflow || mem > maxAlloc || n < 0 {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(mem, typ, true)
}

// reflect_unsafe_NewArray is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/bytedance/sonic
//   - github.com/goccy/json
//   - github.com/modern-go/reflect2
//   - github.com/segmentio/encoding
//   - github.com/segmentio/kafka-go
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

//go:linkname maps_newarray internal/runtime/maps.newarray
func maps_newarray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

// profilealloc resets the current mcache's nextSample counter and
// records a memory profile sample.
//
// The caller must be non-preemptible and have a P.
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := getMCache(mp)
	if c == nil {
		throw("profilealloc called without a P or outside bootstrapping")
	}
	c.memProfRate = MemProfileRate
	c.nextSample = nextSample()
	mProf_Malloc(mp, x, size)
}

// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows an exponential
// distribution with mean MemProfileRate, so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() int64 {
	if MemProfileRate == 0 {
		// Basically never sample.
		return math.MaxInt64
	}
	if MemProfileRate == 1 {
		// Sample immediately.
		return 0
	}
	return int64(fastexprand(MemProfileRate))
}

// fastexprand returns a random number from an exponential distribution with
// the specified mean.
func fastexprand(mean int) int32 {
	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution with mean `mean`.
	// The probability density function is (1/mean)*exp(-x/mean), so the CDF is
	// p = 1 - exp(-x/mean), so
	// q = 1 - p == exp(-x/mean)
	// log_e(q) = -x/mean
	// x = -log_e(q) * mean
	// x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
	const randomBitCount = 26
	q := cheaprandn(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// persistentChunkSize is the number of bytes we allocate when we grow
// a persistentAlloc.
const persistentChunkSize = 256 << 10

// persistentChunks is a list of all the persistent chunks we have
// allocated. The list is maintained through the first word in the
// persistent chunk. This is updated atomically.
var persistentChunks *notInHeap

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
// sysStat must be non-nil.
//
// Consider marking persistentalloc'd types not in heap by embedding
// internal/runtime/sys.NotInHeap.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
	const (
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > pageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return (*notInHeap)(sysAlloc(size, sysStat, "immortal metadata"))
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = alignUp(persistent.off, align)
	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys, "immortal metadata"))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}

		// Add the new chunk to the persistentChunks list.
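		// The list is treated as a lock-free stack: stash the current head in
		// the new chunk's first word, then try to swing persistentChunks to the
		// new chunk with a CAS, retrying if another thread pushed a chunk first.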
		for {
			chunks := uintptr(unsafe.Pointer(persistentChunks))
			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
				break
			}
		}
		persistent.off = alignUp(goarch.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}

// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
	for chunk != 0 {
		if p >= chunk && p < chunk+persistentChunkSize {
			return true
		}
		chunk = *(*uintptr)(unsafe.Pointer(chunk))
	}
	return false
}

// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then optionally maps that region into the Ready state
// as needed.
//
// The caller is responsible for locking.
type linearAlloc struct {
	next   uintptr // next free byte
	mapped uintptr // one byte past end of mapped space
	end    uintptr // end of reserved space

	mapMemory bool // transition memory from Reserved to Ready if true
}

func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
	if base+size < base {
		// Chop off the last byte. The runtime isn't prepared
		// to deal with situations where the bounds could overflow.
		// Leave that memory reserved, though, so we don't map it
		// later.
		size -= 1
	}
	l.next, l.mapped = base, base
	l.end = base + size
	l.mapMemory = mapMemory
}

func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
	p := alignUp(l.next, align)
	if p+size > l.end {
		return nil
	}
	l.next = p + size
	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		if l.mapMemory {
			// Transition from Reserved to Prepared to Ready.
			n := pEnd - l.mapped
			sysMap(unsafe.Pointer(l.mapped), n, sysStat, vmaName)
			sysUsed(unsafe.Pointer(l.mapped), n, n)
		}
		l.mapped = pEnd
	}
	return unsafe.Pointer(p)
}

// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
// In general, it's better to use real types which embed
// internal/runtime/sys.NotInHeap, but this serves as a generic type
// for situations where that isn't possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
type notInHeap struct{ _ sys.NotInHeap }

func (p *notInHeap) add(bytes uintptr) *notInHeap {
	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}

// redZoneSize computes the size of the redzone for a given allocation.
// Refer to the implementation in compiler-rt.
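// The redzone starts at 16 bytes for the smallest user sizes and doubles at
// each threshold below, topping out at 2048 bytes. Each threshold is chosen
// so that userSize plus its redzone still fits the corresponding
// power-of-two bucket (64, 128, 512, 4096, 16K, 32K, or 64K).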
func redZoneSize(userSize uintptr) uintptr {
	switch {
	case userSize <= (64 - 16):
		return 16 << 0
	case userSize <= (128 - 32):
		return 16 << 1
	case userSize <= (512 - 64):
		return 16 << 2
	case userSize <= (4096 - 128):
		return 16 << 3
	case userSize <= (1<<14)-256:
		return 16 << 4
	case userSize <= (1<<15)-512:
		return 16 << 5
	case userSize <= (1<<16)-1024:
		return 16 << 6
	default:
		return 16 << 7
	}
}
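// The standalone sketch below is illustrative only and is not part of the
// runtime: it approximates the sampling scheme implemented by nextSample and
// fastexprand using math/rand/v2 and math.Log in place of the runtime's
// cheaprandn and fastlog2. It draws gaps between heap profile samples from an
// exponential distribution whose mean is the profiling rate, which is what
// makes the samples Poisson-distributed over the allocation stream.
//
//	package main
//
//	import (
//		"fmt"
//		"math"
//		"math/rand/v2"
//	)
//
//	// nextSampleGap returns a random byte gap with the given mean, using the
//	// same inverse-CDF idea as fastexprand.
//	func nextSampleGap(mean float64) int64 {
//		q := rand.Float64() // uniform in [0, 1)
//		if q == 0 {
//			q = math.SmallestNonzeroFloat64
//		}
//		return int64(-math.Log(q) * mean) // inverse CDF of an exponential with the given mean
//	}
//
//	func main() {
//		const rate = 512 * 1024 // MemProfileRate's default, in bytes
//		const n = 100000
//		var sum float64
//		for i := 0; i < n; i++ {
//			sum += float64(nextSampleGap(rate))
//		}
//		fmt.Printf("average gap: %.0f bytes (target %d)\n", sum/n, rate)
//	}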