// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of in-use pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan's
//	   pages are returned to the mheap and the mspan is now dead.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// If mspan.needzero is false, then free object slots in the mspan are
// already zeroed. Otherwise if needzero is true, objects are zeroed as
// they are allocated. There are various benefits to delaying zeroing
// this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.

// Virtual memory layout
//
// The heap consists of a set of arenas, which are 64MB on 64-bit and
// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
// aligned to the arena size.
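//
// Because of this alignment, the arena containing an address p, and p's
// offset within it, can be computed with simple masking; as an
// illustrative sketch (not the exact runtime code):
//
//	base := p &^ uintptr(heapArenaBytes-1) // start of p's arena
//	off := p & uintptr(heapArenaBytes-1)   // p's offset within that arena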
//
// Each arena has an associated heapArena object that stores the
// metadata for that arena: the heap bitmap for all words in the arena
// and the span map for all pages in the arena. heapArena objects are
// themselves allocated off-heap.
//
// Since arenas are aligned, the address space can be viewed as a
// series of arena frames. The arena map (mheap_.arenas) maps from
// arena frame number to *heapArena, or nil for parts of the address
// space not backed by the Go heap. The arena map is structured as a
// two-level array consisting of an "L1" arena map and many "L2" arena
// maps; however, since arenas are large, on many architectures, the
// arena map consists of a single, large L2 map.
//
// The arena map covers the entire possible address space, allowing
// the Go heap to use any part of the address space. The allocator
// attempts to keep arenas contiguous so that large spans (and hence
// large objects) can cross arenas.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

const (
	debugMalloc = false

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask
	// By construction, single page spans of the smallest object class
	// have the most objects per span.
	maxObjsPerSpan = pageSize / 8

	concurrentSweep = _ConcurrentSweep

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = int8(2)

	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9

	// heapAddrBits is the number of bits in a heap address. On
	// amd64, addresses are sign-extended beyond heapAddrBits. On
	// other arches, they are zero-extended.
	//
	// On most 64-bit platforms, we limit this to 48 bits based on a
	// combination of hardware and OS limitations.
	//
	// amd64 hardware limits addresses to 48 bits, sign-extended
	// to 64 bits. Addresses where the top 16 bits are not either
	// all 0 or all 1 are "non-canonical" and invalid. Because of
	// these "negative" addresses, we offset addresses by 1<<47
	// (arenaBaseOffset) on amd64 before computing indexes into
	// the heap arenas index.
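	//
	// As an illustrative sketch (not the exact runtime code), subtracting
	// arenaBaseOffset (on amd64) with uintptr wraparound (mod 2^64) folds
	// the two canonical halves of the address space into one contiguous
	// index space:
	//
	//	0xffff800000000000 - arenaBaseOffset == 0         // lowest "negative" address
	//	0x0000000000000000 - arenaBaseOffset == 1<<47     // zero wraps to the middle
	//	0x00007fffffffffff - arenaBaseOffset == 1<<48 - 1 // highest "positive" address
	//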
	// In 2017, amd64 hardware added support for 57 bit addresses;
	// however, currently only Linux supports this extension and the
	// kernel will never choose an address above 1<<47 unless mmap is
	// called with a hint address above 1<<47 (which we never do).
	//
	// arm64 hardware (as of ARMv8) limits user addresses to 48
	// bits, in the range [0, 1<<48).
	//
	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
	// in hardware. On Linux, Go leans on stricter OS limits. Based
	// on Linux's processor.h, the user address space is limited as
	// follows on 64-bit architectures:
	//
	// Architecture  Name              Maximum Value (exclusive)
	// ---------------------------------------------------------------------
	// amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
	// arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
	// ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
	// mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
	// s390x         TASK_SIZE         1<<64 (64 bit addresses)
	//
	// These limits may increase over time, but are currently at
	// most 48 bits except on s390x. On all architectures, Linux
	// starts placing mmap'd regions at addresses that are
	// significantly below 48 bits, so even if it's possible to
	// exceed Go's 48 bit limit, it's extremely unlikely in
	// practice.
	//
	// On 32-bit platforms, we accept the full 32-bit address
	// space because doing so is cheap.
	// mips32 only has access to the low 2GB of virtual memory, so
	// we further limit it to 31 bits.
	//
	// On ios/arm64, although 64-bit pointers are presumably
	// available, pointers are truncated to 33 bits in iOS <14.
	// Furthermore, only the top 4 GiB of the address space are
	// actually available to the application. In iOS >=14, more
	// of the address space is available, and the OS can now
	// provide addresses outside of those 33 bits. Pick 40 bits
	// as a reasonable balance between address space usage by the
	// page allocator, and flexibility for what mmap'd regions
	// we'll accept for the heap. We can't just move to the full
	// 48 bits because this uses too much address space for older
	// iOS versions.
	// TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
	// to a 48-bit address space like every other arm64 platform.
	//
	// WebAssembly currently has a limit of 4GB linear memory.
	heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 40*sys.GoosIos*sys.GoarchArm64
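
	// For example (illustrative; these values follow from the expression
	// above), heapAddrBits evaluates to:
	//
	//	linux/amd64: 48
	//	ios/arm64:   40
	//	js/wasm:     32
	//	linux/mips:  31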

	// maxAlloc is the maximum size of an allocation. On 64-bit,
	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
	// 32-bit, however, this is one less than 1<<32 because the
	// number of bytes in the address space doesn't actually fit
	// in a uintptr.
	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1

	// The number of bits in a heap address, the size of heap
	// arenas, and the L1 and L2 arena map sizes are related by
	//
	//   (1 << addr bits) = arena size * L1 entries * L2 entries
	//
	// Currently, we balance these as follows:
	//
	//       Platform  Addr bits  Arena size  L1 entries   L2 entries
	// --------------  ---------  ----------  ----------  -----------
	//       */64-bit         48        64MB           1    4M (32MB)
	// windows/64-bit         48         4MB          64    1M  (8MB)
	//      ios/arm64         33         4MB           1  2048  (8KB)
	//       */32-bit         32         4MB           1  1024  (4KB)
	//     */mips(le)         31         4MB           1   512  (2KB)

	// heapArenaBytes is the size of a heap arena. The heap
	// consists of mappings of size heapArenaBytes, aligned to
	// heapArenaBytes. The initial heap mapping is one arena.
	//
	// This is currently 64MB on 64-bit non-Windows and 4MB on
	// 32-bit and on Windows. We use smaller arenas on Windows
	// because all committed memory is charged to the process,
	// even if it's not touched. Hence, for processes with small
	// heaps, the mapped arena space needs to be commensurate.
	// This is particularly important with the race detector,
	// since it significantly amplifies the cost of committed
	// memory.
	heapArenaBytes = 1 << logHeapArenaBytes

	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
	// prefer using heapArenaBytes where possible (we need the
	// constant to compute some other constants).
	logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm + (2+20)*sys.GoosIos*sys.GoarchArm64

	// heapArenaBitmapBytes is the size of each heap arena's bitmap.
	heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)

	pagesPerArena = heapArenaBytes / pageSize

	// arenaL1Bits is the number of bits of the arena number
	// covered by the first level arena map.
	//
	// This number should be small, since the first level arena
	// map requires PtrSize*(1<<arenaL1Bits) of space in the
	// binary's BSS. It can be zero, in which case the first level
	// index is effectively unused. There is a performance benefit
	// to this, since the generated code can be more efficient,
	// but comes at the cost of having a large L2 mapping.
	//
	// We use the L1 map on 64-bit Windows because the arena size
	// is small, but the address space is still 48 bits, and
	// there's a high cost to having a large L2.
	arenaL1Bits = 6 * (_64bit * sys.GoosWindows)

	// arenaL2Bits is the number of bits of the arena number
	// covered by the second level arena index.
	//
	// The size of each arena map allocation is proportional to
	// 1<<arenaL2Bits, so it's important that this not be too
	// large. 48 bits leads to 32MB arena index allocations, which
	// is about the practical threshold.
	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits

	// arenaL1Shift is the number of bits to shift an arena frame
	// number by to compute an index into the first level arena map.
	arenaL1Shift = arenaL2Bits

	// arenaBits is the total bits in a combined arena map index.
	// This is split between the index into the L1 arena map and
	// the L2 arena map.
	arenaBits = arenaL1Bits + arenaL2Bits
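
	// As an illustrative sketch (not the exact runtime code), a combined
	// arena map index i splits into its two levels as:
	//
	//	i1 := i >> arenaL1Shift        // index into the L1 arena map
	//	i2 := i & (1<<arenaL2Bits - 1) // index into an L2 arena map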

	// arenaBaseOffset is the pointer value that corresponds to
	// index 0 in the heap arena map.
	//
	// On amd64, the address space is 48 bits, sign extended to 64
	// bits. This offset lets us handle "negative" addresses (or
	// high addresses if viewed as unsigned).
	//
	// On aix/ppc64, this offset allows us to keep heapAddrBits at
	// 48. Otherwise it would have to be 60 in order to handle mmap
	// addresses (in the range 0x0a00000000000000 - 0x0affffffffffffff),
	// but in that case the memory reserved in (s *pageAlloc).init for
	// chunks causes significant slowdowns.
	//
	// On other platforms, the user address space is contiguous
	// and starts at 0, so no offset is necessary.
	arenaBaseOffset = 0xffff800000000000*sys.GoarchAmd64 + 0x0a00000000000000*sys.GoosAix
	// A typed version of this constant that will make it into DWARF (for viewcore).
	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096
)

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// physHugePageSize is the size in bytes of the OS's default physical huge
// page size whose allocation is opaque to the application. It is assumed
// and verified to be a power of two.
//
// If set, this must be set by the OS init code (typically in osinit) before
// mallocinit. However, setting it at all is optional, and leaving the default
// value is always safe (though potentially less efficient).
//
// Since physHugePageSize is always assumed to be a power of two,
// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
// The purpose of physHugePageShift is to avoid doing divisions in
// performance critical functions.
var (
	physHugePageSize  uintptr
	physHugePageShift uint
)

// OS memory management abstraction layer
//
// Regions of the address space managed by the runtime may be in one of four
// states at any given time:
// 1) None - Unreserved and unmapped, the default state of any region.
// 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
//               Does not count against the process' memory footprint.
// 3) Prepared - Reserved, intended not to be backed by physical memory (though
//               an OS may implement this lazily). Can transition efficiently to
//               Ready. Accessing memory in such a region is undefined (may
//               fault, may give back unexpected zeroes, etc.).
// 4) Ready - may be accessed safely.
//
// This set of states is more than is strictly necessary to support all the
// currently supported platforms. One could get by with just None, Reserved, and
// Ready. However, the Prepared state gives us flexibility for performance
// purposes.
// For example, on POSIX-y operating systems, Reserved is usually a
// private anonymous mmap'd region with PROT_NONE set, and to transition
// to Ready would require setting PROT_READ|PROT_WRITE. However, the
// underspecification of Prepared lets us use just MADV_FREE to transition from
// Ready to Prepared. Thus, with the Prepared state, we can set the permission
// bits just once early on, and we can efficiently tell the OS that it's free to
// take pages away from us when we don't strictly need them.
//
// For each OS there is a common set of helpers defined that transition
// memory regions between these states. The helpers are as follows:
//
// sysAlloc transitions an OS-chosen region of memory from None to Ready.
// More specifically, it obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte. This memory is always immediately available for use.
//
// sysFree transitions a memory region from any state to None. Therefore, it
// returns memory unconditionally. It is used if an out-of-memory error has been
// detected midway through an allocation or to carve out an aligned section of
// the address space. sysFree may be a no-op only if sysReserve always
// returns a memory region aligned to the heap allocator's alignment
// restrictions.
//
// sysReserve transitions a memory region from None to Reserved. It reserves
// address space in such a way that it would cause a fatal fault upon access
// (either via permissions or not committing the memory). Such a reservation is
// thus never backed by physical memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but sysReserve can still choose another
// location if that one is unavailable.
// NOTE: sysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysReserve.
//
// sysMap transitions a memory region from Reserved to Prepared. It ensures the
// memory region can be efficiently transitioned to Ready.
//
// sysUsed transitions a memory region from Prepared to Ready. It notifies the
// operating system that the memory region is needed and ensures that the region
// may be safely accessed. This is typically a no-op on systems that don't have
// an explicit commit step and hard over-commit limits, but is critical on
// Windows, for example.
//
// sysUnused transitions a memory region from Ready to Prepared. It notifies the
// operating system that the physical pages backing this memory region are no
// longer needed and can be reused for other purposes. The contents of a
// sysUnused memory region are considered forfeit and the region must not be
// accessed again until sysUsed is called.
//
// sysFault transitions a memory region from Ready or Prepared to Reserved. It
// marks a region such that it will always fault if accessed. Used only for
// debugging the runtime.
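//
// In summary (a condensed restatement of the helpers above), the legal
// transitions are:
//
//	None           --sysAlloc-->   Ready
//	None           --sysReserve--> Reserved
//	Reserved       --sysMap-->     Prepared
//	Prepared       --sysUsed-->    Ready
//	Ready          --sysUnused-->  Prepared
//	Ready/Prepared --sysFault-->   Reserved
//	any state      --sysFree-->    None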

func mallocinit() {
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	testdefersizes()

	if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
		// heapBits expects modular arithmetic on bitmap
		// addresses to work.
		throw("heapArenaBitmapBytes not a power of 2")
	}

	// Copy class sizes out for statistics table.
	for i := range class_to_size {
		memstats.by_size[i].size = uint32(class_to_size[i])
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize > maxPhysPageSize {
		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}
	if physHugePageSize&(physHugePageSize-1) != 0 {
		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
		throw("bad system huge page size")
	}
	if physHugePageSize > maxPhysHugePageSize {
		// physHugePageSize is greater than the maximum supported huge page size.
		// Don't throw here, like in the other cases, since a system configured
		// in this way isn't wrong, we just don't have the code to support it.
		// Instead, silently set the huge page size to zero.
		physHugePageSize = 0
	}
	if physHugePageSize != 0 {
		// Since physHugePageSize is a power of 2, it suffices to increase
		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
		for 1<<physHugePageShift != physHugePageSize {
			physHugePageShift++
		}
	}
	if pagesPerArena%pagesPerSpanRoot != 0 {
		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
		throw("bad pagesPerSpanRoot")
	}
	if pagesPerArena%pagesPerReclaimerChunk != 0 {
		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
		throw("bad pagesPerReclaimerChunk")
	}

	// Initialize the heap.
	mheap_.init()
	mcache0 = allocmcache()
	lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
	lockInit(&proflock, lockRankProf)
	lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)

	// Create initial arena growth hints.
	if sys.PtrSize == 8 {
		// On a 64-bit machine, we pick the following hints
		// because:
		//
		// 1. Starting from the middle of the address space
		// makes it easier to grow out a contiguous range
		// without running into some other mapping.
		//
		// 2. This makes Go heap addresses more easily
		// recognizable when debugging.
		//
		// 3. Stack scanning in gccgo is still conservative,
		// so it's important that addresses be distinguishable
		// from other data.
		//
		// Starting at 0x00c0 means that the valid memory addresses
		// will begin 0x00c0, 0x00c1, ...
		// In little-endian, that's c0 00, c1 00, ... None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
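		//
		// For example (illustrative; the values follow from the default
		// case below), the candidate hint addresses are
		//
		//	0x00c000000000, 0x01c000000000, ..., 0x7fc000000000
		//
		// with 0x00c000000000 tried first, since it is pushed onto the
		// head of the hint list last.
		//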
		// These choices reduce the odds of a conservative garbage collector
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On ios/arm64, the address space is even smaller.
		//
		// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
		// processes.
		for i := 0x7f; i >= 0; i-- {
			var p uintptr
			switch {
			case raceenabled:
				// The TSAN runtime requires the heap
				// to be in the range [0x00c000000000,
				// 0x00e000000000).
				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
				if p >= uintptrMask&0x00e000000000 {
					continue
				}
			case GOARCH == "arm64" && GOOS == "ios":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			case GOOS == "aix":
				if i == 0 {
					// We don't use addresses directly after 0x0A00000000000000
					// to avoid collisions with other mmaps done by non-Go programs.
					continue
				}
				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
			hint.addr = p
			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		}
	} else {
		// On a 32-bit machine, we're much more concerned
		// about keeping the usable heap contiguous.
		// Hence:
		//
		// 1. We reserve space for all heapArenas up front so
		// they don't get interleaved with the heap. They're
		// ~258MB, so this isn't too bad. (We could reserve a
		// smaller amount of space up front if this is a
		// problem.)
		//
		// 2. We hint the heap to start right above the end of
		// the binary so we have the best chance of keeping it
		// contiguous.
		//
		// 3. We try to stake out a reasonably large initial
		// heap reservation.

		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
		meta := uintptr(sysReserve(nil, arenaMetaSize))
		if meta != 0 {
			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
		}

		// We want to start the arena low, but if we're linked
		// against C code, it's possible global constructors
		// have called malloc and adjusted the process' brk.
		// Query the brk so we can avoid trying to map the
		// region over it (which will cause the kernel to put
		// the region somewhere else, likely at a high
		// address).
		procBrk := sbrk0()

		// If we ask for the end of the data segment but the
		// operating system requires a little more space
		// before we can start allocating, it will give out a
		// slightly higher pointer. Except QEMU, which is
		// buggy, as usual: it won't adjust the pointer
		// upward. So adjust it upward a little bit ourselves:
		// 1/4 MB to get away from the running binary image.
		p := firstmoduledata.end
		if p < procBrk {
			p = procBrk
		}
		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
			p = mheap_.heapArenaAlloc.end
		}
		p = alignUp(p+(256<<10), heapArenaBytes)
		// Because we're worried about fragmentation on
		// 32-bit, we try to make a large initial reservation.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}
		for _, arenaSize := range arenaSizes {
			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
			if a != nil {
				mheap_.arena.init(uintptr(a), size, false)
				p = mheap_.arena.end // For hint below
				break
			}
		}
		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		hint.addr = p
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}
}

// sysAlloc allocates heap arena space for at least n bytes. The
// returned pointer is always heapArenaBytes-aligned and backed by
// h.arenas metadata. The returned size is always a multiple of
// heapArenaBytes. sysAlloc returns nil on failure.
// There is no corresponding free function.
//
// sysAlloc returns a memory region in the Reserved state. This region must
// be transitioned to Prepared and then Ready before use.
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
	assertLockHeld(&h.lock)

	n = alignUp(n, heapArenaBytes)

	// First, try the arena pre-reservation.
	v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys)
	if v != nil {
		size = n
		goto mapped
	}

	// Try to grow the heap at a hint address.
	for h.arenaHints != nil {
		hint := h.arenaHints
		p := hint.addr
		if hint.down {
			p -= n
		}
		if p+n < p {
			// We can't use this, so don't ask.
			v = nil
		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
			// Outside addressable heap. Can't use.
			v = nil
		} else {
			v = sysReserve(unsafe.Pointer(p), n)
		}
		if p == uintptr(v) {
			// Success. Update the hint.
			if !hint.down {
				p += n
			}
			hint.addr = p
			size = n
			break
		}
		// Failed. Discard this hint and try the next.
		//
		// TODO: This would be cleaner if sysReserve could be
		// told to only return the requested address. In
		// particular, this is already how Windows behaves, so
		// it would simplify things there.
		if v != nil {
			sysFree(v, n, nil)
		}
		h.arenaHints = hint.next
		h.arenaHintAlloc.free(unsafe.Pointer(hint))
	}

	if size == 0 {
		if raceenabled {
			// The race detector assumes the heap lives in
			// [0x00c000000000, 0x00e000000000), but we
			// just ran out of hints in this region. Give
			// a nice failure.
			throw("too many address space collisions for -race mode")
		}

		// All of the hints failed, so we'll take any
		// (sufficiently aligned) address the kernel will give
		// us.
		v, size = sysReserveAligned(nil, n, heapArenaBytes)
		if v == nil {
			return nil, 0
		}

		// Create new hints for extending this region.
		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr, hint.down = uintptr(v), true
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr = uintptr(v) + size
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}

	// Check for bad pointers or pointers we can't use.
	{
		var bad string
		p := uintptr(v)
		if p+size < p {
			bad = "region exceeds uintptr range"
		} else if arenaIndex(p) >= 1<<arenaBits {
			bad = "base outside usable address space"
		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
			bad = "end outside usable address space"
		}
		if bad != "" {
			// This should be impossible on most architectures,
			// but it would be really confusing to debug.
			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
			throw("memory reservation exceeds address space limit")
		}
	}

	if uintptr(v)&(heapArenaBytes-1) != 0 {
		throw("misrounded allocation in sysAlloc")
	}

mapped:
	// Create arena metadata.
	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
		l2 := h.arenas[ri.l1()]
		if l2 == nil {
			// Allocate an L2 arena map.
			l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
			if l2 == nil {
				throw("out of memory allocating heap arena map")
			}
			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
		}

		if l2[ri.l2()] != nil {
			throw("arena already initialized")
		}
		var r *heapArena
		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
		if r == nil {
			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
			if r == nil {
				throw("out of memory allocating heap arena metadata")
			}
		}

		// Add the arena to the arenas list.
		if len(h.allArenas) == cap(h.allArenas) {
			size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
			if size == 0 {
				size = physPageSize
			}
			newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
			if newArray == nil {
				throw("out of memory allocating allArenas")
			}
			oldSlice := h.allArenas
			*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
			copy(h.allArenas, oldSlice)
			// Do not free the old backing array because
			// there may be concurrent readers. Since we
			// double the array each time, this can lead
			// to at most 2x waste.
		}
		h.allArenas = h.allArenas[:len(h.allArenas)+1]
		h.allArenas[len(h.allArenas)-1] = ri

		// Store atomically just in case an object from the
		// new heap arena becomes visible before the heap lock
		// is released (which shouldn't happen, but there's
		// little downside to this).
		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
	}

	// Tell the race detector about the new heap memory.
	if raceenabled {
		racemapshadow(v, size)
	}

	return
}

// sysReserveAligned is like sysReserve, but the returned pointer is
// aligned to align bytes. It may reserve either n or n+align bytes,
// so it returns the size that was reserved.
func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
	// Since the alignment is rather large in uses of this
	// function, we're not likely to get it by chance, so we ask
	// for a larger region and remove the parts we don't need.
	retries := 0
retry:
	p := uintptr(sysReserve(v, size+align))
	switch {
	case p == 0:
		return nil, 0
	case p&(align-1) == 0:
		// We got lucky and got an aligned region, so we can
		// use the whole thing.
		return unsafe.Pointer(p), size + align
	case GOOS == "windows":
		// On Windows we can't release pieces of a
		// reservation, so we release the whole thing and
		// re-reserve the aligned sub-region. This may race,
		// so we may have to try again.
		sysFree(unsafe.Pointer(p), size+align, nil)
		p = alignUp(p, align)
		p2 := sysReserve(unsafe.Pointer(p), size)
		if p != uintptr(p2) {
			// Must have raced. Try again.
			sysFree(p2, size, nil)
			if retries++; retries == 100 {
				throw("failed to allocate aligned heap memory; too many retries")
			}
			goto retry
		}
		// Success.
		return p2, size
	default:
		// Trim off the unaligned parts.
		pAligned := alignUp(p, align)
		sysFree(unsafe.Pointer(p), pAligned-p, nil)
		end := pAligned + size
		endLen := (p + size + align) - end
		if endLen > 0 {
			sysFree(unsafe.Pointer(end), endLen, nil)
		}
		return unsafe.Pointer(pAligned), size
	}
}

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uintptr(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= uint(theBit + 1)
			s.freeindex = freeidx
			s.allocCount++
			return gclinkptr(result*s.elemsize + s.base())
		}
	}
	return 0
}

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavy
// weight allocation. If it is a heavy weight allocation the caller must
// determine whether a new GC cycle needs to be started or if the GC is active
// whether this goroutine needs to assist the GC.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[spc]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		c.refill(spc)
		shouldhelpgc = true
		s = c.alloc[spc]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
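//
// As an illustrative sketch (mirroring the small-object path below), a
// request is first rounded up to its size class, so a 33-byte request
// is served from the 48-byte class:
//
//	sizeclass := size_to_class8[divRoundUp(33, smallSizeDiv)]
//	size := uintptr(class_to_size[sizeclass]) // 48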
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if debug.malloc {
		if debug.sbrk != 0 {
			align := uintptr(16)
			if typ != nil {
				// TODO(austin): This should be just
				//	align = uintptr(typ.align)
				// but that's only 4 on 32-bit platforms,
				// even if there's a uint64 field in typ (see #599).
				// This causes 64-bit atomic accesses to panic.
				// Hence, we use stricter alignment that matches
				// the normal allocator better.
				if size&7 == 0 {
					align = 8
				} else if size&3 == 0 {
					align = 4
				} else if size&1 == 0 {
					align = 2
				} else {
					align = 1
				}
			}
			return persistentalloc(size, align, &memstats.other_sys)
		}

		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
			inittrace.allocs += 1
		}
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := getMCache()
	if c == nil {
		throw("mallocgc called without a P or outside bootstrapping")
	}
	var span *mspan
	var x unsafe.Pointer
	noscan := typ == nil || typ.ptrdata == 0
	// In some cases block zeroing can profitably (for latency reduction purposes)
	// be delayed till preemption is possible; isZeroed tracks that state.
	isZeroed := true
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize)
			// is tunable. The current setting is 16 bytes, which relates to
			// 2x worst case memory wastage (when all but one subobject are
			// unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case winning is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly. So when an object will be freed explicitly, we
			// ensure that its size >= maxTinySize.
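			//
			// For example (illustrative; the alignment rules appear just
			// below): after a 5-byte allocation, a 4-byte request is
			// aligned up and still fits in the same 16-byte block:
			//
			//	off = alignUp(5, 4) = 8 // 8+4 <= 16, so it fits
			//
			// A following 8-byte request would need alignUp(12, 8) = 16
			// and so starts a new block.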
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting
			// finalizers for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = alignUp(off, 8)
			} else if sys.PtrSize == 4 && size == 12 {
				// Conservatively align 12-byte objects to 8 bytes on 32-bit
				// systems so that objects whose first field is a 64-bit
				// value are aligned to 8 bytes and do not cause a fault on
				// atomic access. See issue 37262.
				// TODO(mknyszek): Remove this workaround if/when issue 36606
				// is resolved.
				off = alignUp(off, 8)
			} else if size&3 == 0 {
				off = alignUp(off, 4)
			} else if size&1 == 0 {
				off = alignUp(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.tinyAllocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			span = c.alloc[tinySpanClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(tinySpanClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
				// Note: disabled when race detector is on, see comment near end of this function.
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass uint8
			if size <= smallSizeMax-8 {
				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
			} else {
				sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
			}
			size = uintptr(class_to_size[sizeclass])
			spc := makeSpanClass(sizeclass, noscan)
			span = c.alloc[spc]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(spc)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		shouldhelpgc = true
		// For large allocations, keep track of zeroed state so that
		// bulk zeroing can happen later in a preemptible context.
		span, isZeroed = c.allocLarge(size, needzero && !noscan, noscan)
		span.freeindex = 1
		span.allocCount = 1
		x = unsafe.Pointer(span.base())
		size = span.elemsize
	}

	var scanSize uintptr
	if !noscan {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation.
			// If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
		c.scanAlloc += scanSize
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(span, uintptr(x), size, scanSize)
	}

	if raceenabled {
		racemalloc(x, size)
	}

	if msanenabled {
		msanmalloc(x, size)
	}

	if rate := MemProfileRate; rate > 0 {
		// Note cache c only valid while m acquired; see #47302
		if rate != 1 && size < c.nextSample {
			c.nextSample -= size
		} else {
			profilealloc(mp, x, size)
		}
	}
	mp.mallocing = 0
	releasem(mp)

	// Pointer-free data can be zeroed late in a context where preemption can occur.
	// x will keep the memory alive.
	if !isZeroed && needzero {
		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
	}

	if debug.malloc {
		if debug.allocfreetrace != 0 {
			tracealloc(x, size, typ)
		}

		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
			inittrace.bytes += uint64(size)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled && noscan && dataSize < maxTinySize {
		// Pad tinysize allocations so they are aligned with the end
		// of the tinyalloc region. This ensures that any arithmetic
		// that goes off the top end of the object will be detectable
		// by checkptr (issue 38872).
		// Note that we disable tinyalloc when raceenabled for this to work.
		// TODO: This padding is only performed when the race detector
		// is enabled. It would be nice to enable it if any package
		// was compiled with checkptr, but there's no easy way to
		// detect that (especially at compile time).
		// TODO: enable this padding for all allocations, not just
		// tinyalloc ones. It's tricky because of pointer maps.
		// Maybe just all noscan objects?
		x = add(x, size-dataSize)
	}

	return x
}

// memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
// on chunks of the buffer to be zeroed, with opportunities for preemption
// along the way. memclrNoHeapPointers contains no safepoints and also
// cannot be preemptively scheduled, so this provides a still-efficient
// block copy that can also be preempted on a reasonable granularity.
//
// Use this with care; if the data being cleared is tagged to contain
// pointers, this allows the GC to run before it is all cleared.
func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
	v := uintptr(x)
	// got this from benchmarking. 128k is too small, 512k is too large.
	const chunkBytes = 256 * 1024
	vsize := v + size
	for voff := v; voff < vsize; voff = voff + chunkBytes {
		if getg().preempt {
			// may hold locks, e.g., profiling
			goschedguarded()
		}
		// clear min(avail, lump) bytes
		n := vsize - voff
		if n > chunkBytes {
			n = chunkBytes
		}
		memclrNoHeapPointers(unsafe.Pointer(voff), n)
	}
}

// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n == 1 {
		return mallocgc(typ.size, typ, true)
	}
	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
	if overflow || mem > maxAlloc || n < 0 {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(mem, typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := getMCache()
	if c == nil {
		throw("profilealloc called without a P or outside bootstrapping")
	}
	c.nextSample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows the exponential
// distribution (exp(MemProfileRate)), so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() uintptr {
	if MemProfileRate == 1 {
		// Callers assign our return value to
		// mcache.next_sample, but next_sample is not used
		// when the rate is 1. So avoid the math below and
		// just return something.
		return 0
	}
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	return uintptr(fastexprand(MemProfileRate))
}

// fastexprand returns a random number from an exponential distribution with
// the specified mean.
func fastexprand(mean int) int32 {
	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution exp(-x/mean).
	// The probability density function is (1/mean)*exp(-x/mean), so the CDF is
	// p = 1 - exp(-x/mean), so
	//   q = 1 - p == exp(-x/mean)
	//   log_e(q) = -x/mean
	//   x = -log_e(q) * mean
	//   x = log_2(q) * (-log_e(2)) * mean   ; Using log_2 for efficiency
	const randomBitCount = 26
	q := fastrand()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() uintptr {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return uintptr(fastrand() % uint32(2*rate))
	}
	return 0
}

type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// persistentChunkSize is the number of bytes we allocate when we grow
// a persistentAlloc.
const persistentChunkSize = 256 << 10

// persistentChunks is a list of all the persistent chunks we have
// allocated. The list is maintained through the first word in the
// persistent chunk. This is updated atomically.
var persistentChunks *notInHeap

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
	const (
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = alignUp(persistent.off, align)
	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}

		// Add the new chunk to the persistentChunks list.
		for {
			chunks := uintptr(unsafe.Pointer(persistentChunks))
			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
				break
			}
		}
		persistent.off = alignUp(sys.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}

// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
	for chunk != 0 {
		if p >= chunk && p < chunk+persistentChunkSize {
			return true
		}
		chunk = *(*uintptr)(unsafe.Pointer(chunk))
	}
	return false
}

// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then optionally maps that region into the Ready state
// as needed.
//
// The caller is responsible for locking.
type linearAlloc struct {
	next   uintptr // next free byte
	mapped uintptr // one byte past end of mapped space
	end    uintptr // end of reserved space

	mapMemory bool // transition memory from Reserved to Ready if true
}

func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
	if base+size < base {
		// Chop off the last byte. The runtime isn't prepared
		// to deal with situations where the bounds could overflow.
		// Leave that memory reserved, though, so we don't map it
		// later.
		size -= 1
	}
	l.next, l.mapped = base, base
	l.end = base + size
	l.mapMemory = mapMemory
}

func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	p := alignUp(l.next, align)
	if p+size > l.end {
		return nil
	}
	l.next = p + size
	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		if l.mapMemory {
			// Transition from Reserved to Prepared to Ready.
			sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
			sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
		}
		l.mapped = pEnd
	}
	return unsafe.Pointer(p)
}

// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
// In general, it's better to use real types marked as go:notinheap,
// but this serves as a generic type for situations where that isn't
// possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
//
//go:notinheap
type notInHeap struct{}

func (p *notInHeap) add(bytes uintptr) *notInHeap {
	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}
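
// As a usage sketch (illustrative only; see mallocinit's 32-bit path and
// mheap.sysAlloc for the real call sites), a linearAlloc is initialized
// over a reserved region and then hands out aligned pieces, mapping the
// underlying pages as they are first used:
//
//	var la linearAlloc
//	la.init(base, size, true) // true: sysMap+sysUsed pages on demand
//	p := la.alloc(n, sys.PtrSize, &memstats.other_sys)
//	// p is nil once the reservation is exhausted.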