Source file src/runtime/mem.go

     1  // Copyright 2022 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import "unsafe"
     8  
     9  // OS memory management abstraction layer
    10  //
    11  // Regions of the address space managed by the runtime may be in one of four
    12  // states at any given time:
    13  // 1) None - Unreserved and unmapped, the default state of any region.
    14  // 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
    15  //               Does not count against the process' memory footprint.
    16  // 3) Prepared - Reserved, intended not to be backed by physical memory (though
    17  //               an OS may implement this lazily). Can transition efficiently to
    18  //               Ready. Accessing memory in such a region is undefined (may
    19  //               fault, may give back unexpected zeroes, etc.).
    20  // 4) Ready - may be accessed safely.
    21  //
    22  // This set of states is more than strictly necessary to support all the
    23  // currently supported platforms. One could get by with just None, Reserved, and
    24  // Ready. However, the Prepared state gives us flexibility for performance
    25  // purposes. For example, on POSIX-y operating systems, Reserved is usually a
    26  // private anonymous mmap'd region with PROT_NONE set, and to transition
    27  // to Ready would require setting PROT_READ|PROT_WRITE. However the
    28  // underspecification of Prepared lets us use just MADV_FREE to transition from
    29  // Ready to Prepared. Thus with the Prepared state we can set the permission
    30  // bits just once early on, we can efficiently tell the OS that it's free to
    31  // take pages away from us when we don't strictly need them.
    32  //
    33  // This file defines a cross-OS interface for a common set of helpers
    34  // that transition memory regions between these states. The helpers call into
    35  // OS-specific implementations that handle errors, while the interface boundary
    36  // implements cross-OS functionality, like updating runtime accounting.
    37  
// sysAlloc transitions an OS-chosen region of memory from None to Ready.
// More specifically, it obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte. This memory is always immediately available for use.
//
// sysStat must be non-nil.
//
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysAlloc(n uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
	// Account the memory before mapping it: the per-subsystem stat and
	// the GC controller's view of Ready ("mapped") memory both grow by n.
	sysStat.add(int64(n))
	gcController.mappedReady.Add(int64(n))
	p := sysAllocOS(n, vmaName)

	// When using ASAN leak detection, we must tell ASAN about
	// cases where we store pointers in mmapped memory.
	if asanenabled {
		lsanregisterrootregion(p, n)
	}

	return p
}
    62  
// sysUnused transitions a memory region from Ready to Prepared. It notifies the
// operating system that the physical pages backing this memory region are no
// longer needed and can be reused for other purposes. The contents of a
// sysUnused memory region are considered forfeit and the region must not be
// accessed again until sysUsed is called.
func sysUnused(v unsafe.Pointer, n uintptr) {
	// This region is leaving the Ready state, so shrink the GC
	// controller's mapped-Ready accounting before notifying the OS.
	gcController.mappedReady.Add(-int64(n))
	sysUnusedOS(v, n)
}
    72  
// needZeroAfterSysUnused reports whether memory returned by sysUnused must be
// zeroed for use.
//
// Pure pass-through to the OS-specific implementation; the answer depends on
// how the platform implements the Ready-to-Prepared transition.
func needZeroAfterSysUnused() bool {
	return needZeroAfterSysUnusedOS()
}
    78  
// sysUsed transitions a memory region from Prepared to Ready. It notifies the
// operating system that the memory region is needed and ensures that the region
// may be safely accessed. This is typically a no-op on systems that don't have
// an explicit commit step and hard over-commit limits, but is critical on
// Windows, for example.
//
// This operation is idempotent for memory already in the Prepared state, so
// it is safe to refer, with v and n, to a range of memory that includes both
// Prepared and Ready memory. However, the caller must provide the exact amount
// of Prepared memory for accounting purposes.
func sysUsed(v unsafe.Pointer, n, prepared uintptr) {
	// Only the Prepared portion is newly becoming Ready, so only that many
	// bytes are added to the mapped-Ready accounting, even though the OS
	// call may cover a wider (partially already-Ready) range.
	gcController.mappedReady.Add(int64(prepared))
	sysUsedOS(v, n)
}
    93  
// sysHugePage does not transition memory regions, but instead provides a
// hint to the OS that it would be more efficient to back this memory region
// with pages of a larger size transparently.
//
// No accounting changes: the region's state is unaffected.
func sysHugePage(v unsafe.Pointer, n uintptr) {
	sysHugePageOS(v, n)
}
   100  
// sysNoHugePage does not transition memory regions, but instead provides a
// hint to the OS that it would be less efficient to back this memory region
// with pages of a larger size transparently.
//
// No accounting changes: the region's state is unaffected.
func sysNoHugePage(v unsafe.Pointer, n uintptr) {
	sysNoHugePageOS(v, n)
}
   107  
// sysHugePageCollapse attempts to immediately back the provided memory region
// with huge pages. It is best-effort and may fail silently.
//
// No accounting changes: the region's state is unaffected.
func sysHugePageCollapse(v unsafe.Pointer, n uintptr) {
	sysHugePageCollapseOS(v, n)
}
   113  
// sysFree transitions a memory region from any state to None. Therefore, it
// returns memory unconditionally. It is used if an out-of-memory error has been
// detected midway through an allocation or to carve out an aligned section of
// the address space. It is okay if sysFree is a no-op only if sysReserve always
// returns a memory region aligned to the heap allocator's alignment
// restrictions.
//
// sysStat must be non-nil.
//
// The size and start address must exactly match the size and returned address
// from the original sysAlloc/sysReserve/sysReserveAligned call. That is,
// sysFree cannot be used to free a subset of a memory region.
//
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
	// When using ASAN leak detection, the memory being freed is known by
	// the sanitizer. We need to unregister it so it's not accessed by it.
	//
	// lsanunregisterrootregion matches regions by start address and size,
	// so it is not possible to unregister a subset of the region. This is
	// why sysFree requires the full region from the initial allocation.
	if asanenabled {
		lsanunregisterrootregion(v, n)
	}

	// Reverse both pieces of accounting (the per-subsystem stat and the
	// GC controller's mapped-Ready count) before handing the region back
	// to the OS.
	sysStat.add(-int64(n))
	gcController.mappedReady.Add(-int64(n))
	sysFreeOS(v, n)
}
   146  
// sysFault transitions a memory region from Ready to Reserved. It
// marks a region such that it will always fault if accessed. Used only for
// debugging the runtime.
//
// TODO(mknyszek): Currently it's true that all uses of sysFault transition
// memory from Ready to Reserved, but this may not be true in the future
// since on every platform the operation is much more general than that.
// If a transition from Prepared is ever introduced, create a new function
// that elides the Ready state accounting.
func sysFault(v unsafe.Pointer, n uintptr) {
	// Leaving the Ready state, so remove n from the mapped-Ready count
	// (hence the TODO above: this accounting assumes a Ready source state).
	gcController.mappedReady.Add(-int64(n))
	sysFaultOS(v, n)
}
   160  
// sysReserve transitions a memory region from None to Reserved. It reserves
// address space in such a way that it would cause a fatal fault upon access
// (either via permissions or not committing the memory). Such a reservation is
// thus never backed by physical memory.
//
// If the pointer passed to it is non-nil, the caller wants the reservation
// there, but sysReserve can still choose another location if that one is
// unavailable.
//
// sysReserve returns OS-aligned memory. If a larger alignment is required, use
// sysReserveAligned.
func sysReserve(v unsafe.Pointer, n uintptr, vmaName string) unsafe.Pointer {
	// No runtime accounting here: Reserved memory does not count against
	// the process' footprint (see the state descriptions at the top of
	// this file).
	p := sysReserveOS(v, n, vmaName)

	// When using ASAN leak detection, we must tell ASAN about
	// cases where we store pointers in mmapped memory.
	if asanenabled {
		lsanregisterrootregion(p, n)
	}

	return p
}
   183  
// sysReserveAligned transitions a memory region from None to Reserved.
//
// Semantics are equivalent to sysReserve, but the returned pointer is aligned
// to align bytes. It may reserve either n or n+align bytes, so it returns the
// size that was reserved.
func sysReserveAligned(v unsafe.Pointer, size, align uintptr, vmaName string) (unsafe.Pointer, uintptr) {
	if isSbrkPlatform {
		// sbrk-style platforms have their own aligned-reservation path
		// and never use arena hints.
		if v != nil {
			throw("unexpected heap arena hint on sbrk platform")
		}
		return sysReserveAlignedSbrk(size, align)
	}
	// Since the alignment is rather large in uses of this
	// function, we're not likely to get it by chance, so we ask
	// for a larger region and remove the parts we don't need.
	retries := 0
retry:
	p := uintptr(sysReserve(v, size+align, vmaName))
	switch {
	case p == 0:
		// Reservation failed entirely.
		return nil, 0
	case p&(align-1) == 0:
		// Got lucky: already aligned. Keep the whole n+align bytes.
		return unsafe.Pointer(p), size + align
	case GOOS == "windows":
		// On Windows we can't release pieces of a
		// reservation, so we release the whole thing and
		// re-reserve the aligned sub-region. This may race,
		// so we may have to try again.
		sysUnreserve(unsafe.Pointer(p), size+align)
		p = alignUp(p, align)
		p2 := sysReserve(unsafe.Pointer(p), size, vmaName)
		if p != uintptr(p2) {
			// Must have raced. Try again.
			sysUnreserve(p2, size)
			if retries++; retries == 100 {
				throw("failed to allocate aligned heap memory; too many retries")
			}
			goto retry
		}
		// Success.
		return p2, size
	default:
		// Trim off the unaligned parts.
		pAligned := alignUp(p, align)
		end := pAligned + size
		// endLen is the trailing slack left over after the aligned
		// size-byte region within the original n+align reservation.
		endLen := (p + size + align) - end

		// sysUnreserve does not allow unreserving a subset of the
		// region because LSAN does not allow unregistering a subset.
		// So we can't call sysUnreserve. Instead we simply unregister
		// the entire region from LSAN and re-register with the smaller
		// region before freeing the unnecessary portions, which does
		// allow subsets of the region.
		if asanenabled {
			lsanunregisterrootregion(unsafe.Pointer(p), size+align)
			lsanregisterrootregion(unsafe.Pointer(pAligned), size)
		}
		// Free the leading and (if any) trailing slack directly via
		// the OS layer, bypassing sysUnreserve for the reason above.
		sysFreeOS(unsafe.Pointer(p), pAligned-p)
		if endLen > 0 {
			sysFreeOS(unsafe.Pointer(end), endLen)
		}
		return unsafe.Pointer(pAligned), size
	}
}
   248  
// sysUnreserve transitions a memory region from Reserved to None.
//
// The size and start address must exactly match the size and returned address
// from sysReserve/sysReserveAligned. That is, sysUnreserve cannot be used to
// unreserve a subset of a memory region.
//
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysUnreserve(v unsafe.Pointer, n uintptr) {
	// When using ASAN leak detection, the memory being freed is known by
	// the sanitizer. We need to unregister it so it's not accessed by it.
	//
	// lsanunregisterrootregion matches regions by start address and size,
	// so it is not possible to unregister a subset of the region. This is
	// why sysUnreserve requires the full region from sysReserve.
	if asanenabled {
		lsanunregisterrootregion(v, n)
	}

	// No runtime accounting to reverse: Reserved memory was never counted
	// (compare sysReserve, which adds none).
	sysFreeOS(v, n)
}
   272  
// sysMap transitions a memory region from Reserved to Prepared. It ensures the
// memory region can be efficiently transitioned to Ready.
//
// sysStat must be non-nil.
func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat, vmaName string) {
	// Prepared memory counts against the per-subsystem stat, but not
	// against mappedReady — that happens later, in sysUsed.
	sysStat.add(int64(n))
	sysMapOS(v, n, vmaName)
}
   281  

View as plain text