
2.5.1 Synchronous Release


Calling scavenge proactively releases idle physical memory (RSS) back to the operating system.

There are two prerequisites:

First, when memory is requested from the operating system, the address range is recorded in mheap.pages.inUse.

// mpagealloc.go

type pageAlloc struct {
    
	// inUse is a slice of ranges of address space which are
	// known by the page allocator to be currently in-use (passed
	// to grow).
	inUse addrRanges

	// scav stores the scavenger state.
	scav struct {
        
		// inUse is a slice of ranges of address space which have not
		// yet been looked at by the scavenger.
		inUse addrRanges

		// gen is the scavenge generation number.
		gen uint32

		// released is the amount of memory released this generation.
		released uintptr
	}
}
func (p *pageAlloc) grow(base, size uintptr) {
	// Round the range out to whole chunks; sysGrow expects aligned values.
	limit := alignUp(base+size, pallocChunkBytes)
	base = alignDown(base, pallocChunkBytes)

	p.sysGrow(base, limit)

	// Record the new range as in-use.
	p.inUse.add(makeAddrRange(base, limit))
}
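
For reference, grow's alignment helpers are simple bit tricks, equivalent to:

// Round n up / down to a multiple of a (a must be a power of two).
func alignUp(n, a uintptr) uintptr   { return (n + a - 1) &^ (a - 1) }
func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

Since pallocChunkBytes is 4 MiB (512 runtime pages of 8 KiB each), grow always records whole 4 MiB chunks in inUse.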

Second, before each round of scavenging, scavengeStartGen is called to initialize the relevant state; most importantly, it copies the inUse address ranges.

// mgcscavenge.go

// scavengeStartGen starts a new scavenge generation, resetting
// the scavenger's search space to the full in-use address space.

func (p *pageAlloc) scavengeStartGen() {

	// Copy the in-use address ranges into the scavenger's search space.
	p.inUse.cloneInto(&p.scav.inUse)

	// Determine the starting address and trim the range ... (for efficiency)

	p.scav.gen++
	atomic.Storeuintptr(&p.scav.released, 0)
}
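
To make these two prerequisites concrete, here is a minimal, hypothetical sketch of an in-use range set. It is not the runtime's implementation (the real addrRanges keeps its ranges sorted, coalesces neighbors, and manages its own memory), but it shows what grow records and what scavengeStartGen copies for the scavenger to consume.

// addrRange is a half-open address interval [base, limit).
type addrRange struct{ base, limit uintptr }

// addrRanges is a set of in-use address ranges.
type addrRanges struct{ ranges []addrRange }

// add records a newly grown range (the real version keeps the slice
// sorted and merges adjacent ranges).
func (a *addrRanges) add(r addrRange) {
	a.ranges = append(a.ranges, r)
}

// cloneInto copies the set, giving the scavenger its own snapshot to
// work through without disturbing the allocator's view.
func (a *addrRanges) cloneInto(dst *addrRanges) {
	dst.ranges = append(dst.ranges[:0], a.ranges...)
}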

Next, address ranges are taken one at a time and their free pages are released, until the requested amount has been scavenged.

// mgcscavenge.go

// scavenge scavenges nbytes worth of free pages, starting with the
// highest address first. Successive calls continue from where it left
// off until the heap is exhausted. Call scavengeStartGen to bring it
// back to the top of the heap.
//
// Returns the amount of memory scavenged in bytes.

func (p *pageAlloc) scavenge(nbytes uintptr) uintptr {
	var addrs addrRange
	var gen uint32
	released := uintptr(0)

	// Loop until the cumulative amount released meets the request.
	for released < nbytes {
		// Reserve a piece of the unsearched space (scav.inUse); stop
		// once the whole heap has been walked.
		if addrs.size() == 0 {
			if addrs, gen = p.scavengeReserve(); addrs.size() == 0 {
				break
			}
		}
		r, a := p.scavengeOne(addrs, nbytes-released)
		released += r
		addrs = a
	}

	// Hand the unsearched remainder back for later calls.
	p.scavengeUnreserve(addrs, gen)
	return released
}
// scavengeOne walks over address range work until it finds
// a contiguous run of pages to scavenge. It will try to scavenge
// at most max bytes at once, but may scavenge more to avoid
// breaking huge pages. Once it scavenges some memory it returns
// how much it scavenged in bytes.
//
// Returns the number of bytes scavenged and the part of work
// which was not yet searched.

func (p *pageAlloc) scavengeOne(work addrRange, max uintptr) (uintptr, addrRange) {

	// Compute the maximum number of pages to scavenge.
	maxPages := max / pageSize
	if max%pageSize != 0 {
		maxPages++
	}

	// Calculate the minimum number of pages we can scavenge.
	//
	// Because we can only scavenge whole physical pages, we must
	// ensure that we scavenge at least minPages each time, aligned
	// to minPages*pageSize.
	minPages := physPageSize / pageSize
	if minPages < 1 {
		minPages = 1
	}

	// Fast path: check the chunk containing the top-most address in work.
	if r, w := p.scavengeOneFast(work, minPages, maxPages); r != 0 {
		return r, w
	} else {
		work = w
	}

	// findCandidate finds the next scavenge candidate in work optimistically.
	findCandidate := func(work addrRange) (chunkIdx, bool) {
        
		// Use the radix-tree summary to find a chunk with enough free pages,
		// then use the chunk bitmap to confirm they are free and unscavenged.
		for i := chunkIndex(work.limit.addr() - 1); i >= chunkIndex(work.base.addr()); i-- {
            
			// If this chunk is totally in-use or has no unscavenged pages, 
            // don't bother doing a more sophisticated check.

			// Check quickly if there are enough free pages at all.
			if p.summary[len(p.summary)-1][i].max() < uint(minPages) {
				continue
			}

			l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&p.chunks[i.l1()])))
			if l2 != nil && l2[i.l2()].hasScavengeCandidate(minPages) {
				return i, true
			}
		}
        
		return 0, false
	}

	// Slow path: iterate optimistically over the in-use address space
	// looking for any free and unscavenged page. 
	for work.size() != 0 {

		// Search for a scavenge candidate.
		candidateChunkIdx, ok := findCandidate(work)
		if !ok {
			work.limit = work.base
			break
		}

		// Once found, verify and scavenge it!
		chunk := p.chunkOf(candidateChunkIdx)
		base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
		if npages > 0 {
			work.limit = offAddr{p.scavengeRangeLocked(candidateChunkIdx, base, npages)}
			return uintptr(npages) * pageSize, work
		}
	}
    
	return 0, work
}
// hasScavengeCandidate returns true if there's any min-page-aligned group of
// min pages of free-and-unscavenged memory in the region represented by this
// pallocData.
func (m *pallocData) hasScavengeCandidate(min uintptr) bool {
	// ... (word-at-a-time bitmap scan elided)
}
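
As a rough illustration only (the real implementation scans the free and scavenged bitmaps a machine word at a time), the check amounts to looking for a min-aligned group of min pages that are free and not yet scavenged. A naive, hypothetical sketch over boolean slices:

// hasCandidateNaive is a simplified stand-in for hasScavengeCandidate:
// free[i] reports page i as free, scavenged[i] reports it as already
// released. Return true if any min-aligned group of min consecutive
// pages is free and unscavenged.
func hasCandidateNaive(free, scavenged []bool, min int) bool {
	for base := 0; base+min <= len(free); base += min {
		ok := true
		for i := base; i < base+min; i++ {
			if !free[i] || scavenged[i] {
				ok = false
				break
			}
		}
		if ok {
			return true
		}
	}
	return false
}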

Finally, sysUnused performs the actual release of physical memory.

// mgcscavenge.go

// scavengeRangeLocked scavenges the given region of memory.
func (p *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr {

	// Starting address.
	addr := chunkBase(ci) + uintptr(base)*pageSize

	// Release the physical memory.
	sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)

	// Return the pages to the page allocator.
	p.free(addr, uintptr(npages), true)

	// Mark the pages as scavenged in the bitmap.
	p.chunkOf(ci).scavenged.setRange(base, npages)

	return addr
}

The operation releases only physical memory; that is, it breaks the mapping between physical pages and the virtual address range. The virtual memory managed by the allocator is not released: each process has roughly 256 TB of address space available, and reserved-but-unmapped virtual memory does not consume physical memory, so there is no need to give it back. When the virtual range is reused, its scavenged flag is checked and sysUsed is called to re-attach physical memory.

// mheap.go

func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan {
	var s *mspan
	systemstack(func() {
		s = h.allocSpan(npages, spanAllocHeap, spanclass)
	})
	return s
}


func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
	base, scav = h.pages.alloc(npages)

HaveSpan:
	nbytes := npages * pageSize

	if scav != 0 {
		// sysUsed all the pages that are actually available
		// in the span since some of them might be scavenged.
		sysUsed(unsafe.Pointer(base), nbytes)
	}
}
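
What sysUnused and sysUsed do can be reproduced outside the runtime. On Linux, sysUnused is built on madvise. The standalone sketch below (using golang.org/x/sys/unix rather than runtime internals) shows that dropping the physical pages leaves the virtual mapping intact, and that simply touching the memory again restores physical backing, which is why sysUsed is essentially a no-op on Linux.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	const size = 1 << 20 // 1 MiB

	// Reserve and commit anonymous virtual memory (roughly sysAlloc).
	mem, err := unix.Mmap(-1, 0, size,
		unix.PROT_READ|unix.PROT_WRITE,
		unix.MAP_ANON|unix.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	mem[0] = 1 // touch a page so it gains physical backing (RSS grows)

	// "Scavenge": drop the physical pages but keep the virtual mapping,
	// roughly what sysUnused does with MADV_DONTNEED / MADV_FREE.
	if err := unix.Madvise(mem, unix.MADV_DONTNEED); err != nil {
		panic(err)
	}

	// Reuse: touching the range again faults in fresh zero-filled pages.
	fmt.Println(mem[0]) // prints 0: old contents gone, RSS was released
}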

Trigger points

When the heap grows, the allocator tries to scavenge an "equivalent" amount of idle physical memory (fragments) to avoid waste.

// mheap.go

func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
    if base == 0 {
		base, scav = h.pages.alloc(npages)
		if base == 0 {
			growth, ok = h.grow(npages)
			base, scav = h.pages.alloc(npages)
		}
	}
    
	if growth > 0 {
		scavengeGoal := atomic.Load64(&h.scavengeGoal)
		if retained := heapRetained(); retained+uint64(growth) > scavengeGoal {
			todo := growth
			h.pages.scavenge(todo)
		}
	}    
}
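
The decision above boils down to: scavenge only if retained memory would exceed the scavenge goal, and release at most the amount the heap just grew. A hypothetical helper expressing that rule (the runtime additionally clamps the amount to the overage beyond the goal, which the excerpt above omits):

// scavengeAmount distills the check in allocSpan: how many bytes to
// scavenge after growing the heap by growth bytes.
func scavengeAmount(retained, growth, scavengeGoal uint64) uint64 {
	if retained+growth <= scavengeGoal {
		return 0 // still within the goal, release nothing
	}
	todo := growth
	if overage := retained + growth - scavengeGoal; todo > overage {
		todo = overage // don't release more than the excess over the goal
	}
	return todo
}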

Manual release

Calling runtime/debug.FreeOSMemory from user code releases all idle physical memory.

// mheap.go

//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	GC()
	systemstack(func() { mheap_.scavengeAll() })
}
// scavengeAll acquires the heap lock (blocking any additional
// manipulation of the page allocator) and iterates over the whole
// heap, scavenging every free page available.

func (h *mheap) scavengeAll() {

	// Start a new scavenge generation so we have a chance to walk
	// over the whole heap.
	h.pages.scavengeStartGen()

	// Request an amount large enough to cover the entire heap; released
	// is reported by the scavtrace debug output (elided here).
	released := h.pages.scavenge(^uintptr(0))
}
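
From user code, the effect is visible in runtime.MemStats: HeapReleased grows once idle pages have been handed back. A small self-contained example (exact numbers depend on platform and Go version):

package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

func main() {
	// Allocate and then drop a large block so the heap has idle pages.
	buf := make([]byte, 512<<20) // 512 MiB
	for i := range buf {
		buf[i] = 1
	}
	buf = nil

	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)

	// GC plus scavengeAll: release all idle physical memory right away.
	debug.FreeOSMemory()

	runtime.ReadMemStats(&after)
	fmt.Printf("HeapReleased: %d MiB -> %d MiB\n",
		before.HeapReleased>>20, after.HeapReleased>>20)
}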
