Performance Improvements of defer in Golang

Golang 1.14 added a new open-coded defer type: during the SSA pass, the compiler inserts the deferred call directly at the end of the function, avoiding the runtime deferproc and deferprocStack operations.

When no runtime condition needs to be checked, the deferreturn call is avoided entirely. When runtime checks are required, deferreturn is also further optimized: under open coding it no longer performs jmpdefer tail calls, but instead executes the deferred calls by iterating over them in a loop.

So in 1.14 there are three defer modes in total, and after compilation a given function uses only one of them.

Heap allocation

In Golang versions before 1.13, every defer was allocated on the heap (deferproc). At compile time this mechanism performs two steps:

  1. runtime.deferproc is inserted at the site of the defer statement. When it executes, the deferred call is saved as a _defer record: the entry address of the deferred function and copies of its arguments are stored, and the record is linked into the goroutine's defer list.

  2. runtime.deferreturn is inserted before the function returns. When it executes, the deferred calls are taken off the goroutine's list and run; multiple deferred calls are executed one after another via jmpdefer tail calls.

The main performance cost of this mechanism is the memory allocation for the record produced by each defer statement, plus the overhead of copying the arguments into the record and moving them again when the deferred call is finally made.
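
Below is a minimal sketch (a hypothetical example, not taken from the article or the compiler source) of where these two runtime calls conceptually end up; the function name and file name are made up for illustration.

package main

import "fmt"

// copyFile is compiled, before Go 1.13, roughly as:
//
//     runtime.deferproc(...)   // at the defer statement: allocate a _defer
//                              // record on the heap, copy the function value
//                              // and its arguments into it, and push it onto
//                              // the goroutine's defer list
//     ...
//     runtime.deferreturn(...) // before every return: pop the _defer records
//                              // off the goroutine's list and run them,
//                              // chaining several records with jmpdefer
func copyFile(name string) {
    defer fmt.Println("done copying", name)
    fmt.Println("copying", name)
}

func main() {
    copyFile("a.txt")
}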

Stack allocation

Golang 1.13 added deferprocStack, which allocates the _defer record on the stack as an alternative to deferproc. Compared with heap allocation, the stack-allocated _defer is released when the function returns, saving the cost of the memory allocation; only the _defer linked list still has to be maintained.

The compiler chooses between deferproc and deferprocStack. In most cases it uses deferprocStack, which improves performance by roughly 30%. However, when the defer statement appears inside a loop, when higher-level compiler optimizations cannot be applied, or when a single function uses too many defers, deferproc is still used.

Stack allocation (deferprocStack) works essentially the same way as heap allocation; only the allocation strategy changes: the _defer record lives in the function's stack frame, and the compiler reserves space for it during the SSA pass.
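
The two functions below are a hypothetical sketch (not from the article) of how the 1.13 compiler picks an allocation strategy; the names and the files being opened are made up.

package main

import "os"

// stackDefer has a single unconditional defer, so the _defer record can be
// reserved in this function's stack frame and registered with
// runtime.deferprocStack.
func stackDefer(name string) error {
    f, err := os.Open(name)
    if err != nil {
        return err
    }
    defer f.Close() // _defer allocated on the stack
    // ... use f ...
    return nil
}

// loopDefer places the defer inside a loop, so the number of records is not
// known at compile time and each one falls back to runtime.deferproc
// (heap allocation).
func loopDefer(names []string) {
    for _, name := range names {
        f, err := os.Open(name)
        if err != nil {
            continue
        }
        defer f.Close() // _defer allocated on the heap
    }
}

func main() {
    _ = stackDefer("a.txt")
    loopDefer([]string{"a.txt", "b.txt"})
}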

SSA stands for static single assignment, an intermediate representation (IR) in which every variable is assigned exactly once. This property simplifies the compiler's optimization algorithms. Roughly speaking, the SSA backend reduced binary size by about 30% and improved performance by 5%-35%.
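
As a quick illustration (a hypothetical example, not from the article), the trivial function below is rewritten by the compiler into SSA values that are each assigned exactly once; the exact opcodes vary by pass and architecture.

package main

// addMul in SSA form looks roughly like:
//
//     v1 = Arg <int> {a}
//     v2 = Arg <int> {b}
//     v3 = Add64 <int> v1 v2
//     v4 = Mul64 <int> v3 v1
//     Ret v4
//
// You can dump the real SSA passes for a function with:
//
//     GOSSAFUNC=addMul go build
//
// which writes an ssa.html file showing the function after each pass.
func addMul(a, b int) int {
    c := a + b
    return c * a
}

func main() {
    _ = addMul(1, 2)
}

The following is buildssa from the Go 1.14 compiler (cmd/compile/internal/gc/ssa.go), the entry point for SSA construction; note where it enables or disables open-coded defers (s.hasOpenDefers).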

// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func {
    name := fn.funcname()
    printssa := name == ssaDump
    var astBuf *bytes.Buffer
    if printssa {
        astBuf = &bytes.Buffer{}
        fdumplist(astBuf, "buildssa-enter", fn.Func.Enter)
        fdumplist(astBuf, "buildssa-body", fn.Nbody)
        fdumplist(astBuf, "buildssa-exit", fn.Func.Exit)
        if ssaDumpStdout {
            fmt.Println("generating SSA for", name)
            fmt.Print(astBuf.String())
        }
    }

    var s state
    s.pushLine(fn.Pos)
    defer s.popLine()

    s.hasdefer = fn.Func.HasDefer()
    if fn.Func.Pragma&CgoUnsafeArgs != 0 {
        s.cgoUnsafeArgs = true
    }

    fe := ssafn{
        curfn: fn,
        log:   printssa && ssaDumpStdout,
    }
    s.curfn = fn

    s.f = ssa.NewFunc(&fe)
    s.config = ssaConfig
    s.f.Type = fn.Type
    s.f.Config = ssaConfig
    s.f.Cache = &ssaCaches[worker]
    s.f.Cache.Reset()
    s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
    s.f.Name = name
    s.f.PrintOrHtmlSSA = printssa
    if fn.Func.Pragma&Nosplit != 0 {
        s.f.NoSplit = true
    }
    s.panics = map[funcLine]*ssa.Block{}
    s.softFloat = s.config.SoftFloat

    if printssa {
        s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDumpFile, s.f.Frontend(), name, ssaDumpCFG)
        // TODO: generate and print a mapping from nodes to values and blocks
        dumpSourcesColumn(s.f.HTMLWriter, fn)
        s.f.HTMLWriter.WriteAST("AST", astBuf)
    }

    // Allocate starting block
    s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

    // Allocate starting values
    s.labels = map[string]*ssaLabel{}
    s.labeledNodes = map[*Node]*ssaLabel{}
    s.fwdVars = map[*Node]*ssa.Value{}
    s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)

    s.hasOpenDefers = Debug['N'] == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
    switch {
    case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
        // Don't support open-coded defers for 386 ONLY when using shared
        // libraries, because there is extra code (added by rewriteToUseGot())
        // preceding the deferreturn/ret code that is generated by gencallret()
        // that we don't track correctly.
        s.hasOpenDefers = false
    }
    if s.hasOpenDefers && s.curfn.Func.Exit.Len() > 0 {
        // Skip doing open defers if there is any extra exit code (likely
        // copying heap-allocated return values or race detection), since
        // we will not generate that code in the case of the extra
        // deferreturn/ret segment.
        s.hasOpenDefers = false
    }
    if s.hasOpenDefers &&
        s.curfn.Func.numReturns*s.curfn.Func.numDefers > 15 {
        // Since we are generating defer calls at every exit for
        // open-coded defers, skip doing open-coded defers if there are
        // too many returns (especially if there are multiple defers).
        // Open-coded defers are most important for improving performance
        // for smaller functions (which don't have many returns).
        s.hasOpenDefers = false
    }

    s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
    s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])

    s.startBlock(s.f.Entry)
    s.vars[&memVar] = s.startmem
    if s.hasOpenDefers {
        // Create the deferBits variable and stack slot.  deferBits is a
        // bitmask showing which of the open-coded defers in this function
        // have been activated.
        deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[TUINT8])
        s.deferBitsTemp = deferBitsTemp
        // For this value, AuxInt is initialized to zero by default
        startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8])
        s.vars[&deferBitsVar] = startDeferBits
        s.deferBitsAddr = s.addr(deferBitsTemp, false)
        s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits)
        // Make sure that the deferBits stack slot is kept alive (for use
        // by panics) and stores to deferBits are not eliminated, even if
        // all checking code on deferBits in the function exit can be
        // eliminated, because the defer statements were all
        // unconditional.
        s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
    }

    // Generate addresses of local declarations
    s.decladdrs = map[*Node]*ssa.Value{}
    for _, n := range fn.Func.Dcl {
        switch n.Class() {
        case PPARAM, PPARAMOUT:
            s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
            if n.Class() == PPARAMOUT && s.canSSA(n) {
                // Save ssa-able PPARAMOUT variables so we can
                // store them back to the stack at the end of
                // the function.
                s.returns = append(s.returns, n)
            }
        case PAUTO:
            // processed at each use, to prevent Addr coming
            // before the decl.
        case PAUTOHEAP:
            // moved to heap - already handled by frontend
        case PFUNC:
            // local function - already handled by frontend
        default:
            s.Fatalf("local variable with class %v unimplemented", n.Class())
        }
    }

    // Populate SSAable arguments.
    for _, n := range fn.Func.Dcl {
        if n.Class() == PPARAM && s.canSSA(n) {
            v := s.newValue0A(ssa.OpArg, n.Type, n)
            s.vars[n] = v
            s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
        }
    }

    // Convert the AST-based IR to the SSA-based IR
    s.stmtList(fn.Func.Enter)
    s.stmtList(fn.Nbody)

    // fallthrough to exit
    if s.curBlock != nil {
        s.pushLine(fn.Func.Endlineno)
        s.exit()
        s.popLine()
    }

    for _, b := range s.f.Blocks {
        if b.Pos != src.NoXPos {
            s.updateUnsetPredPos(b)
        }
    }

    s.insertPhis()

    // Main call to ssa package to compile function
    ssa.Compile(s.f)

    if s.hasOpenDefers {
        s.emitOpenDeferInfo()
    }

    return s.f
}

When building SSA, if gcflags contains -N (optimizations disabled), or the number of returns times the number of defers exceeds 15, the open-coded mode is not used.

In addition, escape analysis looks at the loop depth: only a top-level defer (loop depth 1) gets Esc set to EscNever, which forces stack allocation of the defer record (unless open-coded defers are used); a defer inside a loop keeps the heap-allocated form and, as walkstmt below shows, also rules out open coding.

// augmentParamHole augments parameter holes as necessary for use in
// go/defer statements.
func (e *Escape) augmentParamHole(k EscHole, call, where *Node) EscHole {
    k = k.note(call, "call parameter")
    if where == nil {
        return k
    }

    // Top level defers arguments don't escape to heap, but they
    // do need to last until end of function. Tee with a
    // non-transient location to avoid arguments from being
    // transiently allocated.
    if where.Op == ODEFER && e.loopDepth == 1 {
        // force stack allocation of defer record, unless open-coded
        // defers are used (see ssa.go)
        where.Esc = EscNever
        return e.later(k)
    }

    return e.heapHole().note(where, "call parameter")
}

Open coding

Golang 1.14 went a step further and added open coding (open-coded defers). This mechanism inserts the deferred call directly before the function returns, eliminating the runtime deferproc / deferprocStack operations; at runtime, deferreturn no longer performs tail calls either, and instead all deferred functions are executed by iterating over them in a loop.

This makes the overhead of defer almost negligible; the only remaining runtime cost is storing the information about the deferred calls. However, three conditions must be met to use this mechanism (a hedged code sketch follows the list):

  1. Compiler optimizations are not disabled, i.e. -gcflags "-N" is not set.
  2. The function has no more than 8 defers, and the product of the number of return statements and the number of defer statements does not exceed 15.
  3. The defer is not inside a loop.
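
The following is a hypothetical sketch (not from the article) of code shapes that do and do not satisfy these conditions in Go 1.14; the function names are made up.

package main

import (
    "fmt"
    "sync"
)

var mu sync.Mutex

// openCoded qualifies: one unconditional defer, one return, no loop, so the
// compiler can emit mu.Unlock() inline right before the return instead of
// going through deferproc/deferprocStack.
func openCoded(counter *int) {
    mu.Lock()
    defer mu.Unlock()
    *counter++
}

// notOpenCoded does not qualify: the defer sits inside a loop, so the whole
// function falls back to the 1.13 behaviour (deferproc/deferprocStack).
func notOpenCoded(names []string) {
    for _, name := range names {
        defer fmt.Println("deferred:", name)
    }
}

func main() {
    n := 0
    openCoded(&n)
    notOpenCoded([]string{"a", "b"})
    fmt.Println("counter:", n)
}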

In addition, the mechanism introduces a new element, the defer bits, which record at runtime whether each defer has been reached (in particular defers inside conditional branches), so that at the end of the function it is possible to tell which deferred functions need to run.

How defer bits work:

Each defer that appears in a function is assigned one bit. The bit is set to 1 when the defer statement is reached and stays 0 otherwise. Just before the function returns, each bit is tested with a mask: if it is 1 the corresponding deferred function is called, otherwise it is skipped.

To keep this lightweight, the defer bits are limited to one byte, i.e. 8 bits, which is why a function cannot have more than 8 open-coded defers; beyond that, stack or heap allocation is used instead. In practice, most functions do not exceed 8 defers.
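
Below is a hand-written simulation (a hypothetical sketch, not compiler output) of the defer bits mechanism: each open-coded defer owns one bit of a single byte, the bit is set when its defer statement is reached, and the exit code tests the bits to decide which calls to run.

package main

import "fmt"

func process(cond bool) {
    var deferBits uint8 // plays the role of the compiler's deferBitsTemp

    // defer #0: unconditional, so bit 0 is always set.
    deferBits |= 1 << 0
    defer0 := func() { fmt.Println("defer 0") }

    // defer #1: only reached inside the branch, so bit 1 is set conditionally.
    var defer1 func()
    if cond {
        deferBits |= 1 << 1
        defer1 = func() { fmt.Println("defer 1") }
    }

    fmt.Println("function body")

    // Exit code generated before return: test the bits in reverse order (LIFO).
    if deferBits&(1<<1) != 0 {
        defer1()
    }
    if deferBits&(1<<0) != 0 {
        defer0()
    }
}

func main() {
    process(true)
    process(false)
}

For reference, the next listing is walk from the Go 1.14 compiler (cmd/compile/internal/gc/walk.go), the entry point of the walk pass; the actual defer counting happens in walkstmt, shown further below.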

// The constant is known to runtime.
const tmpstringbufsize = 32
const zeroValSize = 1024 // must match value of runtime/map.go:maxZero

func walk(fn *Node) {
    Curfn = fn

    if Debug['W'] != 0 {
        s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym)
        dumplist(s, Curfn.Nbody)
    }

    lno := lineno

    // Final typecheck for any unused variables.
    for i, ln := range fn.Func.Dcl {
        if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) {
            ln = typecheck(ln, ctxExpr|ctxAssign)
            fn.Func.Dcl[i] = ln
        }
    }

    // Propagate the used flag for typeswitch variables up to the NONAME in its definition.
    for _, ln := range fn.Func.Dcl {
        if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() {
            ln.Name.Defn.Left.Name.SetUsed(true)
        }
    }

    for _, ln := range fn.Func.Dcl {
        if ln.Op != ONAME || (ln.Class() != PAUTO && ln.Class() != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() {
            continue
        }
        if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW {
            if defn.Left.Name.Used() {
                continue
            }
            yyerrorl(defn.Left.Pos, "%v declared but not used", ln.Sym)
            defn.Left.Name.SetUsed(true) // suppress repeats
        } else {
            yyerrorl(ln.Pos, "%v declared but not used", ln.Sym)
        }
    }

    lineno = lno
    if nerrors != 0 {
        return
    }
    walkstmtlist(Curfn.Nbody.Slice())
    if Debug['W'] != 0 {
        s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
        dumplist(s, Curfn.Nbody)
    }

    zeroResults()
    heapmoves()
    if Debug['W'] != 0 && Curfn.Func.Enter.Len() > 0 {
        s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
        dumplist(s, Curfn.Func.Enter)
    }
}

In open-coded mode, at most 8 defers are supported by default; if a function has more than that, open coding is abandoned.

const maxOpenDefers = 8

func walkstmt(n *Node) *Node {
    ...
    switch n.Op {
    case ODEFER:
        Curfn.Func.SetHasDefer(true)
        Curfn.Func.numDefers++
        if Curfn.Func.numDefers > maxOpenDefers {
            Curfn.Func.SetOpenCodedDeferDisallowed(true)
        }

        if n.Esc != EscNever {
            Curfn.Func.SetOpenCodedDeferDisallowed(true)
        }
    ...
}

To sum up, the conditions for open-coded defers are: at most 8 defers, returns × defers not exceeding 15, no defer inside a loop, and optimizations not disabled via -gcflags "-N".
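
A minimal benchmark sketch (hypothetical, not from the article) for observing the cost of a defer that qualifies for open coding: save it as a _test.go file in its own package (the package name here is made up) and run go test -bench=. under different Go versions to compare.

package deferbench

import (
    "sync"
    "testing"
)

var (
    mu sync.Mutex
    n  int
)

// withDefer uses a single unconditional defer, which Go 1.14 can open-code.
func withDefer() {
    mu.Lock()
    defer mu.Unlock()
    n++
}

// withoutDefer unlocks explicitly, as a baseline.
func withoutDefer() {
    mu.Lock()
    n++
    mu.Unlock()
}

func BenchmarkWithDefer(b *testing.B) {
    for i := 0; i < b.N; i++ {
        withDefer()
    }
}

func BenchmarkWithoutDefer(b *testing.B) {
    for i := 0; i < b.N; i++ {
        withoutDefer()
    }
}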
