// Source file: src/cmd/compile/internal/escape/utils.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package escape

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
	"go/constant"
	"go/token"
)
    15  
    16  func isSliceSelfAssign(dst, src ir.Node) bool {
    17  	// Detect the following special case.
    18  	//
    19  	//	func (b *Buffer) Foo() {
    20  	//		n, m := ...
    21  	//		b.buf = b.buf[n:m]
    22  	//	}
    23  	//
    24  	// This assignment is a no-op for escape analysis,
    25  	// it does not store any new pointers into b that were not already there.
    26  	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
    27  	// Here we assume that the statement will not contain calls,
    28  	// that is, that order will move any calls to init.
    29  	// Otherwise base ONAME value could change between the moments
    30  	// when we evaluate it for dst and for src.
    31  
    32  	// dst is ONAME dereference.
    33  	var dstX ir.Node
    34  	switch dst.Op() {
    35  	default:
    36  		return false
    37  	case ir.ODEREF:
    38  		dst := dst.(*ir.StarExpr)
    39  		dstX = dst.X
    40  	case ir.ODOTPTR:
    41  		dst := dst.(*ir.SelectorExpr)
    42  		dstX = dst.X
    43  	}
    44  	if dstX.Op() != ir.ONAME {
    45  		return false
    46  	}
    47  	// src is a slice operation.
    48  	switch src.Op() {
    49  	case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
    50  		// OK.
    51  	case ir.OSLICEARR, ir.OSLICE3ARR:
    52  		// Since arrays are embedded into containing object,
    53  		// slice of non-pointer array will introduce a new pointer into b that was not already there
    54  		// (pointer to b itself). After such assignment, if b contents escape,
    55  		// b escapes as well. If we ignore such OSLICEARR, we will conclude
    56  		// that b does not escape when b contents do.
    57  		//
    58  		// Pointer to an array is OK since it's not stored inside b directly.
    59  		// For slicing an array (not pointer to array), there is an implicit OADDR.
    60  		// We check that to determine non-pointer array slicing.
    61  		src := src.(*ir.SliceExpr)
    62  		if src.X.Op() == ir.OADDR {
    63  			return false
    64  		}
    65  	default:
    66  		return false
    67  	}
    68  	// slice is applied to ONAME dereference.
    69  	var baseX ir.Node
    70  	switch base := src.(*ir.SliceExpr).X; base.Op() {
    71  	default:
    72  		return false
    73  	case ir.ODEREF:
    74  		base := base.(*ir.StarExpr)
    75  		baseX = base.X
    76  	case ir.ODOTPTR:
    77  		base := base.(*ir.SelectorExpr)
    78  		baseX = base.X
    79  	}
    80  	if baseX.Op() != ir.ONAME {
    81  		return false
    82  	}
    83  	// dst and src reference the same base ONAME.
    84  	return dstX.(*ir.Name) == baseX.(*ir.Name)
    85  }
    86  
    87  // isSelfAssign reports whether assignment from src to dst can
    88  // be ignored by the escape analysis as it's effectively a self-assignment.
    89  func isSelfAssign(dst, src ir.Node) bool {
    90  	if isSliceSelfAssign(dst, src) {
    91  		return true
    92  	}
    93  
    94  	// Detect trivial assignments that assign back to the same object.
    95  	//
    96  	// It covers these cases:
    97  	//	val.x = val.y
    98  	//	val.x[i] = val.y[j]
    99  	//	val.x1.x2 = val.x1.y2
   100  	//	... etc
   101  	//
   102  	// These assignments do not change assigned object lifetime.
   103  
   104  	if dst == nil || src == nil || dst.Op() != src.Op() {
   105  		return false
   106  	}
   107  
   108  	// The expression prefix must be both "safe" and identical.
   109  	switch dst.Op() {
   110  	case ir.ODOT, ir.ODOTPTR:
   111  		// Safe trailing accessors that are permitted to differ.
   112  		dst := dst.(*ir.SelectorExpr)
   113  		src := src.(*ir.SelectorExpr)
   114  		return ir.SameSafeExpr(dst.X, src.X)
   115  	case ir.OINDEX:
   116  		dst := dst.(*ir.IndexExpr)
   117  		src := src.(*ir.IndexExpr)
   118  		if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
   119  			return false
   120  		}
   121  		return ir.SameSafeExpr(dst.X, src.X)
   122  	default:
   123  		return false
   124  	}
   125  }
   126  
   127  // mayAffectMemory reports whether evaluation of n may affect the program's
   128  // memory state. If the expression can't affect memory state, then it can be
   129  // safely ignored by the escape analysis.
   130  func mayAffectMemory(n ir.Node) bool {
   131  	// We may want to use a list of "memory safe" ops instead of generally
   132  	// "side-effect free", which would include all calls and other ops that can
   133  	// allocate or change global state. For now, it's safer to start with the latter.
   134  	//
   135  	// We're ignoring things like division by zero, index out of range,
   136  	// and nil pointer dereference here.
   137  
   138  	// TODO(rsc): It seems like it should be possible to replace this with
   139  	// an ir.Any looking for any op that's not the ones in the case statement.
   140  	// But that produces changes in the compiled output detected by buildall.
   141  	switch n.Op() {
   142  	case ir.ONAME, ir.OLITERAL, ir.ONIL:
   143  		return false
   144  
   145  	case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
   146  		n := n.(*ir.BinaryExpr)
   147  		return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
   148  
   149  	case ir.OINDEX:
   150  		n := n.(*ir.IndexExpr)
   151  		return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
   152  
   153  	case ir.OCONVNOP, ir.OCONV:
   154  		n := n.(*ir.ConvExpr)
   155  		return mayAffectMemory(n.X)
   156  
   157  	case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG:
   158  		n := n.(*ir.UnaryExpr)
   159  		return mayAffectMemory(n.X)
   160  
   161  	case ir.ODOT, ir.ODOTPTR:
   162  		n := n.(*ir.SelectorExpr)
   163  		return mayAffectMemory(n.X)
   164  
   165  	case ir.ODEREF:
   166  		n := n.(*ir.StarExpr)
   167  		return mayAffectMemory(n.X)
   168  
   169  	default:
   170  		return true
   171  	}
   172  }
   173  
// HeapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it doesn't.
//
// Note: for OMAKESLICE nodes this may also rewrite the node in place,
// replacing a provably non-negative constant size expression with its
// literal value (see the ir.StaticValue step below).
func HeapAllocReason(n ir.Node) string {
	if n == nil || n.Type() == nil {
		return ""
	}

	// Parameters are always passed via the stack.
	if n.Op() == ir.ONAME {
		n := n.(*ir.Name)
		if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
			return ""
		}
	}

	// Values that are too large, or require stricter alignment than the
	// stack provides, cannot be stack allocated.
	if n.Type().Size() > ir.MaxStackVarSize {
		return "too large for stack"
	}
	if n.Type().Alignment() > int64(types.PtrSize) {
		return "too aligned for stack"
	}

	// For implicit allocations (new(T) and &T{...}), apply the same size
	// and alignment limits to the pointed-to element type, using the
	// smaller implicit-allocation threshold.
	if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Size() > ir.MaxImplicitStackVarSize {
		return "too large for stack"
	}
	if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Alignment() > int64(types.PtrSize) {
		return "too aligned for stack"
	}

	// Closures and method values carry an implicitly allocated struct;
	// check its size against the implicit-allocation limit.
	if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() > ir.MaxImplicitStackVarSize {
		return "too large for stack"
	}
	if n.Op() == ir.OMETHVALUE && typecheck.MethodValueType(n.(*ir.SelectorExpr)).Size() > ir.MaxImplicitStackVarSize {
		return "too large for stack"
	}

	if n.Op() == ir.OMAKESLICE {
		n := n.(*ir.MakeExpr)

		// r points at the expression that bounds the allocation:
		// the capacity if present, otherwise the length.
		r := &n.Cap
		if n.Cap == nil {
			r = &n.Len
		}

		// Try to determine static values of make() calls, to avoid allocating them on the heap.
		// We are doing this in escape analysis, so that it happens after inlining and devirtualization.
		if s := ir.StaticValue(*r); s.Op() == ir.OLITERAL {
			lit, ok := s.(*ir.BasicLit)
			if !ok || lit.Val().Kind() != constant.Int {
				base.Fatalf("unexpected BasicLit Kind")
			}
			// Only fold in non-negative sizes; negative sizes must
			// still fail at run time.
			if constant.Compare(lit.Val(), token.GEQ, constant.MakeInt64(0)) {
				*r = lit
			}
		}

		elem := n.Type().Elem()
		if elem.Size() == 0 {
			// TODO: stack allocate these? See #65685.
			return "zero-sized element"
		}
		if !ir.IsSmallIntConst(*r) {
			// For non-constant sizes, we do a hybrid approach:
			//
			// if cap <= K {
			//     var backing [K]E
			//     s = backing[:len:cap]
			// } else {
			//     s = makeslice(E, len, cap)
			// }
			//
			// It costs a constant amount of stack space, but may
			// avoid a heap allocation.
			// Note we have to be careful that assigning s[i] = v
			// still escapes v, because we forbid heap->stack pointers.
			// Implementation is in ../walk/builtin.go:walkMakeSlice.
			return ""
		}
		if ir.Int64Val(*r) > ir.MaxImplicitStackVarSize/elem.Size() {
			return "too large for stack"
		}
	}

	return ""
}
   259  

// (page-chrome residue from source viewer removed: "View as plain text")