// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"html"
	"os"
	"sort"

	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"cmd/internal/sys"
)

var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache

func initssaconfig() {
	types_ := ssa.Types{
		Bool:       types.Types[TBOOL],
		Int8:       types.Types[TINT8],
		Int16:      types.Types[TINT16],
		Int32:      types.Types[TINT32],
		Int64:      types.Types[TINT64],
		UInt8:      types.Types[TUINT8],
		UInt16:     types.Types[TUINT16],
		UInt32:     types.Types[TUINT32],
		UInt64:     types.Types[TUINT64],
		Float32:    types.Types[TFLOAT32],
		Float64:    types.Types[TFLOAT64],
		Int:        types.Types[TINT],
		UInt:       types.Types[TUINT],
		Uintptr:    types.Types[TUINTPTR],
		String:     types.Types[TSTRING],
		BytePtr:    types.NewPtr(types.Types[TUINT8]),
		Int32Ptr:   types.NewPtr(types.Types[TINT32]),
		UInt32Ptr:  types.NewPtr(types.Types[TUINT32]),
		IntPtr:     types.NewPtr(types.Types[TINT]),
		UintptrPtr: types.NewPtr(types.Types[TUINTPTR]),
		Float32Ptr: types.NewPtr(types.Types[TFLOAT32]),
		Float64Ptr: types.NewPtr(types.Types[TFLOAT64]),
		BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])),
	}

	if thearch.SoftFloat {
		softfloatInit()
	}

	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = types.NewPtr(types.Types[TINTER])                             // *interface{}
	_ = types.NewPtr(types.NewPtr(types.Types[TSTRING]))              // **string
	_ = types.NewPtr(types.NewPtr(types.Idealstring))                 // **string
	_ = types.NewPtr(types.NewSlice(types.Types[TINTER]))             // *[]interface{}
	_ = types.NewPtr(types.NewPtr(types.Bytetype))                    // **byte
	_ = types.NewPtr(types.NewSlice(types.Bytetype))                  // *[]byte
	_ = types.NewPtr(types.NewSlice(types.Types[TSTRING]))            // *[]string
	_ = types.NewPtr(types.NewSlice(types.Idealstring))               // *[]string
	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
	_ = types.NewPtr(types.Types[TINT16])                             // *int16
	_ = types.NewPtr(types.Types[TINT64])                             // *int64
	_ = types.NewPtr(types.Errortype)                                 // *error
	types.NewPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types_, Ctxt, Debug['N'] == 0)
	if thearch.LinkArch.Name == "386" {
		ssaConfig.Set387(thearch.Use387)
	}
	ssaConfig.SoftFloat = thearch.SoftFloat
	ssaCaches = make([]ssa.Cache, nBackendWorkers)

	// Set up some runtime functions we'll need to call.
	Newproc = sysfunc("newproc")
	Deferproc = sysfunc("deferproc")
	Deferreturn = sysfunc("deferreturn")
	Duffcopy = sysfunc("duffcopy")
	Duffzero = sysfunc("duffzero")
	panicindex = sysfunc("panicindex")
	panicslice = sysfunc("panicslice")
	panicdivide = sysfunc("panicdivide")
	growslice = sysfunc("growslice")
	panicdottypeE = sysfunc("panicdottypeE")
	panicdottypeI = sysfunc("panicdottypeI")
	panicnildottype = sysfunc("panicnildottype")
	assertE2I = sysfunc("assertE2I")
	assertE2I2 = sysfunc("assertE2I2")
	assertI2I = sysfunc("assertI2I")
	assertI2I2 = sysfunc("assertI2I2")
	goschedguarded = sysfunc("goschedguarded")
	writeBarrier = sysfunc("writeBarrier")
	writebarrierptr = sysfunc("writebarrierptr")
	gcWriteBarrier = sysfunc("gcWriteBarrier")
	typedmemmove = sysfunc("typedmemmove")
	typedmemclr = sysfunc("typedmemclr")
	Udiv = sysfunc("udiv")

	// GO386=387 runtime functions
	ControlWord64trunc = sysfunc("controlWord64trunc")
	ControlWord32 = sysfunc("controlWord32")
}
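
// A usage sketch (illustrative, not compiler code): buildssa below checks
// the GOSSAFUNC environment variable, so a build like
//
//	GOSSAFUNC=Foo go build somepkg
//
// (Foo and somepkg being hypothetical names) dumps that function's input IR
// to stdout and writes its SSA passes to ssa.html in the current directory.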

// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func {
	name := fn.funcname()
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Pos)
	defer s.popLine()

	s.hasdefer = fn.Func.HasDefer()
	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa,
	}
	s.curfn = fn

	s.f = ssa.NewFunc(&fe)
	s.config = ssaConfig
	s.f.Config = ssaConfig
	s.f.Cache = &ssaCaches[worker]
	s.f.Cache.Reset()
	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
	s.f.Name = name
	if fn.Func.Pragma&Nosplit != 0 {
		s.f.NoSplit = true
	}
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}
	s.softFloat = s.config.SoftFloat

	if name == os.Getenv("GOSSAFUNC") {
		s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class() {
		case PPARAM, PPARAMOUT:
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), n, s.sp)
			if n.Class() == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Fatalf("local variable with class %v unimplemented", n.Class())
		}
	}
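
	// As an illustration (a sketch, not compiler code): in
	//
	//	func f() (x int) { x = 1; return }
	//
	// x is a PPARAMOUT. Assuming canSSA accepts it (an int result, no defer,
	// address never taken), x is recorded in s.returns here, and exit() below
	// stores its final SSA value back through the address in s.decladdrs.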

	// Populate SSAable arguments.
	for _, n := range fn.Func.Dcl {
		if n.Class() == PPARAM && s.canSSA(n) {
			s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n)
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmtList(fn.Func.Enter)
	s.stmtList(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	for _, b := range s.f.Blocks {
		if b.Pos != src.NoXPos {
			s.updateUnsetPredPos(b)
		}
	}

	s.insertPhis()

	// Don't carry a reference to this around longer than necessary.
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)
	return s.f
}

// updateUnsetPredPos propagates the earliest-value position information for b
// toward all of b's predecessors that need a position, and recurses on any
// predecessor whose position is updated. b must have a non-empty position.
func (s *state) updateUnsetPredPos(b *ssa.Block) {
	if b.Pos == src.NoXPos {
		s.Fatalf("Block %s should have a position", b)
	}
	bestPos := src.NoXPos
	for _, e := range b.Preds {
		p := e.Block()
		if !p.LackingPos() {
			continue
		}
		if bestPos == src.NoXPos {
			bestPos = b.Pos
			for _, v := range b.Values {
				if v.LackingPos() {
					continue
				}
				if v.Pos != src.NoXPos {
					// Assume values are still in roughly textual order;
					// TODO: could also seek minimum position?
					bestPos = v.Pos
					break
				}
			}
		}
		p.Pos = bestPos
		s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
	}
	return
}
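
// For example (a schematic sketch, not compiler code): if an empty plain
// block p, whose position endBlock left unset, flows into a block b whose
// first positioned value lies on line 10, the loop above assigns line 10 to
// p and recurses, so a whole chain of empty predecessors inherits line 10.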

type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// Node for function
	curfn *Node

	// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[*Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []src.XPos
	// the last line number processed; it may have been popped
	lastPos src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of PPARAMOUT (return) variables.
	returns []*Node

	cgoUnsafeArgs bool
	hasdefer      bool // whether the function contains a defer statement
	softFloat     bool
}

type funcLine struct {
	f    *obj.LSym
	base *src.PosBase
	line uint
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
	okVar     = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
)

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	if b.LackingPos() {
		// Empty plain blocks get the line of their successor (handled after all blocks created),
		// except for increment blocks in For statements (handled in ssa conversion of OFOR),
		// and for blocks ending in GOTO/BREAK/CONTINUE.
		b.Pos = src.NoXPos
	} else {
		b.Pos = s.lastPos
	}
	return b
}
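
// A sketch of the protocol (illustrative, not compiler code): statement
// conversion alternates endBlock/startBlock pairs, e.g.
//
//	b := s.endBlock()  // finish the current block,
//	b.AddEdgeTo(next)  // wire it to its successor,
//	s.startBlock(next) // and resume generating code there.
//
// endBlock snapshots s.vars into s.defvars[b.ID]; insertPhis later reads
// those per-block snapshots to connect variable values across blocks.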

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// The frontend may emit a node with a missing line number;
		// in that case, use the parent line number.
		line = s.peekPos()
		if Debug['K'] != 0 {
			Warn("buildssa: unknown position (line 0)")
		}
	} else {
		s.lastPos = line
	}

	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos returns the top of the line number stack without popping it.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.f.Entry.NewValue0(src.NoXPos, op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(src.NoXPos, op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
	return s.f.ConstSlice(s.peekPos(), t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
	return s.f.ConstInterface(s.peekPos(), t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
	return s.f.ConstEmptyString(s.peekPos(), t)
}
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekPos(), types.Types[TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekPos(), t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekPos(), t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekPos(), t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekPos(), t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekPos(), t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekPos(), t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
	if s.config.PtrSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
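
// (An illustrative note on constInt above: on a 4-byte-pointer target, the
// constant must fit in 32 bits, so constInt(t, 1<<40) would hit its Fatalf,
// while on an 8-byte-pointer target it simply becomes a 64-bit constant.)
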
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
}

// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg); ok {
			return c
		}
	}
	return s.newValue1(op, t, arg)
}
func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg0, arg1); ok {
			return c
		}
	}
	return s.newValue2(op, t, arg0, arg1)
}
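
// For example (an illustrative note): with s.softFloat set, a float32 add
// that would otherwise become an OpAdd32F value is instead expanded by
// sfcall into a call to the runtime's software floating-point code; ops that
// sfcall does not handle fall through to the ordinary newValue* path.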

// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	if !(n.Op == OVARKILL || n.Op == OVARLIVE) {
		// OVARKILL and OVARLIVE are invisible to the programmer, so we don't
		// use their line numbers; doing so would only confuse debugging.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
	// then this code is dead. Stop here.
	if s.curBlock == nil && n.Op != OLABEL {
		return
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
			}
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		deref := false
		if !canSSAType(n.Rlist.First().Type) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if mem.Op == ssa.OpVarKill {
				mem = mem.Args[0]
			}
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.List.First(), res, deref, 0)
		s.assign(n.List.Second(), resok, false, 0)
		return

	case OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		if !isIntrinsicCall(n.Rlist.First()) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
		}
		v := s.intrinsicCall(n.Rlist.First())
		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
		s.assign(n.List.First(), v1, false, 0)
		s.assign(n.List.Second(), v2, false, 0)
		return

	case ODCL:
		if n.Left.Class() == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym
		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.labeledControl(); ctl != nil {
			s.labeledNodes[ctl] = lab
		}

		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// Go to that label.
		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
		if s.curBlock != nil {
			b := s.endBlock()
			b.AddEdgeTo(lab.target)
		}
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		b := s.endBlock()
		b.Pos = s.lastPos // Do this even if b is an empty block.
		b.AddEdgeTo(lab.target)

	case OAS:
		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//   VARDEF x
			//   COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// Check whether we're writing the result of an append back to the same slice.
				// If so, we handle it specially to avoid write barriers on the fast
				// (non-growth) path.
				if !samesafeexpr(n.Left, rhs.List.First()) || Debug['N'] != 0 {
					break
				}
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if s.canSSA(n.Left) {
					if Debug_append > 0 { // replicating old diagnostic message
						Warnl(n.Pos, "append: len-only update (in local slice)")
					}
					break
				}
				if Debug_append > 0 {
					Warnl(n.Pos, "append: len-only update")
				}
				s.append(rhs, true)
				return
			}
		}
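
		// As an illustration (a sketch, not compiler code): for
		//
		//	s = append(s, x)
		//
		// with s a non-SSA-able slice, the OAPPEND case above reaches
		// s.append(rhs, true), which updates s in place; on the
		// non-growth path only the length word is rewritten, so no
		// write barrier is needed for the pointer field.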

		if isblank(n.Left) {
			// _ = rhs
			// Just evaluate rhs for side-effects.
			if rhs != nil {
				s.expr(rhs)
			}
			return
		}

		var t *types.Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		var r *ssa.Value
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//    tmp = len(*p)
			//    (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//      j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//      k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}
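
		// For example (an illustrative note): in "s = s[:n]", i and k are
		// nil above, so skip becomes skipPtr|skipCap and the assignment
		// below rewrites only s's length field; "s = s[0:n:m]" skips only
		// the pointer write.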

		s.assign(n.Left, r, deref, skip)

	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		var likely int8
		if n.Likely() {
			likely = 1
		}
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, likely)
		}

		s.startBlock(bThen)
		s.stmtList(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		b := s.exit()
		b.Pos = s.lastPos

	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Sym.Linksym()

	case OCONTINUE, OBREAK:
		var to *ssa.Block
		if n.Left == nil {
			// plain break/continue
			switch n.Op {
			case OCONTINUE:
				to = s.continueTo
			case OBREAK:
				to = s.breakTo
			}
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
		}

		b := s.endBlock()
		b.Pos = s.lastPos // Do this even if b is an empty block.
		b.AddEdgeTo(to)

	case OFOR, OFORUNTIL:
		// OFOR: for Ninit; Left; Right { Nbody }
		// For      = cond; body; incr
		// Foruntil = body; incr; cond
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test (OFOR) or body (OFORUNTIL)
		b := s.endBlock()
		if n.Op == OFOR {
			b.AddEdgeTo(bCond)
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}

		} else {
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
			// It can happen that bIncr ends in a block containing only VARKILL,
			// and that muddles the debugging experience.
			if n.Op != OFORUNTIL && b.Pos == src.NoXPos {
				b.Pos = bCond.Pos
			}
		}

		if n.Op == OFORUNTIL {
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}
		}

		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
		// If we still have a current block here, then mark it unreachable.
		if s.curBlock != nil {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken() {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
		}
		switch n.Left.Class() {
		case PAUTO, PPARAM, PPARAMOUT:
		default:
			s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	default:
		s.Fatalf("unhandled stmt %v", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if s.hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmtList(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, n.Type, addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}
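
// A note on ordering (a sketch of the reasoning): the Deferreturn call is
// emitted before the stores above. That is safe because canSSA (not shown
// in this section) rejects PPARAMOUT variables in functions containing
// defers, so for such functions s.returns is empty and any named result a
// deferred call could observe already lives in memory.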

type opAndType struct {
	op    Op
	etype types.EType
}

var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}:    ssa.OpAdd8,
	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
	opAndType{OADD, TINT16}:   ssa.OpAdd16,
	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
	opAndType{OADD, TINT32}:   ssa.OpAdd32,
	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
	opAndType{OADD, TINT64}:   ssa.OpAdd64,
	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}:    ssa.OpSub8,
	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
	opAndType{OSUB, TINT16}:   ssa.OpSub16,
	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
	opAndType{OSUB, TINT32}:   ssa.OpSub32,
	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
	opAndType{OSUB, TINT64}:   ssa.OpSub64,
	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}:   ssa.OpCom8,
	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
	opAndType{OCOM, TINT16}:  ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}:  ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}:  ssa.OpCom64,
	opAndType{OCOM, TUINT64}: ssa.OpCom64,

	opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{OMUL, TINT8}:    ssa.OpMul8,
	opAndType{OMUL, TUINT8}:   ssa.OpMul8,
	opAndType{OMUL, TINT16}:   ssa.OpMul16,
	opAndType{OMUL, TUINT16}:  ssa.OpMul16,
	opAndType{OMUL, TINT32}:   ssa.OpMul32,
	opAndType{OMUL, TUINT32}:  ssa.OpMul32,
	opAndType{OMUL, TINT64}:   ssa.OpMul64,
	opAndType{OMUL, TUINT64}:  ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,

	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,

	opAndType{ODIV, TINT8}:   ssa.OpDiv8,
	opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
	opAndType{ODIV, TINT16}:  ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}:  ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}:  ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,

	opAndType{OMOD, TINT8}:   ssa.OpMod8,
	opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
	opAndType{OMOD, TINT16}:  ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}:  ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}:  ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,

	opAndType{OAND, TINT8}:   ssa.OpAnd8,
	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
	opAndType{OAND, TINT16}:  ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}:  ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}:  ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,

	opAndType{OOR, TINT8}:   ssa.OpOr8,
	opAndType{OOR, TUINT8}:  ssa.OpOr8,
	opAndType{OOR, TINT16}:  ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}:  ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}:  ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,

	opAndType{OXOR, TINT8}:   ssa.OpXor8,
	opAndType{OXOR, TUINT8}:  ssa.OpXor8,
	opAndType{OXOR, TINT16}:  ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}:  ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}:  ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,

	opAndType{OEQ, TBOOL}:      ssa.OpEqB,
	opAndType{OEQ, TINT8}:      ssa.OpEq8,
	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
	opAndType{OEQ, TINT16}:     ssa.OpEq16,
	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
	opAndType{OEQ, TINT32}:     ssa.OpEq32,
	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
	opAndType{OEQ, TINT64}:     ssa.OpEq64,
	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
	opAndType{OEQ, TINTER}:     ssa.OpEqInter,
	opAndType{OEQ, TSLICE}:     ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
	opAndType{OEQ, TPTR32}:     ssa.OpEqPtr,
	opAndType{OEQ, TPTR64}:     ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,

	opAndType{ONE, TBOOL}:      ssa.OpNeqB,
	opAndType{ONE, TINT8}:      ssa.OpNeq8,
	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
	opAndType{ONE, TINT16}:     ssa.OpNeq16,
	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
	opAndType{ONE, TINT32}:     ssa.OpNeq32,
	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
	opAndType{ONE, TINT64}:     ssa.OpNeq64,
	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
	opAndType{ONE, TINTER}:     ssa.OpNeqInter,
	opAndType{ONE, TSLICE}:     ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
	opAndType{ONE, TPTR32}:     ssa.OpNeqPtr,
	opAndType{ONE, TPTR64}:     ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,

	opAndType{OLT, TINT8}:    ssa.OpLess8,
	opAndType{OLT, TUINT8}:   ssa.OpLess8U,
	opAndType{OLT, TINT16}:   ssa.OpLess16,
	opAndType{OLT, TUINT16}:  ssa.OpLess16U,
	opAndType{OLT, TINT32}:   ssa.OpLess32,
	opAndType{OLT, TUINT32}:  ssa.OpLess32U,
	opAndType{OLT, TINT64}:   ssa.OpLess64,
	opAndType{OLT, TUINT64}:  ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,

	opAndType{OGT, TINT8}:    ssa.OpGreater8,
	opAndType{OGT, TUINT8}:   ssa.OpGreater8U,
	opAndType{OGT, TINT16}:   ssa.OpGreater16,
	opAndType{OGT, TUINT16}:  ssa.OpGreater16U,
	opAndType{OGT, TINT32}:   ssa.OpGreater32,
	opAndType{OGT, TUINT32}:  ssa.OpGreater32U,
	opAndType{OGT, TINT64}:   ssa.OpGreater64,
	opAndType{OGT, TUINT64}:  ssa.OpGreater64U,
	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,

	opAndType{OLE, TINT8}:    ssa.OpLeq8,
	opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
	opAndType{OLE, TINT16}:   ssa.OpLeq16,
	opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
	opAndType{OLE, TINT32}:   ssa.OpLeq32,
	opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
	opAndType{OLE, TINT64}:   ssa.OpLeq64,
	opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,

	opAndType{OGE, TINT8}:    ssa.OpGeq8,
	opAndType{OGE, TUINT8}:   ssa.OpGeq8U,
	opAndType{OGE, TINT16}:   ssa.OpGeq16,
	opAndType{OGE, TUINT16}:  ssa.OpGeq16U,
	opAndType{OGE, TINT32}:   ssa.OpGeq32,
	opAndType{OGE, TUINT32}:  ssa.OpGeq32U,
	opAndType{OGE, TINT64}:   ssa.OpGeq64,
	opAndType{OGE, TUINT64}:  ssa.OpGeq64U,
	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
}

func (s *state) concreteEtype(t *types.Type) types.EType {
	e := t.Etype
	switch e {
	default:
		return e
	case TINT:
		if s.config.PtrSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	}
}
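
// For example (an illustrative note): on a 64-bit target, concreteEtype maps
// TINT to TINT64, so ssaOp below resolves OADD on a plain int through the
// {OADD, TINT64} entry of opToSSA to ssa.OpAdd64; on a 32-bit target the
// same lookup yields ssa.OpAdd32.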

func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}

func floatForComplex(t *types.Type) *types.Type {
	if t.Size() == 8 {
		return types.Types[TFLOAT32]
	} else {
		return types.Types[TFLOAT64]
	}
}
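
// (An explanatory note: an 8-byte complex value is a complex64, whose real
// and imaginary halves are float32s; a 16-byte complex128 has float64
// halves.)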

type opAndTwoTypes struct {
	op     Op
	etype1 types.EType
	etype2 types.EType
}

type twoTypes struct {
	etype1 types.EType
	etype2 types.EType
}

type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType types.EType
}

var fpConvOpToSSA = map[twoTypes]twoOpsAndType{

	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead

	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}
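
// For example (an illustrative note): converting uint32 to float64 on a
// 64-bit target uses the {TUINT32, TFLOAT64} entry above: zero-extend to 64
// bits, then apply the signed OpCvt64to64F. Widening is what lets a signed
// conversion cover the full uint32 range, since every uint32 value fits in
// a non-negative int64.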

// This map is used only on 32-bit archs, and only includes the differences
// from fpConvOpToSSA: on 32-bit archs, don't use int64<->float conversions
// for uint32.
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}

// uint64<->float conversions, only on machines that have instructions for that
   1386 var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
   1387 	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
   1388 	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
   1389 	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
   1390 	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
   1391 }
   1392 
   1393 var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
   1394 	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
   1395 	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
   1396 	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
   1397 	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
   1398 	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
   1399 	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
   1400 	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
   1401 	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
   1402 
   1403 	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
   1404 	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
   1405 	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
   1406 	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
   1407 	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
   1408 	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
   1409 	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
   1410 	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
   1411 
   1412 	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
   1413 	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
   1414 	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
   1415 	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
   1416 	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
   1417 	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
   1418 	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
   1419 	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
   1420 
   1421 	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
   1422 	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
   1423 	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
   1424 	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
   1425 	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
   1426 	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
   1427 	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
   1428 	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
   1429 
   1430 	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
   1431 	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
   1432 	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
   1433 	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
   1434 	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
   1435 	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
   1436 	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
   1437 	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
   1438 
   1439 	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
   1440 	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
   1441 	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
   1442 	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
   1443 	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
   1444 	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
   1445 	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
   1446 	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
   1447 
   1448 	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
   1449 	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
   1450 	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
   1451 	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
   1452 	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
   1453 	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
   1454 	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
   1455 	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
   1456 
   1457 	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
   1458 	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
   1459 	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
   1460 	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
   1461 	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
   1462 	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
   1463 	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
   1464 	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
   1465 }
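        
        // Note on the table above: the key is (op, type of the shifted operand,
        // type of the shift count). OLSH needs no unsigned variants because left
        // shift is the same for signed and unsigned operands; ORSH selects the
        // arithmetic (x) or logical (Ux) form from the shifted operand's sign.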
   1466 
   1467 func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
   1468 	etype1 := s.concreteEtype(t)
   1469 	etype2 := s.concreteEtype(u)
   1470 	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
   1471 	if !ok {
   1472 		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
   1473 	}
   1474 	return x
   1475 }
   1476 
   1477 // expr converts the expression n to ssa, adds it to s and returns the ssa result.
   1478 func (s *state) expr(n *Node) *ssa.Value {
   1479 	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
   1480 		// ONAMEs and named OLITERALs have the line number
   1481 		// of the decl, not the use. See issue 14742.
   1482 		s.pushLine(n.Pos)
   1483 		defer s.popLine()
   1484 	}
   1485 
   1486 	s.stmtList(n.Ninit)
   1487 	switch n.Op {
   1488 	case OARRAYBYTESTRTMP:
   1489 		slice := s.expr(n.Left)
   1490 		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
   1491 		len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
   1492 		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
   1493 	case OSTRARRAYBYTETMP:
   1494 		str := s.expr(n.Left)
   1495 		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
   1496 		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
   1497 		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
   1498 	case OCFUNC:
   1499 		aux := n.Left.Sym.Linksym()
   1500 		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
   1501 	case ONAME:
   1502 		if n.Class() == PFUNC {
   1503 			// "value" of a function is the address of the function's closure
   1504 			sym := funcsym(n.Sym).Linksym()
   1505 			return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
   1506 		}
   1507 		if s.canSSA(n) {
   1508 			return s.variable(n, n.Type)
   1509 		}
   1510 		addr := s.addr(n, false)
   1511 		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
   1512 	case OCLOSUREVAR:
   1513 		addr := s.addr(n, false)
   1514 		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
   1515 	case OLITERAL:
   1516 		switch u := n.Val().U.(type) {
   1517 		case *Mpint:
   1518 			i := u.Int64()
   1519 			switch n.Type.Size() {
   1520 			case 1:
   1521 				return s.constInt8(n.Type, int8(i))
   1522 			case 2:
   1523 				return s.constInt16(n.Type, int16(i))
   1524 			case 4:
   1525 				return s.constInt32(n.Type, int32(i))
   1526 			case 8:
   1527 				return s.constInt64(n.Type, i)
   1528 			default:
   1529 				s.Fatalf("bad integer size %d", n.Type.Size())
   1530 				return nil
   1531 			}
   1532 		case string:
   1533 			if u == "" {
   1534 				return s.constEmptyString(n.Type)
   1535 			}
   1536 			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
   1537 		case bool:
   1538 			return s.constBool(u)
   1539 		case *NilVal:
   1540 			t := n.Type
   1541 			switch {
   1542 			case t.IsSlice():
   1543 				return s.constSlice(t)
   1544 			case t.IsInterface():
   1545 				return s.constInterface(t)
   1546 			default:
   1547 				return s.constNil(t)
   1548 			}
   1549 		case *Mpflt:
   1550 			switch n.Type.Size() {
   1551 			case 4:
   1552 				return s.constFloat32(n.Type, u.Float32())
   1553 			case 8:
   1554 				return s.constFloat64(n.Type, u.Float64())
   1555 			default:
   1556 				s.Fatalf("bad float size %d", n.Type.Size())
   1557 				return nil
   1558 			}
   1559 		case *Mpcplx:
   1560 			r := &u.Real
   1561 			i := &u.Imag
   1562 			switch n.Type.Size() {
   1563 			case 8:
   1564 				pt := types.Types[TFLOAT32]
   1565 				return s.newValue2(ssa.OpComplexMake, n.Type,
   1566 					s.constFloat32(pt, r.Float32()),
   1567 					s.constFloat32(pt, i.Float32()))
   1568 			case 16:
   1569 				pt := types.Types[TFLOAT64]
   1570 				return s.newValue2(ssa.OpComplexMake, n.Type,
   1571 					s.constFloat64(pt, r.Float64()),
   1572 					s.constFloat64(pt, i.Float64()))
   1573 			default:
   1574 				s.Fatalf("bad float size %d", n.Type.Size())
   1575 				return nil
   1576 			}
   1577 
   1578 		default:
   1579 			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
   1580 			return nil
   1581 		}
   1582 	case OCONVNOP:
   1583 		to := n.Type
   1584 		from := n.Left.Type
   1585 
   1586 		// Assume everything will work out, so set up our return value.
   1587 		// Anything interesting that happens from here on is a fatal error.
   1588 		x := s.expr(n.Left)
   1589 
   1590 		// Special case to avoid confusing GC and liveness:
   1591 		// we don't want pointers accidentally classified
   1592 		// as not-pointers or vice versa because of copy
   1593 		// elision.
   1594 		if to.IsPtrShaped() != from.IsPtrShaped() {
   1595 			return s.newValue2(ssa.OpConvert, to, x, s.mem())
   1596 		}
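        		// For example (a sketch; assumes the uintptr <-> unsafe.Pointer
        		// conversion reaches here as an OCONVNOP): in u := uintptr(p),
        		// p is pointer-shaped and u is not, so the value is routed through
        		// OpConvert and liveness still sees a pointer at this point.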
   1597 
   1598 		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
   1599 
   1600 		// CONVNOP closure
   1601 		if to.Etype == TFUNC && from.IsPtrShaped() {
   1602 			return v
   1603 		}
   1604 
   1605 		// named <--> unnamed type or typed <--> untyped const
   1606 		if from.Etype == to.Etype {
   1607 			return v
   1608 		}
   1609 
   1610 		// unsafe.Pointer <--> *T
   1611 		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
   1612 			return v
   1613 		}
   1614 
   1615 		// map <--> *hmap
   1616 		if to.Etype == TMAP && from.IsPtr() &&
   1617 			to.MapType().Hmap == from.Elem() {
   1618 			return v
   1619 		}
   1620 
   1621 		dowidth(from)
   1622 		dowidth(to)
   1623 		if from.Width != to.Width {
   1624 			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
   1625 			return nil
   1626 		}
   1627 		if etypesign(from.Etype) != etypesign(to.Etype) {
   1628 			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
   1629 			return nil
   1630 		}
   1631 
   1632 		if instrumenting {
   1633 			// These appear to be fine, but they fail the
   1634 			// integer constraint below, so okay them here.
   1635 			// Sample non-integer conversion: map[string]string -> *uint8
   1636 			return v
   1637 		}
   1638 
   1639 		if etypesign(from.Etype) == 0 {
   1640 			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
   1641 			return nil
   1642 		}
   1643 
   1644 		// integer, same width, same sign
   1645 		return v
   1646 
   1647 	case OCONV:
   1648 		x := s.expr(n.Left)
   1649 		ft := n.Left.Type // from type
   1650 		tt := n.Type      // to type
   1651 		if ft.IsBoolean() && tt.IsKind(TUINT8) {
   1652 			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
   1653 			return s.newValue1(ssa.OpCopy, n.Type, x)
   1654 		}
   1655 		if ft.IsInteger() && tt.IsInteger() {
   1656 			var op ssa.Op
   1657 			if tt.Size() == ft.Size() {
   1658 				op = ssa.OpCopy
   1659 			} else if tt.Size() < ft.Size() {
   1660 				// truncation
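        				// The key encodes operand sizes as 10*fromSize + toSize;
        				// e.g. case 84 below is 8 bytes -> 4 bytes (Trunc64to32).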
   1661 				switch 10*ft.Size() + tt.Size() {
   1662 				case 21:
   1663 					op = ssa.OpTrunc16to8
   1664 				case 41:
   1665 					op = ssa.OpTrunc32to8
   1666 				case 42:
   1667 					op = ssa.OpTrunc32to16
   1668 				case 81:
   1669 					op = ssa.OpTrunc64to8
   1670 				case 82:
   1671 					op = ssa.OpTrunc64to16
   1672 				case 84:
   1673 					op = ssa.OpTrunc64to32
   1674 				default:
   1675 					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
   1676 				}
   1677 			} else if ft.IsSigned() {
   1678 				// sign extension
   1679 				switch 10*ft.Size() + tt.Size() {
   1680 				case 12:
   1681 					op = ssa.OpSignExt8to16
   1682 				case 14:
   1683 					op = ssa.OpSignExt8to32
   1684 				case 18:
   1685 					op = ssa.OpSignExt8to64
   1686 				case 24:
   1687 					op = ssa.OpSignExt16to32
   1688 				case 28:
   1689 					op = ssa.OpSignExt16to64
   1690 				case 48:
   1691 					op = ssa.OpSignExt32to64
   1692 				default:
   1693 					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
   1694 				}
   1695 			} else {
   1696 				// zero extension
   1697 				switch 10*ft.Size() + tt.Size() {
   1698 				case 12:
   1699 					op = ssa.OpZeroExt8to16
   1700 				case 14:
   1701 					op = ssa.OpZeroExt8to32
   1702 				case 18:
   1703 					op = ssa.OpZeroExt8to64
   1704 				case 24:
   1705 					op = ssa.OpZeroExt16to32
   1706 				case 28:
   1707 					op = ssa.OpZeroExt16to64
   1708 				case 48:
   1709 					op = ssa.OpZeroExt32to64
   1710 				default:
   1711 					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
   1712 				}
   1713 			}
   1714 			return s.newValue1(op, n.Type, x)
   1715 		}
   1716 
   1717 		if ft.IsFloat() || tt.IsFloat() {
   1718 			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
   1719 			if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat {
   1720 				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
   1721 					conv = conv1
   1722 				}
   1723 			}
   1724 			if thearch.LinkArch.Family == sys.ARM64 || s.softFloat {
   1725 				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
   1726 					conv = conv1
   1727 				}
   1728 			}
   1729 
   1730 			if thearch.LinkArch.Family == sys.MIPS && !s.softFloat {
   1731 				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
   1732 					// ft is a 32-bit unsigned integer; tt is float32 or float64
   1733 					if tt.Size() == 4 {
   1734 						return s.uint32Tofloat32(n, x, ft, tt)
   1735 					}
   1736 					if tt.Size() == 8 {
   1737 						return s.uint32Tofloat64(n, x, ft, tt)
   1738 					}
   1739 				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
   1740 					// ft is float32 or float64, and tt is unsigned integer
   1741 					if ft.Size() == 4 {
   1742 						return s.float32ToUint32(n, x, ft, tt)
   1743 					}
   1744 					if ft.Size() == 8 {
   1745 						return s.float64ToUint32(n, x, ft, tt)
   1746 					}
   1747 				}
   1748 			}
   1749 
   1750 			if !ok {
   1751 				s.Fatalf("weird float conversion %v -> %v", ft, tt)
   1752 			}
   1753 			op1, op2, it := conv.op1, conv.op2, conv.intermediateType
   1754 
   1755 			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
   1756 				// normal case, not tripping over unsigned 64
   1757 				if op1 == ssa.OpCopy {
   1758 					if op2 == ssa.OpCopy {
   1759 						return x
   1760 					}
   1761 					return s.newValueOrSfCall1(op2, n.Type, x)
   1762 				}
   1763 				if op2 == ssa.OpCopy {
   1764 					return s.newValueOrSfCall1(op1, n.Type, x)
   1765 				}
   1766 				return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x))
   1767 			}
   1768 			// Tricky 64-bit unsigned cases.
   1769 			if ft.IsInteger() {
   1770 				// ft is a 64-bit unsigned integer; tt is float32 or float64
   1771 				if tt.Size() == 4 {
   1772 					return s.uint64Tofloat32(n, x, ft, tt)
   1773 				}
   1774 				if tt.Size() == 8 {
   1775 					return s.uint64Tofloat64(n, x, ft, tt)
   1776 				}
   1777 				s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
   1778 			}
   1779 			// ft is float32 or float64, and tt is unsigned integer
   1780 			if ft.Size() == 4 {
   1781 				return s.float32ToUint64(n, x, ft, tt)
   1782 			}
   1783 			if ft.Size() == 8 {
   1784 				return s.float64ToUint64(n, x, ft, tt)
   1785 			}
   1786 			s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
   1787 			return nil
   1788 		}
   1789 
   1790 		if ft.IsComplex() && tt.IsComplex() {
   1791 			var op ssa.Op
   1792 			if ft.Size() == tt.Size() {
   1793 				switch ft.Size() {
   1794 				case 8:
   1795 					op = ssa.OpRound32F
   1796 				case 16:
   1797 					op = ssa.OpRound64F
   1798 				default:
   1799 					s.Fatalf("weird complex conversion %v -> %v", ft, tt)
   1800 				}
   1801 			} else if ft.Size() == 8 && tt.Size() == 16 {
   1802 				op = ssa.OpCvt32Fto64F
   1803 			} else if ft.Size() == 16 && tt.Size() == 8 {
   1804 				op = ssa.OpCvt64Fto32F
   1805 			} else {
   1806 				s.Fatalf("weird complex conversion %v -> %v", ft, tt)
   1807 			}
   1808 			ftp := floatForComplex(ft)
   1809 			ttp := floatForComplex(tt)
   1810 			return s.newValue2(ssa.OpComplexMake, tt,
   1811 				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
   1812 				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
   1813 		}
   1814 
   1815 		s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
   1816 		return nil
   1817 
   1818 	case ODOTTYPE:
   1819 		res, _ := s.dottype(n, false)
   1820 		return res
   1821 
   1822 	// binary ops
   1823 	case OLT, OEQ, ONE, OLE, OGE, OGT:
   1824 		a := s.expr(n.Left)
   1825 		b := s.expr(n.Right)
   1826 		if n.Left.Type.IsComplex() {
   1827 			pt := floatForComplex(n.Left.Type)
   1828 			op := s.ssaOp(OEQ, pt)
   1829 			r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
   1830 			i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
   1831 			c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
   1832 			switch n.Op {
   1833 			case OEQ:
   1834 				return c
   1835 			case ONE:
   1836 				return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
   1837 			default:
   1838 				s.Fatalf("ordered complex compare %v", n.Op)
   1839 			}
   1840 		}
   1841 		if n.Left.Type.IsFloat() {
   1842 			return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
   1843 		}
   1844 		return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
   1845 	case OMUL:
   1846 		a := s.expr(n.Left)
   1847 		b := s.expr(n.Right)
   1848 		if n.Type.IsComplex() {
   1849 			mulop := ssa.OpMul64F
   1850 			addop := ssa.OpAdd64F
   1851 			subop := ssa.OpSub64F
   1852 			pt := floatForComplex(n.Type) // Could be Float32 or Float64
   1853 			wt := types.Types[TFLOAT64]   // Compute in Float64 to minimize cancelation error
   1854 
   1855 			areal := s.newValue1(ssa.OpComplexReal, pt, a)
   1856 			breal := s.newValue1(ssa.OpComplexReal, pt, b)
   1857 			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
   1858 			bimag := s.newValue1(ssa.OpComplexImag, pt, b)
   1859 
   1860 			if pt != wt { // Widen for calculation
   1861 				areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
   1862 				breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
   1863 				aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
   1864 				bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
   1865 			}
   1866 
   1867 			xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
   1868 			ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
   1869 
   1870 			if pt != wt { // Narrow to store back
   1871 				xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
   1872 				ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
   1873 			}
   1874 
   1875 			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
   1876 		}
   1877 
   1878 		if n.Type.IsFloat() {
   1879 			return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
   1880 		}
   1881 
   1882 		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
   1883 
   1884 	case ODIV:
   1885 		a := s.expr(n.Left)
   1886 		b := s.expr(n.Right)
   1887 		if n.Type.IsComplex() {
   1888 			// TODO this is not executed because the front-end substitutes a runtime call.
   1889 			// That probably ought to change; with modest optimization the widen/narrow
   1890 			// conversions could all be elided in larger expression trees.
   1891 			mulop := ssa.OpMul64F
   1892 			addop := ssa.OpAdd64F
   1893 			subop := ssa.OpSub64F
   1894 			divop := ssa.OpDiv64F
   1895 			pt := floatForComplex(n.Type) // Could be Float32 or Float64
   1896 			wt := types.Types[TFLOAT64]   // Compute in Float64 to minimize cancelation error
   1897 
   1898 			areal := s.newValue1(ssa.OpComplexReal, pt, a)
   1899 			breal := s.newValue1(ssa.OpComplexReal, pt, b)
   1900 			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
   1901 			bimag := s.newValue1(ssa.OpComplexImag, pt, b)
   1902 
   1903 			if pt != wt { // Widen for calculation
   1904 				areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
   1905 				breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
   1906 				aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
   1907 				bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
   1908 			}
   1909 
   1910 			denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
   1911 			xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
   1912 			ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
   1913 
   1914 			// TODO not sure if this is best done in wide precision or narrow
   1915 			// Double-rounding might be an issue.
   1916 			// Note that the pre-SSA implementation does the entire calculation
   1917 			// in wide format, so wide is compatible.
   1918 			xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
   1919 			ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
   1920 
   1921 			if pt != wt { // Narrow to store back
   1922 				xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
   1923 				ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
   1924 			}
   1925 			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
   1926 		}
   1927 		if n.Type.IsFloat() {
   1928 			return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
   1929 		}
   1930 		return s.intDivide(n, a, b)
   1931 	case OMOD:
   1932 		a := s.expr(n.Left)
   1933 		b := s.expr(n.Right)
   1934 		return s.intDivide(n, a, b)
   1935 	case OADD, OSUB:
   1936 		a := s.expr(n.Left)
   1937 		b := s.expr(n.Right)
   1938 		if n.Type.IsComplex() {
   1939 			pt := floatForComplex(n.Type)
   1940 			op := s.ssaOp(n.Op, pt)
   1941 			return s.newValue2(ssa.OpComplexMake, n.Type,
   1942 				s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
   1943 				s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
   1944 		}
   1945 		if n.Type.IsFloat() {
   1946 			return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
   1947 		}
   1948 		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
   1949 	case OAND, OOR, OXOR:
   1950 		a := s.expr(n.Left)
   1951 		b := s.expr(n.Right)
   1952 		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
   1953 	case OLSH, ORSH:
   1954 		a := s.expr(n.Left)
   1955 		b := s.expr(n.Right)
   1956 		return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
   1957 	case OANDAND, OOROR:
   1958 		// To implement OANDAND (and OOROR), we introduce a
   1959 		// new temporary variable to hold the result. The
   1960 		// variable is associated with the OANDAND node in the
   1961 		// s.vars table (normally variables are only
   1962 		// associated with ONAME nodes). We convert
   1963 		//     A && B
   1964 		// to
   1965 		//     var = A
   1966 		//     if var {
   1967 		//         var = B
   1968 		//     }
   1969 		// Using var in the subsequent block introduces the
   1970 		// necessary phi variable.
   1971 		el := s.expr(n.Left)
   1972 		s.vars[n] = el
   1973 
   1974 		b := s.endBlock()
   1975 		b.Kind = ssa.BlockIf
   1976 		b.SetControl(el)
   1977 		// In theory, we should set b.Likely here based on context.
   1978 		// However, gc only gives us likeliness hints
   1979 		// in a single place, for plain OIF statements,
   1980 		// and passing around context is finicky, so don't bother for now.
   1981 
   1982 		bRight := s.f.NewBlock(ssa.BlockPlain)
   1983 		bResult := s.f.NewBlock(ssa.BlockPlain)
   1984 		if n.Op == OANDAND {
   1985 			b.AddEdgeTo(bRight)
   1986 			b.AddEdgeTo(bResult)
   1987 		} else if n.Op == OOROR {
   1988 			b.AddEdgeTo(bResult)
   1989 			b.AddEdgeTo(bRight)
   1990 		}
   1991 
   1992 		s.startBlock(bRight)
   1993 		er := s.expr(n.Right)
   1994 		s.vars[n] = er
   1995 
   1996 		b = s.endBlock()
   1997 		b.AddEdgeTo(bResult)
   1998 
   1999 		s.startBlock(bResult)
   2000 		return s.variable(n, types.Types[TBOOL])
   2001 	case OCOMPLEX:
   2002 		r := s.expr(n.Left)
   2003 		i := s.expr(n.Right)
   2004 		return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
   2005 
   2006 	// unary ops
   2007 	case OMINUS:
   2008 		a := s.expr(n.Left)
   2009 		if n.Type.IsComplex() {
   2010 			tp := floatForComplex(n.Type)
   2011 			negop := s.ssaOp(n.Op, tp)
   2012 			return s.newValue2(ssa.OpComplexMake, n.Type,
   2013 				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
   2014 				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
   2015 		}
   2016 		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
   2017 	case ONOT, OCOM:
   2018 		a := s.expr(n.Left)
   2019 		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
   2020 	case OIMAG, OREAL:
   2021 		a := s.expr(n.Left)
   2022 		return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
   2023 	case OPLUS:
   2024 		return s.expr(n.Left)
   2025 
   2026 	case OADDR:
   2027 		return s.addr(n.Left, n.Bounded())
   2028 
   2029 	case OINDREGSP:
   2030 		addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
   2031 		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
   2032 
   2033 	case OIND:
   2034 		p := s.exprPtr(n.Left, false, n.Pos)
   2035 		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
   2036 
   2037 	case ODOT:
   2038 		t := n.Left.Type
   2039 		if canSSAType(t) {
   2040 			v := s.expr(n.Left)
   2041 			return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
   2042 		}
   2043 		if n.Left.Op == OSTRUCTLIT {
   2044 			// All literals with nonzero fields have already been
   2045 			// rewritten during walk. Any that remain are just T{}
   2046 			// or equivalents. Use the zero value.
   2047 			if !iszero(n.Left) {
   2048 				Fatalf("literal with nonzero value in SSA: %v", n.Left)
   2049 			}
   2050 			return s.zeroVal(n.Type)
   2051 		}
   2052 		p := s.addr(n, false)
   2053 		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
   2054 
   2055 	case ODOTPTR:
   2056 		p := s.exprPtr(n.Left, false, n.Pos)
   2057 		p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
   2058 		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
   2059 
   2060 	case OINDEX:
   2061 		switch {
   2062 		case n.Left.Type.IsString():
   2063 			if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
   2064 				// Replace "abc"[1] with 'b'.
   2065 				// Delayed until now because "abc"[1] is not an ideal constant.
   2066 				// See test/fixedbugs/issue11370.go.
   2067 				return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()])))
   2068 			}
   2069 			a := s.expr(n.Left)
   2070 			i := s.expr(n.Right)
   2071 			i = s.extendIndex(i, panicindex)
   2072 			if !n.Bounded() {
   2073 				len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
   2074 				s.boundsCheck(i, len)
   2075 			}
   2076 			ptrtyp := s.f.Config.Types.BytePtr
   2077 			ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
   2078 			if Isconst(n.Right, CTINT) {
   2079 				ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
   2080 			} else {
   2081 				ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
   2082 			}
   2083 			return s.newValue2(ssa.OpLoad, types.Types[TUINT8], ptr, s.mem())
   2084 		case n.Left.Type.IsSlice():
   2085 			p := s.addr(n, false)
   2086 			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
   2087 		case n.Left.Type.IsArray():
   2088 			if bound := n.Left.Type.NumElem(); bound <= 1 {
   2089 				// SSA can handle arrays of length at most 1.
   2090 				a := s.expr(n.Left)
   2091 				i := s.expr(n.Right)
   2092 				if bound == 0 {
   2093 					// Bounds check will never succeed.  Might as well
   2094 					// use constants for the bounds check.
   2095 					z := s.constInt(types.Types[TINT], 0)
   2096 					s.boundsCheck(z, z)
   2097 					// The return value won't be live, return junk.
   2098 					return s.newValue0(ssa.OpUnknown, n.Type)
   2099 				}
   2100 				i = s.extendIndex(i, panicindex)
   2101 				if !n.Bounded() {
   2102 					s.boundsCheck(i, s.constInt(types.Types[TINT], bound))
   2103 				}
   2104 				return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
   2105 			}
   2106 			p := s.addr(n, false)
   2107 			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
   2108 		default:
   2109 			s.Fatalf("bad type for index %v", n.Left.Type)
   2110 			return nil
   2111 		}
   2112 
   2113 	case OLEN, OCAP:
   2114 		switch {
   2115 		case n.Left.Type.IsSlice():
   2116 			op := ssa.OpSliceLen
   2117 			if n.Op == OCAP {
   2118 				op = ssa.OpSliceCap
   2119 			}
   2120 			return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
   2121 		case n.Left.Type.IsString(): // string; not reachable for OCAP
   2122 			return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
   2123 		case n.Left.Type.IsMap(), n.Left.Type.IsChan():
   2124 			return s.referenceTypeBuiltin(n, s.expr(n.Left))
   2125 		default: // array
   2126 			return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
   2127 		}
   2128 
   2129 	case OSPTR:
   2130 		a := s.expr(n.Left)
   2131 		if n.Left.Type.IsSlice() {
   2132 			return s.newValue1(ssa.OpSlicePtr, n.Type, a)
   2133 		} else {
   2134 			return s.newValue1(ssa.OpStringPtr, n.Type, a)
   2135 		}
   2136 
   2137 	case OITAB:
   2138 		a := s.expr(n.Left)
   2139 		return s.newValue1(ssa.OpITab, n.Type, a)
   2140 
   2141 	case OIDATA:
   2142 		a := s.expr(n.Left)
   2143 		return s.newValue1(ssa.OpIData, n.Type, a)
   2144 
   2145 	case OEFACE:
   2146 		tab := s.expr(n.Left)
   2147 		data := s.expr(n.Right)
   2148 		return s.newValue2(ssa.OpIMake, n.Type, tab, data)
   2149 
   2150 	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
   2151 		v := s.expr(n.Left)
   2152 		var i, j, k *ssa.Value
   2153 		low, high, max := n.SliceBounds()
   2154 		if low != nil {
   2155 			i = s.extendIndex(s.expr(low), panicslice)
   2156 		}
   2157 		if high != nil {
   2158 			j = s.extendIndex(s.expr(high), panicslice)
   2159 		}
   2160 		if max != nil {
   2161 			k = s.extendIndex(s.expr(max), panicslice)
   2162 		}
   2163 		p, l, c := s.slice(n.Left.Type, v, i, j, k)
   2164 		return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
   2165 
   2166 	case OSLICESTR:
   2167 		v := s.expr(n.Left)
   2168 		var i, j *ssa.Value
   2169 		low, high, _ := n.SliceBounds()
   2170 		if low != nil {
   2171 			i = s.extendIndex(s.expr(low), panicslice)
   2172 		}
   2173 		if high != nil {
   2174 			j = s.extendIndex(s.expr(high), panicslice)
   2175 		}
   2176 		p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
   2177 		return s.newValue2(ssa.OpStringMake, n.Type, p, l)
   2178 
   2179 	case OCALLFUNC:
   2180 		if isIntrinsicCall(n) {
   2181 			return s.intrinsicCall(n)
   2182 		}
   2183 		fallthrough
   2184 
   2185 	case OCALLINTER, OCALLMETH:
   2186 		a := s.call(n, callNormal)
   2187 		return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())
   2188 
   2189 	case OGETG:
   2190 		return s.newValue1(ssa.OpGetG, n.Type, s.mem())
   2191 
   2192 	case OAPPEND:
   2193 		return s.append(n, false)
   2194 
   2195 	case OSTRUCTLIT, OARRAYLIT:
   2196 		// All literals with nonzero fields have already been
   2197 		// rewritten during walk. Any that remain are just T{}
   2198 		// or equivalents. Use the zero value.
   2199 		if !iszero(n) {
   2200 			Fatalf("literal with nonzero value in SSA: %v", n)
   2201 		}
   2202 		return s.zeroVal(n.Type)
   2203 
   2204 	default:
   2205 		s.Fatalf("unhandled expr %v", n.Op)
   2206 		return nil
   2207 	}
   2208 }
   2209 
   2210 // append converts an OAPPEND node to SSA.
   2211 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
   2212 // adds it to s, and returns the Value.
   2213 // If inplace is true, it writes the result of the OAPPEND expression n
   2214 // back to the slice being appended to, and returns nil.
   2215 // inplace MUST be set to false if the slice can be SSA'd.
   2216 func (s *state) append(n *Node, inplace bool) *ssa.Value {
   2217 	// If inplace is false, process as expression "append(s, e1, e2, e3)":
   2218 	//
   2219 	// ptr, len, cap := s
   2220 	// newlen := len + 3
   2221 	// if newlen > cap {
   2222 	//     ptr, len, cap = growslice(s, newlen)
   2223 	//     newlen = len + 3 // recalculate to avoid a spill
   2224 	// }
   2225 	// // with write barriers, if needed:
   2226 	// *(ptr+len) = e1
   2227 	// *(ptr+len+1) = e2
   2228 	// *(ptr+len+2) = e3
   2229 	// return makeslice(ptr, newlen, cap)
   2230 	//
   2231 	//
   2232 	// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
   2233 	//
   2234 	// a := &s
   2235 	// ptr, len, cap := s
   2236 	// newlen := len + 3
   2237 	// if newlen > cap {
   2238 	//    newptr, len, newcap = growslice(ptr, len, cap, newlen)
   2239 	//    vardef(a)       // if necessary, advise liveness we are writing a new a
   2240 	//    *a.cap = newcap // write before ptr to avoid a spill
   2241 	//    *a.ptr = newptr // with write barrier
   2242 	// }
   2243 	// newlen = len + 3 // recalculate to avoid a spill
   2244 	// *a.len = newlen
   2245 	// // with write barriers, if needed:
   2246 	// *(ptr+len) = e1
   2247 	// *(ptr+len+1) = e2
   2248 	// *(ptr+len+2) = e3
   2249 
   2250 	et := n.Type.Elem()
   2251 	pt := types.NewPtr(et)
   2252 
   2253 	// Evaluate slice
   2254 	sn := n.List.First() // the slice node is the first in the list
   2255 
   2256 	var slice, addr *ssa.Value
   2257 	if inplace {
   2258 		addr = s.addr(sn, false)
   2259 		slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
   2260 	} else {
   2261 		slice = s.expr(sn)
   2262 	}
   2263 
   2264 	// Allocate new blocks
   2265 	grow := s.f.NewBlock(ssa.BlockPlain)
   2266 	assign := s.f.NewBlock(ssa.BlockPlain)
   2267 
   2268 	// Decide if we need to grow
   2269 	nargs := int64(n.List.Len() - 1)
   2270 	p := s.newValue1(ssa.OpSlicePtr, pt, slice)
   2271 	l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
   2272 	c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
   2273 	nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
   2274 
   2275 	cmp := s.newValue2(s.ssaOp(OGT, types.Types[TINT]), types.Types[TBOOL], nl, c)
   2276 	s.vars[&ptrVar] = p
   2277 
   2278 	if !inplace {
   2279 		s.vars[&newlenVar] = nl
   2280 		s.vars[&capVar] = c
   2281 	} else {
   2282 		s.vars[&lenVar] = l
   2283 	}
   2284 
   2285 	b := s.endBlock()
   2286 	b.Kind = ssa.BlockIf
   2287 	b.Likely = ssa.BranchUnlikely
   2288 	b.SetControl(cmp)
   2289 	b.AddEdgeTo(grow)
   2290 	b.AddEdgeTo(assign)
   2291 
   2292 	// Call growslice
   2293 	s.startBlock(grow)
   2294 	taddr := s.expr(n.Left)
   2295 	r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
   2296 
   2297 	if inplace {
   2298 		if sn.Op == ONAME && sn.Class() != PEXTERN {
   2299 			// Tell liveness we're about to build a new slice
   2300 			s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
   2301 		}
   2302 		capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr)
   2303 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capaddr, r[2], s.mem())
   2304 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, pt, addr, r[0], s.mem())
   2305 		// load the value we just stored to avoid having to spill it
   2306 		s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
   2307 		s.vars[&lenVar] = r[1] // avoid a spill in the fast path
   2308 	} else {
   2309 		s.vars[&ptrVar] = r[0]
   2310 		s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
   2311 		s.vars[&capVar] = r[2]
   2312 	}
   2313 
   2314 	b = s.endBlock()
   2315 	b.AddEdgeTo(assign)
   2316 
   2317 	// assign new elements to slots
   2318 	s.startBlock(assign)
   2319 
   2320 	if inplace {
   2321 		l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
   2322 		nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
   2323 		lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr)
   2324 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenaddr, nl, s.mem())
   2325 	}
   2326 
   2327 	// Evaluate args
   2328 	type argRec struct {
   2329 		// if store is true, we're appending the value v.  If false, we're appending the
   2330 		// value at *v.
   2331 		v     *ssa.Value
   2332 		store bool
   2333 	}
   2334 	args := make([]argRec, 0, nargs)
   2335 	for _, n := range n.List.Slice()[1:] {
   2336 		if canSSAType(n.Type) {
   2337 			args = append(args, argRec{v: s.expr(n), store: true})
   2338 		} else {
   2339 			v := s.addr(n, false)
   2340 			args = append(args, argRec{v: v})
   2341 		}
   2342 	}
   2343 
   2344 	p = s.variable(&ptrVar, pt) // generates phi for ptr
   2345 	if !inplace {
   2346 		nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
   2347 		c = s.variable(&capVar, types.Types[TINT])     // generates phi for cap
   2348 	}
   2349 	p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
   2350 	for i, arg := range args {
   2351 		addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
   2352 		if arg.store {
   2353 			s.storeType(et, addr, arg.v, 0)
   2354 		} else {
   2355 			store := s.newValue3I(ssa.OpMove, types.TypeMem, et.Size(), addr, arg.v, s.mem())
   2356 			store.Aux = et
   2357 			s.vars[&memVar] = store
   2358 		}
   2359 	}
   2360 
   2361 	delete(s.vars, &ptrVar)
   2362 	if inplace {
   2363 		delete(s.vars, &lenVar)
   2364 		return nil
   2365 	}
   2366 	delete(s.vars, &newlenVar)
   2367 	delete(s.vars, &capVar)
   2368 	// make result
   2369 	return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
   2370 }
   2371 
   2372 // condBranch evaluates the boolean expression cond and branches to yes
   2373 // if cond is true and no if cond is false.
   2374 // This function is intended to handle && and || better than just calling
   2375 // s.expr(cond) and branching on the result.
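        // For example (a sketch of the recursion below, not extra behavior),
        // condBranch(a && b, yes, no, likely) proceeds as:
        //
        //	mid := new plain block
        //	condBranch(a, mid, no, ...)    // a false short-circuits to no
        //	condBranch(b, yes, no, likely) // emitted into mid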
   2376 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
   2377 	switch cond.Op {
   2378 	case OANDAND:
   2379 		mid := s.f.NewBlock(ssa.BlockPlain)
   2380 		s.stmtList(cond.Ninit)
   2381 		s.condBranch(cond.Left, mid, no, max8(likely, 0))
   2382 		s.startBlock(mid)
   2383 		s.condBranch(cond.Right, yes, no, likely)
   2384 		return
   2385 		// Note: if likely==1, then both recursive calls pass 1.
   2386 		// If likely==-1, then we don't have enough information to decide
   2387 		// whether the first branch is likely or not. So we pass 0 for
   2388 		// the likeliness of the first branch.
   2389 		// TODO: have the frontend give us branch prediction hints for
   2390 		// OANDAND and OOROR nodes (if it ever has such info).
   2391 	case OOROR:
   2392 		mid := s.f.NewBlock(ssa.BlockPlain)
   2393 		s.stmtList(cond.Ninit)
   2394 		s.condBranch(cond.Left, yes, mid, min8(likely, 0))
   2395 		s.startBlock(mid)
   2396 		s.condBranch(cond.Right, yes, no, likely)
   2397 		return
   2398 		// Note: if likely==-1, then both recursive calls pass -1.
   2399 		// If likely==1, then we don't have enough info to decide
   2400 		// the likelihood of the first branch.
   2401 	case ONOT:
   2402 		s.stmtList(cond.Ninit)
   2403 		s.condBranch(cond.Left, no, yes, -likely)
   2404 		return
   2405 	}
   2406 	c := s.expr(cond)
   2407 	b := s.endBlock()
   2408 	b.Kind = ssa.BlockIf
   2409 	b.SetControl(c)
   2410 	b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
   2411 	b.AddEdgeTo(yes)
   2412 	b.AddEdgeTo(no)
   2413 }
   2414 
   2415 type skipMask uint8
   2416 
   2417 const (
   2418 	skipPtr skipMask = 1 << iota
   2419 	skipLen
   2420 	skipCap
   2421 )
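        
        // For example, a caller that knows a slice's pointer and capacity are
        // unchanged might pass skipPtr|skipCap; assign forwards the mask to
        // storeType, which can then avoid those top-level stores.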
   2422 
   2423 // assign does left = right.
   2424 // Right has already been evaluated to ssa, left has not.
   2425 // If deref is true, then we do left = *right instead (and right has already been nil-checked).
   2426 // If deref is true and right == nil, just do left = 0.
   2427 // skip indicates assignments (at the top level) that can be avoided.
   2428 func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
   2429 	if left.Op == ONAME && isblank(left) {
   2430 		return
   2431 	}
   2432 	t := left.Type
   2433 	dowidth(t)
   2434 	if s.canSSA(left) {
   2435 		if deref {
   2436 			s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
   2437 		}
   2438 		if left.Op == ODOT {
   2439 			// We're assigning to a field of an ssa-able value.
   2440 			// We need to build a new structure with the new value for the
   2441 			// field we're assigning and the old values for the other fields.
   2442 			// For instance:
   2443 			//   type T struct {a, b, c int}
   2444 			//   var x T
   2445 			//   x.b = 5
   2446 			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
   2447 
   2448 			// Grab information about the structure type.
   2449 			t := left.Left.Type
   2450 			nf := t.NumFields()
   2451 			idx := fieldIdx(left)
   2452 
   2453 			// Grab old value of structure.
   2454 			old := s.expr(left.Left)
   2455 
   2456 			// Make new structure.
   2457 			new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
   2458 
   2459 			// Add fields as args.
   2460 			for i := 0; i < nf; i++ {
   2461 				if i == idx {
   2462 					new.AddArg(right)
   2463 				} else {
   2464 					new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
   2465 				}
   2466 			}
   2467 
   2468 			// Recursively assign the new value we've made to the base of the dot op.
   2469 			s.assign(left.Left, new, false, 0)
   2470 			// TODO: do we need to update named values here?
   2471 			return
   2472 		}
   2473 		if left.Op == OINDEX && left.Left.Type.IsArray() {
   2474 			// We're assigning to an element of an ssa-able array.
   2475 			// a[i] = v
   2476 			t := left.Left.Type
   2477 			n := t.NumElem()
   2478 
   2479 			i := s.expr(left.Right) // index
   2480 			if n == 0 {
   2481 				// The bounds check must fail.  Might as well
   2482 				// ignore the actual index and just use zeros.
   2483 				z := s.constInt(types.Types[TINT], 0)
   2484 				s.boundsCheck(z, z)
   2485 				return
   2486 			}
   2487 			if n != 1 {
   2488 				s.Fatalf("assigning to non-1-length array")
   2489 			}
   2490 			// Rewrite to a = [1]{v}
   2491 			i = s.extendIndex(i, panicindex)
   2492 			s.boundsCheck(i, s.constInt(types.Types[TINT], 1))
   2493 			v := s.newValue1(ssa.OpArrayMake1, t, right)
   2494 			s.assign(left.Left, v, false, 0)
   2495 			return
   2496 		}
   2497 		// Update variable assignment.
   2498 		s.vars[left] = right
   2499 		s.addNamedValue(left, right)
   2500 		return
   2501 	}
   2502 	// Left is not ssa-able. Compute its address.
   2503 	addr := s.addr(left, false)
   2504 	if left.Op == ONAME && left.Class() != PEXTERN && skip == 0 {
   2505 		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, left, s.mem())
   2506 	}
   2507 	if isReflectHeaderDataField(left) {
   2508 		// Package unsafe's documentation says storing pointers into
   2509 		// reflect.SliceHeader and reflect.StringHeader's Data fields
   2510 		// is valid, even though they have type uintptr (#19168).
   2511 		// Mark it pointer type to signal the writebarrier pass to
   2512 		// insert a write barrier.
   2513 		t = types.Types[TUNSAFEPTR]
   2514 	}
   2515 	if deref {
   2516 		// Treat as a mem->mem move.
   2517 		var store *ssa.Value
   2518 		if right == nil {
   2519 			store = s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), addr, s.mem())
   2520 		} else {
   2521 			store = s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), addr, right, s.mem())
   2522 		}
   2523 		store.Aux = t
   2524 		s.vars[&memVar] = store
   2525 		return
   2526 	}
   2527 	// Treat as a store.
   2528 	s.storeType(t, addr, right, skip)
   2529 }
   2530 
   2531 // zeroVal returns the zero value for type t.
   2532 func (s *state) zeroVal(t *types.Type) *ssa.Value {
   2533 	switch {
   2534 	case t.IsInteger():
   2535 		switch t.Size() {
   2536 		case 1:
   2537 			return s.constInt8(t, 0)
   2538 		case 2:
   2539 			return s.constInt16(t, 0)
   2540 		case 4:
   2541 			return s.constInt32(t, 0)
   2542 		case 8:
   2543 			return s.constInt64(t, 0)
   2544 		default:
   2545 			s.Fatalf("bad sized integer type %v", t)
   2546 		}
   2547 	case t.IsFloat():
   2548 		switch t.Size() {
   2549 		case 4:
   2550 			return s.constFloat32(t, 0)
   2551 		case 8:
   2552 			return s.constFloat64(t, 0)
   2553 		default:
   2554 			s.Fatalf("bad sized float type %v", t)
   2555 		}
   2556 	case t.IsComplex():
   2557 		switch t.Size() {
   2558 		case 8:
   2559 			z := s.constFloat32(types.Types[TFLOAT32], 0)
   2560 			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
   2561 		case 16:
   2562 			z := s.constFloat64(types.Types[TFLOAT64], 0)
   2563 			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
   2564 		default:
   2565 			s.Fatalf("bad sized complex type %v", t)
   2566 		}
   2567 
   2568 	case t.IsString():
   2569 		return s.constEmptyString(t)
   2570 	case t.IsPtrShaped():
   2571 		return s.constNil(t)
   2572 	case t.IsBoolean():
   2573 		return s.constBool(false)
   2574 	case t.IsInterface():
   2575 		return s.constInterface(t)
   2576 	case t.IsSlice():
   2577 		return s.constSlice(t)
   2578 	case t.IsStruct():
   2579 		n := t.NumFields()
   2580 		v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
   2581 		for i := 0; i < n; i++ {
   2582 			v.AddArg(s.zeroVal(t.FieldType(i)))
   2583 		}
   2584 		return v
   2585 	case t.IsArray():
   2586 		switch t.NumElem() {
   2587 		case 0:
   2588 			return s.entryNewValue0(ssa.OpArrayMake0, t)
   2589 		case 1:
   2590 			return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
   2591 		}
   2592 	}
   2593 	s.Fatalf("zero for type %v not implemented", t)
   2594 	return nil
   2595 }
   2596 
   2597 type callKind int8
   2598 
   2599 const (
   2600 	callNormal callKind = iota
   2601 	callDefer
   2602 	callGo
   2603 )
   2604 
   2605 type sfRtCallDef struct {
   2606 	rtfn  *obj.LSym
   2607 	rtype types.EType
   2608 }
   2609 
   2610 var softFloatOps map[ssa.Op]sfRtCallDef
   2611 
   2612 func softfloatInit() {
   2613 	// Some of these operations get transformed by sfcall.
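        	// In particular, Sub32F/Sub64F intentionally map to fadd32/fadd64:
        	// sfcall negates the second operand before the call. Likewise the
        	// Less/Leq ops map to fgt/fge with operands swapped, and Neq is
        	// computed by negating the corresponding feq result.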
   2614 	softFloatOps = map[ssa.Op]sfRtCallDef{
   2615 		ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
   2616 		ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
   2617 		ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
   2618 		ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
   2619 		ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32},
   2620 		ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64},
   2621 		ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32},
   2622 		ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64},
   2623 
   2624 		ssa.OpEq64F:      sfRtCallDef{sysfunc("feq64"), TBOOL},
   2625 		ssa.OpEq32F:      sfRtCallDef{sysfunc("feq32"), TBOOL},
   2626 		ssa.OpNeq64F:     sfRtCallDef{sysfunc("feq64"), TBOOL},
   2627 		ssa.OpNeq32F:     sfRtCallDef{sysfunc("feq32"), TBOOL},
   2628 		ssa.OpLess64F:    sfRtCallDef{sysfunc("fgt64"), TBOOL},
   2629 		ssa.OpLess32F:    sfRtCallDef{sysfunc("fgt32"), TBOOL},
   2630 		ssa.OpGreater64F: sfRtCallDef{sysfunc("fgt64"), TBOOL},
   2631 		ssa.OpGreater32F: sfRtCallDef{sysfunc("fgt32"), TBOOL},
   2632 		ssa.OpLeq64F:     sfRtCallDef{sysfunc("fge64"), TBOOL},
   2633 		ssa.OpLeq32F:     sfRtCallDef{sysfunc("fge32"), TBOOL},
   2634 		ssa.OpGeq64F:     sfRtCallDef{sysfunc("fge64"), TBOOL},
   2635 		ssa.OpGeq32F:     sfRtCallDef{sysfunc("fge32"), TBOOL},
   2636 
   2637 		ssa.OpCvt32to32F:  sfRtCallDef{sysfunc("fint32to32"), TFLOAT32},
   2638 		ssa.OpCvt32Fto32:  sfRtCallDef{sysfunc("f32toint32"), TINT32},
   2639 		ssa.OpCvt64to32F:  sfRtCallDef{sysfunc("fint64to32"), TFLOAT32},
   2640 		ssa.OpCvt32Fto64:  sfRtCallDef{sysfunc("f32toint64"), TINT64},
   2641 		ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32},
   2642 		ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64},
   2643 		ssa.OpCvt32to64F:  sfRtCallDef{sysfunc("fint32to64"), TFLOAT64},
   2644 		ssa.OpCvt64Fto32:  sfRtCallDef{sysfunc("f64toint32"), TINT32},
   2645 		ssa.OpCvt64to64F:  sfRtCallDef{sysfunc("fint64to64"), TFLOAT64},
   2646 		ssa.OpCvt64Fto64:  sfRtCallDef{sysfunc("f64toint64"), TINT64},
   2647 		ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64},
   2648 		ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64},
   2649 		ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64},
   2650 		ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32},
   2651 	}
   2652 }
   2653 
   2654 // TODO: do not emit sfcall if the operation can be optimized to a constant
   2655 // in a later opt phase.
   2656 func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
   2657 	if callDef, ok := softFloatOps[op]; ok {
   2658 		switch op {
   2659 		case ssa.OpLess32F,
   2660 			ssa.OpLess64F,
   2661 			ssa.OpLeq32F,
   2662 			ssa.OpLeq64F:
   2663 			args[0], args[1] = args[1], args[0]
   2664 		case ssa.OpSub32F,
   2665 			ssa.OpSub64F:
   2666 			args[1] = s.newValue1(s.ssaOp(OMINUS, types.Types[callDef.rtype]), args[1].Type, args[1])
   2667 		}
   2668 
   2669 		result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
   2670 		if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
   2671 			result = s.newValue1(ssa.OpNot, result.Type, result)
   2672 		}
   2673 		return result, true
   2674 	}
   2675 	return nil, false
   2676 }
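        
        // Callers appear to reach sfcall via the newValueOrSfCall* helpers used
        // throughout this file, which presumably emit the op directly when
        // sfcall reports that no soft-float lowering exists for it.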
   2677 
   2678 var intrinsics map[intrinsicKey]intrinsicBuilder
   2679 
   2680 // An intrinsicBuilder converts a call node n into an ssa value that
   2681 // implements that call as an intrinsic. args is a list of arguments to the func.
   2682 type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
   2683 
   2684 type intrinsicKey struct {
   2685 	arch *sys.Arch
   2686 	pkg  string
   2687 	fn   string
   2688 }
   2689 
   2690 func init() {
   2691 	intrinsics = map[intrinsicKey]intrinsicBuilder{}
   2692 
   2693 	var all []*sys.Arch
   2694 	var p4 []*sys.Arch
   2695 	var p8 []*sys.Arch
   2696 	for _, a := range sys.Archs {
   2697 		all = append(all, a)
   2698 		if a.PtrSize == 4 {
   2699 			p4 = append(p4, a)
   2700 		} else {
   2701 			p8 = append(p8, a)
   2702 		}
   2703 	}
   2704 
   2705 	// add adds the intrinsic b for pkg.fn for the given list of architectures.
   2706 	add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
   2707 		for _, a := range archs {
   2708 			intrinsics[intrinsicKey{a, pkg, fn}] = b
   2709 		}
   2710 	}
   2711 	// addF does the same as add but operates on architecture families.
   2712 	addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
   2713 		m := 0
   2714 		for _, f := range archFamilies {
   2715 			if f >= 32 {
   2716 				panic("too many architecture families")
   2717 			}
   2718 			m |= 1 << uint(f)
   2719 		}
   2720 		for _, a := range all {
   2721 			if m>>uint(a.Family)&1 != 0 {
   2722 				intrinsics[intrinsicKey{a, pkg, fn}] = b
   2723 			}
   2724 		}
   2725 	}
   2726 	// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
   2727 	alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
   2728 		for _, a := range archs {
   2729 			if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
   2730 				intrinsics[intrinsicKey{a, pkg, fn}] = b
   2731 			}
   2732 		}
   2733 	}
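        	// A typical use might look like (hypothetical example):
        	//	alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)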
   2734 
   2735 	/******** runtime ********/
   2736 	if !instrumenting {
   2737 		add("runtime", "slicebytetostringtmp",
   2738 			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2739 				// Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes
   2740 				// for the backend instead of slicebytetostringtmp calls
   2741 				// when not instrumenting.
   2742 				slice := args[0]
   2743 				ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
   2744 				len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
   2745 				return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
   2746 			},
   2747 			all...)
   2748 	}
   2749 	add("runtime", "KeepAlive",
   2750 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2751 			data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
   2752 			s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
   2753 			return nil
   2754 		},
   2755 		all...)
   2756 	add("runtime", "getclosureptr",
   2757 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2758 			return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
   2759 		},
   2760 		all...)
   2761 
   2762 	addF("runtime", "getcallerpc",
   2763 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2764 			return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
   2765 		}, sys.AMD64, sys.I386)
   2766 
   2767 	add("runtime", "getcallersp",
   2768 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2769 			return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
   2770 		},
   2771 		all...)
   2772 
   2773 	/******** runtime/internal/sys ********/
   2774 	addF("runtime/internal/sys", "Ctz32",
   2775 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2776 			return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
   2777 		},
   2778 		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
   2779 	addF("runtime/internal/sys", "Ctz64",
   2780 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2781 			return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
   2782 		},
   2783 		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
   2784 	addF("runtime/internal/sys", "Bswap32",
   2785 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2786 			return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
   2787 		},
   2788 		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
   2789 	addF("runtime/internal/sys", "Bswap64",
   2790 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2791 			return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
   2792 		},
   2793 		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
   2794 
   2795 	/******** runtime/internal/atomic ********/
   2796 	addF("runtime/internal/atomic", "Load",
   2797 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2798 			v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
   2799 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
   2800 			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
   2801 		},
   2802 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
   2803 	addF("runtime/internal/atomic", "Load64",
   2804 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2805 			v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
   2806 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
   2807 			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
   2808 		},
   2809 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
   2810 	addF("runtime/internal/atomic", "Loadp",
   2811 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2812 			v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
   2813 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
   2814 			return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
   2815 		},
   2816 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
   2817 
   2818 	addF("runtime/internal/atomic", "Store",
   2819 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2820 			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
   2821 			return nil
   2822 		},
   2823 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
   2824 	addF("runtime/internal/atomic", "Store64",
   2825 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2826 			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
   2827 			return nil
   2828 		},
   2829 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
   2830 	addF("runtime/internal/atomic", "StorepNoWB",
   2831 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2832 			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
   2833 			return nil
   2834 		},
   2835 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64)
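        	// Store-style intrinsics return nil: their only result is the new
        	// memory state written to s.vars[&memVar]. intrinsicCall below
        	// substitutes s.mem() for a nil value when reporting substitutions.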
   2836 
   2837 	addF("runtime/internal/atomic", "Xchg",
   2838 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2839 			v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
   2840 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
   2841 			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
   2842 		},
   2843 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
   2844 	addF("runtime/internal/atomic", "Xchg64",
   2845 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2846 			v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
   2847 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
   2848 			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
   2849 		},
   2850 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
   2851 
   2852 	addF("runtime/internal/atomic", "Xadd",
   2853 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2854 			v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
   2855 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
   2856 			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
   2857 		},
   2858 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
   2859 	addF("runtime/internal/atomic", "Xadd64",
   2860 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2861 			v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
   2862 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
   2863 			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
   2864 		},
   2865 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
   2866 
   2867 	addF("runtime/internal/atomic", "Cas",
   2868 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2869 			v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
   2870 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
   2871 			return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
   2872 		},
   2873 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
   2874 	addF("runtime/internal/atomic", "Cas64",
   2875 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2876 			v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
   2877 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
   2878 			return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
   2879 		},
   2880 		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
   2881 
   2882 	addF("runtime/internal/atomic", "And8",
   2883 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2884 			s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
   2885 			return nil
   2886 		},
   2887 		sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
   2888 	addF("runtime/internal/atomic", "Or8",
   2889 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2890 			s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
   2891 			return nil
   2892 		},
   2893 		sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
   2894 
   2895 	alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
   2896 	alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
   2897 	alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
   2898 	alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
   2899 	alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
   2900 	alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
   2901 	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
   2902 	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
   2903 	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
   2904 	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
   2905 	alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
   2906 	alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
   2907 	alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
   2908 	alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
   2909 	alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
   2910 	alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
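        	// The uint- and uintptr-sized operations must map to the 32-bit
        	// or 64-bit primitive depending on pointer size, so each alias is
        	// declared twice: once for the 4-byte-pointer architectures (p4)
        	// and once for the 8-byte-pointer ones (p8).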
   2911 
   2912 	/******** math ********/
   2913 	addF("math", "Sqrt",
   2914 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2915 			return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
   2916 		},
   2917 		sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
   2918 	addF("math", "Trunc",
   2919 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2920 			return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0])
   2921 		},
   2922 		sys.PPC64, sys.S390X)
   2923 	addF("math", "Ceil",
   2924 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2925 			return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0])
   2926 		},
   2927 		sys.PPC64, sys.S390X)
   2928 	addF("math", "Floor",
   2929 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2930 			return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0])
   2931 		},
   2932 		sys.PPC64, sys.S390X)
   2933 	addF("math", "Round",
   2934 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2935 			return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0])
   2936 		},
   2937 		sys.S390X)
   2938 	addF("math", "RoundToEven",
   2939 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2940 			return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0])
   2941 		},
   2942 		sys.S390X)
   2943 	addF("math", "Abs",
   2944 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2945 			return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0])
   2946 		},
   2947 		sys.PPC64)
   2948 	addF("math", "Copysign",
   2949 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2950 			return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1])
   2951 		},
   2952 		sys.PPC64)
   2953 
   2954 	makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2955 		return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   2956 			aux := syslook("support_sse41").Sym.Linksym()
   2957 			addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb)
   2958 			v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem())
   2959 			b := s.endBlock()
   2960 			b.Kind = ssa.BlockIf
   2961 			b.SetControl(v)
   2962 			bTrue := s.f.NewBlock(ssa.BlockPlain)
   2963 			bFalse := s.f.NewBlock(ssa.BlockPlain)
   2964 			bEnd := s.f.NewBlock(ssa.BlockPlain)
   2965 			b.AddEdgeTo(bTrue)
   2966 			b.AddEdgeTo(bFalse)
   2967 			b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays
   2968 
   2969 			// We have the intrinsic - use it directly.
   2970 			s.startBlock(bTrue)
   2971 			s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0])
   2972 			s.endBlock().AddEdgeTo(bEnd)
   2973 
   2974 			// Call the pure Go version.
   2975 			s.startBlock(bFalse)
   2976 			a := s.call(n, callNormal)
   2977 			s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TFLOAT64], a, s.mem())
   2978 			s.endBlock().AddEdgeTo(bEnd)
   2979 
   2980 			// Merge results.
   2981 			s.startBlock(bEnd)
   2982 			return s.variable(n, types.Types[TFLOAT64])
   2983 		}
   2984 	}
   2985 	addF("math", "RoundToEven",
   2986 		makeRoundAMD64(ssa.OpRoundToEven),
   2987 		sys.AMD64)
   2988 	addF("math", "Floor",
   2989 		makeRoundAMD64(ssa.OpFloor),
   2990 		sys.AMD64)
   2991 	addF("math", "Ceil",
   2992 		makeRoundAMD64(ssa.OpCeil),
   2993 		sys.AMD64)
   2994 	addF("math", "Trunc",
   2995 		makeRoundAMD64(ssa.OpTrunc),
   2996 		sys.AMD64)
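        	// On AMD64 these four dispatch on a runtime CPU check rather than
        	// using the hardware instruction unconditionally. What
        	// makeRoundAMD64 builds is roughly (assuming the SSE4.1 rounding
        	// instruction as the fast path):
        	//
        	//	if support_sse41 {        // feature flag set at startup
        	//		result = round(x)     // single rounding instruction
        	//	} else {
        	//		result = <call the pure Go math function>
        	//	}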
   2997 
   2998 	/******** math/bits ********/
   2999 	addF("math/bits", "TrailingZeros64",
   3000 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3001 			return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
   3002 		},
   3003 		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
   3004 	addF("math/bits", "TrailingZeros32",
   3005 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3006 			return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
   3007 		},
   3008 		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
   3009 	addF("math/bits", "TrailingZeros16",
   3010 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3011 			x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
   3012 			c := s.constInt32(types.Types[TUINT32], 1<<16)
   3013 			y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
   3014 			return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
   3015 		},
   3016 		sys.ARM, sys.MIPS)
   3017 	addF("math/bits", "TrailingZeros16",
   3018 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3019 			x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
   3020 			c := s.constInt64(types.Types[TUINT64], 1<<16)
   3021 			y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
   3022 			return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
   3023 		},
   3024 		sys.AMD64, sys.ARM64, sys.S390X)
   3025 	addF("math/bits", "TrailingZeros8",
   3026 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3027 			x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
   3028 			c := s.constInt32(types.Types[TUINT32], 1<<8)
   3029 			y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
   3030 			return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
   3031 		},
   3032 		sys.ARM, sys.MIPS)
   3033 	addF("math/bits", "TrailingZeros8",
   3034 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3035 			x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
   3036 			c := s.constInt64(types.Types[TUINT64], 1<<8)
   3037 			y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
   3038 			return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
   3039 		},
   3040 		sys.AMD64, sys.ARM64, sys.S390X)
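        	// ORing in 1<<16 (for the 16-bit variants) or 1<<8 (for the 8-bit
        	// ones) guarantees the Ctz input is nonzero, so the zero case
        	// falls out naturally: for x == 0, ctz32(0|1<<16) == 16, which is
        	// exactly bits.TrailingZeros16(0).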
   3041 	alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
   3042 	alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
   3043 	// ReverseBytes inlines correctly, no need to intrinsify it.
   3044 	// ReverseBytes16 lowers to a rotate, no need for anything special here.
   3045 	addF("math/bits", "Len64",
   3046 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3047 			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
   3048 		},
   3049 		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
   3050 	addF("math/bits", "Len32",
   3051 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3052 			if s.config.PtrSize == 4 {
   3053 				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
   3054 			}
   3055 			x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
   3056 			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
   3057 		},
   3058 		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
   3059 	addF("math/bits", "Len16",
   3060 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3061 			if s.config.PtrSize == 4 {
   3062 				x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
   3063 				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
   3064 			}
   3065 			x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
   3066 			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
   3067 		},
   3068 		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
   3069 	// Note: disabled on AMD64 because the Go code is faster!
   3070 	addF("math/bits", "Len8",
   3071 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3072 			if s.config.PtrSize == 4 {
   3073 				x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
   3074 				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
   3075 			}
   3076 			x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
   3077 			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
   3078 		},
   3079 		sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
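        	// Zero-extending the narrower Len inputs is safe because widening
        	// never moves the highest set bit, so BitLen64 of the extended
        	// value equals the BitLen of the original: e.g. bits.Len16(x) ==
        	// bits.Len64(uint64(x)) for any uint16 x.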
   3080 
   3081 	addF("math/bits", "Len",
   3082 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3083 			if s.config.PtrSize == 4 {
   3084 				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
   3085 			}
   3086 			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
   3087 		},
   3088 		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
   3089 	// LeadingZeros is handled because it trivially calls Len.
   3090 	addF("math/bits", "Reverse64",
   3091 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3092 			return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
   3093 		},
   3094 		sys.ARM64)
   3095 	addF("math/bits", "Reverse32",
   3096 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3097 			return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
   3098 		},
   3099 		sys.ARM64)
   3100 	addF("math/bits", "Reverse16",
   3101 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3102 			return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0])
   3103 		},
   3104 		sys.ARM64)
   3105 	addF("math/bits", "Reverse8",
   3106 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3107 			return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0])
   3108 		},
   3109 		sys.ARM64)
   3110 	addF("math/bits", "Reverse",
   3111 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3112 			if s.config.PtrSize == 4 {
   3113 				return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
   3114 			}
   3115 			return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
   3116 		},
   3117 		sys.ARM64)
   3118 	makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3119 		return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3120 			aux := syslook("support_popcnt").Sym.Linksym()
   3121 			addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb)
   3122 			v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem())
   3123 			b := s.endBlock()
   3124 			b.Kind = ssa.BlockIf
   3125 			b.SetControl(v)
   3126 			bTrue := s.f.NewBlock(ssa.BlockPlain)
   3127 			bFalse := s.f.NewBlock(ssa.BlockPlain)
   3128 			bEnd := s.f.NewBlock(ssa.BlockPlain)
   3129 			b.AddEdgeTo(bTrue)
   3130 			b.AddEdgeTo(bFalse)
   3131 			b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
   3132 
   3133 			// We have the intrinsic - use it directly.
   3134 			s.startBlock(bTrue)
   3135 			op := op64
   3136 			if s.config.PtrSize == 4 {
   3137 				op = op32
   3138 			}
   3139 			s.vars[n] = s.newValue1(op, types.Types[TINT], args[0])
   3140 			s.endBlock().AddEdgeTo(bEnd)
   3141 
   3142 			// Call the pure Go version.
   3143 			s.startBlock(bFalse)
   3144 			a := s.call(n, callNormal)
   3145 			s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TINT], a, s.mem())
   3146 			s.endBlock().AddEdgeTo(bEnd)
   3147 
   3148 			// Merge results.
   3149 			s.startBlock(bEnd)
   3150 			return s.variable(n, types.Types[TINT])
   3151 		}
   3152 	}
   3153 	addF("math/bits", "OnesCount64",
   3154 		makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
   3155 		sys.AMD64)
   3156 	addF("math/bits", "OnesCount64",
   3157 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3158 			return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0])
   3159 		},
   3160 		sys.PPC64)
   3161 	addF("math/bits", "OnesCount32",
   3162 		makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
   3163 		sys.AMD64)
   3164 	addF("math/bits", "OnesCount32",
   3165 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3166 			return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0])
   3167 		},
   3168 		sys.PPC64)
   3169 	addF("math/bits", "OnesCount16",
   3170 		makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
   3171 		sys.AMD64)
   3172 	// Note: no OnesCount8, the Go implementation is faster - just a table load.
   3173 	addF("math/bits", "OnesCount",
   3174 		makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
   3175 		sys.AMD64)
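        	// The generic OnesCount takes a uint, so the AMD64 builder is
        	// handed both widths and makeOnesCountAMD64 selects PopCount64 or
        	// PopCount32 via s.config.PtrSize.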
   3176 
   3177 	/******** sync/atomic ********/
   3178 
   3179 	// Note: these are disabled by flag_race in findIntrinsic below.
   3180 	alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
   3181 	alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
   3182 	alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
   3183 	alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
   3184 	alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
   3185 	alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
   3186 	alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
   3187 
   3188 	alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
   3189 	alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
   3190 	// Note: not StorePointer, that needs a write barrier.  Same below for {CompareAnd}Swap.
   3191 	alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
   3192 	alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
   3193 	alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
   3194 	alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
   3195 
   3196 	alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
   3197 	alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
   3198 	alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
   3199 	alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
   3200 	alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
   3201 	alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
   3202 
   3203 	alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
   3204 	alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
   3205 	alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
   3206 	alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
   3207 	alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
   3208 	alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
   3209 
   3210 	alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
   3211 	alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
   3212 	alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
   3213 	alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
   3214 	alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
   3215 	alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
   3216 
   3217 	/******** math/big ********/
   3218 	add("math/big", "mulWW",
   3219 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3220 			return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
   3221 		},
   3222 		sys.ArchAMD64)
   3223 	add("math/big", "divWW",
   3224 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
   3225 			return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
   3226 		},
   3227 		sys.ArchAMD64)
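        	// Both tuple results mirror the math/big assembly contracts:
        	// mulWW returns the 128-bit product as a (hi, lo) word pair, and
        	// divWW divides a two-word numerator by a one-word divisor,
        	// yielding (quotient, remainder).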
   3228 }
   3229 
   3230 // findIntrinsic returns a function which builds the SSA equivalent of the
   3231 // function identified by the symbol sym. If sym does not identify an intrinsic, findIntrinsic returns nil.
   3232 func findIntrinsic(sym *types.Sym) intrinsicBuilder {
   3233 	if ssa.IntrinsicsDisable {
   3234 		return nil
   3235 	}
   3236 	if sym == nil || sym.Pkg == nil {
   3237 		return nil
   3238 	}
   3239 	pkg := sym.Pkg.Path
   3240 	if sym.Pkg == localpkg {
   3241 		pkg = myimportpath
   3242 	}
   3243 	if flag_race && pkg == "sync/atomic" {
   3244 		// The race detector needs to be able to intercept these calls.
   3245 		// We can't intrinsify them.
   3246 		return nil
   3247 	}
   3248 	// Skip intrinsifying math functions (which may contain hard-float
   3249 	// instructions) when compiling in soft-float mode.
   3250 	if thearch.SoftFloat && pkg == "math" {
   3251 		return nil
   3252 	}
   3253 
   3254 	fn := sym.Name
   3255 	return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
   3256 }
   3257 
   3258 func isIntrinsicCall(n *Node) bool {
   3259 	if n == nil || n.Left == nil {
   3260 		return false
   3261 	}
   3262 	return findIntrinsic(n.Left.Sym) != nil
   3263 }
   3264 
   3265 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
   3266 func (s *state) intrinsicCall(n *Node) *ssa.Value {
   3267 	v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
   3268 	if ssa.IntrinsicsDebug > 0 {
   3269 		x := v
   3270 		if x == nil {
   3271 			x = s.mem()
   3272 		}
   3273 		if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
   3274 			x = x.Args[0]
   3275 		}
   3276 		Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
   3277 	}
   3278 	return v
   3279 }
   3280 
   3281 type callArg struct {
   3282 	offset int64
   3283 	v      *ssa.Value
   3284 }
   3285 type byOffset []callArg
   3286 
   3287 func (x byOffset) Len() int      { return len(x) }
   3288 func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
   3289 func (x byOffset) Less(i, j int) bool {
   3290 	return x[i].offset < x[j].offset
   3291 }
   3292 
   3293 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
   3294 func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
   3295 	// This code is complicated because of how walk transforms calls. For a call node,
   3296 	// each entry in n.List is either an assignment to OINDREGSP which actually
   3297 	// stores an arg, or an assignment to a temporary which computes an arg
   3298 	// which is later assigned.
   3299 	// The args can also be out of order.
   3300 	// TODO: when walk goes away someday, this code can go away also.
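        	// For example, for a two-argument call, walk might produce
        	// (offsets illustrative only):
        	//
        	//	autotmp = g()       // OAS to an ONAME temporary
        	//	(SP+0)  = autotmp   // OAS to OINDREGSP, storing arg 0
        	//	(SP+8)  = 1         // OAS to OINDREGSP, storing arg 1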
   3301 	var args []callArg
   3302 	temps := map[*Node]*ssa.Value{}
   3303 	for _, a := range n.List.Slice() {
   3304 		if a.Op != OAS {
   3305 			s.Fatalf("non-assignment as a function argument %v", a.Op)
   3306 		}
   3307 		l, r := a.Left, a.Right
   3308 		switch l.Op {
   3309 		case ONAME:
   3310 			// Evaluate and store to "temporary".
   3311 			// Walk ensures these temporaries are dead outside of n.
   3312 			temps[l] = s.expr(r)
   3313 		case OINDREGSP:
   3314 			// Store a value to an argument slot.
   3315 			var v *ssa.Value
   3316 			if x, ok := temps[r]; ok {
   3317 				// This is a previously computed temporary.
   3318 				v = x
   3319 			} else {
   3320 				// This is an explicit value; evaluate it.
   3321 				v = s.expr(r)
   3322 			}
   3323 			args = append(args, callArg{l.Xoffset, v})
   3324 		default:
   3325 			s.Fatalf("function argument assignment target not allowed: %v", l.Op)
   3326 		}
   3327 	}
   3328 	sort.Sort(byOffset(args))
   3329 	res := make([]*ssa.Value, len(args))
   3330 	for i, a := range args {
   3331 		res[i] = a.v
   3332 	}
   3333 	return res
   3334 }
   3335 
   3336 // call generates code for the function call n using call kind k.
   3337 // It returns the address of the return value, or nil if there is none.
   3338 func (s *state) call(n *Node, k callKind) *ssa.Value {
   3339 	var sym *types.Sym     // target symbol (if static)
   3340 	var closure *ssa.Value // ptr to closure to run (if dynamic)
   3341 	var codeptr *ssa.Value // ptr to target code (if dynamic)
   3342 	var rcvr *ssa.Value    // receiver to set
   3343 	fn := n.Left
   3344 	switch n.Op {
   3345 	case OCALLFUNC:
   3346 		if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
   3347 			sym = fn.Sym
   3348 			break
   3349 		}
   3350 		closure = s.expr(fn)
   3351 	case OCALLMETH:
   3352 		if fn.Op != ODOTMETH {
   3353 			Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
   3354 		}
   3355 		if k == callNormal {
   3356 			sym = fn.Sym
   3357 			break
   3358 		}
   3359 		// Make a name n2 for the function.
   3360 		// fn.Sym might be sync.(*Mutex).Unlock.
   3361 		// Make a PFUNC node out of that, then evaluate it.
   3362 		// We get back an SSA value representing &sync.(*Mutex).Unlockf.
   3363 		// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
   3364 		n2 := newnamel(fn.Pos, fn.Sym)
   3365 		n2.Name.Curfn = s.curfn
   3366 		n2.SetClass(PFUNC)
   3367 		n2.Pos = fn.Pos
   3368 		n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
   3369 		closure = s.expr(n2)
   3370 		// Note: receiver is already assigned in n.List, so we don't
   3371 		// want to set it here.
   3372 	case OCALLINTER:
   3373 		if fn.Op != ODOTINTER {
   3374 			Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
   3375 		}
   3376 		i := s.expr(fn.Left)
   3377 		itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
   3378 		s.nilCheck(itab)
   3379 		itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
   3380 		itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
   3381 		if k == callNormal {
   3382 			codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem())
   3383 		} else {
   3384 			closure = itab
   3385 		}
   3386 		rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i)
   3387 	}
   3388 	dowidth(fn.Type)
   3389 	stksize := fn.Type.ArgWidth() // includes receiver
   3390 
   3391 	// Run all argument assignments. The arg slots have already
   3392 	// been offset by the appropriate amount (+2*widthptr for go/defer,
   3393 	// +widthptr for interface calls).
   3394 	// For OCALLMETH, the receiver is set in these statements.
   3395 	s.stmtList(n.List)
   3396 
   3397 	// Set receiver (for interface calls)
   3398 	if rcvr != nil {
   3399 		argStart := Ctxt.FixedFrameSize()
   3400 		if k != callNormal {
   3401 			argStart += int64(2 * Widthptr)
   3402 		}
   3403 		addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
   3404 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem())
   3405 	}
   3406 
   3407 	// Defer/go args
   3408 	if k != callNormal {
   3409 		// Write argsize and closure (args to Newproc/Deferproc).
   3410 		argStart := Ctxt.FixedFrameSize()
   3411 		argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
   3412 		addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
   3413 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINT32], addr, argsize, s.mem())
   3414 		addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
   3415 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem())
   3416 		stksize += 2 * int64(Widthptr)
   3417 	}
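        	// The resulting layout for go/defer, with argStart =
        	// Ctxt.FixedFrameSize() (a sketch):
        	//
        	//	argStart+0:          argsize (uint32)
        	//	argStart+Widthptr:   closure
        	//	argStart+2*Widthptr: the call's own args, pre-offset by walk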
   3418 
   3419 	// call target
   3420 	var call *ssa.Value
   3421 	switch {
   3422 	case k == callDefer:
   3423 		call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Deferproc, s.mem())
   3424 	case k == callGo:
   3425 		call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Newproc, s.mem())
   3426 	case closure != nil:
   3427 		codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], closure, s.mem())
   3428 		call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem())
   3429 	case codeptr != nil:
   3430 		call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem())
   3431 	case sym != nil:
   3432 		call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem())
   3433 	default:
   3434 		Fatalf("bad call type %v %v", n.Op, n)
   3435 	}
   3436 	call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
   3437 	s.vars[&memVar] = call
   3438 
   3439 	// Finish block for defers
   3440 	if k == callDefer {
   3441 		b := s.endBlock()
   3442 		b.Kind = ssa.BlockDefer
   3443 		b.SetControl(call)
   3444 		bNext := s.f.NewBlock(ssa.BlockPlain)
   3445 		b.AddEdgeTo(bNext)
   3446 		// Add recover edge to exit code.
   3447 		r := s.f.NewBlock(ssa.BlockPlain)
   3448 		s.startBlock(r)
   3449 		s.exit()
   3450 		b.AddEdgeTo(r)
   3451 		b.Likely = ssa.BranchLikely
   3452 		s.startBlock(bNext)
   3453 	}
   3454 
   3455 	res := n.Left.Type.Results()
   3456 	if res.NumFields() == 0 || k != callNormal {
   3457 		// call has no return value. Continue with the next statement.
   3458 		return nil
   3459 	}
   3460 	fp := res.Field(0)
   3461 	return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())
   3462 }
   3463 
   3464 // etypesign returns the signed-ness of e, for integer/pointer etypes.
   3465 // etypesign returns the signedness of e, for integer/pointer etypes.
   3466 func etypesign(e types.EType) int8 {
   3467 	switch e {
   3468 	case TINT8, TINT16, TINT32, TINT64, TINT:
   3469 		return -1
   3470 	case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
   3471 		return +1
   3472 	}
   3473 	return 0
   3474 }
   3475 
   3476 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
   3477 // The value that the returned Value represents is guaranteed to be non-nil.
   3478 // If bounded is true then this address does not require a nil check for its operand
   3479 // even if that would otherwise be implied.
   3480 func (s *state) addr(n *Node, bounded bool) *ssa.Value {
   3481 	t := types.NewPtr(n.Type)
   3482 	switch n.Op {
   3483 	case ONAME:
   3484 		switch n.Class() {
   3485 		case PEXTERN:
   3486 			// global variable
   3487 			v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb)
   3488 			// TODO: Make OpAddr use AuxInt as well as Aux.
   3489 			if n.Xoffset != 0 {
   3490 				v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
   3491 			}
   3492 			return v
   3493 		case PPARAM:
   3494 			// parameter slot
   3495 			v := s.decladdrs[n]
   3496 			if v != nil {
   3497 				return v
   3498 			}
   3499 			if n == nodfp {
   3500 				// Special arg that points to the frame pointer (used by ORECOVER).
   3501 				return s.entryNewValue1A(ssa.OpAddr, t, n, s.sp)
   3502 			}
   3503 			s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
   3504 			return nil
   3505 		case PAUTO:
   3506 			return s.newValue1A(ssa.OpAddr, t, n, s.sp)
   3507 		case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
   3508 			// ensure that we reuse symbols for out parameters so
   3509 			// that cse works on their addresses
   3510 			return s.newValue1A(ssa.OpAddr, t, n, s.sp)
   3511 		default:
   3512 			s.Fatalf("variable address class %v not implemented", n.Class())
   3513 			return nil
   3514 		}
   3515 	case OINDREGSP:
   3516 		// indirect off REGSP
   3517 		// used for storing/loading arguments/returns to/from callees
   3518 		return s.constOffPtrSP(t, n.Xoffset)
   3519 	case OINDEX:
   3520 		if n.Left.Type.IsSlice() {
   3521 			a := s.expr(n.Left)
   3522 			i := s.expr(n.Right)
   3523 			i = s.extendIndex(i, panicindex)
   3524 			len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
   3525 			if !n.Bounded() {
   3526 				s.boundsCheck(i, len)
   3527 			}
   3528 			p := s.newValue1(ssa.OpSlicePtr, t, a)
   3529 			return s.newValue2(ssa.OpPtrIndex, t, p, i)
   3530 		} else { // array
   3531 			a := s.addr(n.Left, bounded)
   3532 			i := s.expr(n.Right)
   3533 			i = s.extendIndex(i, panicindex)
   3534 			len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
   3535 			if !n.Bounded() {
   3536 				s.boundsCheck(i, len)
   3537 			}
   3538 			return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
   3539 		}
   3540 	case OIND:
   3541 		return s.exprPtr(n.Left, bounded, n.Pos)
   3542 	case ODOT:
   3543 		p := s.addr(n.Left, bounded)
   3544 		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
   3545 	case ODOTPTR:
   3546 		p := s.exprPtr(n.Left, bounded, n.Pos)
   3547 		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
   3548 	case OCLOSUREVAR:
   3549 		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
   3550 			s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
   3551 	case OCONVNOP:
   3552 		addr := s.addr(n.Left, bounded)
   3553 		return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
   3554 	case OCALLFUNC, OCALLINTER, OCALLMETH:
   3555 		return s.call(n, callNormal)
   3556 	case ODOTTYPE:
   3557 		v, _ := s.dottype(n, false)
   3558 		if v.Op != ssa.OpLoad {
   3559 			s.Fatalf("dottype of non-load")
   3560 		}
   3561 		if v.Args[1] != s.mem() {
   3562 			s.Fatalf("memory no longer live from dottype load")
   3563 		}
   3564 		return v.Args[0]
   3565 	default:
   3566 		s.Fatalf("unhandled addr %v", n.Op)
   3567 		return nil
   3568 	}
   3569 }
   3570 
   3571 // canSSA reports whether n is SSA-able.
   3572 // n must be an ONAME (or an ODOT sequence with an ONAME base).
   3573 func (s *state) canSSA(n *Node) bool {
   3574 	if Debug['N'] != 0 {
   3575 		return false
   3576 	}
   3577 	for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
   3578 		n = n.Left
   3579 	}
   3580 	if n.Op != ONAME {
   3581 		return false
   3582 	}
   3583 	if n.Addrtaken() {
   3584 		return false
   3585 	}
   3586 	if n.isParamHeapCopy() {
   3587 		return false
   3588 	}
   3589 	if n.Class() == PAUTOHEAP {
   3590 		Fatalf("canSSA of PAUTOHEAP %v", n)
   3591 	}
   3592 	switch n.Class() {
   3593 	case PEXTERN:
   3594 		return false
   3595 	case PPARAMOUT:
   3596 		if s.hasdefer {
   3597 			// TODO: handle this case? Named return values must be
   3598 			// in memory so that the deferred function can see them.
   3599 			// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
   3600 			// Or maybe not, see issue 18860.  Even unnamed return values
   3601 			// must be written back so if a defer recovers, the caller can see them.
   3602 			return false
   3603 		}
   3604 		if s.cgoUnsafeArgs {
   3605 			// Cgo effectively takes the address of all result args,
   3606 			// but the compiler can't see that.
   3607 			return false
   3608 		}
   3609 	}
   3610 	if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
   3611 		// wrappers generated by genwrapper need to update
   3612 		// the .this pointer in place.
   3613 		// TODO: treat as a PPARAMOUT?
   3614 		return false
   3615 	}
   3616 	return canSSAType(n.Type)
   3617 	// TODO: try to make more variables SSAable?
   3618 }
   3619 
   3620 // canSSAType reports whether variables of type t are SSA-able.
   3621 func canSSAType(t *types.Type) bool {
   3622 	dowidth(t)
   3623 	if t.Width > int64(4*Widthptr) {
   3624 		// 4*Widthptr is an arbitrary constant. We want it
   3625 		// to be at least 3*Widthptr so slices can be registerized.
   3626 		// Too big and we'll introduce too much register pressure.
   3627 		return false
   3628 	}
   3629 	switch t.Etype {
   3630 	case TARRAY:
   3631 		// We can't do larger arrays because dynamic indexing is
   3632 		// not supported on SSA variables.
   3633 		// TODO: allow if all indexes are constant.
   3634 		if t.NumElem() <= 1 {
   3635 			return canSSAType(t.Elem())
   3636 		}
   3637 		return false
   3638 	case TSTRUCT:
   3639 		if t.NumFields() > ssa.MaxStruct {
   3640 			return false
   3641 		}
   3642 		for _, t1 := range t.Fields().Slice() {
   3643 			if !canSSAType(t1.Type) {
   3644 				return false
   3645 			}
   3646 		}
   3647 		return true
   3648 	default:
   3649 		return true
   3650 	}
   3651 }
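        // For example, on a 64-bit target a string (two words) or a slice
        // (three words) is SSA-able, while a [4]int array is not: it has more
        // than one element, and dynamic indexing into SSA variables is not
        // supported.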
   3652 
   3653 // exprPtr evaluates n to a pointer and nil-checks it.
   3654 func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
   3655 	p := s.expr(n)
   3656 	if bounded || n.NonNil() {
   3657 		if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
   3658 			s.f.Warnl(lineno, "removed nil check")
   3659 		}
   3660 		return p
   3661 	}
   3662 	s.nilCheck(p)
   3663 	return p
   3664 }
   3665 
   3666 // nilCheck generates nil pointer checking code.
   3667 // Used only for automatically inserted nil checks,
   3668 // not for user code like 'x != nil'.
   3669 func (s *state) nilCheck(ptr *ssa.Value) {
   3670 	if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
   3671 		return
   3672 	}
   3673 	s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
   3674 }
   3675 
   3676 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
   3677 // Starts a new block on return.
   3678 // idx is already converted to full int width.
   3679 func (s *state) boundsCheck(idx, len *ssa.Value) {
   3680 	if Debug['B'] != 0 {
   3681 		return
   3682 	}
   3683 
   3684 	// bounds check
   3685 	cmp := s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
   3686 	s.check(cmp, panicindex)
   3687 }
   3688 
   3689 // sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
   3690 // Starts a new block on return.
   3691 // idx and len are already converted to full int width.
   3692 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
   3693 	if Debug['B'] != 0 {
   3694 		return
   3695 	}
   3696 
   3697 	// bounds check
   3698 	cmp := s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
   3699 	s.check(cmp, panicslice)
   3700 }
   3701 
   3702 // If cmp (a bool) is false, panic using the given function.
   3703 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
   3704 	b := s.endBlock()
   3705 	b.Kind = ssa.BlockIf
   3706 	b.SetControl(cmp)
   3707 	b.Likely = ssa.BranchLikely
   3708 	bNext := s.f.NewBlock(ssa.BlockPlain)
   3709 	line := s.peekPos()
   3710 	pos := Ctxt.PosTable.Pos(line)
   3711 	fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
   3712 	bPanic := s.panics[fl]
   3713 	if bPanic == nil {
   3714 		bPanic = s.f.NewBlock(ssa.BlockPlain)
   3715 		s.panics[fl] = bPanic
   3716 		s.startBlock(bPanic)
   3717 		// The panic call takes/returns memory to ensure that the right
   3718 		// memory state is observed if the panic happens.
   3719 		s.rtcall(fn, false, nil)
   3720 	}
   3721 	b.AddEdgeTo(bNext)
   3722 	b.AddEdgeTo(bPanic)
   3723 	s.startBlock(bNext)
   3724 }
   3725 
   3726 func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
   3727 	needcheck := true
   3728 	switch b.Op {
   3729 	case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
   3730 		if b.AuxInt != 0 {
   3731 			needcheck = false
   3732 		}
   3733 	}
   3734 	if needcheck {
   3735 		// do a size-appropriate check for zero
   3736 		cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
   3737 		s.check(cmp, panicdivide)
   3738 	}
   3739 	return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
   3740 }
   3741 
   3742 // rtcall issues a call to the given runtime function fn with the listed args.
   3743 // Returns a slice of results of the given result types.
   3744 // The call is added to the end of the current block.
   3745 // If returns is false, the block is marked as an exit block.
   3746 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
   3747 	// Write args to the stack
   3748 	off := Ctxt.FixedFrameSize()
   3749 	for _, arg := range args {
   3750 		t := arg.Type
   3751 		off = Rnd(off, t.Alignment())
   3752 		ptr := s.constOffPtrSP(t.PtrTo(), off)
   3753 		size := t.Size()
   3754 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, ptr, arg, s.mem())
   3755 		off += size
   3756 	}
   3757 	off = Rnd(off, int64(Widthreg))
   3758 
   3759 	// Issue call
   3760 	call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem())
   3761 	s.vars[&memVar] = call
   3762 
   3763 	if !returns {
   3764 		// Finish block
   3765 		b := s.endBlock()
   3766 		b.Kind = ssa.BlockExit
   3767 		b.SetControl(call)
   3768 		call.AuxInt = off - Ctxt.FixedFrameSize()
   3769 		if len(results) > 0 {
   3770 			Fatalf("panic call can't have results")
   3771 		}
   3772 		return nil
   3773 	}
   3774 
   3775 	// Load results
   3776 	res := make([]*ssa.Value, len(results))
   3777 	for i, t := range results {
   3778 		off = Rnd(off, t.Alignment())
   3779 		ptr := s.constOffPtrSP(types.NewPtr(t), off)
   3780 		res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
   3781 		off += t.Size()
   3782 	}
   3783 	off = Rnd(off, int64(Widthptr))
   3784 
   3785 	// Remember how much callee stack space we needed.
   3786 	call.AuxInt = off
   3787 
   3788 	return res
   3789 }
   3790 
   3791 // do *left = right for type t.
   3792 func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask) {
   3793 	if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) {
   3794 		// Known to not have write barrier. Store the whole type.
   3795 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
   3796 		return
   3797 	}
   3798 
   3799 	// store scalar fields first, so write barrier stores for
   3800 	// pointer fields can be grouped together, and scalar values
   3801 	// don't need to be live across the write barrier call.
   3802 	// TODO: if the writebarrier pass knows how to reorder stores,
   3803 	// we can do a single store here as long as skip==0.
   3804 	s.storeTypeScalars(t, left, right, skip)
   3805 	if skip&skipPtr == 0 && types.Haspointers(t) {
   3806 		s.storeTypePtrs(t, left, right)
   3807 	}
   3808 }
   3809 
   3810 // do *left = right for all scalar (non-pointer) parts of t.
   3811 func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
   3812 	switch {
   3813 	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
   3814 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
   3815 	case t.IsPtrShaped():
   3816 		// no scalar fields.
   3817 	case t.IsString():
   3818 		if skip&skipLen != 0 {
   3819 			return
   3820 		}
   3821 		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
   3822 		lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
   3823 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
   3824 	case t.IsSlice():
   3825 		if skip&skipLen == 0 {
   3826 			len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
   3827 			lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
   3828 			s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
   3829 		}
   3830 		if skip&skipCap == 0 {
   3831 			cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
   3832 			capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
   3833 			s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capAddr, cap, s.mem())
   3834 		}
   3835 	case t.IsInterface():
   3836 		// itab field doesn't need a write barrier (even though it is a pointer).
   3837 		itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
   3838 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], left, itab, s.mem())
   3839 	case t.IsStruct():
   3840 		n := t.NumFields()
   3841 		for i := 0; i < n; i++ {
   3842 			ft := t.FieldType(i)
   3843 			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
   3844 			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
   3845 			s.storeTypeScalars(ft, addr, val, 0)
   3846 		}
   3847 	case t.IsArray() && t.NumElem() == 0:
   3848 		// nothing
   3849 	case t.IsArray() && t.NumElem() == 1:
   3850 		s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
   3851 	default:
   3852 		s.Fatalf("bad write barrier type %v", t)
   3853 	}
   3854 }
   3855 
   3856 // do *left = right for all pointer parts of t.
   3857 func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
   3858 	switch {
   3859 	case t.IsPtrShaped():
   3860 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
   3861 	case t.IsString():
   3862 		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
   3863 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
   3864 	case t.IsSlice():
   3865 		elType := types.NewPtr(t.Elem())
   3866 		ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
   3867 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, elType, left, ptr, s.mem())
   3868 	case t.IsInterface():
   3869 		// itab field is treated as a scalar.
   3870 		idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
   3871 		idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
   3872 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem())
   3873 	case t.IsStruct():
   3874 		n := t.NumFields()
   3875 		for i := 0; i < n; i++ {
   3876 			ft := t.FieldType(i)
   3877 			if !types.Haspointers(ft) {
   3878 				continue
   3879 			}
   3880 			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
   3881 			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
   3882 			s.storeTypePtrs(ft, addr, val)
   3883 		}
   3884 	case t.IsArray() && t.NumElem() == 0:
   3885 		// nothing
   3886 	case t.IsArray() && t.NumElem() == 1:
   3887 		s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
   3888 	default:
   3889 		s.Fatalf("bad write barrier type %v", t)
   3890 	}
   3891 }
   3892 
   3893 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of the result.
   3894 // i,j,k may be nil, in which case they are set to their default value.
   3895 // t is a slice, ptr to array, or string type.
   3896 func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
   3897 	var elemtype *types.Type
   3898 	var ptrtype *types.Type
   3899 	var ptr *ssa.Value
   3900 	var len *ssa.Value
   3901 	var cap *ssa.Value
   3902 	zero := s.constInt(types.Types[TINT], 0)
   3903 	switch {
   3904 	case t.IsSlice():
   3905 		elemtype = t.Elem()
   3906 		ptrtype = types.NewPtr(elemtype)
   3907 		ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
   3908 		len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
   3909 		cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
   3910 	case t.IsString():
   3911 		elemtype = types.Types[TUINT8]
   3912 		ptrtype = types.NewPtr(elemtype)
   3913 		ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
   3914 		len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
   3915 		cap = len
   3916 	case t.IsPtr():
   3917 		if !t.Elem().IsArray() {
   3918 			s.Fatalf("bad ptr to array in slice %v\n", t)
   3919 		}
   3920 		elemtype = t.Elem().Elem()
   3921 		ptrtype = types.NewPtr(elemtype)
   3922 		s.nilCheck(v)
   3923 		ptr = v
   3924 		len = s.constInt(types.Types[TINT], t.Elem().NumElem())
   3925 		cap = len
   3926 	default:
   3927 		s.Fatalf("bad type in slice %v\n", t)
   3928 	}
   3929 
   3930 	// Set default values
   3931 	if i == nil {
   3932 		i = zero
   3933 	}
   3934 	if j == nil {
   3935 		j = len
   3936 	}
   3937 	if k == nil {
   3938 		k = cap
   3939 	}
   3940 
   3941 	// Panic if slice indices are not in bounds.
   3942 	s.sliceBoundsCheck(i, j)
   3943 	if j != k {
   3944 		s.sliceBoundsCheck(j, k)
   3945 	}
   3946 	if k != cap {
   3947 		s.sliceBoundsCheck(k, cap)
   3948 	}
   3949 
   3950 	// Generate the following code assuming that indexes are in bounds.
   3951 	// The masking is to make sure that we don't generate a slice
   3952 	// that points to the next object in memory.
   3953 	// rlen = j - i
   3954 	// rcap = k - i
   3955 	// delta = i * elemsize
   3956 	// rptr = p + delta&mask(rcap)
   3957 	// result = (SliceMake rptr rlen rcap)
   3958 	// where mask(x) is 0 if x==0 and -1 if x>0.
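        	// For example, slicing at i == cap(v) gives rcap == 0; the mask
        	// then zeroes delta, so rptr stays at the base pointer rather
        	// than pointing one past the underlying array (which could keep
        	// an adjacent object alive).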
   3959 	subOp := s.ssaOp(OSUB, types.Types[TINT])
   3960 	mulOp := s.ssaOp(OMUL, types.Types[TINT])
   3961 	andOp := s.ssaOp(OAND, types.Types[TINT])
   3962 	rlen := s.newValue2(subOp, types.Types[TINT], j, i)
   3963 	var rcap *ssa.Value
   3964 	switch {
   3965 	case t.IsString():
   3966 		// Capacity of the result is unimportant. However, we use
   3967 		// rcap to test if we've generated a zero-length slice.
   3968 		// Use length of strings for that.
   3969 		rcap = rlen
   3970 	case j == k:
   3971 		rcap = rlen
   3972 	default:
   3973 		rcap = s.newValue2(subOp, types.Types[TINT], k, i)
   3974 	}
   3975 
   3976 	var rptr *ssa.Value
   3977 	if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
   3978 		// No pointer arithmetic necessary.
   3979 		rptr = ptr
   3980 	} else {
   3981 		// delta = # of bytes to offset pointer by.
   3982 		delta := s.newValue2(mulOp, types.Types[TINT], i, s.constInt(types.Types[TINT], elemtype.Width))
   3983 		// If we're slicing to the point where the capacity is zero,
   3984 		// zero out the delta.
   3985 		mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
   3986 		delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
   3987 		// Compute rptr = ptr + delta
   3988 		rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta)
   3989 	}
   3990 
   3991 	return rptr, rlen, rcap
   3992 }
   3993 
   3994 type u642fcvtTab struct {
   3995 	geq, cvt2F, and, rsh, or, add ssa.Op
   3996 	one                           func(*state, *types.Type, int64) *ssa.Value
   3997 }
   3998 
   3999 var u64_f64 = u642fcvtTab{
   4000 	geq:   ssa.OpGeq64,
   4001 	cvt2F: ssa.OpCvt64to64F,
   4002 	and:   ssa.OpAnd64,
   4003 	rsh:   ssa.OpRsh64Ux64,
   4004 	or:    ssa.OpOr64,
   4005 	add:   ssa.OpAdd64F,
   4006 	one:   (*state).constInt64,
   4007 }
   4008 
   4009 var u64_f32 = u642fcvtTab{
   4010 	geq:   ssa.OpGeq64,
   4011 	cvt2F: ssa.OpCvt64to32F,
   4012 	and:   ssa.OpAnd64,
   4013 	rsh:   ssa.OpRsh64Ux64,
   4014 	or:    ssa.OpOr64,
   4015 	add:   ssa.OpAdd32F,
   4016 	one:   (*state).constInt64,
   4017 }
   4018 
   4019 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
   4020 	return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
   4021 }
   4022 
   4023 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
   4024 	return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
   4025 }
   4026 
   4027 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
   4028 	// if x >= 0 {
   4029 	//    result = (floatY) x
   4030 	// } else {
   4031 	// 	  y = uintX(x) & 1
   4032 	// 	  z = uintX(x) >> 1
   4034 	// 	  z = z | y
   4035 	// 	  result = floatY(z)
   4036 	// 	  result = result + result
   4037 	// }
   4038 	//
   4039 	// Code borrowed from old code generator.
   4040 	// What's going on: a large 64-bit "unsigned" value looks like a
   4041 	// negative number to the hardware's integer-to-float
   4042 	// conversion. However, because the float mantissa holds fewer
   4043 	// than 63 bits, we don't need the LSB, so instead we do an
   4044 	// unsigned right shift (divide by two), convert, and
   4045 	// double. However, before we do that, we need to be
   4046 	// sure that we do not lose a "1" if that made the
   4047 	// difference in the resulting rounding. Therefore, we
   4048 	// preserve it, and OR (not ADD) it back in. The case
   4049 	// that matters is when the eleven discarded bits are
   4050 	// equal to 10000000001; that rounds up, and the 1 cannot
   4051 	// be lost else it would round down if the LSB of the
   4052 	// candidate mantissa is 0.
   4053 	cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft))
   4054 	b := s.endBlock()
   4055 	b.Kind = ssa.BlockIf
   4056 	b.SetControl(cmp)
   4057 	b.Likely = ssa.BranchLikely
   4058 
   4059 	bThen := s.f.NewBlock(ssa.BlockPlain)
   4060 	bElse := s.f.NewBlock(ssa.BlockPlain)
   4061 	bAfter := s.f.NewBlock(ssa.BlockPlain)
   4062 
   4063 	b.AddEdgeTo(bThen)
   4064 	s.startBlock(bThen)
   4065 	a0 := s.newValue1(cvttab.cvt2F, tt, x)
   4066 	s.vars[n] = a0
   4067 	s.endBlock()
   4068 	bThen.AddEdgeTo(bAfter)
   4069 
   4070 	b.AddEdgeTo(bElse)
   4071 	s.startBlock(bElse)
   4072 	one := cvttab.one(s, ft, 1)
   4073 	y := s.newValue2(cvttab.and, ft, x, one)
   4074 	z := s.newValue2(cvttab.rsh, ft, x, one)
   4075 	z = s.newValue2(cvttab.or, ft, z, y)
   4076 	a := s.newValue1(cvttab.cvt2F, tt, z)
   4077 	a1 := s.newValue2(cvttab.add, tt, a, a)
   4078 	s.vars[n] = a1
   4079 	s.endBlock()
   4080 	bElse.AddEdgeTo(bAfter)
   4081 
   4082 	s.startBlock(bAfter)
   4083 	return s.variable(n, n.Type)
   4084 }
   4085 
   4086 type u322fcvtTab struct {
   4087 	cvtI2F, cvtF2F ssa.Op
   4088 }
   4089 
   4090 var u32_f64 = u322fcvtTab{
   4091 	cvtI2F: ssa.OpCvt32to64F,
   4092 	cvtF2F: ssa.OpCopy,
   4093 }
   4094 
   4095 var u32_f32 = u322fcvtTab{
   4096 	cvtI2F: ssa.OpCvt32to32F,
   4097 	cvtF2F: ssa.OpCvt64Fto32F,
   4098 }
   4099 
   4100 func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
   4101 	return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
   4102 }
   4103 
   4104 func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
   4105 	return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
   4106 }
   4107 
   4108 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
   4109 	// if x >= 0 {
   4110 	// 	result = floatY(x)
   4111 	// } else {
   4112 	// 	result = floatY(float64(x) + (1<<32))
   4113 	// }
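        	// For example, x = 0xFFFFFFFF reads as -1 through the signed
        	// conversion, and float64(-1) + 1<<32 = 4294967295, the desired
        	// result.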
   4114 	cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft))
   4115 	b := s.endBlock()
   4116 	b.Kind = ssa.BlockIf
   4117 	b.SetControl(cmp)
   4118 	b.Likely = ssa.BranchLikely
   4119 
   4120 	bThen := s.f.NewBlock(ssa.BlockPlain)
   4121 	bElse := s.f.NewBlock(ssa.BlockPlain)
   4122 	bAfter := s.f.NewBlock(ssa.BlockPlain)
   4123 
   4124 	b.AddEdgeTo(bThen)
   4125 	s.startBlock(bThen)
   4126 	a0 := s.newValue1(cvttab.cvtI2F, tt, x)
   4127 	s.vars[n] = a0
   4128 	s.endBlock()
   4129 	bThen.AddEdgeTo(bAfter)
   4130 
   4131 	b.AddEdgeTo(bElse)
   4132 	s.startBlock(bElse)
   4133 	a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
   4134 	twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
   4135 	a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
   4136 	a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
   4137 
   4138 	s.vars[n] = a3
   4139 	s.endBlock()
   4140 	bElse.AddEdgeTo(bAfter)
   4141 
   4142 	s.startBlock(bAfter)
   4143 	return s.variable(n, n.Type)
   4144 }
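// An editorial sketch of the uint32-to-float64 case in plain Go (function name
// invented): float64 has more than 32 mantissa bits, so a "negative-looking"
// uint32 converts exactly via the signed path plus a 1<<32 correction:
//
//	func u32ToF64(x uint32) float64 {
//		if int32(x) >= 0 {
//			return float64(int32(x))
//		}
//		return float64(int32(x)) + (1 << 32)
//	}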
   4145 
   4146 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
   4147 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
   4148 	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
   4149 		s.Fatalf("node must be a map or a channel")
   4150 	}
   4151 	// if n == nil {
   4152 	//   return 0
   4153 	// } else {
   4154 	//   // len
   4155 	//   return *((*int)n)
   4156 	//   // cap
   4157 	//   return *(((*int)n)+1)
   4158 	// }
   4159 	lenType := n.Type
   4160 	nilValue := s.constNil(types.Types[TUINTPTR])
   4161 	cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
   4162 	b := s.endBlock()
   4163 	b.Kind = ssa.BlockIf
   4164 	b.SetControl(cmp)
   4165 	b.Likely = ssa.BranchUnlikely
   4166 
   4167 	bThen := s.f.NewBlock(ssa.BlockPlain)
   4168 	bElse := s.f.NewBlock(ssa.BlockPlain)
   4169 	bAfter := s.f.NewBlock(ssa.BlockPlain)
   4170 
   4171 	// length/capacity of a nil map/chan is zero
   4172 	b.AddEdgeTo(bThen)
   4173 	s.startBlock(bThen)
   4174 	s.vars[n] = s.zeroVal(lenType)
   4175 	s.endBlock()
   4176 	bThen.AddEdgeTo(bAfter)
   4177 
   4178 	b.AddEdgeTo(bElse)
   4179 	s.startBlock(bElse)
   4180 	switch n.Op {
   4181 	case OLEN:
   4182 		// length is stored in the first word for map/chan
   4183 		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
   4184 	case OCAP:
   4185 		// capacity is stored in the second word for chan
   4186 		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
   4187 		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
   4188 	default:
   4189 		s.Fatalf("op must be OLEN or OCAP")
   4190 	}
   4191 	s.endBlock()
   4192 	bElse.AddEdgeTo(bAfter)
   4193 
   4194 	s.startBlock(bAfter)
   4195 	return s.variable(n, lenType)
   4196 }
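// The lowering above hard-codes the runtime's header layout: the length is the
// first word of a map or channel header, and a channel's capacity is the word
// after it. An editorial sketch of the channel case in plain Go, assuming p
// points at such a header:
//
//	import "unsafe"
//
//	func chanLenCap(p unsafe.Pointer) (length, capacity int) {
//		if p == nil {
//			return 0, 0 // len and cap of a nil channel are zero
//		}
//		length = *(*int)(p)
//		capacity = *(*int)(unsafe.Pointer(uintptr(p) + unsafe.Sizeof(int(0))))
//		return
//	}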
   4197 
   4198 type f2uCvtTab struct {
   4199 	ltf, cvt2U, subf, or ssa.Op
   4200 	floatValue           func(*state, *types.Type, float64) *ssa.Value
   4201 	intValue             func(*state, *types.Type, int64) *ssa.Value
   4202 	cutoff               uint64
   4203 }
   4204 
   4205 var f32_u64 = f2uCvtTab{
   4206 	ltf:        ssa.OpLess32F,
   4207 	cvt2U:      ssa.OpCvt32Fto64,
   4208 	subf:       ssa.OpSub32F,
   4209 	or:         ssa.OpOr64,
   4210 	floatValue: (*state).constFloat32,
   4211 	intValue:   (*state).constInt64,
   4212 	cutoff:     9223372036854775808, // 1 << 63
   4213 }
   4214 
   4215 var f64_u64 = f2uCvtTab{
   4216 	ltf:        ssa.OpLess64F,
   4217 	cvt2U:      ssa.OpCvt64Fto64,
   4218 	subf:       ssa.OpSub64F,
   4219 	or:         ssa.OpOr64,
   4220 	floatValue: (*state).constFloat64,
   4221 	intValue:   (*state).constInt64,
   4222 	cutoff:     9223372036854775808, // 1 << 63
   4223 }
   4224 
   4225 var f32_u32 = f2uCvtTab{
   4226 	ltf:        ssa.OpLess32F,
   4227 	cvt2U:      ssa.OpCvt32Fto32,
   4228 	subf:       ssa.OpSub32F,
   4229 	or:         ssa.OpOr32,
   4230 	floatValue: (*state).constFloat32,
   4231 	intValue:   func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
   4232 	cutoff:     2147483648, // 1 << 31
   4233 }
   4234 
   4235 var f64_u32 = f2uCvtTab{
   4236 	ltf:        ssa.OpLess64F,
   4237 	cvt2U:      ssa.OpCvt64Fto32,
   4238 	subf:       ssa.OpSub64F,
   4239 	or:         ssa.OpOr32,
   4240 	floatValue: (*state).constFloat64,
   4241 	intValue:   func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
   4242 	cutoff:     2147483648, // 1 << 31
   4243 }
   4244 
   4245 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
   4246 	return s.floatToUint(&f32_u64, n, x, ft, tt)
   4247 }
   4248 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
   4249 	return s.floatToUint(&f64_u64, n, x, ft, tt)
   4250 }
   4251 
   4252 func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
   4253 	return s.floatToUint(&f32_u32, n, x, ft, tt)
   4254 }
   4255 
   4256 func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
   4257 	return s.floatToUint(&f64_u32, n, x, ft, tt)
   4258 }
   4259 
   4260 func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
   4261 	// cutoff:=1<<(intY_Size-1)
   4262 	// if x < floatX(cutoff) {
   4263 	// 	result = uintY(x)
   4264 	// } else {
   4265 	// 	y = x - floatX(cutoff)
   4266 	// 	z = uintY(y)
   4267 	// 	result = z | -(cutoff)
   4268 	// }
   4269 	cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
   4270 	cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
   4271 	b := s.endBlock()
   4272 	b.Kind = ssa.BlockIf
   4273 	b.SetControl(cmp)
   4274 	b.Likely = ssa.BranchLikely
   4275 
   4276 	bThen := s.f.NewBlock(ssa.BlockPlain)
   4277 	bElse := s.f.NewBlock(ssa.BlockPlain)
   4278 	bAfter := s.f.NewBlock(ssa.BlockPlain)
   4279 
   4280 	b.AddEdgeTo(bThen)
   4281 	s.startBlock(bThen)
   4282 	a0 := s.newValue1(cvttab.cvt2U, tt, x)
   4283 	s.vars[n] = a0
   4284 	s.endBlock()
   4285 	bThen.AddEdgeTo(bAfter)
   4286 
   4287 	b.AddEdgeTo(bElse)
   4288 	s.startBlock(bElse)
   4289 	y := s.newValue2(cvttab.subf, ft, x, cutoff)
   4290 	y = s.newValue1(cvttab.cvt2U, tt, y)
   4291 	z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
   4292 	a1 := s.newValue2(cvttab.or, tt, y, z)
   4293 	s.vars[n] = a1
   4294 	s.endBlock()
   4295 	bElse.AddEdgeTo(bAfter)
   4296 
   4297 	s.startBlock(bAfter)
   4298 	return s.variable(n, n.Type)
   4299 }
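// An editorial sketch of the cutoff trick for the float64-to-uint64 case
// (function name invented): values below 1<<63 take the signed conversion
// directly; larger values are shifted down by the cutoff first and the top
// bit is ORed back in afterwards:
//
//	func f64ToU64(x float64) uint64 {
//		const cutoff = 1 << 63
//		if x < cutoff {
//			return uint64(int64(x))
//		}
//		y := x - cutoff                  // now within the signed range
//		return uint64(int64(y)) | cutoff // restore the top bit
//	}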
   4300 
   4301 // dottype generates SSA for a type assertion node.
   4302 // commaok indicates whether a failed assertion panics (commaok=false) or returns a bool (commaok=true).
   4303 // If commaok is false, resok will be nil.
   4304 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
   4305 	iface := s.expr(n.Left)   // input interface
   4306 	target := s.expr(n.Right) // target type
   4307 	byteptr := s.f.Config.Types.BytePtr
   4308 
   4309 	if n.Type.IsInterface() {
   4310 		if n.Type.IsEmptyInterface() {
   4311 			// Converting to an empty interface.
   4312 			// Input could be an empty or nonempty interface.
   4313 			if Debug_typeassert > 0 {
   4314 				Warnl(n.Pos, "type assertion inlined")
   4315 			}
   4316 
   4317 			// Get itab/type field from input.
   4318 			itab := s.newValue1(ssa.OpITab, byteptr, iface)
   4319 			// Conversion succeeds iff that field is not nil.
   4320 			cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
   4321 
   4322 			if n.Left.Type.IsEmptyInterface() && commaok {
   4323 				// Converting empty interface to empty interface with ,ok is just a nil check.
   4324 				return iface, cond
   4325 			}
   4326 
   4327 			// Branch on nilness.
   4328 			b := s.endBlock()
   4329 			b.Kind = ssa.BlockIf
   4330 			b.SetControl(cond)
   4331 			b.Likely = ssa.BranchLikely
   4332 			bOk := s.f.NewBlock(ssa.BlockPlain)
   4333 			bFail := s.f.NewBlock(ssa.BlockPlain)
   4334 			b.AddEdgeTo(bOk)
   4335 			b.AddEdgeTo(bFail)
   4336 
   4337 			if !commaok {
   4338 				// On failure, panic by calling panicnildottype.
   4339 				s.startBlock(bFail)
   4340 				s.rtcall(panicnildottype, false, nil, target)
   4341 
   4342 				// On success, return (perhaps modified) input interface.
   4343 				s.startBlock(bOk)
   4344 				if n.Left.Type.IsEmptyInterface() {
   4345 					res = iface // Use input interface unchanged.
   4346 					return
   4347 				}
   4348 				// Load type out of itab, build interface with existing idata.
   4349 				off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
   4350 				typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
   4351 				idata := s.newValue1(ssa.OpIData, n.Type, iface)
   4352 				res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
   4353 				return
   4354 			}
   4355 
   4356 			s.startBlock(bOk)
   4357 			// nonempty -> empty
   4358 			// Need to load type from itab
   4359 			off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
   4360 			s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
   4361 			s.endBlock()
   4362 
   4363 			// itab is nil, might as well use that as the nil result.
   4364 			s.startBlock(bFail)
   4365 			s.vars[&typVar] = itab
   4366 			s.endBlock()
   4367 
   4368 			// Merge point.
   4369 			bEnd := s.f.NewBlock(ssa.BlockPlain)
   4370 			bOk.AddEdgeTo(bEnd)
   4371 			bFail.AddEdgeTo(bEnd)
   4372 			s.startBlock(bEnd)
   4373 			idata := s.newValue1(ssa.OpIData, n.Type, iface)
   4374 			res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
   4375 			resok = cond
   4376 			delete(s.vars, &typVar)
   4377 			return
   4378 		}
   4379 		// Converting to a nonempty interface needs a runtime call.
   4380 		if Debug_typeassert > 0 {
   4381 			Warnl(n.Pos, "type assertion not inlined")
   4382 		}
   4383 		if n.Left.Type.IsEmptyInterface() {
   4384 			if commaok {
   4385 				call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
   4386 				return call[0], call[1]
   4387 			}
   4388 			return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
   4389 		}
   4390 		if commaok {
   4391 			call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
   4392 			return call[0], call[1]
   4393 		}
   4394 		return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
   4395 	}
   4396 
   4397 	if Debug_typeassert > 0 {
   4398 		Warnl(n.Pos, "type assertion inlined")
   4399 	}
   4400 
   4401 	// Converting to a concrete type.
   4402 	direct := isdirectiface(n.Type)
   4403 	itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
   4407 	var targetITab *ssa.Value
   4408 	if n.Left.Type.IsEmptyInterface() {
   4409 		// Looking for pointer to target type.
   4410 		targetITab = target
   4411 	} else {
   4412 		// Looking for pointer to itab for target type and source interface.
   4413 		targetITab = s.expr(n.List.First())
   4414 	}
   4415 
   4416 	var tmp *Node       // temporary for use with large types
   4417 	var addr *ssa.Value // address of tmp
   4418 	if commaok && !canSSAType(n.Type) {
   4419 		// unSSAable type, use temporary.
   4420 		// TODO: get rid of some of these temporaries.
   4421 		tmp = tempAt(n.Pos, s.curfn, n.Type)
   4422 		addr = s.addr(tmp, false)
   4423 		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
   4424 	}
   4425 
   4426 	cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
   4427 	b := s.endBlock()
   4428 	b.Kind = ssa.BlockIf
   4429 	b.SetControl(cond)
   4430 	b.Likely = ssa.BranchLikely
   4431 
   4432 	bOk := s.f.NewBlock(ssa.BlockPlain)
   4433 	bFail := s.f.NewBlock(ssa.BlockPlain)
   4434 	b.AddEdgeTo(bOk)
   4435 	b.AddEdgeTo(bFail)
   4436 
   4437 	if !commaok {
   4438 		// On failure, panic by calling panicdottype.
   4439 		s.startBlock(bFail)
   4440 		taddr := s.expr(n.Right.Right)
   4441 		if n.Left.Type.IsEmptyInterface() {
   4442 			s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
   4443 		} else {
   4444 			s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
   4445 		}
   4446 
   4447 		// On success, return data from interface.
   4448 		s.startBlock(bOk)
   4449 		if direct {
   4450 			return s.newValue1(ssa.OpIData, n.Type, iface), nil
   4451 		}
   4452 		p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
   4453 		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil
   4454 	}
   4455 
   4456 	// commaok is the more complicated case because we have
   4457 	// a control flow merge point.
   4458 	bEnd := s.f.NewBlock(ssa.BlockPlain)
   4459 	// Note that we need a new valVar each time (unlike okVar where we can
   4460 	// reuse the variable) because it might have a different type every time.
   4461 	valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}}
   4462 
   4463 	// type assertion succeeded
   4464 	s.startBlock(bOk)
   4465 	if tmp == nil {
   4466 		if direct {
   4467 			s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
   4468 		} else {
   4469 			p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
   4470 			s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
   4471 		}
   4472 	} else {
   4473 		p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
   4474 		store := s.newValue3I(ssa.OpMove, types.TypeMem, n.Type.Size(), addr, p, s.mem())
   4475 		store.Aux = n.Type
   4476 		s.vars[&memVar] = store
   4477 	}
   4478 	s.vars[&okVar] = s.constBool(true)
   4479 	s.endBlock()
   4480 	bOk.AddEdgeTo(bEnd)
   4481 
   4482 	// type assertion failed
   4483 	s.startBlock(bFail)
   4484 	if tmp == nil {
   4485 		s.vars[valVar] = s.zeroVal(n.Type)
   4486 	} else {
   4487 		store := s.newValue2I(ssa.OpZero, types.TypeMem, n.Type.Size(), addr, s.mem())
   4488 		store.Aux = n.Type
   4489 		s.vars[&memVar] = store
   4490 	}
   4491 	s.vars[&okVar] = s.constBool(false)
   4492 	s.endBlock()
   4493 	bFail.AddEdgeTo(bEnd)
   4494 
   4495 	// merge point
   4496 	s.startBlock(bEnd)
   4497 	if tmp == nil {
   4498 		res = s.variable(valVar, n.Type)
   4499 		delete(s.vars, valVar)
   4500 	} else {
   4501 		res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
   4502 		s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
   4503 	}
   4504 	resok = s.variable(&okVar, types.Types[TBOOL])
   4505 	delete(s.vars, &okVar)
   4506 	return res, resok
   4507 }
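// Roughly, for a direct (pointer-shaped) concrete target type T, the blocks
// built above implement the following pseudocode sketch (editorial; targetITab
// is the itab or type pointer for T):
//
//	if itab(iface) == targetITab {
//		val, ok = T(idata(iface)), true
//	} else {
//		val, ok = zero value of T, false // without ",ok": call panicdottype instead
//	}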
   4508 
   4509 // variable returns the value of a variable at the current location.
   4510 func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
   4511 	v := s.vars[name]
   4512 	if v != nil {
   4513 		return v
   4514 	}
   4515 	v = s.fwdVars[name]
   4516 	if v != nil {
   4517 		return v
   4518 	}
   4519 
   4520 	if s.curBlock == s.f.Entry {
   4521 		// No variable should be live at entry.
   4522 		s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v)
   4523 	}
   4524 	// Make a FwdRef, which records a value that's live on block input.
   4525 	// We'll find the matching definition as part of insertPhis.
   4526 	v = s.newValue0A(ssa.OpFwdRef, t, name)
   4527 	s.fwdVars[name] = v
   4528 	s.addNamedValue(name, v)
   4529 	return v
   4530 }
   4531 
   4532 func (s *state) mem() *ssa.Value {
   4533 	return s.variable(&memVar, types.TypeMem)
   4534 }
   4535 
   4536 func (s *state) addNamedValue(n *Node, v *ssa.Value) {
   4537 	if n.Class() == Pxxx {
   4538 		// Don't track our dummy nodes (&memVar etc.).
   4539 		return
   4540 	}
   4541 	if n.IsAutoTmp() {
   4542 		// Don't track temporary variables.
   4543 		return
   4544 	}
   4545 	if n.Class() == PPARAMOUT {
   4546 		// Don't track named output values.  This prevents return values
   4547 		// from being assigned too early. See #14591 and #14762. TODO: allow this.
   4548 		return
   4549 	}
   4550 	if n.Class() == PAUTO && n.Xoffset != 0 {
   4551 		s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
   4552 	}
   4553 	loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
   4554 	values, ok := s.f.NamedValues[loc]
   4555 	if !ok {
   4556 		s.f.Names = append(s.f.Names, loc)
   4557 	}
   4558 	s.f.NamedValues[loc] = append(values, v)
   4559 }
   4560 
   4561 // Branch is an unresolved branch.
   4562 type Branch struct {
   4563 	P *obj.Prog  // branch instruction
   4564 	B *ssa.Block // target
   4565 }
   4566 
   4567 // SSAGenState contains state needed during Prog generation.
   4568 type SSAGenState struct {
   4569 	pp *Progs
   4570 
   4571 	// Branches remembers all the branch instructions we've seen
   4572 	// and where they would like to go.
   4573 	Branches []Branch
   4574 
   4575 	// bstart remembers where each block starts (indexed by block ID)
   4576 	bstart []*obj.Prog
   4577 
   4578 	// 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
   4579 	SSEto387 map[int16]int16
   4580 	// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
   4581 	ScratchFpMem *Node
   4582 
   4583 	maxarg int64 // largest frame size for arguments to calls made by the function
   4584 
   4585 	// Map from GC safe points to stack map index, generated by
   4586 	// liveness analysis.
   4587 	stackMapIndex map[*ssa.Value]int
   4588 }
   4589 
   4590 // Prog appends a new Prog.
   4591 func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
   4592 	return s.pp.Prog(as)
   4593 }
   4594 
   4595 // Pc returns the current Prog: the one that the next emitted instruction will fill in.
   4596 func (s *SSAGenState) Pc() *obj.Prog {
   4597 	return s.pp.next
   4598 }
   4599 
   4600 // SetPos sets the current source position.
   4601 func (s *SSAGenState) SetPos(pos src.XPos) {
   4602 	s.pp.pos = pos
   4603 }
   4604 
   4605 // DebugFriendlySetPosFrom sets the position subject to heuristics
   4606 // that reduce "jumpy" line number churn when debugging.
   4607 // Spill/fill/copy instructions from the register allocator,
   4608 // phi functions, and instructions with a no-pos position
   4609 // are examples of instructions that can cause churn.
   4610 func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
   4611 	// The two choices here are either to leave lineno unchanged,
   4612 	// or to explicitly set it to src.NoXPos.  Leaving it unchanged
   4613 	// (reusing the preceding line number) produces slightly better-
   4614 	// looking assembly language output from the compiler, and is
   4615 	// expected by some already-existing tests.
   4616 // The debug information appears to be the same in either case.
   4617 	switch v.Op {
   4618 	case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
   4619 		// leave the position unchanged from beginning of block
   4620 		// or previous line number.
   4621 	default:
   4622 		if v.Pos != src.NoXPos {
   4623 			s.SetPos(v.Pos)
   4624 		}
   4625 	}
   4626 }
   4627 
   4628 // genssa appends entries to pp for each instruction in f.
   4629 func genssa(f *ssa.Func, pp *Progs) {
   4630 	var s SSAGenState
   4631 
   4632 	e := f.Frontend().(*ssafn)
   4633 
   4634 	s.stackMapIndex = liveness(e, f)
   4635 
   4636 	// Remember where each block starts.
   4637 	s.bstart = make([]*obj.Prog, f.NumBlocks())
   4638 	s.pp = pp
   4639 	var progToValue map[*obj.Prog]*ssa.Value
   4640 	var progToBlock map[*obj.Prog]*ssa.Block
   4641 	var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
   4642 	var logProgs = e.log
   4643 	if logProgs {
   4644 		progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
   4645 		progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
   4646 		f.Logf("genssa %s\n", f.Name)
   4647 		progToBlock[s.pp.next] = f.Blocks[0]
   4648 	}
   4649 
   4650 	if thearch.Use387 {
   4651 		s.SSEto387 = map[int16]int16{}
   4652 	}
   4653 
   4654 	s.ScratchFpMem = e.scratchFpMem
   4655 
   4656 	logLocationLists := Debug_locationlist != 0
   4657 	if Ctxt.Flag_locationlists {
   4658 		e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(f, logLocationLists)
   4659 		valueToProgAfter = make([]*obj.Prog, f.NumValues())
   4660 	}
   4661 
   4662 	// Emit basic blocks
   4663 	for i, b := range f.Blocks {
   4664 		s.bstart[b.ID] = s.pp.next
   4665 		// Emit values in block
   4666 		thearch.SSAMarkMoves(&s, b)
   4667 		for _, v := range b.Values {
   4668 			x := s.pp.next
   4669 			s.DebugFriendlySetPosFrom(v)
   4670 			switch v.Op {
   4671 			case ssa.OpInitMem:
   4672 				// memory arg needs no code
   4673 			case ssa.OpArg:
   4674 				// input args need no code
   4675 			case ssa.OpSP, ssa.OpSB:
   4676 				// nothing to do
   4677 			case ssa.OpSelect0, ssa.OpSelect1:
   4678 				// nothing to do
   4679 			case ssa.OpGetG:
   4680 				// nothing to do when there's a g register,
   4681 				// and checkLower complains if there's not
   4682 			case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive:
   4683 				// nothing to do; already used by liveness
   4684 			case ssa.OpVarKill:
   4685 				// Zero variable if it is ambiguously live.
   4686 				// After the VARKILL anything this variable references
   4687 				// might be collected. If it were to become live again later,
   4688 				// the GC will see references to already-collected objects.
   4689 				// See issue 20029.
   4690 				n := v.Aux.(*Node)
   4691 				if n.Name.Needzero() {
   4692 					if n.Class() != PAUTO {
   4693 						v.Fatalf("zero of variable which isn't PAUTO %v", n)
   4694 					}
   4695 					if n.Type.Size()%int64(Widthptr) != 0 {
   4696 						v.Fatalf("zero of variable not a multiple of ptr size %v", n)
   4697 					}
   4698 					thearch.ZeroAuto(s.pp, n)
   4699 				}
   4700 			case ssa.OpPhi:
   4701 				CheckLoweredPhi(v)
   4702 			case ssa.OpRegKill:
   4703 				// nothing to do
   4704 			default:
   4705 				// let the backend handle it
   4706 				thearch.SSAGenValue(&s, v)
   4707 			}
   4708 
   4709 			if Ctxt.Flag_locationlists {
   4710 				valueToProgAfter[v.ID] = s.pp.next
   4711 			}
   4712 			if logProgs {
   4713 				for ; x != s.pp.next; x = x.Link {
   4714 					progToValue[x] = v
   4715 				}
   4716 			}
   4717 		}
   4718 		// Emit control flow instructions for block
   4719 		var next *ssa.Block
   4720 		if i < len(f.Blocks)-1 && Debug['N'] == 0 {
   4721 			// If -N, leave next==nil so every block with successors
   4722 			// ends in a JMP (except call blocks - plive doesn't like
   4723 			// select{send,recv} followed by a JMP call).  Helps keep
   4724 			// line numbers for otherwise empty blocks.
   4725 			next = f.Blocks[i+1]
   4726 		}
   4727 		x := s.pp.next
   4728 		s.SetPos(b.Pos)
   4729 		thearch.SSAGenBlock(&s, b, next)
   4730 		if logProgs {
   4731 			for ; x != s.pp.next; x = x.Link {
   4732 				progToBlock[x] = b
   4733 			}
   4734 		}
   4735 	}
   4736 
   4737 	if Ctxt.Flag_locationlists {
   4738 		for i := range f.Blocks {
   4739 			blockDebug := e.curfn.Func.DebugInfo.Blocks[i]
   4740 			for _, locList := range blockDebug.Variables {
   4741 				for _, loc := range locList.Locations {
   4742 					if loc.Start == ssa.BlockStart {
   4743 						loc.StartProg = s.bstart[f.Blocks[i].ID]
   4744 					} else {
   4745 						loc.StartProg = valueToProgAfter[loc.Start.ID]
   4746 					}
   4747 					if loc.End == nil {
   4748 						Fatalf("empty loc %v compiling %v", loc, f.Name)
   4749 					}
   4750 
   4751 					if loc.End == ssa.BlockEnd {
   4752 						// If this variable was live at the end of the block, it should be
   4753 						// live over the control flow instructions. Extend it up to the
   4754 						// beginning of the next block.
   4755 						// If this is the last block, then there's no Prog to use for it, and
   4756 						// EndProg is unset.
   4757 						if i < len(f.Blocks)-1 {
   4758 							loc.EndProg = s.bstart[f.Blocks[i+1].ID]
   4759 						}
   4760 					} else {
   4761 						// Advance the "end" forward by one; the end-of-range doesn't take effect
   4762 						// until the instruction actually executes.
   4763 						loc.EndProg = valueToProgAfter[loc.End.ID].Link
   4764 						if loc.EndProg == nil {
   4765 							Fatalf("nil loc.EndProg compiling %v, loc=%v", f.Name, loc)
   4766 						}
   4767 					}
   4768 					if !logLocationLists {
   4769 						loc.Start = nil
   4770 						loc.End = nil
   4771 					}
   4772 				}
   4773 			}
   4774 		}
   4775 	}
   4776 
   4777 	// Resolve branches
   4778 	for _, br := range s.Branches {
   4779 		br.P.To.Val = s.bstart[br.B.ID]
   4780 	}
   4781 
   4782 	if logProgs {
   4783 		filename := ""
   4784 		for p := pp.Text; p != nil; p = p.Link {
   4785 			if p.Pos.IsKnown() && p.InnermostFilename() != filename {
   4786 				filename = p.InnermostFilename()
   4787 				f.Logf("# %s\n", filename)
   4788 			}
   4789 
   4790 			var s string
   4791 			if v, ok := progToValue[p]; ok {
   4792 				s = v.String()
   4793 			} else if b, ok := progToBlock[p]; ok {
   4794 				s = b.String()
   4795 			} else {
   4796 				s = "   " // most value and branch strings are 2-3 characters long
   4797 			}
   4798 			f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
   4799 		}
   4800 		if f.HTMLWriter != nil {
   4801 			// LineHist is defunct now - this code won't do
   4802 			// anything.
   4803 			// TODO: fix this (ideally without a global variable)
   4804 			// saved := pp.Text.Ctxt.LineHist.PrintFilenameOnly
   4805 			// pp.Text.Ctxt.LineHist.PrintFilenameOnly = true
   4806 			var buf bytes.Buffer
   4807 			buf.WriteString("<code>")
   4808 			buf.WriteString("<dl class=\"ssa-gen\">")
   4809 			filename := ""
   4810 			for p := pp.Text; p != nil; p = p.Link {
   4811 				// Don't spam every line with the file name, which is often huge.
   4812 				// Only print changes, and "unknown" is not a change.
   4813 				if p.Pos.IsKnown() && p.InnermostFilename() != filename {
   4814 					filename = p.InnermostFilename()
   4815 					buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
   4816 					buf.WriteString(html.EscapeString("# " + filename))
   4817 					buf.WriteString("</dd>")
   4818 				}
   4819 
   4820 				buf.WriteString("<dt class=\"ssa-prog-src\">")
   4821 				if v, ok := progToValue[p]; ok {
   4822 					buf.WriteString(v.HTML())
   4823 				} else if b, ok := progToBlock[p]; ok {
   4824 					buf.WriteString("<b>" + b.HTML() + "</b>")
   4825 				}
   4826 				buf.WriteString("</dt>")
   4827 				buf.WriteString("<dd class=\"ssa-prog\">")
   4828 				buf.WriteString(fmt.Sprintf("%.5d <span class=\"line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), html.EscapeString(p.InstructionString())))
   4829 				buf.WriteString("</dd>")
   4830 			}
   4831 			buf.WriteString("</dl>")
   4832 			buf.WriteString("</code>")
   4833 			f.HTMLWriter.WriteColumn("genssa", "ssa-prog", buf.String())
   4834 			// pp.Text.Ctxt.LineHist.PrintFilenameOnly = saved
   4835 		}
   4836 	}
   4837 
   4838 	defframe(&s, e)
   4839 	if Debug['f'] != 0 {
   4840 		frame(0)
   4841 	}
   4842 
   4843 	f.HTMLWriter.Close()
   4844 	f.HTMLWriter = nil
   4845 }
   4846 
   4847 func defframe(s *SSAGenState, e *ssafn) {
   4848 	pp := s.pp
   4849 
   4850 	frame := Rnd(s.maxarg+e.stksize, int64(Widthreg))
   4851 	if thearch.PadFrame != nil {
   4852 		frame = thearch.PadFrame(frame)
   4853 	}
   4854 
   4855 	// Fill in argument and frame size.
   4856 	pp.Text.To.Type = obj.TYPE_TEXTSIZE
   4857 	pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
   4858 	pp.Text.To.Offset = frame
   4859 
   4860 	// Insert code to zero ambiguously live variables so that the
   4861 	// garbage collector only sees initialized values when it
   4862 	// looks for pointers.
   4863 	p := pp.Text
   4864 	var lo, hi int64
   4865 
   4866 	// Opaque state for backend to use. Current backends use it to
   4867 	// keep track of which helper registers have been zeroed.
   4868 	var state uint32
   4869 
   4870 	// Iterate through declarations. They are sorted in decreasing Xoffset order.
   4871 	for _, n := range e.curfn.Func.Dcl {
   4872 		if !n.Name.Needzero() {
   4873 			continue
   4874 		}
   4875 		if n.Class() != PAUTO {
   4876 			Fatalf("needzero class %d", n.Class())
   4877 		}
   4878 		if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
   4879 			Fatalf("var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
   4880 		}
   4881 
   4882 		if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
   4883 			// Merge with range we already have.
   4884 			lo = n.Xoffset
   4885 			continue
   4886 		}
   4887 
   4888 		// Zero old range.
   4889 		p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
   4890 
   4891 		// Set new range.
   4892 		lo = n.Xoffset
   4893 		hi = lo + n.Type.Size()
   4894 	}
   4895 
   4896 	// Zero final range.
   4897 	thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
   4898 }
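// A worked example of the merging above (editorial): with Widthreg = 8 and
// needzero autos spanning [40,48) and [16,32), visited in decreasing offset
// order, the second range satisfies 16+16 >= 40-2*8, so the two ranges merge
// and a single ZeroRange call clears [frame+16, frame+48) rather than two
// smaller regions separated by a gap.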
   4899 
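// FloatingEQNEJump describes one branch of the jump pair used to lower a
// floating-point equality or inequality block. Because NaN comparisons are
// unordered on architectures such as x86, a single conditional branch does not
// suffice; each arm is encoded as two jumps (Jump) to a successor (Index).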
   4900 type FloatingEQNEJump struct {
   4901 	Jump  obj.As
   4902 	Index int
   4903 }
   4904 
   4905 func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) {
   4906 	p := s.Prog(jumps.Jump)
   4907 	p.To.Type = obj.TYPE_BRANCH
   4908 	p.Pos = b.Pos
   4909 	to := jumps.Index
   4910 	s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
   4911 }
   4912 
   4913 func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
   4914 	switch next {
   4915 	case b.Succs[0].Block():
   4916 		s.oneFPJump(b, &jumps[0][0])
   4917 		s.oneFPJump(b, &jumps[0][1])
   4918 	case b.Succs[1].Block():
   4919 		s.oneFPJump(b, &jumps[1][0])
   4920 		s.oneFPJump(b, &jumps[1][1])
   4921 	default:
   4922 		s.oneFPJump(b, &jumps[1][0])
   4923 		s.oneFPJump(b, &jumps[1][1])
   4924 		q := s.Prog(obj.AJMP)
   4925 		q.Pos = b.Pos
   4926 		q.To.Type = obj.TYPE_BRANCH
   4927 		s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
   4928 	}
   4929 }
   4930 
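// AuxOffset returns the stack offset of v's Aux node when it is a
// stack-allocated auto, and 0 otherwise.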
   4931 func AuxOffset(v *ssa.Value) (offset int64) {
   4932 	if v.Aux == nil {
   4933 		return 0
   4934 	}
   4935 	n, ok := v.Aux.(*Node)
   4936 	if !ok {
   4937 		v.Fatalf("bad aux type in %s\n", v.LongString())
   4938 	}
   4939 	if n.Class() == PAUTO {
   4940 		return n.Xoffset
   4941 	}
   4942 	return 0
   4943 }
   4944 
   4945 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
   4946 func AddAux(a *obj.Addr, v *ssa.Value) {
   4947 	AddAux2(a, v, v.AuxInt)
   4948 }
   4949 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
   4950 	if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
   4951 		v.Fatalf("bad AddAux addr %v", a)
   4952 	}
   4953 	// add integer offset
   4954 	a.Offset += offset
   4955 
   4956 	// If no additional symbol offset, we're done.
   4957 	if v.Aux == nil {
   4958 		return
   4959 	}
   4960 	// Add symbol's offset from its base register.
   4961 	switch n := v.Aux.(type) {
   4962 	case *obj.LSym:
   4963 		a.Name = obj.NAME_EXTERN
   4964 		a.Sym = n
   4965 	case *Node:
   4966 		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
   4967 			a.Name = obj.NAME_PARAM
   4968 			a.Sym = n.Orig.Sym.Linksym()
   4969 			a.Offset += n.Xoffset
   4970 			break
   4971 		}
   4972 		a.Name = obj.NAME_AUTO
   4973 		a.Sym = n.Sym.Linksym()
   4974 		a.Offset += n.Xoffset
   4975 	default:
   4976 		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
   4977 	}
   4978 }
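// For example (editorial, names invented): a load at AuxInt = 8 within an auto
// variable x at frame offset 16 yields a TYPE_MEM operand with Name=NAME_AUTO,
// Sym=x, and Offset = 8 + 16 = 24 from the base register.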
   4979 
   4980 // extendIndex extends v to a full int width.
   4981 // It panics using the given function if v does not fit in an int (possible only on 32-bit archs).
   4982 func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value {
   4983 	size := v.Type.Size()
   4984 	if size == s.config.PtrSize {
   4985 		return v
   4986 	}
   4987 	if size > s.config.PtrSize {
   4988 		// truncate 64-bit indexes on 32-bit pointer archs. Test the
   4989 		// high word and branch to out-of-bounds failure if it is not 0.
   4990 		if Debug['B'] == 0 {
   4991 			hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], v)
   4992 			cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
   4993 			s.check(cmp, panicfn)
   4994 		}
   4995 		return s.newValue1(ssa.OpTrunc64to32, types.Types[TINT], v)
   4996 	}
   4997 
   4998 	// Extend value to the required size
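	// The switches below encode the pair (index size, pointer size), both in
	// bytes, as 10*size + PtrSize: 14 and 18 are 1-byte indexes on 4- and
	// 8-byte-pointer machines, 24 and 28 are 2-byte indexes, and 48 is a
	// 4-byte index on an 8-byte-pointer machine.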
   4999 	var op ssa.Op
   5000 	if v.Type.IsSigned() {
   5001 		switch 10*size + s.config.PtrSize {
   5002 		case 14:
   5003 			op = ssa.OpSignExt8to32
   5004 		case 18:
   5005 			op = ssa.OpSignExt8to64
   5006 		case 24:
   5007 			op = ssa.OpSignExt16to32
   5008 		case 28:
   5009 			op = ssa.OpSignExt16to64
   5010 		case 48:
   5011 			op = ssa.OpSignExt32to64
   5012 		default:
   5013 			s.Fatalf("bad signed index extension %s", v.Type)
   5014 		}
   5015 	} else {
   5016 		switch 10*size + s.config.PtrSize {
   5017 		case 14:
   5018 			op = ssa.OpZeroExt8to32
   5019 		case 18:
   5020 			op = ssa.OpZeroExt8to64
   5021 		case 24:
   5022 			op = ssa.OpZeroExt16to32
   5023 		case 28:
   5024 			op = ssa.OpZeroExt16to64
   5025 		case 48:
   5026 			op = ssa.OpZeroExt32to64
   5027 		default:
   5028 			s.Fatalf("bad unsigned index extension %s", v.Type)
   5029 		}
   5030 	}
   5031 	return s.newValue1(op, types.Types[TINT], v)
   5032 }
   5033 
   5034 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
   5035 // Called during ssaGenValue.
   5036 func CheckLoweredPhi(v *ssa.Value) {
   5037 	if v.Op != ssa.OpPhi {
   5038 		v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
   5039 	}
   5040 	if v.Type.IsMemory() {
   5041 		return
   5042 	}
   5043 	f := v.Block.Func
   5044 	loc := f.RegAlloc[v.ID]
   5045 	for _, a := range v.Args {
   5046 		if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
   5047 			v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
   5048 		}
   5049 	}
   5050 }
   5051 
   5052 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
   5053 // The output of LoweredGetClosurePtr is generally hardwired to the correct register.
   5054 // That register contains the closure pointer on closure entry.
   5055 func CheckLoweredGetClosurePtr(v *ssa.Value) {
   5056 	entry := v.Block.Func.Entry
   5057 	if entry != v.Block || entry.Values[0] != v {
   5058 		Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
   5059 	}
   5060 }
   5061 
   5062 // AutoVar returns a *Node and int64 representing the auto variable and offset within it
   5063 // where v should be spilled.
   5064 func AutoVar(v *ssa.Value) (*Node, int64) {
   5065 	loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
   5066 	if v.Type.Size() > loc.Type.Size() {
   5067 		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
   5068 	}
   5069 	return loc.N.(*Node), loc.Off
   5070 }
   5071 
   5072 func AddrAuto(a *obj.Addr, v *ssa.Value) {
   5073 	n, off := AutoVar(v)
   5074 	a.Type = obj.TYPE_MEM
   5075 	a.Sym = n.Sym.Linksym()
   5076 	a.Reg = int16(thearch.REGSP)
   5077 	a.Offset = n.Xoffset + off
   5078 	if n.Class() == PPARAM || n.Class() == PPARAMOUT {
   5079 		a.Name = obj.NAME_PARAM
   5080 	} else {
   5081 		a.Name = obj.NAME_AUTO
   5082 	}
   5083 }
   5084 
   5085 func (s *SSAGenState) AddrScratch(a *obj.Addr) {
   5086 	if s.ScratchFpMem == nil {
   5087 		panic("no scratch memory available; forgot to declare usesScratch for Op?")
   5088 	}
   5089 	a.Type = obj.TYPE_MEM
   5090 	a.Name = obj.NAME_AUTO
   5091 	a.Sym = s.ScratchFpMem.Sym.Linksym()
   5092 	a.Reg = int16(thearch.REGSP)
   5093 	a.Offset = s.ScratchFpMem.Xoffset
   5094 }
   5095 
   5096 func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
   5097 	idx, ok := s.stackMapIndex[v]
   5098 	if !ok {
   5099 		Fatalf("missing stack map index for %v", v.LongString())
   5100 	}
   5101 	p := s.Prog(obj.APCDATA)
   5102 	Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
   5103 	Addrconst(&p.To, int64(idx))
   5104 
   5105 	if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
   5106 		// Deferred calls will appear to be returning to
   5107 		// the CALL deferreturn(SB) that we are about to emit.
   5108 		// However, the stack trace code will show the line
   5109 		// of the instruction byte before the return PC.
   5110 		// To avoid that being an unrelated instruction,
   5111 		// insert an actual hardware NOP that will have the right line number.
   5112 		// This is different from obj.ANOP, which is a virtual no-op
   5113 		// that doesn't make it into the instruction stream.
   5114 		thearch.Ginsnop(s.pp)
   5115 	}
   5116 
   5117 	p = s.Prog(obj.ACALL)
   5118 	if sym, ok := v.Aux.(*obj.LSym); ok {
   5119 		p.To.Type = obj.TYPE_MEM
   5120 		p.To.Name = obj.NAME_EXTERN
   5121 		p.To.Sym = sym
   5122 
   5123 		// Record call graph information for nowritebarrierrec
   5124 		// analysis.
   5125 		if nowritebarrierrecCheck != nil {
   5126 			nowritebarrierrecCheck.recordCall(s.pp.curfn, sym, v.Pos)
   5127 		}
   5128 	} else {
   5129 		// TODO(mdempsky): Can these differences be eliminated?
   5130 		switch thearch.LinkArch.Family {
   5131 		case sys.AMD64, sys.I386, sys.PPC64, sys.S390X:
   5132 			p.To.Type = obj.TYPE_REG
   5133 		case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
   5134 			p.To.Type = obj.TYPE_MEM
   5135 		default:
   5136 			Fatalf("unknown indirect call family")
   5137 		}
   5138 		p.To.Reg = v.Args[0].Reg()
   5139 	}
   5140 	if s.maxarg < v.AuxInt {
   5141 		s.maxarg = v.AuxInt
   5142 	}
   5143 	return p
   5144 }
   5145 
   5146 // fieldIdx finds the index of the field referred to by the ODOT node n.
   5147 func fieldIdx(n *Node) int {
   5148 	t := n.Left.Type
   5149 	f := n.Sym
   5150 	if !t.IsStruct() {
   5151 		panic("ODOT's LHS is not a struct")
   5152 	}
   5153 
   5154 	var i int
   5155 	for _, t1 := range t.Fields().Slice() {
   5156 		if t1.Sym != f {
   5157 			i++
   5158 			continue
   5159 		}
   5160 		if t1.Offset != n.Xoffset {
   5161 			panic("field offset doesn't match")
   5162 		}
   5163 		return i
   5164 	}
   5165 	panic(fmt.Sprintf("can't find field in expr %v\n", n))
   5166 
   5167 	// TODO: keep the result of this function somewhere in the ODOT Node
   5168 	// so we don't have to recompute it each time we need it.
   5169 }
   5170 
   5171 // ssafn holds frontend information about a function that the backend is processing.
   5172 // It also exports a bunch of compiler services for the ssa backend.
   5173 type ssafn struct {
   5174 	curfn        *Node
   5175 	strings      map[string]interface{} // map from constant string to data symbols
   5176 	scratchFpMem *Node                  // temp for floating point register / memory moves on some architectures
   5177 	stksize      int64                  // stack size for current frame
   5178 	stkptrsize   int64                  // prefix of stack containing pointers
   5179 	log          bool
   5180 }
   5181 
   5182 // StringData returns a symbol (an *obj.LSym wrapped in an interface) which
   5183 // is the data component of a global string constant containing s.
   5184 func (e *ssafn) StringData(s string) interface{} {
   5185 	if aux, ok := e.strings[s]; ok {
   5186 		return aux
   5187 	}
   5188 	if e.strings == nil {
   5189 		e.strings = make(map[string]interface{})
   5190 	}
   5191 	data := stringsym(e.curfn.Pos, s)
   5192 	e.strings[s] = data
   5193 	return data
   5194 }
   5195 
   5196 func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
   5197 	n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
   5198 	return n
   5199 }
   5200 
   5201 func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
   5202 	n := name.N.(*Node)
   5203 	ptrType := types.NewPtr(types.Types[TUINT8])
   5204 	lenType := types.Types[TINT]
   5205 	if n.Class() == PAUTO && !n.Addrtaken() {
   5206 		// Split this string up into two separate variables.
   5207 		p := e.splitSlot(&name, ".ptr", 0, ptrType)
   5208 		l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
   5209 		return p, l
   5210 	}
   5211 	// Return the two parts of the larger variable.
   5212 	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
   5213 }
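// The two slots mirror the runtime's string header layout, conceptually
// (editorial sketch; the struct and field names are invented):
//
//	type stringHeader struct {
//		ptr *byte // offset 0
//		len int   // offset Widthptr
//	}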
   5214 
   5215 func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
   5216 	n := name.N.(*Node)
   5217 	t := types.NewPtr(types.Types[TUINT8])
   5218 	if n.Class() == PAUTO && !n.Addrtaken() {
   5219 		// Split this interface up into two separate variables.
   5220 		f := ".itab"
   5221 		if n.Type.IsEmptyInterface() {
   5222 			f = ".type"
   5223 		}
   5224 		c := e.splitSlot(&name, f, 0, t)
   5225 		d := e.splitSlot(&name, ".data", t.Size(), t)
   5226 		return c, d
   5227 	}
   5228 	// Return the two parts of the larger variable.
   5229 	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
   5230 }
   5231 
   5232 func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
   5233 	n := name.N.(*Node)
   5234 	ptrType := types.NewPtr(name.Type.ElemType())
   5235 	lenType := types.Types[TINT]
   5236 	if n.Class() == PAUTO && !n.Addrtaken() {
   5237 		// Split this slice up into three separate variables.
   5238 		p := e.splitSlot(&name, ".ptr", 0, ptrType)
   5239 		l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
   5240 		c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
   5241 		return p, l, c
   5242 	}
   5243 	// Return the three parts of the larger variable.
   5244 	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
   5245 		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
   5246 		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
   5247 }
   5248 
   5249 func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
   5250 	n := name.N.(*Node)
   5251 	s := name.Type.Size() / 2
   5252 	var t *types.Type
   5253 	if s == 8 {
   5254 		t = types.Types[TFLOAT64]
   5255 	} else {
   5256 		t = types.Types[TFLOAT32]
   5257 	}
   5258 	if n.Class() == PAUTO && !n.Addrtaken() {
   5259 		// Split this complex up into two separate variables.
   5260 		r := e.splitSlot(&name, ".real", 0, t)
   5261 		i := e.splitSlot(&name, ".imag", t.Size(), t)
   5262 		return r, i
   5263 	}
   5264 	// Return the two parts of the larger variable.
   5265 	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
   5266 }
   5267 
   5268 func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
   5269 	n := name.N.(*Node)
   5270 	var t *types.Type
   5271 	if name.Type.IsSigned() {
   5272 		t = types.Types[TINT32]
   5273 	} else {
   5274 		t = types.Types[TUINT32]
   5275 	}
   5276 	if n.Class() == PAUTO && !n.Addrtaken() {
   5277 		// Split this int64 up into two separate variables.
   5278 		if thearch.LinkArch.ByteOrder == binary.BigEndian {
   5279 			return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
   5280 		}
   5281 		return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32])
   5282 	}
   5283 	// Return the two parts of the larger variable.
   5284 	if thearch.LinkArch.ByteOrder == binary.BigEndian {
   5285 		return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
   5286 	}
   5287 	return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
   5288 }
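// For example (editorial): on a little-endian 32-bit machine an int64 slot at
// offset 0 splits into .lo at offset 0 and .hi at offset 4; on a big-endian
// machine the two halves are swapped, which is exactly what the byte-order
// checks above implement.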
   5289 
   5290 func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
   5291 	n := name.N.(*Node)
   5292 	st := name.Type
   5293 	ft := st.FieldType(i)
   5294 	var offset int64
   5295 	for f := 0; f < i; f++ {
   5296 		offset += st.FieldType(f).Size()
   5297 	}
   5298 	if n.Class() == PAUTO && !n.Addrtaken() {
   5299 		// Note: the _ field may appear several times.  But
   5300 		// have no fear, identically-named but distinct Autos are
   5301 		// ok, albeit maybe confusing for a debugger.
   5302 		return e.splitSlot(&name, "."+st.FieldName(i), offset, ft)
   5303 	}
   5304 	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
   5305 }
   5306 
   5307 func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
   5308 	n := name.N.(*Node)
   5309 	at := name.Type
   5310 	if at.NumElem() != 1 {
   5311 		Fatalf("bad array size")
   5312 	}
   5313 	et := at.ElemType()
   5314 	if n.Class() == PAUTO && !n.Addrtaken() {
   5315 		return e.splitSlot(&name, "[0]", 0, et)
   5316 	}
   5317 	return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
   5318 }
   5319 
   5320 func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
   5321 	return itabsym(it, offset)
   5322 }
   5323 
   5324 // splitSlot returns a slot representing the data of parent starting at offset.
   5325 func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
   5326 	s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg}
   5327 
   5328 	n := &Node{
   5329 		Name: new(Name),
   5330 		Op:   ONAME,
   5331 		Pos:  parent.N.(*Node).Pos,
   5332 	}
   5333 	n.Orig = n
   5334 
   5335 	s.Def = asTypesNode(n)
   5336 	asNode(s.Def).Name.SetUsed(true)
   5337 	n.Sym = s
   5338 	n.Type = t
   5339 	n.SetClass(PAUTO)
   5340 	n.SetAddable(true)
   5341 	n.Esc = EscNever
   5342 	n.Name.Curfn = e.curfn
   5343 	e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
   5344 	dowidth(t)
   5345 	return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
   5346 }
   5347 
   5348 func (e *ssafn) CanSSA(t *types.Type) bool {
   5349 	return canSSAType(t)
   5350 }
   5351 
   5352 func (e *ssafn) Line(pos src.XPos) string {
   5353 	return linestr(pos)
   5354 }
   5355 
   5356 // Logf logs a message from the compiler.
   5357 func (e *ssafn) Logf(msg string, args ...interface{}) {
   5358 	if e.log {
   5359 		fmt.Printf(msg, args...)
   5360 	}
   5361 }
   5362 
   5363 func (e *ssafn) Log() bool {
   5364 	return e.log
   5365 }
   5366 
   5367 // Fatalf reports a compiler error and exits.
   5368 func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
   5369 	lineno = pos
   5370 	Fatalf(msg, args...)
   5371 }
   5372 
   5373 // Warnl reports a "warning", which is usually flag-triggered
   5374 // logging output for the benefit of tests.
   5375 func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
   5376 	Warnl(pos, fmt_, args...)
   5377 }
   5378 
   5379 func (e *ssafn) Debug_checknil() bool {
   5380 	return Debug_checknil != 0
   5381 }
   5382 
   5383 func (e *ssafn) Debug_eagerwb() bool {
   5384 	return Debug_eagerwb != 0
   5385 }
   5386 
   5387 func (e *ssafn) UseWriteBarrier() bool {
   5388 	return use_writebarrier
   5389 }
   5390 
   5391 func (e *ssafn) Syslook(name string) *obj.LSym {
   5392 	switch name {
   5393 	case "goschedguarded":
   5394 		return goschedguarded
   5395 	case "writeBarrier":
   5396 		return writeBarrier
   5397 	case "writebarrierptr":
   5398 		return writebarrierptr
   5399 	case "gcWriteBarrier":
   5400 		return gcWriteBarrier
   5401 	case "typedmemmove":
   5402 		return typedmemmove
   5403 	case "typedmemclr":
   5404 		return typedmemclr
   5405 	}
   5406 	Fatalf("unknown Syslook func %v", name)
   5407 	return nil
   5408 }
   5409 
   5410 func (e *ssafn) SetWBPos(pos src.XPos) {
   5411 	e.curfn.Func.setWBPos(pos)
   5412 }
   5413 
   5414 func (n *Node) Typ() *types.Type {
   5415 	return n.Type
   5416 }
   5417 func (n *Node) StorageClass() ssa.StorageClass {
   5418 	switch n.Class() {
   5419 	case PPARAM:
   5420 		return ssa.ClassParam
   5421 	case PPARAMOUT:
   5422 		return ssa.ClassParamOut
   5423 	case PAUTO:
   5424 		return ssa.ClassAuto
   5425 	default:
   5426 		Fatalf("untranslatable storage class for %v: %s", n, n.Class())
   5427 		return 0
   5428 	}
   5429 }
   5430