// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/dwarf"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"cmd/internal/sys"
	"fmt"
	"math"
	"math/rand"
	"sort"
	"strings"
	"sync"
	"time"
)

// "Portable" code generation.

var (
	nBackendWorkers int     // number of concurrent backend workers, set by a compiler flag
	compilequeue    []*Node // functions waiting to be compiled
)

// emitptrargsmap emits the "args_stackmap" symbol for the current
// function: a pointer bitmap covering its arguments and results.
func emitptrargsmap() {
	if Curfn.funcname() == "_" {
		return
	}
	sym := lookup(fmt.Sprintf("%s.args_stackmap", Curfn.funcname()))
	lsym := sym.Linksym()

	nptr := int(Curfn.Type.ArgWidth() / int64(Widthptr))
	bv := bvalloc(int32(nptr) * 2)
	nbitmap := 1
	if Curfn.Type.NumResults() > 0 {
		nbitmap = 2
	}
	off := duint32(lsym, 0, uint32(nbitmap))
	off = duint32(lsym, off, uint32(bv.n))

	if Curfn.IsMethod() {
		onebitwalktype1(Curfn.Type.Recvs(), 0, bv)
	}
	if Curfn.Type.NumParams() > 0 {
		onebitwalktype1(Curfn.Type.Params(), 0, bv)
	}
	off = dbvec(lsym, off, bv)

	if Curfn.Type.NumResults() > 0 {
		onebitwalktype1(Curfn.Type.Results(), 0, bv)
		off = dbvec(lsym, off, bv)
	}

	ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
}

// cmpstackvarlt reports whether the stack variable a sorts before b.
//
// Sort the list of stack variables. Autos after anything else,
// within autos, unused after used, within used, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out at decreasing addresses on the stack,
// "pointers first, zeroed things first, decreasing size" really means,
// in memory, things with pointers needing zeroing at the top of the
// stack and increasing in size from there.
// Non-autos sort on offset.
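//
// For example (illustrative):
//
//	p    PPARAM, offset 0
//	c    PAUTO, used, has pointers, needs zeroing
//	a    PAUTO, used, pointer-free, 16 bytes
//	b    PAUTO, used, pointer-free, 8 bytes
//	t    PAUTO, unused
//
// sorts as p, c, a, b, t.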
func cmpstackvarlt(a, b *Node) bool {
	if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
		return b.Class() == PAUTO
	}

	if a.Class() != PAUTO {
		return a.Xoffset < b.Xoffset
	}

	if a.Name.Used() != b.Name.Used() {
		return a.Name.Used()
	}

	ap := types.Haspointers(a.Type)
	bp := types.Haspointers(b.Type)
	if ap != bp {
		return ap
	}

	ap = a.Name.Needzero()
	bp = b.Name.Needzero()
	if ap != bp {
		return ap
	}

	if a.Type.Width != b.Type.Width {
		return a.Type.Width > b.Type.Width
	}

	return a.Sym.Name < b.Sym.Name
}

// byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
type byStackVar []*Node

func (s byStackVar) Len() int           { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func (s *ssafn) AllocFrame(f *ssa.Func) {
	s.stksize = 0
	s.stkptrsize = 0
	fn := s.curfn.Func

	// Mark the PAUTOs unused.
	for _, ln := range fn.Dcl {
		if ln.Class() == PAUTO {
			ln.Name.SetUsed(false)
		}
	}

	for _, l := range f.RegAlloc {
		if ls, ok := l.(ssa.LocalSlot); ok {
			ls.N.(*Node).Name.SetUsed(true)
		}
	}

	scratchUsed := false
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if n, ok := v.Aux.(*Node); ok {
				switch n.Class() {
				case PPARAM, PPARAMOUT:
					// Don't modify nodfp; it is a global.
					if n != nodfp {
						n.Name.SetUsed(true)
					}
				case PAUTO:
					n.Name.SetUsed(true)
				}
			}
			if !scratchUsed {
				scratchUsed = v.Op.UsesScratch()
			}
		}
	}

	if f.Config.NeedsFpScratch && scratchUsed {
		s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
	}

	sort.Sort(byStackVar(fn.Dcl))

	// Reassign stack offsets of the locals that are used.
	for i, n := range fn.Dcl {
		if n.Op != ONAME || n.Class() != PAUTO {
			continue
		}
		if !n.Name.Used() {
			fn.Dcl = fn.Dcl[:i]
			break
		}

		dowidth(n.Type)
		w := n.Type.Width
		if w >= thearch.MAXWIDTH || w < 0 {
			Fatalf("bad width")
		}
		s.stksize += w
		s.stksize = Rnd(s.stksize, int64(n.Type.Align))
		if types.Haspointers(n.Type) {
			s.stkptrsize = s.stksize
		}
		if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
			s.stksize = Rnd(s.stksize, int64(Widthptr))
		}
		n.Xoffset = -s.stksize
	}

	s.stksize = Rnd(s.stksize, int64(Widthreg))
	s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}

func compile(fn *Node) {
	Curfn = fn
	dowidth(fn.Type)

	if fn.Nbody.Len() == 0 {
		emitptrargsmap()
		return
	}

	saveerrors()

	order(fn)
	if nerrors != 0 {
		return
	}

	walk(fn)
	if nerrors != 0 {
		return
	}
	if instrumenting {
		instrument(fn)
	}

	// From this point, there should be no uses of Curfn. Enforce that.
	Curfn = nil

	// Set up the function's LSym early to avoid data races with the assemblers.
	fn.Func.initLSym()

	if compilenow() {
		compileSSA(fn, 0)
	} else {
		compilequeue = append(compilequeue, fn)
	}
}

// compilenow reports whether to compile immediately.
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
func compilenow() bool {
	return nBackendWorkers == 1 && Debug_compilelater == 0
}

const maxStackSize = 1 << 30

// compileSSA builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *Node, worker int) {
	f := buildssa(fn, worker)
	if f.Frontend().(*ssafn).stksize >= maxStackSize {
		largeStackFramesMu.Lock()
		largeStackFrames = append(largeStackFrames, fn.Pos)
		largeStackFramesMu.Unlock()
		return
	}
	pp := newProgs(fn, worker)
	genssa(f, pp)
	pp.Flush()
	// fieldtrack must be called after pp.Flush. See issue 20014.
	fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
	pp.Free()
}

func init() {
	if raceEnabled {
		rand.Seed(time.Now().UnixNano())
	}
}

// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers goroutines to do the work
// and waits for them to complete.
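// The worker count is set by the compiler's -c flag; with -c=1,
// functions are compiled immediately rather than enqueued
// (see compilenow), so the queue stays empty.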
func compileFunctions() {
	if len(compilequeue) != 0 {
		sizeCalculationDisabled = true // not safe to calculate sizes concurrently
		if raceEnabled {
			// Randomize compilation order to try to shake out races.
			tmp := make([]*Node, len(compilequeue))
			perm := rand.Perm(len(compilequeue))
			for i, v := range perm {
				tmp[v] = compilequeue[i]
			}
			copy(compilequeue, tmp)
		} else {
			// Compile the longest functions first,
			// since they're most likely to be the slowest.
			// This helps avoid stragglers.
			obj.SortSlice(compilequeue, func(i, j int) bool {
				return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
			})
		}
		var wg sync.WaitGroup
		Ctxt.InParallel = true
		c := make(chan *Node, nBackendWorkers)
		for i := 0; i < nBackendWorkers; i++ {
			wg.Add(1)
			go func(worker int) {
				for fn := range c {
					compileSSA(fn, worker)
				}
				wg.Done()
			}(i)
		}
		for _, fn := range compilequeue {
			c <- fn
		}
		close(c)
		compilequeue = nil
		wg.Wait()
		Ctxt.InParallel = false
		sizeCalculationDisabled = false
	}
}

// debuginfo generates the DWARF lexical scopes and inlined-call
// information for fn.
func debuginfo(fnsym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
	fn := curfn.(*Node)
	debugInfo := fn.Func.DebugInfo
	fn.Func.DebugInfo = nil
	if fn.Func.Nname != nil {
		if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
			Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
		}
	}

	var automDecls []*Node
	// Populate Automs for fn.
	for _, n := range fn.Func.Dcl {
		if n.Op != ONAME { // might be OTYPE or OLITERAL
			continue
		}
		var name obj.AddrName
		switch n.Class() {
		case PAUTO:
			if !n.Name.Used() {
				// Text == nil -> generating abstract function
				if fnsym.Func.Text != nil {
					Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
				}
				continue
			}
			name = obj.NAME_AUTO
		case PPARAM, PPARAMOUT:
			name = obj.NAME_PARAM
		default:
			continue
		}
		automDecls = append(automDecls, n)
		gotype := ngotype(n).Linksym()
		fnsym.Func.Autom = append(fnsym.Func.Autom, &obj.Auto{
			Asym:    Ctxt.Lookup(n.Sym.Name),
			Aoffset: int32(n.Xoffset),
			Name:    name,
			Gotype:  gotype,
		})
	}

	decls, dwarfVars := createDwarfVars(fnsym, debugInfo, automDecls)

	var varScopes []ScopeID
	for _, decl := range decls {
		pos := decl.Pos
		if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
			// It's not clear which position is correct for captured variables here:
			// * decl.Pos is the wrong position for captured variables, in the inner
			//   function, but it is the right position in the outer function.
			// * decl.Name.Defn is nil for captured variables that were arguments
			//   on the outer function, however the decl.Pos for those seems to be
			//   correct.
			// * decl.Name.Defn is the "wrong" thing for variables declared in the
			//   header of a type switch: it's their position in the header, rather
			//   than the position of the case statement. In principle this is the
			//   right thing, but here we prefer the latter because it makes each
			//   instance of the header variable local to the lexical block of its
			//   case statement.
			// This code is probably wrong for type switch variables that are also
			// captured.
			pos = decl.Name.Defn.Pos
		}
		varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
	}

	scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
	var inlcalls dwarf.InlCalls
	if genDwarfInline > 0 {
		inlcalls = assembleInlines(fnsym, fn, dwarfVars)
	}
	return scopes, inlcalls
}

// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
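// For example, a local variable x of type int gets a single dwarf.Var
// with abbrev DW_ABRV_AUTO and a StackOffset derived from the frame
// offset that AllocFrame assigned to x.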
func createSimpleVars(automDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
	var vars []*dwarf.Var
	var decls []*Node
	selected := make(map[*Node]bool)
	for _, n := range automDecls {
		if n.IsAutoTmp() {
			continue
		}
		var abbrev int
		offs := n.Xoffset

		switch n.Class() {
		case PAUTO:
			abbrev = dwarf.DW_ABRV_AUTO
			if Ctxt.FixedFrameSize() == 0 {
				offs -= int64(Widthptr)
			}
			if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) {
				offs -= int64(Widthptr)
			}

		case PPARAM, PPARAMOUT:
			abbrev = dwarf.DW_ABRV_PARAM
			offs += Ctxt.FixedFrameSize()
		default:
			Fatalf("createSimpleVars unexpected class %v for node %v", n.Class(), n)
		}

		selected[n] = true
		typename := dwarf.InfoPrefix + typesymname(n.Type)
		decls = append(decls, n)
		inlIndex := 0
		if genDwarfInline > 1 {
			if n.InlFormal() || n.InlLocal() {
				inlIndex = posInlIndex(n.Pos) + 1
				if n.InlFormal() {
					abbrev = dwarf.DW_ABRV_PARAM
				}
			}
		}
		declpos := Ctxt.InnermostPos(n.Pos)
		vars = append(vars, &dwarf.Var{
			Name:          n.Sym.Name,
			IsReturnValue: n.Class() == PPARAMOUT,
			IsInlFormal:   n.InlFormal(),
			Abbrev:        abbrev,
			StackOffset:   int32(offs),
			Type:          Ctxt.Lookup(typename),
			DeclFile:      declpos.Base().SymFilename(),
			DeclLine:      declpos.Line(),
			DeclCol:       declpos.Col(),
			InlIndex:      int32(inlIndex),
			ChildIndex:    -1,
		})
	}
	return decls, vars, selected
}

// varPart identifies one SSA slot of a decomposed user variable by the
// slot's offset within that variable.
type varPart struct {
	varOffset int64
	slot      ssa.SlotID
}

// createComplexVars creates recomposed DWARF entries, with location
// lists, for the user variables described by debugInfo.
func createComplexVars(fnsym *obj.LSym, debugInfo *ssa.FuncDebug, automDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
	for _, blockDebug := range debugInfo.Blocks {
		for _, locList := range blockDebug.Variables {
			for _, loc := range locList.Locations {
				if loc.StartProg != nil {
					loc.StartPC = loc.StartProg.Pc
				}
				if loc.EndProg != nil {
					loc.EndPC = loc.EndProg.Pc
				} else {
					loc.EndPC = fnsym.Size
				}
				if Debug_locationlist == 0 {
					loc.EndProg = nil
					loc.StartProg = nil
				}
			}
		}
	}

	// Group SSA variables by the user variable they were decomposed from.
	varParts := map[*Node][]varPart{}
	ssaVars := make(map[*Node]bool)
	for slotID, slot := range debugInfo.VarSlots {
		for slot.SplitOf != nil {
			slot = slot.SplitOf
		}
		n := slot.N.(*Node)
		ssaVars[n] = true
		varParts[n] = append(varParts[n], varPart{varOffset(slot), ssa.SlotID(slotID)})
	}

	// Produce a DWARF variable entry for each user variable.
	// Don't iterate over the map -- that's nondeterministic, and
	// createComplexVar has side effects. Instead, go by slot.
	var decls []*Node
	var vars []*dwarf.Var
	for _, slot := range debugInfo.VarSlots {
		for slot.SplitOf != nil {
			slot = slot.SplitOf
		}
		n := slot.N.(*Node)
		parts := varParts[n]
		if parts == nil {
			continue
		}
		// Don't work on this variable again, no matter how many slots it has.
		delete(varParts, n)

		// Get the order the parts need to be in to represent the memory
		// of the decomposed user variable.
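		// For example, the parts of a decomposed string sort as the
		// pointer part (varOffset 0) followed by the length part
		// (varOffset Widthptr).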
		sort.Sort(partsByVarOffset(parts))

		if dvar := createComplexVar(debugInfo, n, parts); dvar != nil {
			decls = append(decls, n)
			vars = append(vars, dvar)
		}
	}

	return decls, vars, ssaVars
}

// createDwarfVars creates the DWARF variable entries for fnsym, along
// with the matching list of declaration nodes.
func createDwarfVars(fnsym *obj.LSym, debugInfo *ssa.FuncDebug, automDecls []*Node) ([]*Node, []*dwarf.Var) {
	// Collect a raw list of DWARF vars.
	var vars []*dwarf.Var
	var decls []*Node
	var selected map[*Node]bool
	if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && debugInfo != nil {
		decls, vars, selected = createComplexVars(fnsym, debugInfo, automDecls)
	} else {
		decls, vars, selected = createSimpleVars(automDecls)
	}

	var dcl []*Node
	if fnsym.WasInlined() {
		dcl = preInliningDcls(fnsym)
	} else {
		dcl = automDecls
	}

	// If optimization is enabled, the list above will typically be
	// missing some of the original pre-optimization variables in the
	// function (they may have been promoted to registers, folded into
	// constants, dead-code eliminated, etc). Here we add back in entries
	// for selected missing vars. Note that the recipe below creates a
	// conservative location. The idea here is that we want to
	// communicate to the user that "yes, there is a variable named X
	// in this function, but no, I don't have enough information to
	// reliably report its contents."
	for _, n := range dcl {
		if _, found := selected[n]; found {
			continue
		}
		c := n.Sym.Name[0]
		if c == '.' || n.Type.IsUntyped() {
			continue
		}
		typename := dwarf.InfoPrefix + typesymname(n.Type)
		decls = append(decls, n)
		abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
			abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
		}
		inlIndex := 0
		if genDwarfInline > 1 {
			if n.InlFormal() || n.InlLocal() {
				inlIndex = posInlIndex(n.Pos) + 1
				if n.InlFormal() {
					abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
				}
			}
		}
		declpos := Ctxt.InnermostPos(n.Pos)
		vars = append(vars, &dwarf.Var{
			Name:          n.Sym.Name,
			IsReturnValue: n.Class() == PPARAMOUT,
			Abbrev:        abbrev,
			StackOffset:   int32(n.Xoffset),
			Type:          Ctxt.Lookup(typename),
			DeclFile:      declpos.Base().SymFilename(),
			DeclLine:      declpos.Line(),
			DeclCol:       declpos.Col(),
			InlIndex:      int32(inlIndex),
			ChildIndex:    -1,
		})
		// Append a "deleted auto" entry to the autom list to
		// ensure that the type in question is picked up by the linker.
		// See issue 22941.
		gotype := ngotype(n).Linksym()
		fnsym.Func.Autom = append(fnsym.Func.Autom, &obj.Auto{
			Asym:    Ctxt.Lookup(n.Sym.Name),
			Aoffset: int32(-1),
			Name:    obj.NAME_DELETED_AUTO,
			Gotype:  gotype,
		})
	}

	return decls, vars
}

// preInliningDcls returns a sorted list of nodes corresponding to the
// autos/locals of the given function prior to inlining. If the function
// is not local to the package being compiled, the names of its variables
// may have been "versioned" to avoid conflicts with local vars; that
// versioning is disregarded when sorting.
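// (Versioned names carry a "·N" suffix, which unversion strips.)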
func preInliningDcls(fnsym *obj.LSym) []*Node {
	fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
	var dcl, rdcl []*Node
	if fn.Name.Defn != nil {
		dcl = fn.Func.Inldcl.Slice() // local function
	} else {
		dcl = fn.Func.Dcl // imported function
	}
	for _, n := range dcl {
		c := n.Sym.Name[0]
		// Avoid reporting "_" parameters, since if there are more than
		// one, it can result in a collision later on, as in #23179.
		if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
			continue
		}
		rdcl = append(rdcl, n)
	}
	sort.Sort(byNodeName(rdcl))
	return rdcl
}

// cmpNodeName reports whether a sorts before b, ordering names that
// begin with "~" after all others and comparing unversioned names.
func cmpNodeName(a, b *Node) bool {
	aart := 0
	if strings.HasPrefix(a.Sym.Name, "~") {
		aart = 1
	}
	bart := 0
	if strings.HasPrefix(b.Sym.Name, "~") {
		bart = 1
	}
	if aart != bart {
		return aart < bart
	}

	aname := unversion(a.Sym.Name)
	bname := unversion(b.Sym.Name)
	return aname < bname
}

// byNodeName implements sort.Interface for []*Node using cmpNodeName.
type byNodeName []*Node

func (s byNodeName) Len() int           { return len(s) }
func (s byNodeName) Less(i, j int) bool { return cmpNodeName(s[i], s[j]) }
func (s byNodeName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// varOffset returns the offset of slot within the user variable it was
// decomposed from. This has nothing to do with its stack offset.
func varOffset(slot *ssa.LocalSlot) int64 {
	offset := slot.Off
	for ; slot.SplitOf != nil; slot = slot.SplitOf {
		offset += slot.SplitOffset
	}
	return offset
}

// partsByVarOffset implements sort.Interface for []varPart by varOffset.
type partsByVarOffset []varPart

func (a partsByVarOffset) Len() int           { return len(a) }
func (a partsByVarOffset) Less(i, j int) bool { return a[i].varOffset < a[j].varOffset }
func (a partsByVarOffset) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// stackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func stackOffset(slot *ssa.LocalSlot) int32 {
	n := slot.N.(*Node)
	var base int64
	switch n.Class() {
	case PAUTO:
		if Ctxt.FixedFrameSize() == 0 {
			base -= int64(Widthptr)
		}
		if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) {
			base -= int64(Widthptr)
		}
	case PPARAM, PPARAMOUT:
		base += Ctxt.FixedFrameSize()
	}
	return int32(base + n.Xoffset + slot.Off)
}

// createComplexVar builds a DWARF variable entry and location list representing n.
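// It returns nil if n is not an auto or parameter.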
func createComplexVar(debugInfo *ssa.FuncDebug, n *Node, parts []varPart) *dwarf.Var {
	slots := debugInfo.Slots
	var offs int64 // base stack offset for this kind of variable
	var abbrev int
	switch n.Class() {
	case PAUTO:
		abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
		if Ctxt.FixedFrameSize() == 0 {
			offs -= int64(Widthptr)
		}
		if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) {
			offs -= int64(Widthptr)
		}

	case PPARAM, PPARAMOUT:
		abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
		offs += Ctxt.FixedFrameSize()
	default:
		return nil
	}

	gotype := ngotype(n).Linksym()
	typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
	inlIndex := 0
	if genDwarfInline > 1 {
		if n.InlFormal() || n.InlLocal() {
			inlIndex = posInlIndex(n.Pos) + 1
			if n.InlFormal() {
				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
			}
		}
	}
	declpos := Ctxt.InnermostPos(n.Pos)
	dvar := &dwarf.Var{
		Name:          n.Sym.Name,
		IsReturnValue: n.Class() == PPARAMOUT,
		IsInlFormal:   n.InlFormal(),
		Abbrev:        abbrev,
		Type:          Ctxt.Lookup(typename),
		// The stack offset is used as a sorting key, so for decomposed
		// variables just give it the lowest one. It's not used otherwise.
		// This won't work well if the first slot hasn't been assigned a stack
		// location, but it's not obvious how to do better.
		StackOffset: int32(stackOffset(slots[parts[0].slot])),
		DeclFile:    declpos.Base().SymFilename(),
		DeclLine:    declpos.Line(),
		DeclCol:     declpos.Col(),
		InlIndex:    int32(inlIndex),
		ChildIndex:  -1,
	}

	if Debug_locationlist != 0 {
		Ctxt.Logf("Building location list for %+v. Parts:\n", n)
		for _, part := range parts {
			Ctxt.Logf("\t%v => %v\n", debugInfo.Slots[part.slot], debugInfo.SlotLocsString(part.slot))
		}
	}

	// Given a variable that's been decomposed into multiple parts,
	// its location list may need a new entry after the beginning or
	// end of every location entry for each of its parts. For example:
	//
	//	[variable]    [pc range]
	//	string.ptr    |----|-----|    |----|
	//	string.len    |------------|  |--|
	//	... needs a location list like:
	//	string        |----|-----|-|  |--|-|
	//
	// Note that location entries may or may not line up with each other,
	// and some of the result will only have one or the other part.
	//
	// To build the resulting list:
	// - keep a "current" pointer for each part
	// - find the next transition point
	// - advance the current pointer for each part up to that transition point
	// - build the piece for the range between that transition point and the next
	// - repeat

	type locID struct {
		block int
		loc   int
	}
	findLoc := func(part varPart, id locID) *ssa.VarLoc {
		if id.block >= len(debugInfo.Blocks) {
			return nil
		}
		return debugInfo.Blocks[id.block].Variables[part.slot].Locations[id.loc]
	}
	nextLoc := func(part varPart, id locID) (locID, *ssa.VarLoc) {
		// Check if there's another loc in this block.
		id.loc++
		if b := debugInfo.Blocks[id.block]; b != nil && id.loc < len(b.Variables[part.slot].Locations) {
			return id, findLoc(part, id)
		}
		// Find the next block that has a loc for this part.
		id.loc = 0
		id.block++
		for ; id.block < len(debugInfo.Blocks); id.block++ {
			if b := debugInfo.Blocks[id.block]; b != nil && len(b.Variables[part.slot].Locations) != 0 {
				return id, findLoc(part, id)
			}
		}
		return id, nil
	}
	curLoc := make([]locID, len(slots))
	// Position each pointer at the first entry for its slot.
	for _, part := range parts {
		if b := debugInfo.Blocks[0]; b != nil && len(b.Variables[part.slot].Locations) != 0 {
			// Block 0 has an entry; no need to advance.
			continue
		}
		curLoc[part.slot], _ = nextLoc(part, curLoc[part.slot])
	}

	// findBoundaryAfter finds the next beginning or end of a piece after currentPC.
	findBoundaryAfter := func(currentPC int64) int64 {
		min := int64(math.MaxInt64)
		for _, part := range parts {
			// For each part, find the first PC greater than current. Doesn't
			// matter if it's a start or an end, since we're looking for any boundary.
			// If it's the new winner, save it.
		onePart:
			for i, loc := curLoc[part.slot], findLoc(part, curLoc[part.slot]); loc != nil; i, loc = nextLoc(part, i) {
				for _, pc := range [2]int64{loc.StartPC, loc.EndPC} {
					if pc > currentPC {
						if pc < min {
							min = pc
						}
						break onePart
					}
				}
			}
		}
		return min
	}
	var start int64
	end := findBoundaryAfter(0)
	for {
		// Advance to the next chunk.
		start = end
		end = findBoundaryAfter(start)
		if end == math.MaxInt64 {
			break
		}

		dloc := dwarf.Location{StartPC: start, EndPC: end}
		if Debug_locationlist != 0 {
			Ctxt.Logf("Processing range %x -> %x\n", start, end)
		}

		// Advance curLoc to the last location that starts before/at start.
		// After this loop, if there's a location that covers [start, end), it will be current.
		// Otherwise the current piece will be too early.
		for _, part := range parts {
			choice := locID{-1, -1}
			for i, loc := curLoc[part.slot], findLoc(part, curLoc[part.slot]); loc != nil; i, loc = nextLoc(part, i) {
				if loc.StartPC > start {
					break // overshot
				}
				choice = i // best yet
			}
			if choice.block != -1 {
				curLoc[part.slot] = choice
			}
			if Debug_locationlist != 0 {
				Ctxt.Logf("\t %v => %v", slots[part.slot], curLoc[part.slot])
			}
		}
		if Debug_locationlist != 0 {
			Ctxt.Logf("\n")
		}

		// Assemble the location list entry for this chunk.
		present := 0
		for _, part := range parts {
			dpiece := dwarf.Piece{
				Length: slots[part.slot].Type.Size(),
			}
			loc := findLoc(part, curLoc[part.slot])
			if loc == nil || start >= loc.EndPC || end <= loc.StartPC {
				if Debug_locationlist != 0 {
					Ctxt.Logf("\t%v: missing", slots[part.slot])
				}
				dpiece.Missing = true
				dloc.Pieces = append(dloc.Pieces, dpiece)
				continue
			}
			present++
			if Debug_locationlist != 0 {
				Ctxt.Logf("\t%v: %v", slots[part.slot], debugInfo.Blocks[curLoc[part.slot].block].LocString(loc))
			}
			if loc.OnStack {
				dpiece.OnStack = true
				dpiece.StackOffset = stackOffset(slots[loc.StackLocation])
			} else {
				for reg := 0; reg < len(debugInfo.Registers); reg++ {
					if loc.Registers&(1<<uint8(reg)) != 0 {
						dpiece.RegNum = Ctxt.Arch.DWARFRegisters[debugInfo.Registers[reg].ObjNum()]
					}
				}
			}
			dloc.Pieces = append(dloc.Pieces, dpiece)
		}
		if present == 0 {
			if Debug_locationlist != 0 {
				Ctxt.Logf(" -> totally missing\n")
			}
			continue
		}

		// Extend the previous entry if possible.
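		// Two entries can be merged when they abut in PC and
		// describe identical pieces.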
		if len(dvar.LocationList) > 0 {
			prev := &dvar.LocationList[len(dvar.LocationList)-1]
			if prev.EndPC == dloc.StartPC && len(prev.Pieces) == len(dloc.Pieces) {
				equal := true
				for i := range prev.Pieces {
					if prev.Pieces[i] != dloc.Pieces[i] {
						equal = false
					}
				}
				if equal {
					prev.EndPC = end
					if Debug_locationlist != 0 {
						Ctxt.Logf("-> merged with previous, now %#v\n", prev)
					}
					continue
				}
			}
		}
		dvar.LocationList = append(dvar.LocationList, dloc)
		if Debug_locationlist != 0 {
			Ctxt.Logf("-> added: %#v\n", dloc)
		}
	}
	return dvar
}

// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
	if fnsym == nil {
		return
	}
	if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
		return
	}

	trackSyms := make([]*types.Sym, 0, len(tracked))
	for sym := range tracked {
		trackSyms = append(trackSyms, sym)
	}
	sort.Sort(symByName(trackSyms))
	for _, sym := range trackSyms {
		r := obj.Addrel(fnsym)
		r.Sym = sym.Linksym()
		r.Type = objabi.R_USEFIELD
	}
}

// symByName implements sort.Interface for []*types.Sym by name.
type symByName []*types.Sym

func (a symByName) Len() int           { return len(a) }
func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }