// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/gcprog"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"fmt"
	"os"
	"sort"
	"strings"
	"sync"
)

// itabEntry records an itab (interface dispatch table) that must be
// emitted for concrete type t implementing interface type itype.
type itabEntry struct {
	t, itype *types.Type
	lsym     *obj.LSym // symbol of the itab itself

	// symbols of each method in
	// the itab, sorted by byte offset;
	// filled in by peekitabs
	entries []*obj.LSym
}

// ptabEntry pairs an exported symbol with its type for the
// plugin export table.
type ptabEntry struct {
	s *types.Sym
	t *types.Type
}

// runtime interface and reflection data structures
var (
	signatsetmu sync.Mutex // protects signatset
	signatset   = make(map[*types.Type]struct{})

	itabs []itabEntry
	ptabs []ptabEntry
)

// Sig describes one method signature for the reflect metadata tables.
type Sig struct {
	name   string
	pkg    *types.Pkg  // set only for unexported methods (see methods/imethods)
	isym   *types.Sym  // method symbol for interface (indirect) calls
	tsym   *types.Sym  // method symbol for direct calls on the type
	type_  *types.Type // method func type including the receiver
	mtype  *types.Type // method func type without the receiver
	offset int32
}

// siglt sorts method signatures by name, then package path.
func siglt(a, b *Sig) bool {
	if a.name != b.name {
		return a.name < b.name
	}
	if a.pkg == b.pkg {
		return false
	}
	// nil pkg (exported method) sorts before any named package.
	if a.pkg == nil {
		return true
	}
	if b.pkg == nil {
		return false
	}
	return a.pkg.Path < b.pkg.Path
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
76 const ( 77 BUCKETSIZE = 8 78 MAXKEYSIZE = 128 79 MAXVALSIZE = 128 80 ) 81 82 func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{}) 83 func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) 84 85 func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) 86 if t.Sym == nil && len(methods(t)) == 0 { 87 return 0 88 } 89 return 4 + 2 + 2 + 4 + 4 90 } 91 92 func makefield(name string, t *types.Type) *types.Field { 93 f := types.NewField() 94 f.Type = t 95 f.Sym = (*types.Pkg)(nil).Lookup(name) 96 return f 97 } 98 99 // bmap makes the map bucket type given the type of the map. 100 func bmap(t *types.Type) *types.Type { 101 if t.MapType().Bucket != nil { 102 return t.MapType().Bucket 103 } 104 105 bucket := types.New(TSTRUCT) 106 keytype := t.Key() 107 valtype := t.Val() 108 dowidth(keytype) 109 dowidth(valtype) 110 if keytype.Width > MAXKEYSIZE { 111 keytype = types.NewPtr(keytype) 112 } 113 if valtype.Width > MAXVALSIZE { 114 valtype = types.NewPtr(valtype) 115 } 116 117 field := make([]*types.Field, 0, 5) 118 119 // The first field is: uint8 topbits[BUCKETSIZE]. 120 arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE) 121 field = append(field, makefield("topbits", arr)) 122 123 arr = types.NewArray(keytype, BUCKETSIZE) 124 arr.SetNoalg(true) 125 keys := makefield("keys", arr) 126 field = append(field, keys) 127 128 arr = types.NewArray(valtype, BUCKETSIZE) 129 arr.SetNoalg(true) 130 values := makefield("values", arr) 131 field = append(field, values) 132 133 // Make sure the overflow pointer is the last memory in the struct, 134 // because the runtime assumes it can use size-ptrSize as the 135 // offset of the overflow pointer. We double-check that property 136 // below once the offsets and size are computed. 137 // 138 // BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point. 
139 // On 32-bit systems, the max alignment is 32-bit, and the 140 // overflow pointer will add another 32-bit field, and the struct 141 // will end with no padding. 142 // On 64-bit systems, the max alignment is 64-bit, and the 143 // overflow pointer will add another 64-bit field, and the struct 144 // will end with no padding. 145 // On nacl/amd64p32, however, the max alignment is 64-bit, 146 // but the overflow pointer will add only a 32-bit field, 147 // so if the struct needs 64-bit padding (because a key or value does) 148 // then it would end with an extra 32-bit padding field. 149 // Preempt that by emitting the padding here. 150 if int(valtype.Align) > Widthptr || int(keytype.Align) > Widthptr { 151 field = append(field, makefield("pad", types.Types[TUINTPTR])) 152 } 153 154 // If keys and values have no pointers, the map implementation 155 // can keep a list of overflow pointers on the side so that 156 // buckets can be marked as having no pointers. 157 // Arrange for the bucket to have no pointers by changing 158 // the type of the overflow field to uintptr in this case. 159 // See comment on hmap.overflow in ../../../../runtime/hashmap.go. 160 otyp := types.NewPtr(bucket) 161 if !types.Haspointers(valtype) && !types.Haspointers(keytype) { 162 otyp = types.Types[TUINTPTR] 163 } 164 overflow := makefield("overflow", otyp) 165 field = append(field, overflow) 166 167 // link up fields 168 bucket.SetNoalg(true) 169 bucket.SetFields(field[:]) 170 dowidth(bucket) 171 172 // Check invariants that map code depends on. 
173 if !IsComparable(t.Key()) { 174 Fatalf("unsupported map key type for %v", t) 175 } 176 if BUCKETSIZE < 8 { 177 Fatalf("bucket size too small for proper alignment") 178 } 179 if keytype.Align > BUCKETSIZE { 180 Fatalf("key align too big for %v", t) 181 } 182 if valtype.Align > BUCKETSIZE { 183 Fatalf("value align too big for %v", t) 184 } 185 if keytype.Width > MAXKEYSIZE { 186 Fatalf("key size to large for %v", t) 187 } 188 if valtype.Width > MAXVALSIZE { 189 Fatalf("value size to large for %v", t) 190 } 191 if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() { 192 Fatalf("key indirect incorrect for %v", t) 193 } 194 if t.Val().Width > MAXVALSIZE && !valtype.IsPtr() { 195 Fatalf("value indirect incorrect for %v", t) 196 } 197 if keytype.Width%int64(keytype.Align) != 0 { 198 Fatalf("key size not a multiple of key align for %v", t) 199 } 200 if valtype.Width%int64(valtype.Align) != 0 { 201 Fatalf("value size not a multiple of value align for %v", t) 202 } 203 if bucket.Align%keytype.Align != 0 { 204 Fatalf("bucket align not multiple of key align %v", t) 205 } 206 if bucket.Align%valtype.Align != 0 { 207 Fatalf("bucket align not multiple of value align %v", t) 208 } 209 if keys.Offset%int64(keytype.Align) != 0 { 210 Fatalf("bad alignment of keys in bmap for %v", t) 211 } 212 if values.Offset%int64(valtype.Align) != 0 { 213 Fatalf("bad alignment of values in bmap for %v", t) 214 } 215 216 // Double-check that overflow field is final memory in struct, 217 // with no padding at end. See comment above. 218 if overflow.Offset != bucket.Width-int64(Widthptr) { 219 Fatalf("bad offset of overflow in bmap for %v", t) 220 } 221 222 t.MapType().Bucket = bucket 223 224 bucket.StructType().Map = t 225 return bucket 226 } 227 228 // hmap builds a type representing a Hmap structure for the given map type. 229 // Make sure this stays in sync with ../../../../runtime/hashmap.go. 
230 func hmap(t *types.Type) *types.Type { 231 if t.MapType().Hmap != nil { 232 return t.MapType().Hmap 233 } 234 235 bmap := bmap(t) 236 237 // build a struct: 238 // type hmap struct { 239 // count int 240 // flags uint8 241 // B uint8 242 // noverflow uint16 243 // hash0 uint32 244 // buckets *bmap 245 // oldbuckets *bmap 246 // nevacuate uintptr 247 // extra unsafe.Pointer // *mapextra 248 // } 249 // must match ../../../../runtime/hashmap.go:hmap. 250 fields := []*types.Field{ 251 makefield("count", types.Types[TINT]), 252 makefield("flags", types.Types[TUINT8]), 253 makefield("B", types.Types[TUINT8]), 254 makefield("noverflow", types.Types[TUINT16]), 255 makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP. 256 makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP. 257 makefield("oldbuckets", types.NewPtr(bmap)), 258 makefield("nevacuate", types.Types[TUINTPTR]), 259 makefield("extra", types.Types[TUNSAFEPTR]), 260 } 261 262 hmap := types.New(TSTRUCT) 263 hmap.SetNoalg(true) 264 hmap.SetFields(fields) 265 dowidth(hmap) 266 267 // The size of hmap should be 48 bytes on 64 bit 268 // and 28 bytes on 32 bit platforms. 269 if size := int64(8 + 5*Widthptr); hmap.Width != size { 270 Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size) 271 } 272 273 t.MapType().Hmap = hmap 274 hmap.StructType().Map = t 275 return hmap 276 } 277 278 // hiter builds a type representing an Hiter structure for the given map type. 279 // Make sure this stays in sync with ../../../../runtime/hashmap.go. 
280 func hiter(t *types.Type) *types.Type { 281 if t.MapType().Hiter != nil { 282 return t.MapType().Hiter 283 } 284 285 hmap := hmap(t) 286 bmap := bmap(t) 287 288 // build a struct: 289 // type hiter struct { 290 // key *Key 291 // val *Value 292 // t unsafe.Pointer // *MapType 293 // h *hmap 294 // buckets *bmap 295 // bptr *bmap 296 // overflow unsafe.Pointer // *[]*bmap 297 // oldoverflow unsafe.Pointer // *[]*bmap 298 // startBucket uintptr 299 // offset uint8 300 // wrapped bool 301 // B uint8 302 // i uint8 303 // bucket uintptr 304 // checkBucket uintptr 305 // } 306 // must match ../../../../runtime/hashmap.go:hiter. 307 fields := []*types.Field{ 308 makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP. 309 makefield("val", types.NewPtr(t.Val())), // Used in range.go for TMAP. 310 makefield("t", types.Types[TUNSAFEPTR]), 311 makefield("h", types.NewPtr(hmap)), 312 makefield("buckets", types.NewPtr(bmap)), 313 makefield("bptr", types.NewPtr(bmap)), 314 makefield("overflow", types.Types[TUNSAFEPTR]), 315 makefield("oldoverflow", types.Types[TUNSAFEPTR]), 316 makefield("startBucket", types.Types[TUINTPTR]), 317 makefield("offset", types.Types[TUINT8]), 318 makefield("wrapped", types.Types[TBOOL]), 319 makefield("B", types.Types[TUINT8]), 320 makefield("i", types.Types[TUINT8]), 321 makefield("bucket", types.Types[TUINTPTR]), 322 makefield("checkBucket", types.Types[TUINTPTR]), 323 } 324 325 // build iterator struct holding the above fields 326 hiter := types.New(TSTRUCT) 327 hiter.SetNoalg(true) 328 hiter.SetFields(fields) 329 dowidth(hiter) 330 if hiter.Width != int64(12*Widthptr) { 331 Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr) 332 } 333 t.MapType().Hiter = hiter 334 hiter.StructType().Map = t 335 return hiter 336 } 337 338 // f is method type, with receiver. 339 // return function type, receiver as first argument (or not). 
340 func methodfunc(f *types.Type, receiver *types.Type) *types.Type { 341 var in []*Node 342 if receiver != nil { 343 d := nod(ODCLFIELD, nil, nil) 344 d.Type = receiver 345 in = append(in, d) 346 } 347 348 var d *Node 349 for _, t := range f.Params().Fields().Slice() { 350 d = nod(ODCLFIELD, nil, nil) 351 d.Type = t.Type 352 d.SetIsddd(t.Isddd()) 353 in = append(in, d) 354 } 355 356 var out []*Node 357 for _, t := range f.Results().Fields().Slice() { 358 d = nod(ODCLFIELD, nil, nil) 359 d.Type = t.Type 360 out = append(out, d) 361 } 362 363 t := functype(nil, in, out) 364 if f.Nname() != nil { 365 // Link to name of original method function. 366 t.SetNname(f.Nname()) 367 } 368 369 return t 370 } 371 372 // methods returns the methods of the non-interface type t, sorted by name. 373 // Generates stub functions as needed. 374 func methods(t *types.Type) []*Sig { 375 // method type 376 mt := methtype(t) 377 378 if mt == nil { 379 return nil 380 } 381 expandmeth(mt) 382 383 // type stored in interface word 384 it := t 385 386 if !isdirectiface(it) { 387 it = types.NewPtr(t) 388 } 389 390 // make list of methods for t, 391 // generating code if necessary. 392 var ms []*Sig 393 for _, f := range mt.AllMethods().Slice() { 394 if f.Type.Etype != TFUNC || f.Type.Recv() == nil { 395 Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) 396 } 397 if f.Type.Recv() == nil { 398 Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f) 399 } 400 if f.Nointerface() { 401 continue 402 } 403 404 method := f.Sym 405 if method == nil { 406 continue 407 } 408 409 // get receiver type for this particular method. 410 // if pointer receiver but non-pointer t and 411 // this is not an embedded pointer inside a struct, 412 // method does not apply. 
413 this := f.Type.Recv().Type 414 415 if this.IsPtr() && this.Elem() == t { 416 continue 417 } 418 if this.IsPtr() && !t.IsPtr() && f.Embedded != 2 && !isifacemethod(f.Type) { 419 continue 420 } 421 422 var sig Sig 423 ms = append(ms, &sig) 424 425 sig.name = method.Name 426 if !exportname(method.Name) { 427 if method.Pkg == nil { 428 Fatalf("methods: missing package") 429 } 430 sig.pkg = method.Pkg 431 } 432 433 sig.isym = methodsym(method, it, true) 434 sig.tsym = methodsym(method, t, false) 435 sig.type_ = methodfunc(f.Type, t) 436 sig.mtype = methodfunc(f.Type, nil) 437 438 if !sig.isym.Siggen() { 439 sig.isym.SetSiggen(true) 440 if !eqtype(this, it) || this.Width < int64(Widthptr) { 441 compiling_wrappers = true 442 genwrapper(it, f, sig.isym, true) 443 compiling_wrappers = false 444 } 445 } 446 447 if !sig.tsym.Siggen() { 448 sig.tsym.SetSiggen(true) 449 if !eqtype(this, t) { 450 compiling_wrappers = true 451 genwrapper(t, f, sig.tsym, false) 452 compiling_wrappers = false 453 } 454 } 455 } 456 457 obj.SortSlice(ms, func(i, j int) bool { return siglt(ms[i], ms[j]) }) 458 return ms 459 } 460 461 // imethods returns the methods of the interface type t, sorted by name. 462 func imethods(t *types.Type) []*Sig { 463 var methods []*Sig 464 for _, f := range t.Fields().Slice() { 465 if f.Type.Etype != TFUNC || f.Sym == nil { 466 continue 467 } 468 method := f.Sym 469 var sig = Sig{ 470 name: method.Name, 471 } 472 if !exportname(method.Name) { 473 if method.Pkg == nil { 474 Fatalf("imethods: missing package") 475 } 476 sig.pkg = method.Pkg 477 } 478 479 sig.mtype = f.Type 480 sig.offset = 0 481 sig.type_ = methodfunc(f.Type, nil) 482 483 if n := len(methods); n > 0 { 484 last := methods[n-1] 485 if !(siglt(last, &sig)) { 486 Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name) 487 } 488 } 489 methods = append(methods, &sig) 490 491 // Compiler can only refer to wrappers for non-blank methods. 
492 if method.IsBlank() { 493 continue 494 } 495 496 // NOTE(rsc): Perhaps an oversight that 497 // IfaceType.Method is not in the reflect data. 498 // Generate the method body, so that compiled 499 // code can refer to it. 500 isym := methodsym(method, t, false) 501 if !isym.Siggen() { 502 isym.SetSiggen(true) 503 genwrapper(t, f, isym, false) 504 } 505 } 506 507 return methods 508 } 509 510 func dimportpath(p *types.Pkg) { 511 if p.Pathsym != nil { 512 return 513 } 514 515 // If we are compiling the runtime package, there are two runtime packages around 516 // -- localpkg and Runtimepkg. We don't want to produce import path symbols for 517 // both of them, so just produce one for localpkg. 518 if myimportpath == "runtime" && p == Runtimepkg { 519 return 520 } 521 522 var str string 523 if p == localpkg { 524 // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. 525 str = myimportpath 526 } else { 527 str = p.Path 528 } 529 530 s := Ctxt.Lookup("type..importpath." + p.Prefix + ".") 531 ot := dnameData(s, 0, str, "", nil, false) 532 ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) 533 p.Pathsym = s 534 } 535 536 func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { 537 if pkg == nil { 538 return duintptr(s, ot, 0) 539 } 540 541 if pkg == localpkg && myimportpath == "" { 542 // If we don't know the full import path of the package being compiled 543 // (i.e. -p was not passed on the compiler command line), emit a reference to 544 // type..importpath.""., which the linker will rewrite using the correct import path. 545 // Every package that imports this one directly defines the symbol. 546 // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. 547 ns := Ctxt.Lookup(`type..importpath."".`) 548 return dsymptr(s, ot, ns, 0) 549 } 550 551 dimportpath(pkg) 552 return dsymptr(s, ot, pkg.Pathsym, 0) 553 } 554 555 // dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol. 
func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		return duint32(s, ot, 0)
	}
	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptrOff(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptrOff(s, ot, pkg.Pathsym, 0)
}

// dnameField dumps a reflect.name for a struct field.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
	// Unexported fields must belong to the struct's own package.
	if !exportname(ft.Sym.Name) && ft.Sym.Pkg != spkg {
		Fatalf("package mismatch for %v", ft.Sym)
	}
	nsym := dname(ft.Sym.Name, ft.Note, nil, exportname(ft.Sym.Name))
	return dsymptr(lsym, ot, nsym, 0)
}

// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
	// Lengths are stored in two bytes each, so cap at 1<<16-1.
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	}
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)
	}

	// Encode name and tag. See reflect/type.go for details.
	var bits byte
	l := 1 + 2 + len(name) // flag byte, 2-byte length, name bytes
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}
	if pkg != nil {
		bits |= 1 << 2
	}
	b := make([]byte, l)
	b[0] = bits
	b[1] = uint8(len(name) >> 8) // length, high byte first
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		// Tag follows the name, with its own 2-byte length prefix.
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	ot = int(s.WriteBytes(Ctxt, int64(ot), b))

	// When a package is present (bit 2 set), its path offset trails the data.
	if pkg != nil {
		ot = dgopkgpathOff(s, ot, pkg)
	}

	return ot
}

// dnameCount makes package-qualified name symbols unique (see dname).
var dnameCount int

// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	if pkg == nil {
		// In the common case, share data with other packages.
		if name == "" {
			if exported {
				sname += "-noname-exported." + tag
			} else {
				sname += "-noname-unexported." + tag
			}
		} else {
			if exported {
				sname += name + "." + tag
			} else {
				sname += name + "-" + tag
			}
		}
	} else {
		// Package-qualified names get a locally unique symbol instead.
		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
		dnameCount++
	}
	s := Ctxt.Lookup(sname)
	if len(s.P) > 0 {
		// Data already written for this symbol; reuse it.
		return s
	}
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
	m := methods(t)
	// Same condition under which uncommonSize(t) returns 0: nothing to dump.
	if t.Sym == nil && len(m) == 0 {
		return ot
	}
	// The uncommontype header must start pointer-aligned.
	noff := int(Rnd(int64(ot), int64(Widthptr)))
	if noff != ot {
		Fatalf("unexpected alignment in dextratype for %v", t)
	}

	// Emit type descriptors for all method signatures first.
	for _, a := range m {
		dtypesym(a.type_)
	}

	ot = dgopkgpathOff(lsym, ot, typePkg(t))

	dataAdd += uncommonSize(t)
	mcount := len(m)
	// mcount is stored in a uint16 and dataAdd in a uint32; check for overflow.
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %v: %d", t, mcount)
	}
	if dataAdd != int(uint32(dataAdd)) {
		Fatalf("methods are too far away on %v: %d", t, dataAdd)
	}

	ot = duint16(lsym, ot, uint16(mcount))
	ot = duint16(lsym, ot, 0)
	ot = duint32(lsym, ot, uint32(dataAdd))
	ot = duint32(lsym, ot, 0)
	return ot
}

// typePkg returns the package whose path should accompany t's method
// data, or nil for predeclared and error types.
func typePkg(t *types.Type) *types.Pkg {
	tsym := t.Sym
	if tsym == nil {
		switch t.Etype {
		case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN:
			// Unnamed composite types borrow the symbol of their element type.
			if t.Elem() != nil {
				tsym = t.Elem().Sym
			}
		}
	}
	if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
		return tsym.Pkg
	}
	return nil
}

// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := exportname(a.name)
		var pkg *types.Pkg
		// Only record the package when it differs from the type's own.
		if !exported && a.pkg != typePkg(t) {
			pkg = a.pkg
		}
		nsym := dname(a.name, "", pkg, exported)

		ot = dsymptrOff(lsym, ot, nsym, 0)
		ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
		ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
		ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
	}
	return ot
}

// dmethodptrOff writes a 4-byte R_METHODOFF relocation to x
// at offset ot in s, returning the next offset.
func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
	duint32(s, ot, 0) // placeholder; the relocation supplies the value
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = objabi.R_METHODOFF
	return ot + 4
}

// kinds maps compiler type kinds (Etype) to runtime/reflect kind constants.
var kinds = []int{
	TINT:        objabi.KindInt,
	TUINT:       objabi.KindUint,
	TINT8:       objabi.KindInt8,
	TUINT8:      objabi.KindUint8,
	TINT16:      objabi.KindInt16,
	TUINT16:     objabi.KindUint16,
	TINT32:      objabi.KindInt32,
	TUINT32:     objabi.KindUint32,
	TINT64:      objabi.KindInt64,
	TUINT64:     objabi.KindUint64,
	TUINTPTR:    objabi.KindUintptr,
	TFLOAT32:    objabi.KindFloat32,
	TFLOAT64:    objabi.KindFloat64,
	TBOOL:       objabi.KindBool,
	TSTRING:     objabi.KindString,
	TPTR32:      objabi.KindPtr,
	TPTR64:      objabi.KindPtr,
	TSTRUCT:     objabi.KindStruct,
	TINTER:      objabi.KindInterface,
	TCHAN:       objabi.KindChan,
	TMAP:        objabi.KindMap,
	TARRAY:      objabi.KindArray,
	TSLICE:      objabi.KindSlice,
	TFUNC:       objabi.KindFunc,
	TCOMPLEX64:  objabi.KindComplex64,
	TCOMPLEX128: objabi.KindComplex128,
	TUNSAFEPTR:  objabi.KindUnsafePointer,
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 {
	if !types.Haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		// A single pointer word.
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab; void *data; } or
		// struct { Type *type; void *data; }
		return 2 * int64(Widthptr)

	case TSLICE:
		// struct { byte *array; uintgo len; uintgo cap; }
		return int64(Widthptr)

	case TARRAY:
		// haspointers already eliminated t.NumElem() == 0.
		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())

	case TSTRUCT:
		// Find the last field that has pointers.
		// (Haspointers(t) guarantees at least one exists.)
		var lastPtrField *types.Field
		for _, t1 := range t.Fields().Slice() {
			if types.Haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)

	default:
		Fatalf("typeptrdata: unexpected type, %v", t)
		return 0
	}
}

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	runtime/type.go
const (
	tflagUncommon  = 1 << 0
	tflagExtraStar = 1 << 1
	tflagNamed     = 1 << 2
)

// Symbols referenced by emitted type data; algarray is
// resolved lazily (see dcommontype).
var (
	algarray       *obj.LSym
	memhashvarlen  *obj.LSym
	memequalvarlen *obj.LSym
)

// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int {
	// The rtype header is always written at the start of the symbol.
	if ot != 0 {
		Fatalf("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if algarray == nil {
		algarray = sysfunc("algarray")
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *obj.LSym
	if alg == ASPECIAL || alg == AMEM {
		// These algorithm kinds need a dedicated alg symbol.
		algsym = dalgsym(t)
	}

	// sptr is the descriptor for *t, used to fill ptrToThis below.
	sptrWeak := true
	var sptr *obj.LSym
	if !t.IsPtr() || t.PtrBase != nil {
		tptr := types.NewPtr(t)
		if t.Sym != nil || methods(tptr) != nil {
			sptrWeak = false
		}
		sptr = dtypesym(tptr)
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	//	type rtype struct {
	//		size          uintptr
	//		ptrdata       uintptr
	//		hash          uint32
	//		tflag         tflag
	//		align         uint8
	//		fieldAlign    uint8
	//		kind          uint8
	//		alg           *typeAlg
	//		gcdata        *byte
	//		str           nameOff
	//		ptrToThis     typeOff
	//	}
	ot = duintptr(lsym, ot, uint64(t.Width))
	ot = duintptr(lsym, ot, uint64(ptrdata))
	ot = duint32(lsym, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}
	if t.Sym != nil && t.Sym.Name != "" {
		tflag |= tflagNamed
	}

	exported := false
	p := t.LongString()
	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		tflag |= tflagExtraStar
		if t.Sym != nil {
			exported = exportname(t.Sym.Name)
		}
	} else {
		if t.Elem() != nil && t.Elem().Sym != nil {
			exported = exportname(t.Elem().Sym.Name)
		}
	}

	ot = duint8(lsym, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(lsym, ot, t.Align) // align
	ot = duint8(lsym, ot, t.Align) // fieldAlign

	// The kind byte also carries the NoPointers/DirectIface/GCProg flags.
	i = kinds[t.Etype]
	if !types.Haspointers(t) {
		i |= objabi.KindNoPointers
	}
	if isdirectiface(t) {
		i |= objabi.KindDirectIface
	}
	if useGCProg {
		i |= objabi.KindGCProg
	}
	ot = duint8(lsym, ot, uint8(i)) // kind
	if algsym == nil {
		// Point into the shared algarray table.
		ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg)
	} else {
		ot = dsymptr(lsym, ot, algsym, 0)
	}
	ot = dsymptr(lsym, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOff(lsym, ot, nsym, 0) // str
	// ptrToThis
	if sptr == nil {
		ot = duint32(lsym, ot, 0)
	} else if sptrWeak {
		ot = dsymptrWeakOff(lsym, ot, sptr)
	} else {
		ot = dsymptrOff(lsym, ot, sptr, 0)
	}

	return ot
}

// typeHasNoAlg returns whether t does not have any associated hash/eq
// algorithms because t, or some component of t, is marked Noalg.
func typeHasNoAlg(t *types.Type) bool {
	a, bad := algtype1(t)
	return a == ANOEQ && bad.Noalg()
}

// typesymname returns the name of t's type descriptor symbol.
func typesymname(t *types.Type) string {
	name := t.ShortString()
	// Use a separate symbol name for Noalg types for #17752.
	if typeHasNoAlg(t) {
		name = "noalg." + name
	}
	return name
}

// Fake package for runtime type info (headers)
// Don't access directly, use typeLookup below.
var (
	typepkgmu sync.Mutex // protects typepkg lookups
	typepkg   = types.NewPkg("type", "type")
)

// typeLookup returns the symbol for name in the fake type package,
// holding typepkgmu across the lookup.
func typeLookup(name string) *types.Sym {
	typepkgmu.Lock()
	s := typepkg.Lookup(name)
	typepkgmu.Unlock()
	return s
}

// typesym returns the symbol for t's runtime type descriptor.
func typesym(t *types.Type) *types.Sym {
	return typeLookup(typesymname(t))
}

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func tracksym(t *types.Type, f *types.Field) *types.Sym {
	return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
}

// typesymprefix returns the symbol "prefix.<t's short name>" in the type package.
func typesymprefix(prefix string, t *types.Type) *types.Sym {
	p := prefix + "." + t.ShortString()
	s := typeLookup(p)

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

// typenamesym returns t's type descriptor symbol and records t
// in signatset so the descriptor gets emitted.
func typenamesym(t *types.Type) *types.Sym {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("typenamesym %v", t)
	}
	s := typesym(t)
	signatsetmu.Lock()
	addsignat(t)
	signatsetmu.Unlock()
	return s
}

// typename returns a node holding the address of t's type descriptor.
func typename(t *types.Type) *Node {
	s := typenamesym(t)
	if s.Def == nil {
		// First use: create the external name node for the descriptor.
		n := newnamel(src.NoXPos, s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// itabname returns a node holding the address of the itab for concrete
// type t implementing interface itype, queueing the itab for emission.
func itabname(t, itype *types.Type) *Node {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
		Fatalf("itabname(%v, %v)", t, itype)
	}
	s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
	if s.Def == nil {
		// First use: create the external name node and record the itab.
		n := newname(s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
		itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
func isreflexive(t *types.Type) bool {
	switch t.Etype {
	case TBOOL,
		TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TSTRING,
		TCHAN:
		return true

	case TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TINTER:
		// Floats (and types containing them) are non-reflexive: NaN != NaN.
		return false

	case TARRAY:
		return isreflexive(t.Elem())

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			if !isreflexive(t1.Type) {
				return false
			}
		}
		return true

	default:
		Fatalf("bad type for map key: %v", t)
		return false
	}
}

// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
func needkeyupdate(t *types.Type) bool {
	switch t.Etype {
	case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32,
		TINT64, TUINT64, TUINTPTR, TPTR32, TPTR64, TUNSAFEPTR, TCHAN:
		return false

	case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0
		TINTER,
		TSTRING: // strings might have smaller backing stores
		return true

	case TARRAY:
		return needkeyupdate(t.Elem())

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			if needkeyupdate(t1.Type) {
				return true
			}
		}
		return false

	default:
		Fatalf("bad type for map key: %v", t)
		return true
	}
}

// formalType replaces byte and rune aliases with real types.
// They've been separate internally to make error messages
// better, but we have to merge them in the reflect tables.
func formalType(t *types.Type) *types.Type {
	if t == types.Bytetype || t == types.Runetype {
		return types.Types[t.Etype]
	}
	return t
}

// dtypesym emits the runtime type descriptor (reflect metadata) for t
// into the object file and returns the symbol holding it.
// Each descriptor is emitted at most once per compilation, guarded by
// the symbol's Siggen flag. The layout written here must stay in sync
// with the runtime's type structures (see ../../../../runtime/type.go).
func dtypesym(t *types.Type) *obj.LSym {
	t = formalType(t)
	if t.IsUntyped() {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	lsym := s.Linksym()
	if s.Siggen() {
		// Already emitted (or being emitted); just return the symbol.
		return lsym
	}
	s.SetSiggen(true)

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
		// For an unnamed pointer to a named type, the named element
		// determines which package owns the descriptor.
		tbase = t.Elem()
	}
	dupok := 0
	if tbase.Sym == nil {
		// Unnamed types may be emitted by many packages; let the
		// linker deduplicate them.
		dupok = obj.DUPOK
	}

	if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
		// named types from other files are defined only by those files
		if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
			return lsym
		}
		// TODO(mdempsky): Investigate whether this can happen.
		if isforw[tbase.Etype] {
			return lsym
		}
	}

	// ot tracks the current write offset within lsym.
	ot := 0
	switch t.Etype {
	default:
		// Basic types need only the common header plus uncommon data.
		ot = dcommontype(lsym, ot, t)
		ot = dextratype(lsym, ot, t, 0)

	case TARRAY:
		// ../../../../runtime/type.go:/arrayType
		s1 := dtypesym(t.Elem())
		t2 := types.NewSlice(t.Elem())
		s2 := dtypesym(t2) // slice-of-element type, referenced by arrayType
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dsymptr(lsym, ot, s2, 0)
		ot = duintptr(lsym, ot, uint64(t.NumElem()))
		ot = dextratype(lsym, ot, t, 0)

	case TSLICE:
		// ../../../../runtime/type.go:/sliceType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dextratype(lsym, ot, t, 0)

	case TCHAN:
		// ../../../../runtime/type.go:/chanType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = duintptr(lsym, ot, uint64(t.ChanDir()))
		ot = dextratype(lsym, ot, t, 0)

	case TFUNC:
		// Emit descriptors for all receiver, parameter, and result types.
		for _, t1 := range t.Recvs().Fields().Slice() {
			dtypesym(t1.Type)
		}
		isddd := false
		for _, t1 := range t.Params().Fields().Slice() {
			isddd = t1.Isddd() // true only possible on the last parameter
			dtypesym(t1.Type)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			dtypesym(t1.Type)
		}

		ot = dcommontype(lsym, ot, t)
		inCount := t.NumRecvs() + t.NumParams()
		outCount := t.NumResults()
		if isddd {
			// Variadic functions are flagged in the top bit of outCount.
			outCount |= 1 << 15
		}
		ot = duint16(lsym, ot, uint16(inCount))
		ot = duint16(lsym, ot, uint16(outCount))
		if Widthptr == 8 {
			ot += 4 // align for *rtype
		}

		// The in/out type pointers are appended after the uncommon section.
		dataAdd := (inCount + t.NumResults()) * Widthptr
		ot = dextratype(lsym, ot, t, dataAdd)

		// Array of rtype pointers follows funcType.
		for _, t1 := range t.Recvs().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Params().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../../runtime/type.go:/interfaceType
		ot = dcommontype(lsym, ot, t)

		var tpkg *types.Pkg
		if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
			tpkg = t.Sym.Pkg
		}
		ot = dgopkgpath(lsym, ot, tpkg)

		// Self-relative pointer to the imethod array, which is written
		// below, past the three-word header and the uncommon section.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n)) // len
		ot = duintptr(lsym, ot, uint64(n)) // cap
		dataAdd := imethodSize() * n
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, a := range m {
			// ../../../../runtime/type.go:/imethod
			exported := exportname(a.name)
			var pkg *types.Pkg
			if !exported && a.pkg != tpkg {
				// Non-exported methods from a different package
				// must carry their package explicitly.
				pkg = a.pkg
			}
			nsym := dname(a.name, "", pkg, exported)

			ot = dsymptrOff(lsym, ot, nsym, 0)
			ot = dsymptrOff(lsym, ot, dtypesym(a.type_), 0)
		}

	// ../../../../runtime/type.go:/mapType
	case TMAP:
		s1 := dtypesym(t.Key())
		s2 := dtypesym(t.Val())
		s3 := dtypesym(bmap(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dsymptr(lsym, ot, s2, 0)
		ot = dsymptr(lsym, ot, s3, 0)
		ot = dsymptr(lsym, ot, s4, 0)
		// Keys/values above the size limits are stored indirectly
		// (as pointers) in the buckets; record size and indirection.
		if t.Key().Width > MAXKEYSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Key().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		if t.Val().Width > MAXVALSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Val().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		ot = duint16(lsym, ot, uint16(bmap(t).Width))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
		ot = dextratype(lsym, ot, t, 0)

	case TPTR32, TPTR64:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(lsym, ot, t)
			ot = dextratype(lsym, ot, t, 0)

			break
		}

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())

		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	case TSTRUCT:
		fields := t.Fields().Slice()
		for _, t1 := range fields {
			dtypesym(t1.Type)
		}

		// All non-exported struct field names within a struct
		// type must originate from a single package. By
		// identifying and recording that package within the
		// struct type descriptor, we can omit that
		// information from the field descriptors.
		var spkg *types.Pkg
		for _, f := range fields {
			if !exportname(f.Sym.Name) {
				spkg = f.Sym.Pkg
				break
			}
		}

		ot = dcommontype(lsym, ot, t)
		ot = dgopkgpath(lsym, ot, spkg)
		// Self-relative pointer to the structField array written below.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(len(fields))) // len
		ot = duintptr(lsym, ot, uint64(len(fields))) // cap

		dataAdd := len(fields) * structfieldSize()
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, f := range fields {
			// ../../../../runtime/type.go:/structField
			ot = dnameField(lsym, ot, spkg, f)
			ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
			// Field offset is stored shifted left by one, with the
			// low bit marking embedded (anonymous) fields.
			offsetAnon := uint64(f.Offset) << 1
			if offsetAnon>>1 != uint64(f.Offset) {
				Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
			}
			if f.Embedded != 0 {
				offsetAnon |= 1
			}
			ot = duintptr(lsym, ot, offsetAnon)
		}
	}

	ot = dextratypeData(lsym, ot, t)
	ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))

	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		switch t.Etype {
		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	// Do not put Noalg types in typelinks. See issue #22605.
	if typeHasNoAlg(t) {
		keep = false
	}
	lsym.Set(obj.AttrMakeTypelink, keep)

	return lsym
}

// peekitabs gathers, for each itabEntry, the methods on
// the concrete type that implement the interface,
// caching them in the entry for later use by itabsym.
func peekitabs() {
	for i := range itabs {
		tab := &itabs[i]
		methods := genfun(tab.t, tab.itype)
		if len(methods) == 0 {
			continue
		}
		tab.entries = methods
	}
}

// genfun returns, for the given concrete type t and interface
// type it, the (sorted) set of method symbols on the concrete
// type that implement the interface. It returns nil if either
// type is nil or the interface has no methods.
func genfun(t, it *types.Type) []*obj.LSym {
	if t == nil || it == nil {
		return nil
	}
	sigs := imethods(it)
	methods := methods(t)
	out := make([]*obj.LSym, 0, len(sigs))
	if len(sigs) == 0 {
		return nil
	}

	// both sigs and methods are sorted by name,
	// so we can find the intersect in a single pass
	for _, m := range methods {
		if m.name == sigs[0].name {
			out = append(out, m.isym.Linksym())
			sigs = sigs[1:]
			if len(sigs) == 0 {
				break
			}
		}
	}

	return out
}

// itabsym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func itabsym(it *obj.LSym, offset int64) *obj.LSym {
	var syms []*obj.LSym
	if it == nil {
		return nil
	}

	// Find the cached method list for this itab (filled in by peekitabs).
	for i := range itabs {
		e := &itabs[i]
		if e.lsym == it {
			syms = e.entries
			break
		}
	}
	if syms == nil {
		return nil
	}

	// keep this arithmetic in sync with *itab layout
	methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
	if methodnum >= len(syms) {
		return nil
	}
	return syms[methodnum]
}

// addsignat records that a runtime type descriptor should be
// emitted for t (see dumpsignats).
func addsignat(t *types.Type) {
	signatset[t] = struct{}{}
}

// addsignats queues type descriptors for every OTYPE declaration
// in dcls.
func addsignats(dcls []*Node) {
	// copy types from dcl list to signatset
	for _, n := range dcls {
		if n.Op == OTYPE {
			addsignat(n.Type)
		}
	}
}

// dumpsignats emits runtime type descriptors for every type in
// signatset, in deterministic (sorted) order.
func dumpsignats() {
	// Process signatset. Use a loop, as dtypesym adds
	// entries to signatset while it is being processed.
	signats := make([]typeAndStr, len(signatset))
	for len(signatset) > 0 {
		signats = signats[:0]
		// Transfer entries to a slice and sort, for reproducible builds.
		for t := range signatset {
			signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
			delete(signatset, t)
		}
		sort.Sort(typesByString(signats))
		for _, ts := range signats {
			t := ts.t
			dtypesym(t)
			if t.Sym != nil {
				// Named types also get a descriptor for their pointer type.
				dtypesym(types.NewPtr(t))
			}
		}
	}
}

// dumptabs emits the accumulated itab and ptab symbols into the
// object file.
func dumptabs() {
	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//   inter  *interfacetype
		//   _type  *_type
		//   hash   uint32
		//   _      [4]byte
		//   fun    [1]uintptr // variable sized
		// }
		o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0)
		o = dsymptr(i.lsym, o, dtypesym(i.t), 0)
		o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
		o += 4                                // skip unused field
		for _, fn := range genfun(i.t, i.itype) {
			o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
		}
		// Nothing writes static itabs, so they are read only.
		ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
		// Emit an itablink entry so the linker/runtime can find this itab.
		ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
		dsymptr(ilink, 0, i.lsym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// process ptabs
	if localpkg.Name == "main" && len(ptabs) > 0 {
		ot := 0
		s := Ctxt.Lookup("go.plugin.tabs")
		for _, p := range ptabs {
			// Dump ptab symbol into go.pluginsym package.
			//
			// type ptab struct {
			//   name nameOff
			//   typ  typeOff // pointer to symbol
			// }
			nsym := dname(p.s.Name, "", nil, true)
			ot = dsymptrOff(s, ot, nsym, 0)
			ot = dsymptrOff(s, ot, dtypesym(p.t), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))

		ot = 0
		s = Ctxt.Lookup("go.plugin.exports")
		for _, p := range ptabs {
			ot = dsymptr(s, ot, p.s.Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))
	}
}

// dumpimportstrings emits the import-path string symbols for all
// imported packages.
func dumpimportstrings() {
	// generate import strings for imported packages
	for _, p := range types.ImportedPkgList() {
		dimportpath(p)
	}
}

// dumpbasictypes emits descriptors for the predeclared basic types
// when compiling package runtime.
func dumpbasictypes() {
	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in object files.
	if myimportpath == "runtime" {
		for i := types.EType(1); i <= TBOOL; i++ {
			dtypesym(types.NewPtr(types.Types[i]))
		}
		dtypesym(types.NewPtr(types.Types[TSTRING]))
		dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(types.NewPtr(types.Errortype))

		dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(types.NewPkg("main", ""))
	}
}

// typeAndStr pairs a type with its short and regular string forms,
// precomputed for sorting in dumpsignats.
type typeAndStr struct {
	t       *types.Type
	short   string // typesymname(t)
	regular string // t.String()
}

// typesByString sorts types by short name, breaking ties with the
// regular (fully expanded) name.
type typesByString []typeAndStr

func (a typesByString) Len() int { return len(a) }
func (a typesByString) Less(i, j int) bool {
	if a[i].short != a[j].short {
		return a[i].short < a[j].short
	}
	// When the only difference between the types is whether
	// they refer to byte or uint8, such as **byte vs **uint8,
	// the types' ShortStrings can be identical.
	// To preserve deterministic sort ordering, sort these by String().
	return a[i].regular < a[j].regular
}
func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// dalgsym emits and returns the algorithm-table symbol (hash and
// equality function closures) for type t.
func dalgsym(t *types.Type) *obj.LSym {
	var lsym *obj.LSym
	var hashfunc *obj.LSym
	var eqfunc *obj.LSym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s := typeLookup(p)
		lsym = s.Linksym()
		if s.AlgGen() {
			// Already generated for this size.
			return lsym
		}
		s.SetAlgGen(true)

		if memhashvarlen == nil {
			memhashvarlen = sysfunc("memhash_varlen")
			memequalvarlen = sysfunc("memequal_varlen")
		}

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = typeLookup(p).Linksym()

		ot := 0
		ot = dsymptr(hashfunc, ot, memhashvarlen, 0)
		ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = typeLookup(p).Linksym()

		ot = 0
		ot = dsymptr(eqfunc, ot, memequalvarlen, 0)
		ot = duintptr(eqfunc, ot, uint64(t.Width))
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s := typesymprefix(".alg", t)
		lsym = s.Linksym()

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t).Linksym()
		eqfunc = typesymprefix(".eqfunc", t).Linksym()

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash.Linksym(), 0)
		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq.Linksym(), 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(lsym, ot, hashfunc, 0)
	ot = dsymptr(lsym, ot, eqfunc, 0)
	ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA)
	return lsym
}

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit
entries describing where pointers are in a given type. 1705 // Above this length, the GC information is recorded as a GC program, 1706 // which can express repetition compactly. In either form, the 1707 // information is used by the runtime to initialize the heap bitmap, 1708 // and for large types (like 128 or more words), they are roughly the 1709 // same speed. GC programs are never much larger and often more 1710 // compact. (If large arrays are involved, they can be arbitrarily 1711 // more compact.) 1712 // 1713 // The cutoff must be large enough that any allocation large enough to 1714 // use a GC program is large enough that it does not share heap bitmap 1715 // bytes with any other objects, allowing the GC program execution to 1716 // assume an aligned start and not use atomic operations. In the current 1717 // runtime, this means all malloc size classes larger than the cutoff must 1718 // be multiples of four words. On 32-bit systems that's 16 bytes, and 1719 // all size classes >= 16 bytes are 16-byte aligned, so no real constraint. 1720 // On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed 1721 // for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated 1722 // is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes 1723 // must be >= 4. 1724 // 1725 // We used to use 16 because the GC programs do have some constant overhead 1726 // to get started, and processing 128 pointers seems to be enough to 1727 // amortize that overhead well. 1728 // 1729 // To make sure that the runtime's chansend can call typeBitsBulkBarrier, 1730 // we raised the limit to 2048, so that even 32-bit systems are guaranteed to 1731 // use bitmaps for objects up to 64 kB in size. 1732 // 1733 // Also known to reflect/type.go. 
//
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		// Small enough to describe with a plain pointer bitmap.
		lsym = dgcptrmask(t)
		return
	}

	// Too large for a bitmap: use a GC program instead.
	useGCProg = true
	lsym, ptrdata = dgcprog(t)
	return
}

// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
// Masks are named by their hex contents, so identical masks across types
// collapse to a single (DUPOK) symbol.
func dgcptrmask(t *types.Type) *obj.LSym {
	// One bit per pointer-sized word of the pointer-containing prefix.
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	sym := Runtimepkg.Lookup(p)
	lsym := sym.Linksym()
	if !sym.Uniq() {
		sym.SetUniq(true)
		for i, x := range ptrmask {
			duint8(lsym, i, x)
		}
		ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return lsym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *types.Type, ptrmask []byte) {
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !types.Haspointers(t) {
		return
	}

	// Walk the type once into a bit vector, then pack it into bytes.
	vec := bvalloc(8 * int32(len(ptrmask)))
	onebitwalktype1(t, 0, vec)

	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if vec.Get(int32(i)) {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}

// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	lsym := typesymprefix(".gcprog", t).Linksym()
	var p GCProg
	p.init(lsym)
	p.emit(t, 0)
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	// Sanity check: the program must cover at least the pointer-containing
	// prefix and at most the whole type.
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return lsym, offset
}

// GCProg accumulates a GC program for a type into an object-file symbol.
type GCProg struct {
	lsym   *obj.LSym
	symoff int // next write offset within lsym
	w      gcprog.Writer
}

var Debug_gcprog int // set by -d gcprog

// init prepares p to write a GC program into lsym.
// The first 4 bytes of the symbol are reserved for the program
// length, which end backpatches.
func (p *GCProg) init(lsym *obj.LSym) {
	p.lsym = lsym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
		p.w.Debug(os.Stderr)
	}
}

// writeByte appends one encoded program byte to the symbol.
func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.lsym, p.symoff, x)
}

// end flushes the program, backpatches the length word at offset 0,
// and declares the finished symbol to the linker.
func (p *GCProg) end() {
	p.w.End()
	duint32(p.lsym, 0, uint32(p.symoff-4))
	ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
	}
}

// emit appends the GC program fragment describing the pointers of t,
// located at the given byte offset within the enclosing object.
func (p *GCProg) emit(t *types.Type, offset int64) {
	dowidth(t)
	if !types.Haspointers(t) {
		return
	}
	if t.Width == int64(Widthptr) {
		// A single pointer-sized word that has pointers is itself a pointer.
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		// Only the data word (first word) of a string is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Both words of an interface are pointers.
		p.w.Ptr(offset / int64(Widthptr))
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		// Only the data word (first word) of a slice is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		// Emit one element, then a Repeat instruction for the rest.
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
func zeroaddr(size int64) *Node {
	if size >= 1<<31 {
		Fatalf("map value too big %d", size)
	}
	// Record the largest zero region requested so far; presumably the
	// shared zero symbol is sized from zerosize elsewhere — defined
	// outside this chunk.
	if zerosize < size {
		zerosize = size
	}
	s := mappkg.Lookup("zero")
	if s.Def == nil {
		// Lazily create the node for the shared zero symbol.
		x := newname(s)
		x.Type = types.Types[TUINT8]
		x.SetClass(PEXTERN)
		x.SetTypecheck(1)
		s.Def = asTypesNode(x)
	}
	z := nod(OADDR, asNode(s.Def), nil)
	z.Type = types.NewPtr(types.Types[TUINT8])
	z.SetAddable(true)
	z.SetTypecheck(1)
	return z
}