1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Package reflect implements run-time reflection, allowing a program to 6 // manipulate objects with arbitrary types. The typical use is to take a value 7 // with static type interface{} and extract its dynamic type information by 8 // calling TypeOf, which returns a Type. 9 // 10 // A call to ValueOf returns a Value representing the run-time data. 11 // Zero takes a Type and returns a Value representing a zero value 12 // for that type. 13 // 14 // See "The Laws of Reflection" for an introduction to reflection in Go: 15 // https://golang.org/doc/articles/laws_of_reflection.html 16 package reflect 17 18 import ( 19 "runtime" 20 "strconv" 21 "sync" 22 "unsafe" 23 ) 24 25 // Type is the representation of a Go type. 26 // 27 // Not all methods apply to all kinds of types. Restrictions, 28 // if any, are noted in the documentation for each method. 29 // Use the Kind method to find out the kind of type before 30 // calling kind-specific methods. Calling a method 31 // inappropriate to the kind of type causes a run-time panic. 32 // 33 // Type values are comparable, such as with the == operator. 34 // Two Type values are equal if they represent identical types. 35 type Type interface { 36 // Methods applicable to all types. 37 38 // Align returns the alignment in bytes of a value of 39 // this type when allocated in memory. 40 Align() int 41 42 // FieldAlign returns the alignment in bytes of a value of 43 // this type when used as a field in a struct. 44 FieldAlign() int 45 46 // Method returns the i'th method in the type's method set. 47 // It panics if i is not in the range [0, NumMethod()). 48 // 49 // For a non-interface type T or *T, the returned Method's Type and Func 50 // fields describe a function whose first argument is the receiver. 
51 // 52 // For an interface type, the returned Method's Type field gives the 53 // method signature, without a receiver, and the Func field is nil. 54 Method(int) Method 55 56 // MethodByName returns the method with that name in the type's 57 // method set and a boolean indicating if the method was found. 58 // 59 // For a non-interface type T or *T, the returned Method's Type and Func 60 // fields describe a function whose first argument is the receiver. 61 // 62 // For an interface type, the returned Method's Type field gives the 63 // method signature, without a receiver, and the Func field is nil. 64 MethodByName(string) (Method, bool) 65 66 // NumMethod returns the number of exported methods in the type's method set. 67 NumMethod() int 68 69 // Name returns the type's name within its package. 70 // It returns an empty string for unnamed types. 71 Name() string 72 73 // PkgPath returns a named type's package path, that is, the import path 74 // that uniquely identifies the package, such as "encoding/base64". 75 // If the type was predeclared (string, error) or unnamed (*T, struct{}, []int), 76 // the package path will be the empty string. 77 PkgPath() string 78 79 // Size returns the number of bytes needed to store 80 // a value of the given type; it is analogous to unsafe.Sizeof. 81 Size() uintptr 82 83 // String returns a string representation of the type. 84 // The string representation may use shortened package names 85 // (e.g., base64 instead of "encoding/base64") and is not 86 // guaranteed to be unique among types. To test for type identity, 87 // compare the Types directly. 88 String() string 89 90 // Kind returns the specific kind of this type. 91 Kind() Kind 92 93 // Implements reports whether the type implements the interface type u. 94 Implements(u Type) bool 95 96 // AssignableTo reports whether a value of the type is assignable to type u. 
97 AssignableTo(u Type) bool 98 99 // ConvertibleTo reports whether a value of the type is convertible to type u. 100 ConvertibleTo(u Type) bool 101 102 // Comparable reports whether values of this type are comparable. 103 Comparable() bool 104 105 // Methods applicable only to some types, depending on Kind. 106 // The methods allowed for each kind are: 107 // 108 // Int*, Uint*, Float*, Complex*: Bits 109 // Array: Elem, Len 110 // Chan: ChanDir, Elem 111 // Func: In, NumIn, Out, NumOut, IsVariadic. 112 // Map: Key, Elem 113 // Ptr: Elem 114 // Slice: Elem 115 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField 116 117 // Bits returns the size of the type in bits. 118 // It panics if the type's Kind is not one of the 119 // sized or unsized Int, Uint, Float, or Complex kinds. 120 Bits() int 121 122 // ChanDir returns a channel type's direction. 123 // It panics if the type's Kind is not Chan. 124 ChanDir() ChanDir 125 126 // IsVariadic reports whether a function type's final input parameter 127 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's 128 // implicit actual type []T. 129 // 130 // For concreteness, if t represents func(x int, y ... float64), then 131 // 132 // t.NumIn() == 2 133 // t.In(0) is the reflect.Type for "int" 134 // t.In(1) is the reflect.Type for "[]float64" 135 // t.IsVariadic() == true 136 // 137 // IsVariadic panics if the type's Kind is not Func. 138 IsVariadic() bool 139 140 // Elem returns a type's element type. 141 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice. 142 Elem() Type 143 144 // Field returns a struct type's i'th field. 145 // It panics if the type's Kind is not Struct. 146 // It panics if i is not in the range [0, NumField()). 147 Field(i int) StructField 148 149 // FieldByIndex returns the nested field corresponding 150 // to the index sequence. It is equivalent to calling Field 151 // successively for each index i. 
152 // It panics if the type's Kind is not Struct. 153 FieldByIndex(index []int) StructField 154 155 // FieldByName returns the struct field with the given name 156 // and a boolean indicating if the field was found. 157 FieldByName(name string) (StructField, bool) 158 159 // FieldByNameFunc returns the struct field with a name 160 // that satisfies the match function and a boolean indicating if 161 // the field was found. 162 // 163 // FieldByNameFunc considers the fields in the struct itself 164 // and then the fields in any anonymous structs, in breadth first order, 165 // stopping at the shallowest nesting depth containing one or more 166 // fields satisfying the match function. If multiple fields at that depth 167 // satisfy the match function, they cancel each other 168 // and FieldByNameFunc returns no match. 169 // This behavior mirrors Go's handling of name lookup in 170 // structs containing anonymous fields. 171 FieldByNameFunc(match func(string) bool) (StructField, bool) 172 173 // In returns the type of a function type's i'th input parameter. 174 // It panics if the type's Kind is not Func. 175 // It panics if i is not in the range [0, NumIn()). 176 In(i int) Type 177 178 // Key returns a map type's key type. 179 // It panics if the type's Kind is not Map. 180 Key() Type 181 182 // Len returns an array type's length. 183 // It panics if the type's Kind is not Array. 184 Len() int 185 186 // NumField returns a struct type's field count. 187 // It panics if the type's Kind is not Struct. 188 NumField() int 189 190 // NumIn returns a function type's input parameter count. 191 // It panics if the type's Kind is not Func. 192 NumIn() int 193 194 // NumOut returns a function type's output parameter count. 195 // It panics if the type's Kind is not Func. 196 NumOut() int 197 198 // Out returns the type of a function type's i'th output parameter. 199 // It panics if the type's Kind is not Func. 200 // It panics if i is not in the range [0, NumOut()). 
201 Out(i int) Type 202 203 common() *rtype 204 uncommon() *uncommonType 205 } 206 207 // BUG(rsc): FieldByName and related functions consider struct field names to be equal 208 // if the names are equal, even if they are unexported names originating 209 // in different packages. The practical effect of this is that the result of 210 // t.FieldByName("x") is not well defined if the struct type t contains 211 // multiple fields named x (embedded from different packages). 212 // FieldByName may return one of the fields named x or may report that there are none. 213 // See golang.org/issue/4876 for more details. 214 215 /* 216 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go). 217 * A few are known to ../runtime/type.go to convey to debuggers. 218 * They are also known to ../runtime/type.go. 219 */ 220 221 // A Kind represents the specific kind of type that a Type represents. 222 // The zero Kind is not a valid kind. 223 type Kind uint 224 225 const ( 226 Invalid Kind = iota 227 Bool 228 Int 229 Int8 230 Int16 231 Int32 232 Int64 233 Uint 234 Uint8 235 Uint16 236 Uint32 237 Uint64 238 Uintptr 239 Float32 240 Float64 241 Complex64 242 Complex128 243 Array 244 Chan 245 Func 246 Interface 247 Map 248 Ptr 249 Slice 250 String 251 Struct 252 UnsafePointer 253 ) 254 255 // tflag is used by an rtype to signal what extra type information is 256 // available in the memory directly following the rtype value. 257 // 258 // tflag values must be kept in sync with copies in: 259 // cmd/compile/internal/gc/reflect.go 260 // cmd/link/internal/ld/decodesym.go 261 // runtime/type.go 262 type tflag uint8 263 264 const ( 265 // tflagUncommon means that there is a pointer, *uncommonType, 266 // just beyond the outer type structure. 
267 // 268 // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0, 269 // then t has uncommonType data and it can be accessed as: 270 // 271 // type tUncommon struct { 272 // structType 273 // u uncommonType 274 // } 275 // u := &(*tUncommon)(unsafe.Pointer(t)).u 276 tflagUncommon tflag = 1 << 0 277 278 // tflagExtraStar means the name in the str field has an 279 // extraneous '*' prefix. This is because for most types T in 280 // a program, the type *T also exists and reusing the str data 281 // saves binary size. 282 tflagExtraStar tflag = 1 << 1 283 284 // tflagNamed means the type has a name. 285 tflagNamed tflag = 1 << 2 286 ) 287 288 // rtype is the common implementation of most values. 289 // It is embedded in other, public struct types, but always 290 // with a unique tag like `reflect:"array"` or `reflect:"ptr"` 291 // so that code cannot convert from, say, *arrayType to *ptrType. 292 type rtype struct { 293 size uintptr 294 ptrdata uintptr 295 hash uint32 // hash of type; avoids computation in hash tables 296 tflag tflag // extra type information flags 297 align uint8 // alignment of variable with this type 298 fieldAlign uint8 // alignment of struct field with this type 299 kind uint8 // enumeration for C 300 alg *typeAlg // algorithm table 301 gcdata *byte // garbage collection data 302 str nameOff // string form 303 ptrToThis typeOff // type for pointer to this type, may be zero 304 } 305 306 // a copy of runtime.typeAlg 307 type typeAlg struct { 308 // function for hashing objects of this type 309 // (ptr to object, seed) -> hash 310 hash func(unsafe.Pointer, uintptr) uintptr 311 // function for comparing objects of this type 312 // (ptr to object A, ptr to object B) -> ==? 
313 equal func(unsafe.Pointer, unsafe.Pointer) bool 314 } 315 316 // Method on non-interface type 317 type method struct { 318 name nameOff // name of method 319 mtyp typeOff // method type (without receiver) 320 ifn textOff // fn used in interface call (one-word receiver) 321 tfn textOff // fn used for normal method call 322 } 323 324 // uncommonType is present only for types with names or methods 325 // (if T is a named type, the uncommonTypes for T and *T have methods). 326 // Using a pointer to this struct reduces the overall size required 327 // to describe an unnamed type with no methods. 328 type uncommonType struct { 329 pkgPath nameOff // import path; empty for built-in types like int, string 330 mcount uint16 // number of methods 331 _ uint16 // unused 332 moff uint32 // offset from this uncommontype to [mcount]method 333 _ uint32 // unused 334 } 335 336 // ChanDir represents a channel type's direction. 337 type ChanDir int 338 339 const ( 340 RecvDir ChanDir = 1 << iota // <-chan 341 SendDir // chan<- 342 BothDir = RecvDir | SendDir // chan 343 ) 344 345 // arrayType represents a fixed array type. 346 type arrayType struct { 347 rtype `reflect:"array"` 348 elem *rtype // array element type 349 slice *rtype // slice type 350 len uintptr 351 } 352 353 // chanType represents a channel type. 354 type chanType struct { 355 rtype `reflect:"chan"` 356 elem *rtype // channel element type 357 dir uintptr // channel direction (ChanDir) 358 } 359 360 // funcType represents a function type. 361 // 362 // A *rtype for each in and out parameter is stored in an array that 363 // directly follows the funcType (and possibly its uncommonType). So 364 // a function type with one method, one input, and one output is: 365 // 366 // struct { 367 // funcType 368 // uncommonType 369 // [2]*rtype // [0] is in, [1] is out 370 // } 371 type funcType struct { 372 rtype `reflect:"func"` 373 inCount uint16 374 outCount uint16 // top bit is set if last input parameter is ... 
375 } 376 377 // imethod represents a method on an interface type 378 type imethod struct { 379 name nameOff // name of method 380 typ typeOff // .(*FuncType) underneath 381 } 382 383 // interfaceType represents an interface type. 384 type interfaceType struct { 385 rtype `reflect:"interface"` 386 pkgPath name // import path 387 methods []imethod // sorted by hash 388 } 389 390 // mapType represents a map type. 391 type mapType struct { 392 rtype `reflect:"map"` 393 key *rtype // map key type 394 elem *rtype // map element (value) type 395 bucket *rtype // internal bucket structure 396 hmap *rtype // internal map header 397 keysize uint8 // size of key slot 398 indirectkey uint8 // store ptr to key instead of key itself 399 valuesize uint8 // size of value slot 400 indirectvalue uint8 // store ptr to value instead of value itself 401 bucketsize uint16 // size of bucket 402 reflexivekey bool // true if k==k for all keys 403 needkeyupdate bool // true if we need to update key on an overwrite 404 } 405 406 // ptrType represents a pointer type. 407 type ptrType struct { 408 rtype `reflect:"ptr"` 409 elem *rtype // pointer element (pointed at) type 410 } 411 412 // sliceType represents a slice type. 413 type sliceType struct { 414 rtype `reflect:"slice"` 415 elem *rtype // slice element type 416 } 417 418 // Struct field 419 type structField struct { 420 name name // name is empty for embedded fields 421 typ *rtype // type of field 422 offset uintptr // byte offset of field within struct 423 } 424 425 // structType represents a struct type. 426 type structType struct { 427 rtype `reflect:"struct"` 428 pkgPath name 429 fields []structField // sorted by offset 430 } 431 432 // name is an encoded type name with optional extra data. 
433 // 434 // The first byte is a bit field containing: 435 // 436 // 1<<0 the name is exported 437 // 1<<1 tag data follows the name 438 // 1<<2 pkgPath nameOff follows the name and tag 439 // 440 // The next two bytes are the data length: 441 // 442 // l := uint16(data[1])<<8 | uint16(data[2]) 443 // 444 // Bytes [3:3+l] are the string data. 445 // 446 // If tag data follows then bytes 3+l and 3+l+1 are the tag length, 447 // with the data following. 448 // 449 // If the import path follows, then 4 bytes at the end of 450 // the data form a nameOff. The import path is only set for concrete 451 // methods that are defined in a different package than their type. 452 // 453 // If a name starts with "*", then the exported bit represents 454 // whether the pointed to type is exported. 455 type name struct { 456 bytes *byte 457 } 458 459 func (n name) data(off int) *byte { 460 return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off))) 461 } 462 463 func (n name) isExported() bool { 464 return (*n.bytes)&(1<<0) != 0 465 } 466 467 func (n name) nameLen() int { 468 return int(uint16(*n.data(1))<<8 | uint16(*n.data(2))) 469 } 470 471 func (n name) tagLen() int { 472 if *n.data(0)&(1<<1) == 0 { 473 return 0 474 } 475 off := 3 + n.nameLen() 476 return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1))) 477 } 478 479 func (n name) name() (s string) { 480 if n.bytes == nil { 481 return 482 } 483 b := (*[4]byte)(unsafe.Pointer(n.bytes)) 484 485 hdr := (*stringHeader)(unsafe.Pointer(&s)) 486 hdr.Data = unsafe.Pointer(&b[3]) 487 hdr.Len = int(b[1])<<8 | int(b[2]) 488 return s 489 } 490 491 func (n name) tag() (s string) { 492 tl := n.tagLen() 493 if tl == 0 { 494 return "" 495 } 496 nl := n.nameLen() 497 hdr := (*stringHeader)(unsafe.Pointer(&s)) 498 hdr.Data = unsafe.Pointer(n.data(3 + nl + 2)) 499 hdr.Len = tl 500 return s 501 } 502 503 func (n name) pkgPath() string { 504 if n.bytes == nil || *n.data(0)&(1<<2) == 0 { 505 return "" 506 } 507 off := 3 + n.nameLen() 508 if tl 
:= n.tagLen(); tl > 0 { 509 off += 2 + tl 510 } 511 var nameOff int32 512 copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:]) 513 pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))} 514 return pkgPathName.name() 515 } 516 517 // round n up to a multiple of a. a must be a power of 2. 518 func round(n, a uintptr) uintptr { 519 return (n + a - 1) &^ (a - 1) 520 } 521 522 func newName(n, tag, pkgPath string, exported bool) name { 523 if len(n) > 1<<16-1 { 524 panic("reflect.nameFrom: name too long: " + n) 525 } 526 if len(tag) > 1<<16-1 { 527 panic("reflect.nameFrom: tag too long: " + tag) 528 } 529 530 var bits byte 531 l := 1 + 2 + len(n) 532 if exported { 533 bits |= 1 << 0 534 } 535 if len(tag) > 0 { 536 l += 2 + len(tag) 537 bits |= 1 << 1 538 } 539 if pkgPath != "" { 540 bits |= 1 << 2 541 } 542 543 b := make([]byte, l) 544 b[0] = bits 545 b[1] = uint8(len(n) >> 8) 546 b[2] = uint8(len(n)) 547 copy(b[3:], n) 548 if len(tag) > 0 { 549 tb := b[3+len(n):] 550 tb[0] = uint8(len(tag) >> 8) 551 tb[1] = uint8(len(tag)) 552 copy(tb[2:], tag) 553 } 554 555 if pkgPath != "" { 556 panic("reflect: creating a name with a package path is not supported") 557 } 558 559 return name{bytes: &b[0]} 560 } 561 562 /* 563 * The compiler knows the exact layout of all the data structures above. 564 * The compiler does not know about the data structures and methods below. 565 */ 566 567 // Method represents a single method. 568 type Method struct { 569 // Name is the method name. 570 // PkgPath is the package path that qualifies a lower case (unexported) 571 // method name. It is empty for upper case (exported) method names. 572 // The combination of PkgPath and Name uniquely identifies a method 573 // in a method set. 
574 // See https://golang.org/ref/spec#Uniqueness_of_identifiers 575 Name string 576 PkgPath string 577 578 Type Type // method type 579 Func Value // func with receiver as first argument 580 Index int // index for Type.Method 581 } 582 583 const ( 584 kindDirectIface = 1 << 5 585 kindGCProg = 1 << 6 // Type.gc points to GC program 586 kindNoPointers = 1 << 7 587 kindMask = (1 << 5) - 1 588 ) 589 590 func (k Kind) String() string { 591 if int(k) < len(kindNames) { 592 return kindNames[k] 593 } 594 return "kind" + strconv.Itoa(int(k)) 595 } 596 597 var kindNames = []string{ 598 Invalid: "invalid", 599 Bool: "bool", 600 Int: "int", 601 Int8: "int8", 602 Int16: "int16", 603 Int32: "int32", 604 Int64: "int64", 605 Uint: "uint", 606 Uint8: "uint8", 607 Uint16: "uint16", 608 Uint32: "uint32", 609 Uint64: "uint64", 610 Uintptr: "uintptr", 611 Float32: "float32", 612 Float64: "float64", 613 Complex64: "complex64", 614 Complex128: "complex128", 615 Array: "array", 616 Chan: "chan", 617 Func: "func", 618 Interface: "interface", 619 Map: "map", 620 Ptr: "ptr", 621 Slice: "slice", 622 String: "string", 623 Struct: "struct", 624 UnsafePointer: "unsafe.Pointer", 625 } 626 627 func (t *uncommonType) methods() []method { 628 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff)))[:t.mcount:t.mcount] 629 } 630 631 // resolveNameOff resolves a name offset from a base pointer. 632 // The (*rtype).nameOff method is a convenience wrapper for this function. 633 // Implemented in the runtime package. 634 func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer 635 636 // resolveTypeOff resolves an *rtype offset from a base type. 637 // The (*rtype).typeOff method is a convenience wrapper for this function. 638 // Implemented in the runtime package. 639 func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 640 641 // resolveTextOff resolves an function pointer offset from a base type. 
642 // The (*rtype).textOff method is a convenience wrapper for this function. 643 // Implemented in the runtime package. 644 func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer 645 646 // addReflectOff adds a pointer to the reflection lookup map in the runtime. 647 // It returns a new ID that can be used as a typeOff or textOff, and will 648 // be resolved correctly. Implemented in the runtime package. 649 func addReflectOff(ptr unsafe.Pointer) int32 650 651 // resolveReflectType adds a name to the reflection lookup map in the runtime. 652 // It returns a new nameOff that can be used to refer to the pointer. 653 func resolveReflectName(n name) nameOff { 654 return nameOff(addReflectOff(unsafe.Pointer(n.bytes))) 655 } 656 657 // resolveReflectType adds a *rtype to the reflection lookup map in the runtime. 658 // It returns a new typeOff that can be used to refer to the pointer. 659 func resolveReflectType(t *rtype) typeOff { 660 return typeOff(addReflectOff(unsafe.Pointer(t))) 661 } 662 663 // resolveReflectText adds a function pointer to the reflection lookup map in 664 // the runtime. It returns a new textOff that can be used to refer to the 665 // pointer. 
666 func resolveReflectText(ptr unsafe.Pointer) textOff { 667 return textOff(addReflectOff(ptr)) 668 } 669 670 type nameOff int32 // offset to a name 671 type typeOff int32 // offset to an *rtype 672 type textOff int32 // offset from top of text section 673 674 func (t *rtype) nameOff(off nameOff) name { 675 return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))} 676 } 677 678 func (t *rtype) typeOff(off typeOff) *rtype { 679 return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off))) 680 } 681 682 func (t *rtype) textOff(off textOff) unsafe.Pointer { 683 return resolveTextOff(unsafe.Pointer(t), int32(off)) 684 } 685 686 func (t *rtype) uncommon() *uncommonType { 687 if t.tflag&tflagUncommon == 0 { 688 return nil 689 } 690 switch t.Kind() { 691 case Struct: 692 return &(*structTypeUncommon)(unsafe.Pointer(t)).u 693 case Ptr: 694 type u struct { 695 ptrType 696 u uncommonType 697 } 698 return &(*u)(unsafe.Pointer(t)).u 699 case Func: 700 type u struct { 701 funcType 702 u uncommonType 703 } 704 return &(*u)(unsafe.Pointer(t)).u 705 case Slice: 706 type u struct { 707 sliceType 708 u uncommonType 709 } 710 return &(*u)(unsafe.Pointer(t)).u 711 case Array: 712 type u struct { 713 arrayType 714 u uncommonType 715 } 716 return &(*u)(unsafe.Pointer(t)).u 717 case Chan: 718 type u struct { 719 chanType 720 u uncommonType 721 } 722 return &(*u)(unsafe.Pointer(t)).u 723 case Map: 724 type u struct { 725 mapType 726 u uncommonType 727 } 728 return &(*u)(unsafe.Pointer(t)).u 729 case Interface: 730 type u struct { 731 interfaceType 732 u uncommonType 733 } 734 return &(*u)(unsafe.Pointer(t)).u 735 default: 736 type u struct { 737 rtype 738 u uncommonType 739 } 740 return &(*u)(unsafe.Pointer(t)).u 741 } 742 } 743 744 func (t *rtype) String() string { 745 s := t.nameOff(t.str).name() 746 if t.tflag&tflagExtraStar != 0 { 747 return s[1:] 748 } 749 return s 750 } 751 752 func (t *rtype) Size() uintptr { return t.size } 753 754 func (t *rtype) Bits() int { 755 if t 
== nil { 756 panic("reflect: Bits of nil Type") 757 } 758 k := t.Kind() 759 if k < Int || k > Complex128 { 760 panic("reflect: Bits of non-arithmetic Type " + t.String()) 761 } 762 return int(t.size) * 8 763 } 764 765 func (t *rtype) Align() int { return int(t.align) } 766 767 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) } 768 769 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) } 770 771 func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 } 772 773 func (t *rtype) common() *rtype { return t } 774 775 var methodCache struct { 776 sync.RWMutex 777 m map[*rtype][]method 778 } 779 780 func (t *rtype) exportedMethods() []method { 781 methodCache.RLock() 782 methods, found := methodCache.m[t] 783 methodCache.RUnlock() 784 785 if found { 786 return methods 787 } 788 789 ut := t.uncommon() 790 if ut == nil { 791 return nil 792 } 793 allm := ut.methods() 794 allExported := true 795 for _, m := range allm { 796 name := t.nameOff(m.name) 797 if !name.isExported() { 798 allExported = false 799 break 800 } 801 } 802 if allExported { 803 methods = allm 804 } else { 805 methods = make([]method, 0, len(allm)) 806 for _, m := range allm { 807 name := t.nameOff(m.name) 808 if name.isExported() { 809 methods = append(methods, m) 810 } 811 } 812 methods = methods[:len(methods):len(methods)] 813 } 814 815 methodCache.Lock() 816 if methodCache.m == nil { 817 methodCache.m = make(map[*rtype][]method) 818 } 819 methodCache.m[t] = methods 820 methodCache.Unlock() 821 822 return methods 823 } 824 825 func (t *rtype) NumMethod() int { 826 if t.Kind() == Interface { 827 tt := (*interfaceType)(unsafe.Pointer(t)) 828 return tt.NumMethod() 829 } 830 if t.tflag&tflagUncommon == 0 { 831 return 0 // avoid methodCache lock in zero case 832 } 833 return len(t.exportedMethods()) 834 } 835 836 func (t *rtype) Method(i int) (m Method) { 837 if t.Kind() == Interface { 838 tt := (*interfaceType)(unsafe.Pointer(t)) 839 return tt.Method(i) 840 } 841 methods := 
t.exportedMethods() 842 if i < 0 || i >= len(methods) { 843 panic("reflect: Method index out of range") 844 } 845 p := methods[i] 846 pname := t.nameOff(p.name) 847 m.Name = pname.name() 848 fl := flag(Func) 849 mtyp := t.typeOff(p.mtyp) 850 ft := (*funcType)(unsafe.Pointer(mtyp)) 851 in := make([]Type, 0, 1+len(ft.in())) 852 in = append(in, t) 853 for _, arg := range ft.in() { 854 in = append(in, arg) 855 } 856 out := make([]Type, 0, len(ft.out())) 857 for _, ret := range ft.out() { 858 out = append(out, ret) 859 } 860 mt := FuncOf(in, out, ft.IsVariadic()) 861 m.Type = mt 862 tfn := t.textOff(p.tfn) 863 fn := unsafe.Pointer(&tfn) 864 m.Func = Value{mt.(*rtype), fn, fl} 865 866 m.Index = i 867 return m 868 } 869 870 func (t *rtype) MethodByName(name string) (m Method, ok bool) { 871 if t.Kind() == Interface { 872 tt := (*interfaceType)(unsafe.Pointer(t)) 873 return tt.MethodByName(name) 874 } 875 ut := t.uncommon() 876 if ut == nil { 877 return Method{}, false 878 } 879 utmethods := ut.methods() 880 for i := 0; i < int(ut.mcount); i++ { 881 p := utmethods[i] 882 pname := t.nameOff(p.name) 883 if pname.isExported() && pname.name() == name { 884 return t.Method(i), true 885 } 886 } 887 return Method{}, false 888 } 889 890 func (t *rtype) PkgPath() string { 891 if t.tflag&tflagNamed == 0 { 892 return "" 893 } 894 ut := t.uncommon() 895 if ut == nil { 896 return "" 897 } 898 return t.nameOff(ut.pkgPath).name() 899 } 900 901 func hasPrefix(s, prefix string) bool { 902 return len(s) >= len(prefix) && s[:len(prefix)] == prefix 903 } 904 905 func (t *rtype) Name() string { 906 if t.tflag&tflagNamed == 0 { 907 return "" 908 } 909 s := t.String() 910 i := len(s) - 1 911 for i >= 0 { 912 if s[i] == '.' 
{ 913 break 914 } 915 i-- 916 } 917 return s[i+1:] 918 } 919 920 func (t *rtype) ChanDir() ChanDir { 921 if t.Kind() != Chan { 922 panic("reflect: ChanDir of non-chan type") 923 } 924 tt := (*chanType)(unsafe.Pointer(t)) 925 return ChanDir(tt.dir) 926 } 927 928 func (t *rtype) IsVariadic() bool { 929 if t.Kind() != Func { 930 panic("reflect: IsVariadic of non-func type") 931 } 932 tt := (*funcType)(unsafe.Pointer(t)) 933 return tt.outCount&(1<<15) != 0 934 } 935 936 func (t *rtype) Elem() Type { 937 switch t.Kind() { 938 case Array: 939 tt := (*arrayType)(unsafe.Pointer(t)) 940 return toType(tt.elem) 941 case Chan: 942 tt := (*chanType)(unsafe.Pointer(t)) 943 return toType(tt.elem) 944 case Map: 945 tt := (*mapType)(unsafe.Pointer(t)) 946 return toType(tt.elem) 947 case Ptr: 948 tt := (*ptrType)(unsafe.Pointer(t)) 949 return toType(tt.elem) 950 case Slice: 951 tt := (*sliceType)(unsafe.Pointer(t)) 952 return toType(tt.elem) 953 } 954 panic("reflect: Elem of invalid type") 955 } 956 957 func (t *rtype) Field(i int) StructField { 958 if t.Kind() != Struct { 959 panic("reflect: Field of non-struct type") 960 } 961 tt := (*structType)(unsafe.Pointer(t)) 962 return tt.Field(i) 963 } 964 965 func (t *rtype) FieldByIndex(index []int) StructField { 966 if t.Kind() != Struct { 967 panic("reflect: FieldByIndex of non-struct type") 968 } 969 tt := (*structType)(unsafe.Pointer(t)) 970 return tt.FieldByIndex(index) 971 } 972 973 func (t *rtype) FieldByName(name string) (StructField, bool) { 974 if t.Kind() != Struct { 975 panic("reflect: FieldByName of non-struct type") 976 } 977 tt := (*structType)(unsafe.Pointer(t)) 978 return tt.FieldByName(name) 979 } 980 981 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) { 982 if t.Kind() != Struct { 983 panic("reflect: FieldByNameFunc of non-struct type") 984 } 985 tt := (*structType)(unsafe.Pointer(t)) 986 return tt.FieldByNameFunc(match) 987 } 988 989 func (t *rtype) In(i int) Type { 990 if t.Kind() != Func 
{ 991 panic("reflect: In of non-func type") 992 } 993 tt := (*funcType)(unsafe.Pointer(t)) 994 return toType(tt.in()[i]) 995 } 996 997 func (t *rtype) Key() Type { 998 if t.Kind() != Map { 999 panic("reflect: Key of non-map type") 1000 } 1001 tt := (*mapType)(unsafe.Pointer(t)) 1002 return toType(tt.key) 1003 } 1004 1005 func (t *rtype) Len() int { 1006 if t.Kind() != Array { 1007 panic("reflect: Len of non-array type") 1008 } 1009 tt := (*arrayType)(unsafe.Pointer(t)) 1010 return int(tt.len) 1011 } 1012 1013 func (t *rtype) NumField() int { 1014 if t.Kind() != Struct { 1015 panic("reflect: NumField of non-struct type") 1016 } 1017 tt := (*structType)(unsafe.Pointer(t)) 1018 return len(tt.fields) 1019 } 1020 1021 func (t *rtype) NumIn() int { 1022 if t.Kind() != Func { 1023 panic("reflect: NumIn of non-func type") 1024 } 1025 tt := (*funcType)(unsafe.Pointer(t)) 1026 return int(tt.inCount) 1027 } 1028 1029 func (t *rtype) NumOut() int { 1030 if t.Kind() != Func { 1031 panic("reflect: NumOut of non-func type") 1032 } 1033 tt := (*funcType)(unsafe.Pointer(t)) 1034 return len(tt.out()) 1035 } 1036 1037 func (t *rtype) Out(i int) Type { 1038 if t.Kind() != Func { 1039 panic("reflect: Out of non-func type") 1040 } 1041 tt := (*funcType)(unsafe.Pointer(t)) 1042 return toType(tt.out()[i]) 1043 } 1044 1045 func (t *funcType) in() []*rtype { 1046 uadd := unsafe.Sizeof(*t) 1047 if t.tflag&tflagUncommon != 0 { 1048 uadd += unsafe.Sizeof(uncommonType{}) 1049 } 1050 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[:t.inCount] 1051 } 1052 1053 func (t *funcType) out() []*rtype { 1054 uadd := unsafe.Sizeof(*t) 1055 if t.tflag&tflagUncommon != 0 { 1056 uadd += unsafe.Sizeof(uncommonType{}) 1057 } 1058 outCount := t.outCount & (1<<15 - 1) 1059 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount] 1060 } 1061 1062 func add(p unsafe.Pointer, x uintptr) unsafe.Pointer { 1063 return unsafe.Pointer(uintptr(p) + x) 1064 } 1065 1066 func (d 
ChanDir) String() string { 1067 switch d { 1068 case SendDir: 1069 return "chan<-" 1070 case RecvDir: 1071 return "<-chan" 1072 case BothDir: 1073 return "chan" 1074 } 1075 return "ChanDir" + strconv.Itoa(int(d)) 1076 } 1077 1078 // Method returns the i'th method in the type's method set. 1079 func (t *interfaceType) Method(i int) (m Method) { 1080 if i < 0 || i >= len(t.methods) { 1081 return 1082 } 1083 p := &t.methods[i] 1084 pname := t.nameOff(p.name) 1085 m.Name = pname.name() 1086 if !pname.isExported() { 1087 m.PkgPath = pname.pkgPath() 1088 if m.PkgPath == "" { 1089 m.PkgPath = t.pkgPath.name() 1090 } 1091 } 1092 m.Type = toType(t.typeOff(p.typ)) 1093 m.Index = i 1094 return 1095 } 1096 1097 // NumMethod returns the number of interface methods in the type's method set. 1098 func (t *interfaceType) NumMethod() int { return len(t.methods) } 1099 1100 // MethodByName method with the given name in the type's method set. 1101 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) { 1102 if t == nil { 1103 return 1104 } 1105 var p *imethod 1106 for i := range t.methods { 1107 p = &t.methods[i] 1108 if t.nameOff(p.name).name() == name { 1109 return t.Method(i), true 1110 } 1111 } 1112 return 1113 } 1114 1115 // A StructField describes a single field in a struct. 1116 type StructField struct { 1117 // Name is the field name. 1118 Name string 1119 // PkgPath is the package path that qualifies a lower case (unexported) 1120 // field name. It is empty for upper case (exported) field names. 1121 // See https://golang.org/ref/spec#Uniqueness_of_identifiers 1122 PkgPath string 1123 1124 Type Type // field type 1125 Tag StructTag // field tag string 1126 Offset uintptr // offset within struct, in bytes 1127 Index []int // index sequence for Type.FieldByIndex 1128 Anonymous bool // is an embedded field 1129 } 1130 1131 // A StructTag is the tag string in a struct field. 
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string

// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified. To determine whether a tag is
// explicitly set to the empty string, use Lookup.
func (tag StructTag) Get(key string) string {
	v, _ := tag.Lookup(key)
	return v
}

// Lookup returns the value associated with key in the tag string.
// If the key is present in the tag the value (which may be empty)
// is returned. Otherwise the returned value will be the empty string.
// The ok return value reports whether the value was explicitly set in
// the tag string. If the tag does not have the conventional format,
// the value returned by Lookup is unspecified.
func (tag StructTag) Lookup(key string) (value string, ok bool) {
	// When modifying this code, also update the validateStructTag code
	// in cmd/vet/structtag.go.

	// Each iteration consumes one key:"value" pair from the front of
	// the tag; any syntax error stops the scan (returning "", false).
	for tag != "" {
		// Skip leading space.
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// Scan to colon. A space, a quote or a control character is a syntax error.
		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
		// as it is simpler to inspect the tag's bytes than the tag's runes.
		i = 0
		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
			i++
		}
		// Require a non-empty key immediately followed by `:"`.
		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// Scan quoted string to find value.
		// Backslash escapes one byte, so skip the escaped byte too.
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				i++
			}
			i++
		}
		if i >= len(tag) {
			break // unterminated quoted value
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			// Unquote to decode Go string-literal escapes in the value.
			value, err := strconv.Unquote(qvalue)
			if err != nil {
				break
			}
			return value, true
		}
	}
	return "", false
}

// Field returns the i'th struct field.
func (t *structType) Field(i int) (f StructField) {
	if i < 0 || i >= len(t.fields) {
		panic("reflect: Field index out of bounds")
	}
	p := &t.fields[i]
	f.Type = toType(p.typ)
	if name := p.name.name(); name != "" {
		f.Name = name
	} else {
		// An empty stored name marks an embedded field: its name is
		// taken from its type (dereferenced once for *T embedding).
		t := f.Type
		if t.Kind() == Ptr {
			t = t.Elem()
		}
		f.Name = t.Name()
		f.Anonymous = true
	}
	if !p.name.isExported() {
		// Unexported field: record the qualifying package path,
		// falling back to the struct type's own package.
		f.PkgPath = p.name.pkgPath()
		if f.PkgPath == "" {
			f.PkgPath = t.pkgPath.name()
		}
	}
	if tag := p.name.tag(); tag != "" {
		f.Tag = StructTag(tag)
	}
	f.Offset = p.offset

	// NOTE(rsc): This is the only allocation in the interface
	// presented by a reflect.Type. It would be nice to avoid,
	// at least in the common cases, but we need to make sure
	// that misbehaving clients of reflect cannot affect other
	// uses of reflect. One possibility is CL 5371098, but we
	// postponed that ugliness until there is a demonstrated
	// need for the performance. This is issue 2320.
	f.Index = []int{i}
	return
}

// TODO(gri): Should there be an error/bool indicator if the index
// is wrong for FieldByIndex?

// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
	f.Type = toType(&t.rtype)
	for i, x := range index {
		if i > 0 {
			// Between hops, step through a pointer to a struct
			// so that embedded *T fields can be traversed.
			ft := f.Type
			if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
				ft = ft.Elem()
			}
			f.Type = ft
		}
		f = f.Type.Field(x)
	}
	return
}

// A fieldScan represents an item on the fieldByNameFunc scan work list.
type fieldScan struct {
	typ   *structType
	index []int // index path from the root struct to typ
}

// FieldByNameFunc returns the struct field with a name that satisfies the
// match function and a boolean to indicate if the field was found.
func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
	// This uses the same condition that the Go language does: there must be a unique instance
	// of the match at a given depth level. If there are multiple instances of a match at the
	// same depth, they annihilate each other and inhibit any possible match at a lower level.
	// The algorithm is breadth first search, one depth level at a time.

	// The current and next slices are work queues:
	// current lists the fields to visit on this depth level,
	// and next lists the fields on the next lower level.
	current := []fieldScan{}
	next := []fieldScan{{typ: t}}

	// nextCount records the number of times an embedded type has been
	// encountered and considered for queueing in the 'next' slice.
	// We only queue the first one, but we increment the count on each.
	// If a struct type T can be reached more than once at a given depth level,
	// then it annihilates itself and need not be considered at all when we
	// process that next depth level.
	var nextCount map[*structType]int

	// visited records the structs that have been considered already.
	// Embedded pointer fields can create cycles in the graph of
	// reachable embedded types; visited avoids following those cycles.
	// It also avoids duplicated effort: if we didn't find the field in an
	// embedded type T at level 2, we won't find it in one at level 4 either.
	visited := map[*structType]bool{}

	for len(next) > 0 {
		// Swap the queues, reusing current's backing array for next.
		current, next = next, current[:0]
		count := nextCount
		nextCount = nil

		// Process all the fields at this depth, now listed in 'current'.
		// The loop queues embedded fields found in 'next', for processing during the next
		// iteration. The multiplicity of the 'current' field counts is recorded
		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
		for _, scan := range current {
			t := scan.typ
			if visited[t] {
				// We've looked through this type before, at a higher level.
				// That higher level would shadow the lower level we're now at,
				// so this one can't be useful to us. Ignore it.
				continue
			}
			visited[t] = true
			for i := range t.fields {
				f := &t.fields[i]
				// Find name and type for field f.
				var fname string
				var ntyp *rtype
				if name := f.name.name(); name != "" {
					fname = name
				} else {
					// Anonymous field of type T or *T.
					// Name taken from type.
					ntyp = f.typ
					if ntyp.Kind() == Ptr {
						ntyp = ntyp.Elem().common()
					}
					fname = ntyp.Name()
				}

				// Does it match?
				if match(fname) {
					// Potential match
					if count[t] > 1 || ok {
						// Name appeared multiple times at this level: annihilate.
						return StructField{}, false
					}
					result = t.Field(i)
					result.Index = nil
					result.Index = append(result.Index, scan.index...)
					result.Index = append(result.Index, i)
					ok = true
					continue
				}

				// Queue embedded struct fields for processing with next level,
				// but only if we haven't seen a match yet at this level and only
				// if the embedded types haven't already been queued.
				if ok || ntyp == nil || ntyp.Kind() != Struct {
					continue
				}
				styp := (*structType)(unsafe.Pointer(ntyp))
				if nextCount[styp] > 0 {
					nextCount[styp] = 2 // exact multiple doesn't matter
					continue
				}
				if nextCount == nil {
					nextCount = map[*structType]int{}
				}
				nextCount[styp] = 1
				if count[t] > 1 {
					nextCount[styp] = 2 // exact multiple doesn't matter
				}
				var index []int
				index = append(index, scan.index...)
				index = append(index, i)
				next = append(next, fieldScan{styp, index})
			}
		}
		// A match at this depth shadows anything deeper: stop.
		if ok {
			break
		}
	}
	return
}

// FieldByName returns the struct field with the given name
// and a boolean to indicate if the field was found.
func (t *structType) FieldByName(name string) (f StructField, present bool) {
	// Quick check for top-level name, or struct without anonymous fields.
	// Only fall back to the breadth-first scan when an embedded field
	// might supply the name.
	hasAnon := false
	if name != "" {
		for i := range t.fields {
			tf := &t.fields[i]
			tfname := tf.name.name()
			if tfname == "" {
				hasAnon = true
				continue
			}
			if tfname == name {
				return t.Field(i), true
			}
		}
	}
	if !hasAnon {
		return
	}
	return t.FieldByNameFunc(func(s string) bool { return s == name })
}

// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
func TypeOf(i interface{}) Type {
	// Reinterpret the interface header to extract its type word directly.
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	return toType(eface.typ)
}

// ptrMap is the cache for PtrTo.
var ptrMap struct {
	sync.RWMutex
	m map[*rtype]*ptrType
}

// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
func PtrTo(t Type) Type {
	return t.(*rtype).ptrTo()
}

// ptrTo returns the *rtype for *t, constructing one at run time if the
// linker did not already emit it.
func (t *rtype) ptrTo() *rtype {
	// Fast path: the compiler recorded the pointer type's offset.
	if t.ptrToThis != 0 {
		return t.typeOff(t.ptrToThis)
	}

	// Check the cache.
	ptrMap.RLock()
	if m := ptrMap.m; m != nil {
		if p := m[t]; p != nil {
			ptrMap.RUnlock()
			return &p.rtype
		}
	}
	ptrMap.RUnlock()

	// Slow path: take the write lock and re-check (double-checked locking).
	ptrMap.Lock()
	if ptrMap.m == nil {
		ptrMap.m = make(map[*rtype]*ptrType)
	}
	p := ptrMap.m[t]
	if p != nil {
		// some other goroutine won the race and created it
		ptrMap.Unlock()
		return &p.rtype
	}

	// Look in known types.
	s := "*" + t.String()
	for _, tt := range typesByString(s) {
		p = (*ptrType)(unsafe.Pointer(tt))
		if p.elem == t {
			ptrMap.m[t] = p
			ptrMap.Unlock()
			return &p.rtype
		}
	}

	// Create a new ptrType starting with the description
	// of an *unsafe.Pointer.
	var iptr interface{} = (*unsafe.Pointer)(nil)
	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
	pp := *prototype

	pp.str = resolveReflectName(newName(s, "", "", false))
	pp.ptrToThis = 0

	// For the type structures linked into the binary, the
	// compiler provides a good hash of the string.
	// Create a good hash for the new string by using
	// the FNV-1 hash's mixing function to combine the
	// old hash and the new "*".
	pp.hash = fnv1(t.hash, '*')

	pp.elem = t

	ptrMap.m[t] = &pp
	ptrMap.Unlock()
	return &pp.rtype
}

// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
func fnv1(x uint32, list ...byte) uint32 {
	for _, b := range list {
		x = x*16777619 ^ uint32(b) // 16777619 is the 32-bit FNV prime
	}
	return x
}

// Implements reports whether the type implements the interface type u.
// It panics if u is nil or not an interface type.
func (t *rtype) Implements(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.Implements")
	}
	if u.Kind() != Interface {
		panic("reflect: non-interface type passed to Type.Implements")
	}
	return implements(u.(*rtype), t)
}

// AssignableTo reports whether a value of type t is assignable to type u,
// either directly (identical memory layout) or via interface satisfaction.
func (t *rtype) AssignableTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.AssignableTo")
	}
	uu := u.(*rtype)
	return directlyAssignable(uu, t) || implements(uu, t)
}

// ConvertibleTo reports whether a value of type t is convertible to type u.
func (t *rtype) ConvertibleTo(u Type) bool {
	if u == nil {
		panic("reflect: nil type passed to Type.ConvertibleTo")
	}
	uu := u.(*rtype)
	// convertOp returns the conversion function, or nil if none exists.
	return convertOp(uu, t) != nil
}

// Comparable reports whether values of this type are comparable with ==,
// i.e. the runtime recorded an equality algorithm for the type.
func (t *rtype) Comparable() bool {
	return t.alg != nil && t.alg.equal != nil
}

// implements reports whether the type V implements the interface type T.
func implements(T, V *rtype) bool {
	if T.Kind() != Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	if len(t.methods) == 0 {
		// The empty interface is implemented by everything.
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.go.
	if V.Kind() == Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.methods); j++ {
			tm := &t.methods[i]
			vm := &v.methods[j]
			if V.nameOff(vm.name).name() == t.nameOff(tm.name).name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
				if i++; i >= len(t.methods) {
					return true
				}
			}
		}
		return false
	}

	v := V.uncommon()
	if v == nil {
		// V has no methods at all, but T requires at least one.
		return false
	}
	i := 0
	vmethods := v.methods()
	for j := 0; j < int(v.mcount); j++ {
		tm := &t.methods[i]
		vm := vmethods[j]
		if V.nameOff(vm.name).name() == t.nameOff(tm.name).name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
			if i++; i >= len(t.methods) {
				return true
			}
		}
	}
	return false
}

// directlyAssignable reports whether a value x of type V can be directly
// assigned (using memmove) to a value of type T.
// https://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
func directlyAssignable(T, V *rtype) bool {
	// x's type V is identical to T?
	if T == V {
		return true
	}

	// Otherwise at least one of T and V must be unnamed
	// and they must have the same kind.
	if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
		return false
	}

	// x's type T and V must have identical underlying types.
	return haveIdenticalUnderlyingType(T, V, true)
}

// haveIdenticalType reports whether T and V are identical types.
// When cmpTags is true identity is pointer equality (types are canonical);
// when false, struct tags are ignored and identity is checked structurally.
func haveIdenticalType(T, V Type, cmpTags bool) bool {
	if cmpTags {
		return T == V
	}

	if T.Name() != V.Name() || T.Kind() != V.Kind() {
		return false
	}

	return haveIdenticalUnderlyingType(T.common(), V.common(), false)
}

// haveIdenticalUnderlyingType reports whether T and V have identical
// underlying types, per the Go spec's type identity rules.
func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
	if T == V {
		return true
	}

	kind := T.Kind()
	if kind != V.Kind() {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Chan:
		// Special case:
		// x is a bidirectional channel value, T is a channel type,
		// and x's type V and T have identical element types.
		if V.ChanDir() == BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
			return true
		}

		// Otherwise continue test for identical underlying type.
		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		// Comparing outCount also compares the variadic bit (its top bit).
		if t.outCount != v.outCount || t.inCount != v.inCount {
			return false
		}
		for i := 0; i < t.NumIn(); i++ {
			if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
				return false
			}
		}
		for i := 0; i < t.NumOut(); i++ {
			if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		if len(t.methods) == 0 && len(v.methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Ptr, Slice:
		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.fields) != len(v.fields) {
			return false
		}
		for i := range t.fields {
			tf := &t.fields[i]
			vf := &v.fields[i]
			if tf.name.name() != vf.name.name() {
				return false
			}
			if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
				return false
			}
			if cmpTags && tf.name.tag() != vf.name.tag() {
				return false
			}
			if tf.offset != vf.offset {
				return false
			}
			if !tf.name.isExported() {
				// Unexported fields are identical only if they come
				// from the same package.
				tp := tf.name.pkgPath()
				if tp == "" {
					tp = t.pkgPath.name()
				}
				vp := vf.name.pkgPath()
				if vp == "" {
					vp = v.pkgPath.name()
				}
				if tp != vp {
					return false
				}
			}
		}
		return true
	}

	return false
}

// typelinks is implemented in package runtime.
// It returns a slice of the sections in each module,
// and a slice of *rtype offsets in each module.
//
// The types in each module are sorted by string. That is, the first
// two linked types of the first module are:
//
//	d0 := sections[0]
//	t1 := (*rtype)(add(d0, offset[0][0]))
//	t2 := (*rtype)(add(d0, offset[0][1]))
//
// and
//
//	t1.String() < t2.String()
//
// Note that strings are not unique identifiers for types:
// there can be more than one with a given string.
// Only types we might want to look up are included:
// pointers, channels, maps, slices, and arrays.
func typelinks() (sections []unsafe.Pointer, offset [][]int32)

// rtypeOff resolves an offset within a typelinks section to its *rtype.
func rtypeOff(section unsafe.Pointer, off int32) *rtype {
	return (*rtype)(add(section, uintptr(off)))
}

// typesByString returns the subslice of typelinks() whose elements have
// the given string representation.
// It may be empty (no known types with that string) or may have
// multiple elements (multiple types with that string).
func typesByString(s string) []*rtype {
	sections, offset := typelinks()
	var ret []*rtype

	for offsI, offs := range offset {
		section := sections[offsI]

		// We are looking for the first index i where the string becomes >= s.
		// This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
		i, j := 0, len(offs)
		for i < j {
			h := i + (j-i)/2 // avoid overflow when computing h
			// i ≤ h < j
			if !(rtypeOff(section, offs[h]).String() >= s) {
				i = h + 1 // preserves f(i-1) == false
			} else {
				j = h // preserves f(j) == true
			}
		}
		// i == j, f(i-1) == false, and f(j) (= f(i)) == true  =>  answer is i.

		// Having found the first, linear scan forward to find the last.
		// We could do a second binary search, but the caller is going
		// to do a linear scan anyway.
		for j := i; j < len(offs); j++ {
			typ := rtypeOff(section, offs[j])
			if typ.String() != s {
				break
			}
			ret = append(ret, typ)
		}
	}
	return ret
}

// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
var lookupCache struct {
	sync.RWMutex
	m map[cacheKey]*rtype
}

// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
// type kind, one or two subtypes, and an extra integer.
type cacheKey struct {
	kind  Kind
	t1    *rtype
	t2    *rtype
	extra uintptr
}

// cacheGet looks for a type under the key k in the lookupCache.
// If it finds one, it returns that type.
// If not, it returns nil with the cache locked.
// The caller is expected to use cachePut to unlock the cache.
func cacheGet(k cacheKey) Type {
	// Optimistic read under the shared lock.
	lookupCache.RLock()
	t := lookupCache.m[k]
	lookupCache.RUnlock()
	if t != nil {
		return t
	}

	// Miss: take the exclusive lock and re-check, since another
	// goroutine may have populated the entry in the meantime.
	lookupCache.Lock()
	t = lookupCache.m[k]
	if t != nil {
		lookupCache.Unlock()
		return t
	}

	if lookupCache.m == nil {
		lookupCache.m = make(map[cacheKey]*rtype)
	}

	// NOTE: returns with lookupCache still locked; cachePut (or an
	// explicit Unlock on a panic path) must release it.
	return nil
}

// cachePut stores the given type in the cache, unlocks the cache,
// and returns the type. It is expected that the cache is locked
// because cacheGet returned nil.
func cachePut(k cacheKey, t *rtype) Type {
	lookupCache.m[k] = t
	lookupCache.Unlock()
	return t
}

// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
	sync.RWMutex
	m map[uint32][]*rtype // keyed by hash calculated in FuncOf
}

// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
// The gc runtime imposes a limit of 64 kB on channel element types.
// If t's size is equal to or exceeds this limit, ChanOf panics.
func ChanOf(dir ChanDir, t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	// NOTE: on a miss, cacheGet returns with lookupCache locked, so
	// every early exit below must Unlock before panicking.
	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
	if ch := cacheGet(ckey); ch != nil {
		return ch
	}

	// This restriction is imposed by the gc compiler and the runtime.
	if typ.size >= 1<<16 {
		lookupCache.Unlock()
		panic("reflect.ChanOf: element size too large")
	}

	// Look in known types.
	// TODO: Precedence when constructing string.
	var s string
	switch dir {
	default:
		lookupCache.Unlock()
		panic("reflect.ChanOf: invalid dir")
	case SendDir:
		s = "chan<- " + typ.String()
	case RecvDir:
		s = "<-chan " + typ.String()
	case BothDir:
		s = "chan " + typ.String()
	}
	for _, tt := range typesByString(s) {
		ch := (*chanType)(unsafe.Pointer(tt))
		if ch.elem == typ && ch.dir == uintptr(dir) {
			return cachePut(ckey, tt)
		}
	}

	// Make a channel type.
	// Start from the linked-in description of chan unsafe.Pointer and
	// overwrite the fields that differ.
	var ichan interface{} = (chan unsafe.Pointer)(nil)
	prototype := *(**chanType)(unsafe.Pointer(&ichan))
	ch := *prototype
	ch.tflag = 0
	ch.dir = uintptr(dir)
	ch.str = resolveReflectName(newName(s, "", "", false))
	ch.hash = fnv1(typ.hash, 'c', byte(dir))
	ch.elem = typ

	return cachePut(ckey, &ch.rtype)
}

func ismapkey(*rtype) bool // implemented in runtime

// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
//
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
	ktyp := key.(*rtype)
	etyp := elem.(*rtype)

	if !ismapkey(ktyp) {
		panic("reflect.MapOf: invalid key type " + ktyp.String())
	}

	// Look in cache.
	// On a miss, cacheGet leaves lookupCache locked; cachePut unlocks it.
	ckey := cacheKey{Map, ktyp, etyp, 0}
	if mt := cacheGet(ckey); mt != nil {
		return mt
	}

	// Look in known types.
	s := "map[" + ktyp.String() + "]" + etyp.String()
	for _, tt := range typesByString(s) {
		mt := (*mapType)(unsafe.Pointer(tt))
		if mt.key == ktyp && mt.elem == etyp {
			return cachePut(ckey, tt)
		}
	}

	// Make a map type.
	// Start from the linked-in description of map[unsafe.Pointer]unsafe.Pointer.
	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
	mt := **(**mapType)(unsafe.Pointer(&imap))
	mt.str = resolveReflectName(newName(s, "", "", false))
	mt.tflag = 0
	mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
	mt.key = ktyp
	mt.elem = etyp
	mt.bucket = bucketOf(ktyp, etyp)
	// Oversized keys/values are stored indirectly (as pointers) in the
	// bucket; these fields must agree with the runtime map implementation.
	if ktyp.size > maxKeySize {
		mt.keysize = uint8(ptrSize)
		mt.indirectkey = 1
	} else {
		mt.keysize = uint8(ktyp.size)
		mt.indirectkey = 0
	}
	if etyp.size > maxValSize {
		mt.valuesize = uint8(ptrSize)
		mt.indirectvalue = 1
	} else {
		mt.valuesize = uint8(etyp.size)
		mt.indirectvalue = 0
	}
	mt.bucketsize = uint16(mt.bucket.size)
	mt.reflexivekey = isReflexive(ktyp)
	mt.needkeyupdate = needKeyUpdate(ktyp)
	mt.ptrToThis = 0

	return cachePut(ckey, &mt.rtype)
}

// The funcTypeFixedN types embed a funcType followed immediately by an
// inline array of N parameter-type pointers. This matches the memory
// layout that funcType.in and funcType.out recover by pointer
// arithmetic, while letting FuncOf allocate header and args together.
type funcTypeFixed4 struct {
	funcType
	args [4]*rtype
}
type funcTypeFixed8 struct {
	funcType
	args [8]*rtype
}
type funcTypeFixed16 struct {
	funcType
	args [16]*rtype
}
type funcTypeFixed32 struct {
	funcType
	args [32]*rtype
}
type funcTypeFixed64 struct {
	funcType
	args [64]*rtype
}
type funcTypeFixed128 struct {
	funcType
	args [128]*rtype
}

// FuncOf returns the function type with the given argument and result types.
// For example if k represents int and e represents string,
// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
//
// The variadic argument controls whether the function is variadic. FuncOf
// panics if the in[len(in)-1] does not represent a slice and variadic is
// true.
func FuncOf(in, out []Type, variadic bool) Type {
	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
		panic("reflect.FuncOf: last arg of variadic func must be slice")
	}

	// Make a func type.
	var ifunc interface{} = (func())(nil)
	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
	n := len(in) + len(out)

	// Pick the smallest funcTypeFixedN that holds all n parameter
	// types; args aliases its inline array so that appends write the
	// types directly after the funcType header.
	var ft *funcType
	var args []*rtype
	switch {
	case n <= 4:
		fixed := new(funcTypeFixed4)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 8:
		fixed := new(funcTypeFixed8)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 16:
		fixed := new(funcTypeFixed16)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 32:
		fixed := new(funcTypeFixed32)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 64:
		fixed := new(funcTypeFixed64)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	case n <= 128:
		fixed := new(funcTypeFixed128)
		args = fixed.args[:0:len(fixed.args)]
		ft = &fixed.funcType
	default:
		panic("reflect.FuncOf: too many arguments")
	}
	*ft = *prototype

	// Build a hash and minimally populate ft.
	// 'v' marks variadic functions and '.' separates inputs from
	// outputs, so distinct signatures hash differently.
	var hash uint32
	for _, in := range in {
		t := in.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
	}
	if variadic {
		hash = fnv1(hash, 'v')
	}
	hash = fnv1(hash, '.')
	for _, out := range out {
		t := out.(*rtype)
		args = append(args, t)
		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
	}
	if len(args) > 50 {
		panic("reflect.FuncOf does not support more than 50 arguments")
	}
	ft.tflag = 0
	ft.hash = hash
	ft.inCount = uint16(len(in))
	ft.outCount = uint16(len(out))
	if variadic {
		// The top bit of outCount flags a variadic function
		// (masked off again in funcType.out).
		ft.outCount |= 1 << 15
	}

	// Look in cache.
	funcLookupCache.RLock()
	for _, t := range funcLookupCache.m[hash] {
		if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
			funcLookupCache.RUnlock()
			return t
		}
	}
	funcLookupCache.RUnlock()

	// Not in cache, lock and retry.
	funcLookupCache.Lock()
	defer funcLookupCache.Unlock()
	if funcLookupCache.m == nil {
		funcLookupCache.m = make(map[uint32][]*rtype)
	}
	for _, t := range funcLookupCache.m[hash] {
		if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
			return t
		}
	}

	// Look in known types for the same string representation.
	str := funcStr(ft)
	for _, tt := range typesByString(str) {
		if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
			funcLookupCache.m[hash] = append(funcLookupCache.m[hash], tt)
			return tt
		}
	}

	// Populate the remaining fields of ft and store in cache.
	ft.str = resolveReflectName(newName(str, "", "", false))
	ft.ptrToThis = 0
	funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)

	return &ft.rtype
}

// funcStr builds a string representation of a funcType.
2101 func funcStr(ft *funcType) string { 2102 repr := make([]byte, 0, 64) 2103 repr = append(repr, "func("...) 2104 for i, t := range ft.in() { 2105 if i > 0 { 2106 repr = append(repr, ", "...) 2107 } 2108 if ft.IsVariadic() && i == int(ft.inCount)-1 { 2109 repr = append(repr, "..."...) 2110 repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...) 2111 } else { 2112 repr = append(repr, t.String()...) 2113 } 2114 } 2115 repr = append(repr, ')') 2116 out := ft.out() 2117 if len(out) == 1 { 2118 repr = append(repr, ' ') 2119 } else if len(out) > 1 { 2120 repr = append(repr, " ("...) 2121 } 2122 for i, t := range out { 2123 if i > 0 { 2124 repr = append(repr, ", "...) 2125 } 2126 repr = append(repr, t.String()...) 2127 } 2128 if len(out) > 1 { 2129 repr = append(repr, ')') 2130 } 2131 return string(repr) 2132 } 2133 2134 // isReflexive reports whether the == operation on the type is reflexive. 2135 // That is, x == x for all values x of type t. 2136 func isReflexive(t *rtype) bool { 2137 switch t.Kind() { 2138 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer: 2139 return true 2140 case Float32, Float64, Complex64, Complex128, Interface: 2141 return false 2142 case Array: 2143 tt := (*arrayType)(unsafe.Pointer(t)) 2144 return isReflexive(tt.elem) 2145 case Struct: 2146 tt := (*structType)(unsafe.Pointer(t)) 2147 for _, f := range tt.fields { 2148 if !isReflexive(f.typ) { 2149 return false 2150 } 2151 } 2152 return true 2153 default: 2154 // Func, Map, Slice, Invalid 2155 panic("isReflexive called on non-key type " + t.String()) 2156 } 2157 } 2158 2159 // needKeyUpdate reports whether map overwrites require the key to be copied. 
func needKeyUpdate(t *rtype) bool {
	switch t.Kind() {
	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
		return false
	case Float32, Float64, Complex64, Complex128, Interface, String:
		// Float keys can be updated from +0 to -0.
		// String keys can be updated to use a smaller backing store.
		// Interfaces might have floats or strings in them.
		return true
	case Array:
		// An array key needs updating iff its element type does.
		tt := (*arrayType)(unsafe.Pointer(t))
		return needKeyUpdate(tt.elem)
	case Struct:
		// A struct key needs updating if any field does.
		tt := (*structType)(unsafe.Pointer(t))
		for _, f := range tt.fields {
			if needKeyUpdate(f.typ) {
				return true
			}
		}
		return false
	default:
		// Func, Map, Slice, Invalid
		panic("needKeyUpdate called on non-key type " + t.String())
	}
}

// Make sure these routines stay in sync with ../../runtime/hashmap.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
	bucketSize uintptr = 8
	maxKeySize uintptr = 128
	maxValSize uintptr = 128
)

// bucketOf constructs an rtype describing the layout of one hash bucket
// for a map with the given key and element types. The result exists only
// for GC: size, kind, and the pointer bitmap are the meaningful fields.
func bucketOf(ktyp, etyp *rtype) *rtype {
	// See comment on hmap.overflow in ../runtime/hashmap.go.
	var kind uint8
	if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
		ktyp.size <= maxKeySize && etyp.size <= maxValSize {
		kind = kindNoPointers
	}

	// Oversized keys and values are stored indirectly: the bucket holds a
	// pointer to the datum, so substitute the pointer type here.
	if ktyp.size > maxKeySize {
		ktyp = PtrTo(ktyp).(*rtype)
	}
	if etyp.size > maxValSize {
		etyp = PtrTo(etyp).(*rtype)
	}

	// Prepare GC data if any.
	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
	// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
	// Normally the enforced limit on pointer maps is 16 bytes,
	// but larger ones are acceptable, 33 bytes isn't too too big,
	// and it's easier to generate a pointer bitmap than a GC program.
	// Note that since the key and value are known to be <= 128 bytes,
	// they're guaranteed to have bitmaps instead of GC programs.
	var gcdata *byte
	var ptrdata uintptr
	var overflowPad uintptr

	// On NaCl, pad if needed to make overflow end at the proper struct alignment.
	// On other systems, align > ptrSize is not possible.
	if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) {
		overflowPad = ptrSize
	}
	// Layout: 8 tophash bytes, then 8 keys, 8 values, optional pad, and the
	// trailing overflow pointer.
	size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
	if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
		panic("reflect: bad size computation in MapOf")
	}

	if kind != kindNoPointers {
		nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
		mask := make([]byte, (nptr+7)/8)
		base := bucketSize / ptrSize // word index past the tophash bytes

		if ktyp.kind&kindNoPointers == 0 {
			if ktyp.kind&kindGCProg != 0 {
				panic("reflect: unexpected GC program in MapOf")
			}
			// Replicate the key's pointer bitmap once per bucket slot.
			kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
			for i := uintptr(0); i < ktyp.size/ptrSize; i++ {
				if (kmask[i/8]>>(i%8))&1 != 0 {
					for j := uintptr(0); j < bucketSize; j++ {
						word := base + j*ktyp.size/ptrSize + i
						mask[word/8] |= 1 << (word % 8)
					}
				}
			}
		}
		base += bucketSize * ktyp.size / ptrSize

		if etyp.kind&kindNoPointers == 0 {
			if etyp.kind&kindGCProg != 0 {
				panic("reflect: unexpected GC program in MapOf")
			}
			// Replicate the value's pointer bitmap once per bucket slot.
			emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
			for i := uintptr(0); i < etyp.size/ptrSize; i++ {
				if (emask[i/8]>>(i%8))&1 != 0 {
					for j := uintptr(0); j < bucketSize; j++ {
						word := base + j*etyp.size/ptrSize + i
						mask[word/8] |= 1 << (word % 8)
					}
				}
			}
		}
		base += bucketSize * etyp.size / ptrSize
		base += overflowPad / ptrSize

		// Mark the trailing overflow pointer.
		word := base
		mask[word/8] |= 1 << (word % 8)
		gcdata = &mask[0]
		ptrdata = (word + 1) * ptrSize

		// overflow word must be last
		if ptrdata != size {
			panic("reflect: bad layout computation in MapOf")
		}
	}

	b := &rtype{
		align:   ptrSize,
		size:    size,
		kind:    kind,
		ptrdata: ptrdata,
		gcdata:  gcdata,
	}
	if overflowPad > 0 {
		b.align = 8
	}
	s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
	b.str = resolveReflectName(newName(s, "", "", false))
	return b
}

// SliceOf returns the slice type with element type t.
// For example, if t represents int, SliceOf(t) represents []int.
func SliceOf(t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Slice, typ, nil, 0}
	if slice := cacheGet(ckey); slice != nil {
		return slice
	}

	// Look in known types.
	s := "[]" + typ.String()
	for _, tt := range typesByString(s) {
		slice := (*sliceType)(unsafe.Pointer(tt))
		if slice.elem == typ {
			return cachePut(ckey, tt)
		}
	}

	// Make a slice type: start from a prototype slice type's rtype and
	// patch in the element-specific fields.
	var islice interface{} = ([]unsafe.Pointer)(nil)
	prototype := *(**sliceType)(unsafe.Pointer(&islice))
	slice := *prototype
	slice.tflag = 0
	slice.str = resolveReflectName(newName(s, "", "", false))
	slice.hash = fnv1(typ.hash, '[')
	slice.elem = typ
	slice.ptrToThis = 0

	return cachePut(ckey, &slice.rtype)
}

// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
2333 var structLookupCache struct { 2334 sync.RWMutex 2335 m map[uint32][]interface { 2336 common() *rtype 2337 } // keyed by hash calculated in StructOf 2338 } 2339 2340 type structTypeUncommon struct { 2341 structType 2342 u uncommonType 2343 } 2344 2345 // A *rtype representing a struct is followed directly in memory by an 2346 // array of method objects representing the methods attached to the 2347 // struct. To get the same layout for a run time generated type, we 2348 // need an array directly following the uncommonType memory. The types 2349 // structTypeFixed4, ...structTypeFixedN are used to do this. 2350 // 2351 // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. 2352 2353 // TODO(crawshaw): as these structTypeFixedN and funcTypeFixedN structs 2354 // have no methods, they could be defined at runtime using the StructOf 2355 // function. 2356 2357 type structTypeFixed4 struct { 2358 structType 2359 u uncommonType 2360 m [4]method 2361 } 2362 2363 type structTypeFixed8 struct { 2364 structType 2365 u uncommonType 2366 m [8]method 2367 } 2368 2369 type structTypeFixed16 struct { 2370 structType 2371 u uncommonType 2372 m [16]method 2373 } 2374 2375 type structTypeFixed32 struct { 2376 structType 2377 u uncommonType 2378 m [32]method 2379 } 2380 2381 // StructOf returns the struct type containing fields. 2382 // The Offset and Index fields are ignored and computed as they would be 2383 // by the compiler. 2384 // 2385 // StructOf currently does not generate wrapper methods for embedded fields. 2386 // This limitation may be lifted in a future version. 2387 func StructOf(fields []StructField) Type { 2388 var ( 2389 hash = fnv1(0, []byte("struct {")...) 
2390 size uintptr 2391 typalign uint8 2392 comparable = true 2393 hashable = true 2394 methods []method 2395 2396 fs = make([]structField, len(fields)) 2397 repr = make([]byte, 0, 64) 2398 fset = map[string]struct{}{} // fields' names 2399 2400 hasPtr = false // records whether at least one struct-field is a pointer 2401 hasGCProg = false // records whether a struct-field type has a GCProg 2402 ) 2403 2404 lastzero := uintptr(0) 2405 repr = append(repr, "struct {"...) 2406 for i, field := range fields { 2407 if field.Type == nil { 2408 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type") 2409 } 2410 f := runtimeStructField(field) 2411 ft := f.typ 2412 if ft.kind&kindGCProg != 0 { 2413 hasGCProg = true 2414 } 2415 if ft.pointers() { 2416 hasPtr = true 2417 } 2418 2419 name := "" 2420 // Update string and hash 2421 if f.name.nameLen() > 0 { 2422 hash = fnv1(hash, []byte(f.name.name())...) 2423 repr = append(repr, (" " + f.name.name())...) 2424 name = f.name.name() 2425 } else { 2426 // Embedded field 2427 if f.typ.Kind() == Ptr { 2428 // Embedded ** and *interface{} are illegal 2429 elem := ft.Elem() 2430 if k := elem.Kind(); k == Ptr || k == Interface { 2431 panic("reflect.StructOf: illegal anonymous field type " + ft.String()) 2432 } 2433 name = elem.String() 2434 } else { 2435 name = ft.String() 2436 } 2437 // TODO(sbinet) check for syntactically impossible type names? 
2438 2439 switch f.typ.Kind() { 2440 case Interface: 2441 ift := (*interfaceType)(unsafe.Pointer(ft)) 2442 for im, m := range ift.methods { 2443 if ift.nameOff(m.name).pkgPath() != "" { 2444 // TODO(sbinet) 2445 panic("reflect: embedded interface with unexported method(s) not implemented") 2446 } 2447 2448 var ( 2449 mtyp = ift.typeOff(m.typ) 2450 ifield = i 2451 imethod = im 2452 ifn Value 2453 tfn Value 2454 ) 2455 2456 if ft.kind&kindDirectIface != 0 { 2457 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2458 var args []Value 2459 var recv = in[0] 2460 if len(in) > 1 { 2461 args = in[1:] 2462 } 2463 return recv.Field(ifield).Method(imethod).Call(args) 2464 }) 2465 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2466 var args []Value 2467 var recv = in[0] 2468 if len(in) > 1 { 2469 args = in[1:] 2470 } 2471 return recv.Field(ifield).Method(imethod).Call(args) 2472 }) 2473 } else { 2474 tfn = MakeFunc(mtyp, func(in []Value) []Value { 2475 var args []Value 2476 var recv = in[0] 2477 if len(in) > 1 { 2478 args = in[1:] 2479 } 2480 return recv.Field(ifield).Method(imethod).Call(args) 2481 }) 2482 ifn = MakeFunc(mtyp, func(in []Value) []Value { 2483 var args []Value 2484 var recv = Indirect(in[0]) 2485 if len(in) > 1 { 2486 args = in[1:] 2487 } 2488 return recv.Field(ifield).Method(imethod).Call(args) 2489 }) 2490 } 2491 2492 methods = append(methods, method{ 2493 name: resolveReflectName(ift.nameOff(m.name)), 2494 mtyp: resolveReflectType(mtyp), 2495 ifn: resolveReflectText(unsafe.Pointer(&ifn)), 2496 tfn: resolveReflectText(unsafe.Pointer(&tfn)), 2497 }) 2498 } 2499 case Ptr: 2500 ptr := (*ptrType)(unsafe.Pointer(ft)) 2501 if unt := ptr.uncommon(); unt != nil { 2502 for _, m := range unt.methods() { 2503 mname := ptr.nameOff(m.name) 2504 if mname.pkgPath() != "" { 2505 // TODO(sbinet) 2506 panic("reflect: embedded interface with unexported method(s) not implemented") 2507 } 2508 methods = append(methods, method{ 2509 name: resolveReflectName(mname), 2510 mtyp: 
resolveReflectType(ptr.typeOff(m.mtyp)), 2511 ifn: resolveReflectText(ptr.textOff(m.ifn)), 2512 tfn: resolveReflectText(ptr.textOff(m.tfn)), 2513 }) 2514 } 2515 } 2516 if unt := ptr.elem.uncommon(); unt != nil { 2517 for _, m := range unt.methods() { 2518 mname := ptr.nameOff(m.name) 2519 if mname.pkgPath() != "" { 2520 // TODO(sbinet) 2521 panic("reflect: embedded interface with unexported method(s) not implemented") 2522 } 2523 methods = append(methods, method{ 2524 name: resolveReflectName(mname), 2525 mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)), 2526 ifn: resolveReflectText(ptr.elem.textOff(m.ifn)), 2527 tfn: resolveReflectText(ptr.elem.textOff(m.tfn)), 2528 }) 2529 } 2530 } 2531 default: 2532 if unt := ft.uncommon(); unt != nil { 2533 for _, m := range unt.methods() { 2534 mname := ft.nameOff(m.name) 2535 if mname.pkgPath() != "" { 2536 // TODO(sbinet) 2537 panic("reflect: embedded interface with unexported method(s) not implemented") 2538 } 2539 methods = append(methods, method{ 2540 name: resolveReflectName(mname), 2541 mtyp: resolveReflectType(ft.typeOff(m.mtyp)), 2542 ifn: resolveReflectText(ft.textOff(m.ifn)), 2543 tfn: resolveReflectText(ft.textOff(m.tfn)), 2544 }) 2545 2546 } 2547 } 2548 } 2549 } 2550 if _, dup := fset[name]; dup { 2551 panic("reflect.StructOf: duplicate field " + name) 2552 } 2553 fset[name] = struct{}{} 2554 2555 hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash)) 2556 2557 repr = append(repr, (" " + ft.String())...) 2558 if f.name.tagLen() > 0 { 2559 hash = fnv1(hash, []byte(f.name.tag())...) 2560 repr = append(repr, (" " + strconv.Quote(f.name.tag()))...) 
2561 } 2562 if i < len(fields)-1 { 2563 repr = append(repr, ';') 2564 } 2565 2566 comparable = comparable && (ft.alg.equal != nil) 2567 hashable = hashable && (ft.alg.hash != nil) 2568 2569 f.offset = align(size, uintptr(ft.align)) 2570 if ft.align > typalign { 2571 typalign = ft.align 2572 } 2573 size = f.offset + ft.size 2574 2575 if ft.size == 0 { 2576 lastzero = size 2577 } 2578 2579 fs[i] = f 2580 } 2581 2582 if size > 0 && lastzero == size { 2583 // This is a non-zero sized struct that ends in a 2584 // zero-sized field. We add an extra byte of padding, 2585 // to ensure that taking the address of the final 2586 // zero-sized field can't manufacture a pointer to the 2587 // next object in the heap. See issue 9401. 2588 size++ 2589 } 2590 2591 var typ *structType 2592 var ut *uncommonType 2593 var typPin interface { 2594 common() *rtype 2595 } // structTypeFixedN 2596 2597 switch { 2598 case len(methods) == 0: 2599 t := new(structTypeUncommon) 2600 typ = &t.structType 2601 ut = &t.u 2602 typPin = t 2603 case len(methods) <= 4: 2604 t := new(structTypeFixed4) 2605 typ = &t.structType 2606 ut = &t.u 2607 copy(t.m[:], methods) 2608 typPin = t 2609 case len(methods) <= 8: 2610 t := new(structTypeFixed8) 2611 typ = &t.structType 2612 ut = &t.u 2613 copy(t.m[:], methods) 2614 typPin = t 2615 case len(methods) <= 16: 2616 t := new(structTypeFixed16) 2617 typ = &t.structType 2618 ut = &t.u 2619 copy(t.m[:], methods) 2620 typPin = t 2621 case len(methods) <= 32: 2622 t := new(structTypeFixed32) 2623 typ = &t.structType 2624 ut = &t.u 2625 copy(t.m[:], methods) 2626 typPin = t 2627 default: 2628 panic("reflect.StructOf: too many methods") 2629 } 2630 ut.mcount = uint16(len(methods)) 2631 ut.moff = uint32(unsafe.Sizeof(uncommonType{})) 2632 2633 if len(fs) > 0 { 2634 repr = append(repr, ' ') 2635 } 2636 repr = append(repr, '}') 2637 hash = fnv1(hash, '}') 2638 str := string(repr) 2639 2640 // Round the size up to be a multiple of the alignment. 
2641 size = align(size, uintptr(typalign)) 2642 2643 // Make the struct type. 2644 var istruct interface{} = struct{}{} 2645 prototype := *(**structType)(unsafe.Pointer(&istruct)) 2646 *typ = *prototype 2647 typ.fields = fs 2648 2649 // Look in cache 2650 structLookupCache.RLock() 2651 for _, st := range structLookupCache.m[hash] { 2652 t := st.common() 2653 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2654 structLookupCache.RUnlock() 2655 return t 2656 } 2657 } 2658 structLookupCache.RUnlock() 2659 2660 // not in cache, lock and retry 2661 structLookupCache.Lock() 2662 defer structLookupCache.Unlock() 2663 if structLookupCache.m == nil { 2664 structLookupCache.m = make(map[uint32][]interface { 2665 common() *rtype 2666 }) 2667 } 2668 for _, st := range structLookupCache.m[hash] { 2669 t := st.common() 2670 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2671 return t 2672 } 2673 } 2674 2675 // Look in known types. 2676 for _, t := range typesByString(str) { 2677 if haveIdenticalUnderlyingType(&typ.rtype, t, true) { 2678 // even if 't' wasn't a structType with methods, we should be ok 2679 // as the 'u uncommonType' field won't be accessed except when 2680 // tflag&tflagUncommon is set. 
2681 structLookupCache.m[hash] = append(structLookupCache.m[hash], t) 2682 return t 2683 } 2684 } 2685 2686 typ.str = resolveReflectName(newName(str, "", "", false)) 2687 typ.tflag = 0 2688 typ.hash = hash 2689 typ.size = size 2690 typ.align = typalign 2691 typ.fieldAlign = typalign 2692 typ.ptrToThis = 0 2693 if len(methods) > 0 { 2694 typ.tflag |= tflagUncommon 2695 } 2696 if !hasPtr { 2697 typ.kind |= kindNoPointers 2698 } else { 2699 typ.kind &^= kindNoPointers 2700 } 2701 2702 if hasGCProg { 2703 lastPtrField := 0 2704 for i, ft := range fs { 2705 if ft.typ.pointers() { 2706 lastPtrField = i 2707 } 2708 } 2709 prog := []byte{0, 0, 0, 0} // will be length of prog 2710 for i, ft := range fs { 2711 if i > lastPtrField { 2712 // gcprog should not include anything for any field after 2713 // the last field that contains pointer data 2714 break 2715 } 2716 // FIXME(sbinet) handle padding, fields smaller than a word 2717 elemGC := (*[1 << 30]byte)(unsafe.Pointer(ft.typ.gcdata))[:] 2718 elemPtrs := ft.typ.ptrdata / ptrSize 2719 switch { 2720 case ft.typ.kind&kindGCProg == 0 && ft.typ.ptrdata != 0: 2721 // Element is small with pointer mask; use as literal bits. 2722 mask := elemGC 2723 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). 2724 var n uintptr 2725 for n := elemPtrs; n > 120; n -= 120 { 2726 prog = append(prog, 120) 2727 prog = append(prog, mask[:15]...) 2728 mask = mask[15:] 2729 } 2730 prog = append(prog, byte(n)) 2731 prog = append(prog, mask[:(n+7)/8]...) 2732 case ft.typ.kind&kindGCProg != 0: 2733 // Element has GC program; emit one element. 2734 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1] 2735 prog = append(prog, elemProg...) 2736 } 2737 // Pad from ptrdata to size. 2738 elemWords := ft.typ.size / ptrSize 2739 if elemPtrs < elemWords { 2740 // Emit literal 0 bit, then repeat as needed. 
2741 prog = append(prog, 0x01, 0x00) 2742 if elemPtrs+1 < elemWords { 2743 prog = append(prog, 0x81) 2744 prog = appendVarint(prog, elemWords-elemPtrs-1) 2745 } 2746 } 2747 } 2748 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2749 typ.kind |= kindGCProg 2750 typ.gcdata = &prog[0] 2751 } else { 2752 typ.kind &^= kindGCProg 2753 bv := new(bitVector) 2754 addTypeBits(bv, 0, typ.common()) 2755 if len(bv.data) > 0 { 2756 typ.gcdata = &bv.data[0] 2757 } 2758 } 2759 typ.ptrdata = typeptrdata(typ.common()) 2760 typ.alg = new(typeAlg) 2761 if hashable { 2762 typ.alg.hash = func(p unsafe.Pointer, seed uintptr) uintptr { 2763 o := seed 2764 for _, ft := range typ.fields { 2765 pi := unsafe.Pointer(uintptr(p) + ft.offset) 2766 o = ft.typ.alg.hash(pi, o) 2767 } 2768 return o 2769 } 2770 } 2771 2772 if comparable { 2773 typ.alg.equal = func(p, q unsafe.Pointer) bool { 2774 for _, ft := range typ.fields { 2775 pi := unsafe.Pointer(uintptr(p) + ft.offset) 2776 qi := unsafe.Pointer(uintptr(q) + ft.offset) 2777 if !ft.typ.alg.equal(pi, qi) { 2778 return false 2779 } 2780 } 2781 return true 2782 } 2783 } 2784 2785 switch { 2786 case len(fs) == 1 && !ifaceIndir(fs[0].typ): 2787 // structs of 1 direct iface type can be direct 2788 typ.kind |= kindDirectIface 2789 default: 2790 typ.kind &^= kindDirectIface 2791 } 2792 2793 structLookupCache.m[hash] = append(structLookupCache.m[hash], typPin) 2794 return &typ.rtype 2795 } 2796 2797 func runtimeStructField(field StructField) structField { 2798 exported := field.PkgPath == "" 2799 if field.Name == "" { 2800 t := field.Type.(*rtype) 2801 if t.Kind() == Ptr { 2802 t = t.Elem().(*rtype) 2803 } 2804 exported = t.nameOff(t.str).isExported() 2805 } else if exported { 2806 b0 := field.Name[0] 2807 if ('a' <= b0 && b0 <= 'z') || b0 == '_' { 2808 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but has no PkgPath") 2809 } 2810 } 2811 2812 _ = resolveReflectType(field.Type.common()) 2813 return structField{ 2814 
name: newName(field.Name, string(field.Tag), field.PkgPath, exported), 2815 typ: field.Type.common(), 2816 offset: 0, 2817 } 2818 } 2819 2820 // typeptrdata returns the length in bytes of the prefix of t 2821 // containing pointer data. Anything after this offset is scalar data. 2822 // keep in sync with ../cmd/compile/internal/gc/reflect.go 2823 func typeptrdata(t *rtype) uintptr { 2824 if !t.pointers() { 2825 return 0 2826 } 2827 switch t.Kind() { 2828 case Struct: 2829 st := (*structType)(unsafe.Pointer(t)) 2830 // find the last field that has pointers. 2831 field := 0 2832 for i := range st.fields { 2833 ft := st.fields[i].typ 2834 if ft.pointers() { 2835 field = i 2836 } 2837 } 2838 f := st.fields[field] 2839 return f.offset + f.typ.ptrdata 2840 2841 default: 2842 panic("reflect.typeptrdata: unexpected type, " + t.String()) 2843 } 2844 } 2845 2846 // See cmd/compile/internal/gc/reflect.go for derivation of constant. 2847 const maxPtrmaskBytes = 2048 2848 2849 // ArrayOf returns the array type with the given count and element type. 2850 // For example, if t represents int, ArrayOf(5, t) represents [5]int. 2851 // 2852 // If the resulting type would be larger than the available address space, 2853 // ArrayOf panics. 2854 func ArrayOf(count int, elem Type) Type { 2855 typ := elem.(*rtype) 2856 // call SliceOf here as it calls cacheGet/cachePut. 2857 // ArrayOf also calls cacheGet/cachePut and thus may modify the state of 2858 // the lookupCache mutex. 2859 slice := SliceOf(elem) 2860 2861 // Look in cache. 2862 ckey := cacheKey{Array, typ, nil, uintptr(count)} 2863 if array := cacheGet(ckey); array != nil { 2864 return array 2865 } 2866 2867 // Look in known types. 2868 s := "[" + strconv.Itoa(count) + "]" + typ.String() 2869 for _, tt := range typesByString(s) { 2870 array := (*arrayType)(unsafe.Pointer(tt)) 2871 if array.elem == typ { 2872 return cachePut(ckey, tt) 2873 } 2874 } 2875 2876 // Make an array type. 
2877 var iarray interface{} = [1]unsafe.Pointer{} 2878 prototype := *(**arrayType)(unsafe.Pointer(&iarray)) 2879 array := *prototype 2880 array.str = resolveReflectName(newName(s, "", "", false)) 2881 array.hash = fnv1(typ.hash, '[') 2882 for n := uint32(count); n > 0; n >>= 8 { 2883 array.hash = fnv1(array.hash, byte(n)) 2884 } 2885 array.hash = fnv1(array.hash, ']') 2886 array.elem = typ 2887 array.ptrToThis = 0 2888 max := ^uintptr(0) / typ.size 2889 if uintptr(count) > max { 2890 panic("reflect.ArrayOf: array size would exceed virtual address space") 2891 } 2892 array.size = typ.size * uintptr(count) 2893 if count > 0 && typ.ptrdata != 0 { 2894 array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata 2895 } 2896 array.align = typ.align 2897 array.fieldAlign = typ.fieldAlign 2898 array.len = uintptr(count) 2899 array.slice = slice.(*rtype) 2900 2901 array.kind &^= kindNoPointers 2902 switch { 2903 case typ.kind&kindNoPointers != 0 || array.size == 0: 2904 // No pointers. 2905 array.kind |= kindNoPointers 2906 array.gcdata = nil 2907 array.ptrdata = 0 2908 2909 case count == 1: 2910 // In memory, 1-element array looks just like the element. 2911 array.kind |= typ.kind & kindGCProg 2912 array.gcdata = typ.gcdata 2913 array.ptrdata = typ.ptrdata 2914 2915 case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize: 2916 // Element is small with pointer mask; array is still small. 2917 // Create direct pointer mask by turning each 1 bit in elem 2918 // into count 1 bits in larger mask. 
2919 mask := make([]byte, (array.ptrdata/ptrSize+7)/8) 2920 elemMask := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:] 2921 elemWords := typ.size / ptrSize 2922 for j := uintptr(0); j < typ.ptrdata/ptrSize; j++ { 2923 if (elemMask[j/8]>>(j%8))&1 != 0 { 2924 for i := uintptr(0); i < array.len; i++ { 2925 k := i*elemWords + j 2926 mask[k/8] |= 1 << (k % 8) 2927 } 2928 } 2929 } 2930 array.gcdata = &mask[0] 2931 2932 default: 2933 // Create program that emits one element 2934 // and then repeats to make the array. 2935 prog := []byte{0, 0, 0, 0} // will be length of prog 2936 elemGC := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:] 2937 elemPtrs := typ.ptrdata / ptrSize 2938 if typ.kind&kindGCProg == 0 { 2939 // Element is small with pointer mask; use as literal bits. 2940 mask := elemGC 2941 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). 2942 var n uintptr 2943 for n = elemPtrs; n > 120; n -= 120 { 2944 prog = append(prog, 120) 2945 prog = append(prog, mask[:15]...) 2946 mask = mask[15:] 2947 } 2948 prog = append(prog, byte(n)) 2949 prog = append(prog, mask[:(n+7)/8]...) 2950 } else { 2951 // Element has GC program; emit one element. 2952 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1] 2953 prog = append(prog, elemProg...) 2954 } 2955 // Pad from ptrdata to size. 2956 elemWords := typ.size / ptrSize 2957 if elemPtrs < elemWords { 2958 // Emit literal 0 bit, then repeat as needed. 2959 prog = append(prog, 0x01, 0x00) 2960 if elemPtrs+1 < elemWords { 2961 prog = append(prog, 0x81) 2962 prog = appendVarint(prog, elemWords-elemPtrs-1) 2963 } 2964 } 2965 // Repeat count-1 times. 
2966 if elemWords < 0x80 { 2967 prog = append(prog, byte(elemWords|0x80)) 2968 } else { 2969 prog = append(prog, 0x80) 2970 prog = appendVarint(prog, elemWords) 2971 } 2972 prog = appendVarint(prog, uintptr(count)-1) 2973 prog = append(prog, 0) 2974 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4) 2975 array.kind |= kindGCProg 2976 array.gcdata = &prog[0] 2977 array.ptrdata = array.size // overestimate but ok; must match program 2978 } 2979 2980 etyp := typ.common() 2981 esize := etyp.Size() 2982 ealg := etyp.alg 2983 2984 array.alg = new(typeAlg) 2985 if ealg.equal != nil { 2986 eequal := ealg.equal 2987 array.alg.equal = func(p, q unsafe.Pointer) bool { 2988 for i := 0; i < count; i++ { 2989 pi := arrayAt(p, i, esize) 2990 qi := arrayAt(q, i, esize) 2991 if !eequal(pi, qi) { 2992 return false 2993 } 2994 2995 } 2996 return true 2997 } 2998 } 2999 if ealg.hash != nil { 3000 ehash := ealg.hash 3001 array.alg.hash = func(ptr unsafe.Pointer, seed uintptr) uintptr { 3002 o := seed 3003 for i := 0; i < count; i++ { 3004 o = ehash(arrayAt(ptr, i, esize), o) 3005 } 3006 return o 3007 } 3008 } 3009 3010 switch { 3011 case count == 1 && !ifaceIndir(typ): 3012 // array of 1 direct iface type can be direct 3013 array.kind |= kindDirectIface 3014 default: 3015 array.kind &^= kindDirectIface 3016 } 3017 3018 return cachePut(ckey, &array.rtype) 3019 } 3020 3021 func appendVarint(x []byte, v uintptr) []byte { 3022 for ; v >= 0x80; v >>= 7 { 3023 x = append(x, byte(v|0x80)) 3024 } 3025 x = append(x, byte(v)) 3026 return x 3027 } 3028 3029 // toType converts from a *rtype to a Type that can be returned 3030 // to the client of package reflect. In gc, the only concern is that 3031 // a nil *rtype must be replaced by a nil Type, but in gccgo this 3032 // function takes care of ensuring that multiple *rtype for the same 3033 // type are coalesced into a single Type. 
func toType(t *rtype) Type {
	// A nil *rtype must become a literal nil Type, not a non-nil
	// interface value holding a nil pointer.
	if t == nil {
		return nil
	}
	return t
}

type layoutKey struct {
	t    *rtype // function signature
	rcvr *rtype // receiver type, or nil if none
}

type layoutType struct {
	t         *rtype
	argSize   uintptr // size of arguments
	retOffset uintptr // offset of return values.
	stack     *bitVector
	framePool *sync.Pool
}

var layoutCache struct {
	sync.RWMutex
	m map[layoutKey]layoutType
}

// funcLayout computes a struct type representing the layout of the
// function arguments and return values for the function type t.
// If rcvr != nil, rcvr specifies the type of the receiver.
// The returned type exists only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in
// the name for possible debugging use.
func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) {
	if t.Kind() != Func {
		panic("reflect: funcLayout of non-func type")
	}
	if rcvr != nil && rcvr.Kind() == Interface {
		panic("reflect: funcLayout with interface receiver " + rcvr.String())
	}
	k := layoutKey{t, rcvr}
	// Fast path: read-locked cache lookup.
	layoutCache.RLock()
	if x := layoutCache.m[k]; x.t != nil {
		layoutCache.RUnlock()
		return x.t, x.argSize, x.retOffset, x.stack, x.framePool
	}
	layoutCache.RUnlock()
	// Slow path: take the write lock and re-check, since another
	// goroutine may have filled the entry in the meantime.
	layoutCache.Lock()
	if x := layoutCache.m[k]; x.t != nil {
		layoutCache.Unlock()
		return x.t, x.argSize, x.retOffset, x.stack, x.framePool
	}

	tt := (*funcType)(unsafe.Pointer(t))

	// compute gc program & stack bitmap for arguments
	ptrmap := new(bitVector)
	var offset uintptr
	if rcvr != nil {
		// Reflect uses the "interface" calling convention for
		// methods, where receivers take one word of argument
		// space no matter how big they actually are.
		if ifaceIndir(rcvr) || rcvr.pointers() {
			ptrmap.append(1)
		}
		offset += ptrSize
	}
	for _, arg := range tt.in() {
		// Round offset up to the argument's alignment.
		offset += -offset & uintptr(arg.align-1)
		addTypeBits(ptrmap, offset, arg)
		offset += arg.size
	}
	argN := ptrmap.n // bit count covering just the argument section
	argSize = offset
	if runtime.GOARCH == "amd64p32" {
		offset += -offset & (8 - 1)
	}
	offset += -offset & (ptrSize - 1)
	retOffset = offset
	for _, res := range tt.out() {
		offset += -offset & uintptr(res.align-1)
		addTypeBits(ptrmap, offset, res)
		offset += res.size
	}
	offset += -offset & (ptrSize - 1)

	// build dummy rtype holding gc program
	x := &rtype{
		align:   ptrSize,
		size:    offset,
		ptrdata: uintptr(ptrmap.n) * ptrSize,
	}
	if runtime.GOARCH == "amd64p32" {
		x.align = 8
	}
	if ptrmap.n > 0 {
		x.gcdata = &ptrmap.data[0]
	} else {
		x.kind |= kindNoPointers
	}
	// Truncate the returned stack map (by count) to the argument section;
	// the frame type above still covers results for GC.
	ptrmap.n = argN

	var s string
	if rcvr != nil {
		s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
	} else {
		s = "funcargs(" + t.String() + ")"
	}
	x.str = resolveReflectName(newName(s, "", "", false))

	// cache result for future callers
	if layoutCache.m == nil {
		layoutCache.m = make(map[layoutKey]layoutType)
	}
	framePool = &sync.Pool{New: func() interface{} {
		return unsafe_New(x)
	}}
	layoutCache.m[k] = layoutType{
		t:         x,
		argSize:   argSize,
		retOffset: retOffset,
		stack:     ptrmap,
		framePool: framePool,
	}
	layoutCache.Unlock()
	return x, argSize, retOffset, ptrmap, framePool
}

// ifaceIndir reports whether t is stored indirectly in an interface value.
func ifaceIndir(t *rtype) bool {
	return t.kind&kindDirectIface == 0
}

// Layout matches runtime.BitVector (well enough).
3166 type bitVector struct { 3167 n uint32 // number of bits 3168 data []byte 3169 } 3170 3171 // append a bit to the bitmap. 3172 func (bv *bitVector) append(bit uint8) { 3173 if bv.n%8 == 0 { 3174 bv.data = append(bv.data, 0) 3175 } 3176 bv.data[bv.n/8] |= bit << (bv.n % 8) 3177 bv.n++ 3178 } 3179 3180 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) { 3181 if t.kind&kindNoPointers != 0 { 3182 return 3183 } 3184 3185 switch Kind(t.kind & kindMask) { 3186 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer: 3187 // 1 pointer at start of representation 3188 for bv.n < uint32(offset/uintptr(ptrSize)) { 3189 bv.append(0) 3190 } 3191 bv.append(1) 3192 3193 case Interface: 3194 // 2 pointers 3195 for bv.n < uint32(offset/uintptr(ptrSize)) { 3196 bv.append(0) 3197 } 3198 bv.append(1) 3199 bv.append(1) 3200 3201 case Array: 3202 // repeat inner type 3203 tt := (*arrayType)(unsafe.Pointer(t)) 3204 for i := 0; i < int(tt.len); i++ { 3205 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem) 3206 } 3207 3208 case Struct: 3209 // apply fields 3210 tt := (*structType)(unsafe.Pointer(t)) 3211 for i := range tt.fields { 3212 f := &tt.fields[i] 3213 addTypeBits(bv, offset+f.offset, f.typ) 3214 } 3215 } 3216 } 3217