// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// https://golang.org/s/go14heapdump.

package runtime

import "unsafe"

// runtime_debug_WriteHeapDump is the runtime side of
// runtime/debug.WriteHeapDump. It stops the world, performs the dump
// on the system stack (the dump walks goroutine stacks, so it must not
// run on a user stack), then restarts the world.
//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
	stopTheWorld("write heap dump")

	systemstack(func() {
		writeheapdump_m(fd)
	})

	startTheWorld()
}

// Field kinds and record tags of the go1.5 heap dump format.
// fieldKind* values label entries in a field list; tag* values label
// top-level records in the dump stream.
const (
	fieldKindEol   = 0
	fieldKindPtr   = 1
	fieldKindIface = 2
	fieldKindEface = 3
	tagEOF         = 0
	tagObject      = 1
	tagOtherRoot   = 2
	tagType        = 3
	tagGoroutine   = 4
	tagStackFrame  = 5
	tagParams      = 6
	tagFinalizer   = 7
	tagItab        = 8
	tagOSThread    = 9
	tagMemStats    = 10
	tagQueuedFinalizer = 11
	tagData        = 12
	tagBSS         = 13
	tagDefer       = 14
	tagPanic       = 15
	tagMemProf     = 16
	tagAllocSample = 17
)

var dumpfd uintptr // fd to write the dump to.
var tmpbuf []byte  // scratch buffer for makeheapobjbv; sysAlloc'd, freed in writeheapdump_m.

// buffer of pending write data
const (
	bufSize = 4096
)

var buf [bufSize]byte // pending output not yet written to dumpfd
var nbuf uintptr      // number of valid bytes in buf

// dwrite queues len bytes starting at data for output to dumpfd.
// Small writes are coalesced in buf; when the data does not fit, the
// pending buffer is flushed first, and writes of bufSize or more bytes
// bypass the buffer entirely.
func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}
	// Fast path: data fits in the remaining buffer space.
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	// Flush what we have, then either write data directly (too big to
	// buffer) or start a fresh buffer with it.
	write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
	if len >= bufSize {
		write(dumpfd, data, int32(len))
		nbuf = 0
	} else {
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
	}
}

// dwritebyte writes a single byte via dwrite.
func dwritebyte(b byte) {
	dwrite(unsafe.Pointer(&b), 1)
}

// flush writes any buffered bytes to dumpfd and empties the buffer.
func flush() {
	write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
	nbuf = 0
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256
	typeCacheAssoc   = 4
)

type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket

// dump a uint64 in a varint format parseable by encoding/binary
func dumpint(v uint64) {
	var buf [10]byte // 10 bytes is enough for a 64-bit varint (ceil(64/7))
	var n int
	for v >= 0x80 {
		buf[n] = byte(v | 0x80) // low 7 bits, continuation bit set
		n++
		v >>= 7
	}
	buf[n] = byte(v) // final byte, continuation bit clear
	n++
	dwrite(unsafe.Pointer(&buf), uintptr(n))
}

// dumpbool writes a bool as a varint 0 or 1.
func dumpbool(b bool) {
	if b {
		dumpint(1)
	} else {
		dumpint(0)
	}
}

// dump varint uint64 length followed by memory contents
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}

// dumpslice writes a byte slice as a length-prefixed memory range.
func dumpslice(b []byte) {
	dumpint(uint64(len(b)))
	if len(b) > 0 {
		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
	}
}

// dumpstr writes a string as a length-prefixed memory range.
func dumpstr(s string) {
	sp := (*stringStruct)(unsafe.Pointer(&s))
	dumpmemrange(sp.str, uintptr(sp.len))
}

// dump information for a type
//
// Emits a tagType record the first time a type is seen; an LRU cache
// (typecache) suppresses most duplicates. Emitting a type twice is
// harmless for readers of the dump.
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.size))
	if t.x == nil || t.x.pkgpath == nil || t.x.name == nil {
		// No uncommon-type info; fall back to the full type string.
		dumpstr(*t._string)
	} else {
		// Emit "pkgpath.name" as one length-prefixed string without
		// allocating (concatenation would allocate during STW).
		pkgpath := (*stringStruct)(unsafe.Pointer(&t.x.pkgpath))
		name := (*stringStruct)(unsafe.Pointer(&t.x.name))
		dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
		dwrite(pkgpath.str, uintptr(pkgpath.len))
		dwritebyte('.')
		dwrite(name.str, uintptr(name.len))
	}
	// Whether the data field of an interface holding this type is a pointer
	// to the value (indirect) rather than the value itself.
	dumpbool(t.kind&kindDirectIface == 0 || t.kind&kindNoPointers == 0)
}

// dump an object
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpbvtypes(&bv, obj)
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}

// dumpotherroot records a miscellaneous root pointer with a description.
func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}

// dumpfinalizer records a registered (not yet queued) finalizer.
func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}

// dump kinds & offsets of interesting fields in bv
func dumpbv(cbv *bitvector, offset uintptr) {
	bv := gobv(*cbv)
	for i := uintptr(0); i < uintptr(bv.n); i++ {
		if bv.bytedata[i/8]>>(i%8)&1 == 1 {
			dumpint(fieldKindPtr)
			dumpint(uint64(offset + i*ptrSize))
		}
	}
}

// dumpframe is the gentraceback callback that emits one tagStackFrame
// record per frame. arg is a *childInfo carrying layout information
// from the frame below (the callee); on return it is updated to
// describe this frame for its caller. Always returns true so the
// traceback continues to the bottom of the stack.
func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
	child := (*childInfo)(arg)
	f := s.fn

	// Figure out what we can about our stack map
	pc := s.pc
	if pc != f.entry {
		// Back up to the call instruction so the stack map lookup
		// reflects the state at the call site.
		pc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, pc)
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}
	stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))

	// Dump any types we will need to resolve Efaces.
	if child.args.n >= 0 {
		dumpbvtypes(&child.args, unsafe.Pointer(s.sp+child.argoff))
	}
	var bv bitvector
	if stkmap != nil && stkmap.n > 0 {
		bv = stackmapdata(stkmap, pcdata)
		dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n*ptrSize)))
	} else {
		bv.n = -1 // sentinel: no locals bitmap for this frame
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(s.sp))                              // lowest address in frame
	dumpint(uint64(child.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
	dumpint(uint64(f.entry))
	dumpint(uint64(s.pc))
	dumpint(uint64(s.continpc))
	name := funcname(f)
	if name == "" {
		name = "unknown function"
	}
	dumpstr(name)

	// Dump fields in the outargs section
	if child.args.n >= 0 {
		dumpbv(&child.args, child.argoff)
	} else {
		// conservative - everything might be a pointer
		for off := child.argoff; off < child.argoff+child.arglen; off += ptrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	}

	// Dump fields in the local vars section
	if stkmap == nil {
		// No locals information, dump everything.
		for off := child.arglen; off < s.varp-s.sp; off += ptrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n < 0 {
		// Locals size information, dump just the locals.
		size := uintptr(-stkmap.n)
		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += ptrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n > 0 {
		// Locals bitmap information, scan just the pointers in
		// locals.
		dumpbv(&bv, s.varp-uintptr(bv.n)*ptrSize-s.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	child.argoff = s.argp - s.fp
	child.arglen = s.arglen
	child.sp = (*uint8)(unsafe.Pointer(s.sp))
	child.depth++
	stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
	if stkmap != nil {
		child.args = stackmapdata(stkmap, pcdata)
	} else {
		child.args.n = -1
	}
	return true
}

// dumpgoroutine emits a tagGoroutine record for gp, followed by its
// stack frames (via gentraceback/dumpframe) and its defer and panic
// records.
func dumpgoroutine(gp *g) {
	// Pick the register state to traceback from: the syscall-entry
	// state if the goroutine is in a syscall, otherwise its scheduler
	// state.
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(uint64(gp.goid))
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason)
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1 // no args pointer map yet for the innermost frame
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
		dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		// Split the panic argument into its eface type/data words.
		eface := (*eface)(unsafe.Pointer(&p.arg))
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}

// dumpgs dumps every goroutine that is not dead. The world must be
// stopped, so no goroutine can be in a scan or running state.
func dumpgs() {
	// goroutines & stacks
	for i := 0; uintptr(i) < allglen; i++ {
		gp := allgs[i]
		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
		switch status {
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	}
}

// finq_callback is the iterate_finq callback; it records one finalizer
// that has already been queued for execution. nret is unused here but
// required by the callback signature.
func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

// dumproots dumps the non-heap GC roots: the data and bss segments
// (with their pointer masks), per-span finalizer specials, and the
// queued-finalizer list.
func dumproots() {
	// TODO(mwhudson): dump datamask etc from all objects
	// data segment
	dumpbvtypes(&firstmoduledata.gcdatamask, unsafe.Pointer(firstmoduledata.data))
	dumpint(tagData)
	dumpint(uint64(firstmoduledata.data))
	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
	dumpfields(firstmoduledata.gcdatamask)

	// bss segment
	dumpbvtypes(&firstmoduledata.gcbssmask, unsafe.Pointer(firstmoduledata.bss))
	dumpint(tagBSS)
	dumpint(uint64(firstmoduledata.bss))
	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
	dumpfields(firstmoduledata.gcbssmask)

	// MSpan.types
	allspans := h_allspans
	for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
		s := allspans[spanidx]
		if s.state == _MSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				// Object address = span base + offset of the special.
				p := unsafe.Pointer((uintptr(s.start) << _PageShift) + uintptr(spf.special.offset))
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}

// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
var freemark [_PageSize / 8]bool

// dumpobjs walks every in-use span and dumps each allocated (non-free)
// object in it. Free slots are identified by walking the span's free
// list into the freemark scratch array, which is cleared again as it
// is consumed so it is all-false between spans.
func dumpobjs() {
	for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
		s := h_allspans[i]
		if s.state != _MSpanInUse {
			continue
		}
		p := uintptr(s.start << _PageShift)
		size := s.elemsize
		n := (s.npages << _PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}
		// Mark the slots on the free list; requires MCaches to have
		// been flushed (done by updatememstats in writeheapdump_m).
		for l := s.freelist; l.ptr() != nil; l = l.ptr().next {
			freemark[(uintptr(l)-p)/size] = true
		}
		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				freemark[j] = false // reset for the next span
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}

// dumpparams emits the tagParams record: endianness, pointer size,
// heap bounds, architecture, GOEXPERIMENT, and CPU count.
func dumpparams() {
	dumpint(tagParams)
	// Detect endianness by inspecting the first byte of a uintptr 1.
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(ptrSize)
	dumpint(uint64(mheap_.arena_start))
	dumpint(uint64(mheap_.arena_used))
	dumpint(thechar)
	dumpstr(goexperiment)
	dumpint(uint64(ncpu))
}

// itab_callback records, for one itab, the type of the interface's
// data word so the dump reader can deduce types of interface
// referents.
// NOTE(review): the second and third branches below emit identical
// records; only the comments differ. Presumably kept separate for
// documentation value.
func itab_callback(tab *itab) {
	t := tab._type
	// Dump a map from itab* to the type of its data field.
	// We want this map so we can deduce types of interface referents.
	if t.kind&kindDirectIface == 0 {
		// indirect - data slot is a pointer to t.
		dumptype(t.ptrto)
		dumpint(tagItab)
		dumpint(uint64(uintptr(unsafe.Pointer(tab))))
		dumpint(uint64(uintptr(unsafe.Pointer(t.ptrto))))
	} else if t.kind&kindNoPointers == 0 {
		// t is pointer-like - data slot is a t.
		dumptype(t)
		dumpint(tagItab)
		dumpint(uint64(uintptr(unsafe.Pointer(tab))))
		dumpint(uint64(uintptr(unsafe.Pointer(t))))
	} else {
		// Data slot is a scalar. Dump type just for fun.
		// With pointer-only interfaces, this shouldn't happen.
		dumptype(t)
		dumpint(tagItab)
		dumpint(uint64(uintptr(unsafe.Pointer(tab))))
		dumpint(uint64(uintptr(unsafe.Pointer(t))))
	}
}

// dumpitabs dumps a record for every itab in the runtime's itab table.
func dumpitabs() {
	iterate_itabs(itab_callback)
}

// dumpms emits one tagOSThread record per M on the allm list.
func dumpms() {
	for mp := allm; mp != nil; mp = mp.alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
		dumpint(uint64(mp.id))
		dumpint(mp.procid)
	}
}

// dumpmemstats emits the tagMemStats record. The field order here is
// part of the dump format and must not change.
func dumpmemstats() {
	dumpint(tagMemStats)
	dumpint(memstats.alloc)
	dumpint(memstats.total_alloc)
	dumpint(memstats.sys)
	dumpint(memstats.nlookup)
	dumpint(memstats.nmalloc)
	dumpint(memstats.nfree)
	dumpint(memstats.heap_alloc)
	dumpint(memstats.heap_sys)
	dumpint(memstats.heap_idle)
	dumpint(memstats.heap_inuse)
	dumpint(memstats.heap_released)
	dumpint(memstats.heap_objects)
	dumpint(memstats.stacks_inuse)
	dumpint(memstats.stacks_sys)
	dumpint(memstats.mspan_inuse)
	dumpint(memstats.mspan_sys)
	dumpint(memstats.mcache_inuse)
	dumpint(memstats.mcache_sys)
	dumpint(memstats.buckhash_sys)
	dumpint(memstats.gc_sys)
	dumpint(memstats.other_sys)
	dumpint(memstats.next_gc)
	dumpint(memstats.last_gc)
	dumpint(memstats.pause_total_ns)
	for i := 0; i < 256; i++ {
		dumpint(memstats.pause_ns[i])
	}
	dumpint(uint64(memstats.numgc))
}

// dumpmemprof_callback is the iterate_memprof callback; it emits one
// tagMemProf record per profile bucket, including the symbolized call
// stack. For PCs with no function info it formats "(0x...)" by hand to
// avoid allocating during the dump.
func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	// View the stack as a large array; only the first nstk entries are valid.
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if f == nil {
			// No symbol info: render the PC as "(0x...)" hex, built
			// backwards from the end of a fixed buffer.
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?")
			dumpint(0)
		} else {
			dumpstr(funcname(f))
			if i > 0 && pc > f.entry {
				// Back up to the call instruction for line lookup
				// (except for the innermost frame, which is a real PC).
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}

// dumpmemprof dumps the memory profile buckets, then one
// tagAllocSample record per profiled object (profile specials on
// in-use spans).
func dumpmemprof() {
	iterate_memprof(dumpmemprof_callback)
	allspans := h_allspans
	for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
		s := allspans[spanidx]
		if s.state != _MSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := uintptr(s.start<<_PageShift) + uintptr(spp.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}

// dumphdr is the magic header identifying the dump format version.
var dumphdr = []byte("go1.5 heap dump\n")

// mdump writes the entire dump: header, params, itabs, objects,
// goroutines, threads, roots, memstats, and memory profile, ending
// with tagEOF and a buffer flush. Must run with the world stopped.
func mdump() {
	// make sure we're done sweeping
	for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
		s := h_allspans[i]
		if s.state == _MSpanInUse {
			mSpan_EnsureSwept(s)
		}
	}
	// Reset the type cache so types from a previous dump are re-emitted.
	memclr(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats()
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}

// writeheapdump_m runs on the system stack (see
// runtime_debug_WriteHeapDump). It marks the calling user goroutine as
// waiting, refreshes memstats (flushing MCaches so span free lists are
// complete for dumpobjs), performs the dump, and releases the scratch
// buffer afterwards.
func writeheapdump_m(fd uintptr) {
	_g_ := getg()
	casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
	_g_.waitreason = "dumping heap"

	// Update stats so we can dump them.
	// As a side effect, flushes all the MCaches so the MSpan.freelist
	// lists contain all the free objects.
	updatememstats(nil)

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump()

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
}

// dumpint() the kind & offset of each field in an object.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}

// The heap dump reader needs to be able to disambiguate
// Eface entries. So it needs to know every type that might
// appear in such an entry. The following routine accomplishes that.
// TODO(rsc, khr): Delete - no longer possible.

// Dump all the types that appear in the type field of
// any Eface described by this bit vector.
// Intentionally a no-op stub; callers remain in place pending the
// TODO above.
func dumpbvtypes(bv *bitvector, base unsafe.Pointer) {
}

// makeheapobjbv builds a pointer bitmap for the object at p of the
// given size by converting the heap bitmap, using the package-level
// tmpbuf as (growable, sysAlloc'd) scratch space. The result aliases
// tmpbuf and is only valid until the next call.
func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / ptrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		p := sysAlloc(n, &memstats.other_sys)
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Convert heap bitmap to pointer bitmap.
	for i := uintptr(0); i < nptr/8+1; i++ {
		tmpbuf[i] = 0
	}
	i := uintptr(0)
	hbits := heapBitsForAddr(p)
	for ; i < nptr; i++ {
		if i >= 2 && !hbits.isMarked() {
			break // end of object
		}
		if hbits.isPointer() {
			tmpbuf[i/8] |= 1 << (i % 8)
		}
		hbits = hbits.next()
	}
	return bitvector{int32(i), &tmpbuf[0]}
}