// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64 arm64 mips64 mips64le ppc64 ppc64le s390x

package runtime

import "unsafe"

const (
	// addrBits is the number of bits needed to represent a virtual address.
	//
	// In Linux the user address space for each architecture is limited as
	// follows (taken from the processor.h file for the architecture):
	//
	// Architecture  Name              Maximum Value (exclusive)
	// ---------------------------------------------------------------------
	// arm64         TASK_SIZE_64      Depends on configuration.
	// ppc64{,le}    TASK_SIZE_USER64  0x400000000000UL (46 bit addresses)
	// mips64{,le}   TASK_SIZE64       0x010000000000UL (40 bit addresses)
	// s390x         TASK_SIZE         0x020000000000UL (41 bit addresses)
	//
	// These values may increase over time.
	//
	// On AMD64, virtual addresses are 48-bit numbers sign extended to 64.
	// We shift the address left 16 to eliminate the sign extended part and make
	// room in the bottom for the count.
	addrBits = 48

	// In addition to the 16 bits taken from the top, we can take 3 from the
	// bottom, because node must be pointer-aligned, giving a total of 19 bits
	// of count.
	cntBits = 64 - addrBits + 3
)

// lfstackPack packs a node pointer and a count into a single uint64.
//
// The pointer is shifted left by 64-addrBits (16) so its significant
// address bits occupy the top of the word, and cnt, truncated to the low
// cntBits (19) bits, fills the bottom. The 3 low bits of the original
// pointer are implicitly zero because node is pointer-aligned (see the
// cntBits comment above), so no address information is lost.
func lfstackPack(node *lfnode, cnt uintptr) uint64 {
	return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
}

// lfstackUnpack recovers the node pointer from a value produced by
// lfstackPack. Shifting right by cntBits (19) discards the count, and
// shifting left by 3 restores the pointer-alignment zero bits — a net
// logical right shift of 16, undoing the left shift done by lfstackPack.
func lfstackUnpack(val uint64) *lfnode {
	if GOARCH == "amd64" {
		// amd64 systems can place the stack above the VA hole, so we need to sign extend
		// val before unpacking. The int64 conversion makes the >> an
		// arithmetic shift, reproducing the sign-extended canonical
		// form of a 48-bit AMD64 address (see the addrBits comment).
		return (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> cntBits << 3)))
	}
	// On the other architectures the address fits below the top bit
	// (at most 48 significant bits), so a logical shift suffices.
	return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
}