/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "context_x86.h"

#include "mirror/art_method.h"
#include "mirror/object-inl.h"
#include "stack.h"

namespace art {
namespace x86 {

static const uint32_t gZero = 0;

void X86Context::Reset() {
  for (int i = 0; i < kNumberOfCpuRegisters; i++) {
    gprs_[i] = NULL;
  }
  gprs_[ESP] = &esp_;
  // Initialize registers with easy to spot debug values.
  esp_ = X86Context::kBadGprBase + ESP;
  eip_ = X86Context::kBadGprBase + kNumberOfCpuRegisters;
}

void X86Context::FillCalleeSaves(const StackVisitor& fr) {
  mirror::ArtMethod* method = fr.GetMethod();
  uint32_t core_spills = method->GetCoreSpillMask();
  size_t spill_count = __builtin_popcount(core_spills);
  DCHECK_EQ(method->GetFpSpillMask(), 0u);
  size_t frame_size = method->GetFrameSizeInBytes();
  if (spill_count > 0) {
    // Lowest number spill is farthest away, walk registers and fill into context.
    int j = 2;  // Offset j to skip return address spill.
    for (int i = 0; i < kNumberOfCpuRegisters; i++) {
      if (((core_spills >> i) & 1) != 0) {
        gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
        j++;
      }
    }
  }
}

void X86Context::SmashCallerSaves() {
  // This needs to be 0 because we want a null/zero return value.
  gprs_[EAX] = const_cast<uint32_t*>(&gZero);
  gprs_[EDX] = const_cast<uint32_t*>(&gZero);
  gprs_[ECX] = NULL;
  gprs_[EBX] = NULL;
}

void X86Context::SetGPR(uint32_t reg, uintptr_t value) {
  CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
  CHECK_NE(gprs_[reg], &gZero);
  CHECK(gprs_[reg] != NULL);
  *gprs_[reg] = value;
}

void X86Context::DoLongJump() {
#if defined(__i386__)
  // Array of GPR values, filled from the context backward for the long jump pop. We add a slot at
  // the top for the stack pointer that doesn't get popped in a pop-all.
  volatile uintptr_t gprs[kNumberOfCpuRegisters + 1];
  for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) {
    gprs[kNumberOfCpuRegisters - i - 1] = gprs_[i] != NULL ? *gprs_[i] : X86Context::kBadGprBase + i;
  }
  // We want to load the stack pointer one slot below so that the ret will pop eip.
  uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - kWordSize;
  gprs[kNumberOfCpuRegisters] = esp;
  *(reinterpret_cast<uintptr_t*>(esp)) = eip_;
  __asm__ __volatile__(
      "movl %0, %%esp\n\t"  // ESP points to gprs.
      "popal\n\t"           // Load all registers except ESP and EIP with values in gprs.
      "popl %%esp\n\t"      // Load stack pointer.
      "ret\n\t"             // From higher in the stack pop eip.
      :  // output.
      : "g"(&gprs[0])  // input.
      :);  // clobber.
#else
  UNIMPLEMENTED(FATAL);
#endif
}

}  // namespace x86
}  // namespace art