Home | History | Annotate | Download | only in tests
      1 /* Copyright (c) 2006, Google Inc.
      2  * All rights reserved.
      3  *
      4  * Redistribution and use in source and binary forms, with or without
      5  * modification, are permitted provided that the following conditions are
      6  * met:
      7  *
      8  *     * Redistributions of source code must retain the above copyright
      9  * notice, this list of conditions and the following disclaimer.
     10  *     * Redistributions in binary form must reproduce the above
     11  * copyright notice, this list of conditions and the following disclaimer
     12  * in the documentation and/or other materials provided with the
     13  * distribution.
     14  *     * Neither the name of Google Inc. nor the names of its
     15  * contributors may be used to endorse or promote products derived from
     16  * this software without specific prior written permission.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     29  *
     30  * ---
     31  * Author: Sanjay Ghemawat
     32  */
     33 
#include <stdio.h>
#include <string.h>   // for memset()
#include "base/logging.h"
#include "base/atomicops.h"
     37 
// Widen a constant to uint64 so that shifts of up to 63 bits below are
// well defined regardless of the width of the literal's natural type.
#define GG_ULONGLONG(x)  static_cast<uint64>(x)
     39 
     40 template <class AtomicType>
     41 static void TestAtomicIncrement() {
     42   // For now, we just test single threaded execution
     43 
     44   // use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
     45   // outside the expected address bounds.  This is in particular to
     46   // test that some future change to the asm code doesn't cause the
     47   // 32-bit NoBarrier_AtomicIncrement doesn't do the wrong thing on 64-bit
     48   // machines.
     49   struct {
     50     AtomicType prev_word;
     51     AtomicType count;
     52     AtomicType next_word;
     53   } s;
     54 
     55   AtomicType prev_word_value, next_word_value;
     56   memset(&prev_word_value, 0xFF, sizeof(AtomicType));
     57   memset(&next_word_value, 0xEE, sizeof(AtomicType));
     58 
     59   s.prev_word = prev_word_value;
     60   s.count = 0;
     61   s.next_word = next_word_value;
     62 
     63   ASSERT_EQ(1, base::subtle::NoBarrier_AtomicIncrement(&s.count, 1));
     64   ASSERT_EQ(1, s.count);
     65   ASSERT_EQ(prev_word_value, s.prev_word);
     66   ASSERT_EQ(next_word_value, s.next_word);
     67 
     68   ASSERT_EQ(3, base::subtle::NoBarrier_AtomicIncrement(&s.count, 2));
     69   ASSERT_EQ(3, s.count);
     70   ASSERT_EQ(prev_word_value, s.prev_word);
     71   ASSERT_EQ(next_word_value, s.next_word);
     72 
     73   ASSERT_EQ(6, base::subtle::NoBarrier_AtomicIncrement(&s.count, 3));
     74   ASSERT_EQ(6, s.count);
     75   ASSERT_EQ(prev_word_value, s.prev_word);
     76   ASSERT_EQ(next_word_value, s.next_word);
     77 
     78   ASSERT_EQ(3, base::subtle::NoBarrier_AtomicIncrement(&s.count, -3));
     79   ASSERT_EQ(3, s.count);
     80   ASSERT_EQ(prev_word_value, s.prev_word);
     81   ASSERT_EQ(next_word_value, s.next_word);
     82 
     83   ASSERT_EQ(1, base::subtle::NoBarrier_AtomicIncrement(&s.count, -2));
     84   ASSERT_EQ(1, s.count);
     85   ASSERT_EQ(prev_word_value, s.prev_word);
     86   ASSERT_EQ(next_word_value, s.next_word);
     87 
     88   ASSERT_EQ(0, base::subtle::NoBarrier_AtomicIncrement(&s.count, -1));
     89   ASSERT_EQ(0, s.count);
     90   ASSERT_EQ(prev_word_value, s.prev_word);
     91   ASSERT_EQ(next_word_value, s.next_word);
     92 
     93   ASSERT_EQ(-1, base::subtle::NoBarrier_AtomicIncrement(&s.count, -1));
     94   ASSERT_EQ(-1, s.count);
     95   ASSERT_EQ(prev_word_value, s.prev_word);
     96   ASSERT_EQ(next_word_value, s.next_word);
     97 
     98   ASSERT_EQ(-5, base::subtle::NoBarrier_AtomicIncrement(&s.count, -4));
     99   ASSERT_EQ(-5, s.count);
    100   ASSERT_EQ(prev_word_value, s.prev_word);
    101   ASSERT_EQ(next_word_value, s.next_word);
    102 
    103   ASSERT_EQ(0, base::subtle::NoBarrier_AtomicIncrement(&s.count, 5));
    104   ASSERT_EQ(0, s.count);
    105   ASSERT_EQ(prev_word_value, s.prev_word);
    106   ASSERT_EQ(next_word_value, s.next_word);
    107 }
    108 
    109 
// Number of bits in type T (assumes 8-bit bytes, i.e. CHAR_BIT == 8).
#define NUM_BITS(T) (sizeof(T) * 8)
    111 
    112 
    113 template <class AtomicType>
    114 static void TestCompareAndSwap() {
    115   AtomicType value = 0;
    116   AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
    117   ASSERT_EQ(1, value);
    118   ASSERT_EQ(0, prev);
    119 
    120   // Use test value that has non-zero bits in both halves, more for testing
    121   // 64-bit implementation on 32-bit platforms.
    122   const AtomicType k_test_val = (GG_ULONGLONG(1) <<
    123                                  (NUM_BITS(AtomicType) - 2)) + 11;
    124   value = k_test_val;
    125   prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
    126   ASSERT_EQ(k_test_val, value);
    127   ASSERT_EQ(k_test_val, prev);
    128 
    129   value = k_test_val;
    130   prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
    131   ASSERT_EQ(5, value);
    132   ASSERT_EQ(k_test_val, prev);
    133 }
    134 
    135 
    136 template <class AtomicType>
    137 static void TestAtomicExchange() {
    138   AtomicType value = 0;
    139   AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
    140   ASSERT_EQ(1, value);
    141   ASSERT_EQ(0, new_value);
    142 
    143   // Use test value that has non-zero bits in both halves, more for testing
    144   // 64-bit implementation on 32-bit platforms.
    145   const AtomicType k_test_val = (GG_ULONGLONG(1) <<
    146                                  (NUM_BITS(AtomicType) - 2)) + 11;
    147   value = k_test_val;
    148   new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
    149   ASSERT_EQ(k_test_val, value);
    150   ASSERT_EQ(k_test_val, new_value);
    151 
    152   value = k_test_val;
    153   new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
    154   ASSERT_EQ(5, value);
    155   ASSERT_EQ(k_test_val, new_value);
    156 }
    157 
    158 
    159 template <class AtomicType>
    160 static void TestAtomicIncrementBounds() {
    161   // Test increment at the half-width boundary of the atomic type.
    162   // It is primarily for testing at the 32-bit boundary for 64-bit atomic type.
    163   AtomicType test_val = GG_ULONGLONG(1) << (NUM_BITS(AtomicType) / 2);
    164   AtomicType value = test_val - 1;
    165   AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
    166   ASSERT_EQ(test_val, value);
    167   ASSERT_EQ(value, new_value);
    168 
    169   base::subtle::NoBarrier_AtomicIncrement(&value, -1);
    170   ASSERT_EQ(test_val - 1, value);
    171 }
    172 
    173 // This is a simple sanity check that values are correct. Not testing
    174 // atomicity
    175 template <class AtomicType>
    176 static void TestStore() {
    177   const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
    178   const AtomicType kVal2 = static_cast<AtomicType>(-1);
    179 
    180   AtomicType value;
    181 
    182   base::subtle::NoBarrier_Store(&value, kVal1);
    183   ASSERT_EQ(kVal1, value);
    184   base::subtle::NoBarrier_Store(&value, kVal2);
    185   ASSERT_EQ(kVal2, value);
    186 
    187   base::subtle::Acquire_Store(&value, kVal1);
    188   ASSERT_EQ(kVal1, value);
    189   base::subtle::Acquire_Store(&value, kVal2);
    190   ASSERT_EQ(kVal2, value);
    191 
    192   base::subtle::Release_Store(&value, kVal1);
    193   ASSERT_EQ(kVal1, value);
    194   base::subtle::Release_Store(&value, kVal2);
    195   ASSERT_EQ(kVal2, value);
    196 }
    197 
    198 // This is a simple sanity check that values are correct. Not testing
    199 // atomicity
    200 template <class AtomicType>
    201 static void TestLoad() {
    202   const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
    203   const AtomicType kVal2 = static_cast<AtomicType>(-1);
    204 
    205   AtomicType value;
    206 
    207   value = kVal1;
    208   ASSERT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
    209   value = kVal2;
    210   ASSERT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));
    211 
    212   value = kVal1;
    213   ASSERT_EQ(kVal1, base::subtle::Acquire_Load(&value));
    214   value = kVal2;
    215   ASSERT_EQ(kVal2, base::subtle::Acquire_Load(&value));
    216 
    217   value = kVal1;
    218   ASSERT_EQ(kVal1, base::subtle::Release_Load(&value));
    219   value = kVal2;
    220   ASSERT_EQ(kVal2, base::subtle::Release_Load(&value));
    221 }
    222 
// Runs every single-threaded sanity test for AtomicType except
// TestAtomicIncrement, which main() invokes separately.
template <class AtomicType>
static void TestAtomicOps() {
  TestCompareAndSwap<AtomicType>();
  TestAtomicExchange<AtomicType>();
  TestAtomicIncrementBounds<AtomicType>();
  TestStore<AtomicType>();
  TestLoad<AtomicType>();
}
    231 
    232 int main(int argc, char** argv) {
    233   TestAtomicIncrement<AtomicWord>();
    234   TestAtomicIncrement<Atomic32>();
    235 
    236   TestAtomicOps<AtomicWord>();
    237   TestAtomicOps<Atomic32>();
    238 
    239   // I've commented the Atomic64 tests out for now, because Atomic64
    240   // doesn't work on x86 systems that are not compiled to support mmx
    241   // registers.  Since I want this project to be as portable as
    242   // possible -- that is, not to assume we've compiled for mmx or even
    243   // that the processor supports it -- and we don't actually use
    244   // Atomic64 anywhere, I've commented it out of the test for now.
    245   // (Luckily, if we ever do use Atomic64 by accident, we'll get told
    246   // via a compiler error rather than some obscure runtime failure, so
    247   // this course of action is safe.)
    248   // If we ever *do* want to enable this, try adding -msse (or -mmmx?)
    249   // to the CXXFLAGS in Makefile.am.
    250 #if 0 and defined(BASE_HAS_ATOMIC64)
    251   TestAtomicIncrement<base::subtle::Atomic64>();
    252   TestAtomicOps<base::subtle::Atomic64>();
    253 #endif
    254 
    255   printf("PASS\n");
    256   return 0;
    257 }
    258