#pragma once

/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Memory layout for locks of all types.

// The vsoc::layout namespace indicates that these are shared memory structure
// definitions. The #includes given below are strictly limited, as are the
// types that can be referenced below.

// For _mm_pause()
#include <x86intrin.h>

#include <atomic>
#include <cstdint>

#include "common/vsoc/shm/base.h"

// Host userspace, guest userspace, and the guest kernel must all agree on
// the relationship between std::atomic and atomic_t. That's hard to do without
// examining assembly, and we can't really examine atomic_t outside of the
// kernel tree, but we can at least assert that the host and the guest
// agree on a size.
static_assert(sizeof(std::atomic<uint32_t>) == 4, "std::atomic size mismatch");

namespace vsoc {

class RegionView;

namespace layout {

/**
 * Lock that causes threads to busy loop rather than sleeping.
 * This lock should never be used when the amount of work in the critical
 * section cannot be bounded.
 */
class SpinLock {
 public:
  static constexpr size_t layout_size = 4;

  /**
   * Acquire the spinlock on the queue. This will effectively block all
   * readers and writers.
   */
  void Lock() {
    while (true) {
      uint32_t expected = 0;
      // Try to claim the lock word for this side with a single CAS.
      if (lock_.compare_exchange_strong(expected, Sides::OurSide)) {
        return;
      }
      // Hint to the CPU that this is a spin-wait loop before retrying.
      _mm_pause();
    }
  }

  /**
   * Drop the lock iff it is currently held by this side. Used by
   * recovery code that cleans up regions in the event of a reboot
   * (guest side) or a service restart (host side).
   *
   * The caller must ensure that no other threads on its side
   * (e.g. guest/host) are using the window.
   */
  bool Recover() {
    uint32_t expected = Sides::OurSide;
    return lock_.compare_exchange_strong(expected, 0);
  }

  /**
   * Release the spinlock.
   */
  void Unlock() {
    lock_ = 0;
  }

 protected:
  std::atomic<uint32_t> lock_;
};
ASSERT_SHM_COMPATIBLE(SpinLock);
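
// Illustrative usage (a sketch, not part of this header): pairing Lock() and
// Unlock() around a short, bounded critical section. SpinLockGuard and
// IncrementSharedValue are hypothetical names used only for this example.
//
//   class SpinLockGuard {
//    public:
//     explicit SpinLockGuard(SpinLock* lock) : lock_(lock) { lock_->Lock(); }
//     ~SpinLockGuard() { lock_->Unlock(); }
//
//    private:
//     SpinLock* lock_;
//   };
//
//   void IncrementSharedValue(SpinLock* lock, uint32_t* value) {
//     SpinLockGuard guard(lock);
//     ++*value;  // Keep this bounded: every other locker is busy looping.
//   }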

/**
 * This is a generic synchronization primitive that provides space for the
 * owner of the lock to write platform-specific information.
 */
class WaitingLockBase {
 public:
  static constexpr size_t layout_size = 40;

 protected:
  // Common code to handle locking.
  // Must be called with the kernel's thread id.
  // Returns true if the lock was acquired. In this case the value in
  // expected_value is undefined.
  // Returns false if locking failed. The value discovered in the lock word
  // is returned in expected_value, and should probably be used in a
  // conditional sleep.
  bool TryLock(uint32_t tid, uint32_t* expected_value);

  // Common code to handle unlocking.
  // Must be called with the kernel's thread id.
  // Returns the sides that should be signalled, or 0.
  Sides UnlockCommon(uint32_t tid);

  // Common code to recover single-sided locks.
  bool RecoverSingleSided();

  // Non-zero values in this word indicate that the lock is in use.
  // This is 32 bits for compatibility with futex().
  std::atomic<uint32_t> lock_uint32_;

  // Pad so we line up with glibc's pthread_mutex_t and can share the same
  // queue. These fields may be redefined at any point in the future. They
  // should not be used.
 private:
// These fields are known to be unused and are provided for compatibility
// with glibc's locks.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-private-field"
  uint32_t reserved_1_;
  char reserved_2_[16];
  // Provide scratch space for the owner of the lock. The content of this space
  // is undefined when the lock is acquired. The owner may write to and read
  // from it while it holds the lock, but must relinquish control before
  // releasing the lock.
  //
  // This is intended to support Linux robust futexes. See the documentation
  // in the kernel tree:
  //   Documentation/robust-futex-ABI.txt
 public:
  int64_t owner_scratch_[2];
#pragma clang diagnostic pop
};
ASSERT_SHM_COMPATIBLE(WaitingLockBase);
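
// A minimal sketch of how a one-sided subclass might build a sleeping Lock()
// on top of TryLock(), assuming a Linux futex wait on lock_uint32_ and a
// gettid() helper, with pointer casts elided. The actual implementations live
// outside this header; this only illustrates the protocol described above.
//
//   void Lock() {
//     uint32_t tid = gettid();
//     uint32_t expected_value;
//     while (!TryLock(tid, &expected_value)) {
//       // Sleep only while the lock word still holds the value we observed;
//       // any change means the holder may have released, so retry the CAS.
//       syscall(SYS_futex, &lock_uint32_, FUTEX_WAIT, expected_value,
//               nullptr, nullptr, 0);
//     }
//   }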

/**
 * GuestLocks can be acquired and released only on the guest. They reside
 * in the shared memory window because multiple guest processes may need
 * to coordinate activities in certain shared memory regions.
 *
 * Representing this as a concrete type allows for some optimizations when
 * signalling on the lock.
 */
class GuestLock : public WaitingLockBase {
 public:
  static constexpr size_t layout_size = WaitingLockBase::layout_size;

#ifndef CUTTLEFISH_HOST
  void Lock();
  void Unlock();
  /**
   * Drop the lock iff it is currently held. Used by
   * recovery code that cleans up regions in the event of a reboot.
   *
   * The caller must ensure that no other threads on its side
   * (e.g. guest/host) are using the window.
   */
  bool Recover();
#endif
};
ASSERT_SHM_COMPATIBLE(GuestLock);

/**
 * HostLocks can be acquired and released only on the host. They reside
 * in the shared memory window because multiple host processes may need
 * to coordinate activities in certain shared memory regions.
 *
 * Representing this as a concrete type allows for some optimizations when
 * signalling on the lock.
 */
class HostLock : public WaitingLockBase {
 public:
  static constexpr size_t layout_size = WaitingLockBase::layout_size;

#ifdef CUTTLEFISH_HOST
  void Lock();
  void Unlock();
  /**
   * Drop the lock iff it is currently held. Used by
   * recovery code that cleans up regions in the event of a daemon
   * restart.
   *
   * The caller must ensure that no other threads on its side
   * (e.g. guest/host) are using the window.
   */
  bool Recover();
#endif
};
ASSERT_SHM_COMPATIBLE(HostLock);

/**
 * GuestAndHostLocks can be acquired and released on either side of the
 * shared memory window. The locks attempt to enforce fairness by using
 * a round-trip signal:
 *
 *   When a guest releases a lock this code sends a signal to wake the host,
 *   but not other guest waiters.
 *
 *   The wake handler on the host wakes up any local waiters and then reposts
 *   the signal to the guest.
 *
 *   When the guest receives the signal from the host it then wakes up
 *   any waiters.
 *
 * A similar scenario applies when the host releases a lock with guest waiters.
 *
 * Signalling across the shared memory window twice has non-trivial cost.
 * There are some optimizations in the code to prevent the full round-trip
 * if the process releasing the lock can confirm that there are no waiters on
 * the other side.
 *
 * Representing this as a concrete type allows for some optimizations when
 * signalling on the lock.
 */
class GuestAndHostLock : public WaitingLockBase {
 public:
  static constexpr size_t layout_size = WaitingLockBase::layout_size;

  void Lock(RegionView*);
  void Unlock(RegionView*);
  /**
   * Drop the lock iff it is currently held by this side. Used by
   * recovery code that cleans up regions in the event of a reboot
   * (guest side) or a service restart (host side).
   *
   * The caller must ensure that no other threads on its side
   * (e.g. guest/host) are using the window.
   */
  bool Recover(RegionView*);
};
ASSERT_SHM_COMPATIBLE(GuestAndHostLock);
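
// A sketch of the unlock path described above, assuming a hypothetical
// RegionView::SendSignal() that wakes waiters on the given sides and a Sides
// type exposing a raw value_ word; the real signalling interface is defined
// with RegionView, not here.
//
//   void GuestAndHostLock::Unlock(RegionView* region) {
//     Sides sides_to_signal = UnlockCommon(gettid());
//     // Skip the expensive cross-window round-trip when UnlockCommon reports
//     // that no side needs to be woken.
//     if (sides_to_signal.value_) {
//       region->SendSignal(sides_to_signal, &lock_uint32_);
//     }
//   }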

}  // namespace layout
}  // namespace vsoc