// Home | History | Annotate | Download | only in sanitizer_common
      1 //===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // Implementation of a mapping from arbitrary values to unique 32-bit
     11 // identifiers.
     12 //===----------------------------------------------------------------------===//
     13 
     14 #ifndef SANITIZER_STACKDEPOTBASE_H
     15 #define SANITIZER_STACKDEPOTBASE_H
     16 
     17 #include "sanitizer_internal_defs.h"
     18 #include "sanitizer_mutex.h"
     19 #include "sanitizer_atomic.h"
     20 #include "sanitizer_persistent_allocator.h"
     21 
     22 namespace __sanitizer {
     23 
template <class Node, int kReservedBits, int kTabSizeLog>
class StackDepotBase {
 public:
  typedef typename Node::args_type args_type;
  typedef typename Node::handle_type handle_type;
  // Maps stack trace to an unique id. If non-null, *inserted is set to true
  // when a new node was created (i.e. args was not already in the depot).
  handle_type Put(args_type args, bool *inserted = nullptr);
  // Retrieves a stored stack trace by the id.
  args_type Get(u32 id);

  // Counters (unique ids, bytes allocated) updated by Put().
  StackDepotStats *GetStats() { return &stats; }

  // Acquires/releases the per-bucket locks of the entire hash table.
  void LockAll();
  void UnlockAll();

 private:
  // Scans the bucket list starting at s for a node matching (hash, args);
  // returns the node or null.
  static Node *find(Node *s, args_type args, u32 hash);
  // Spin-locks a bucket using the low bit of the stored pointer as the
  // lock bit; returns the list head with the bit cleared.
  static Node *lock(atomic_uintptr_t *p);
  // Stores s as the new list head and releases the bucket lock.
  static void unlock(atomic_uintptr_t *p, Node *s);

  static const int kTabSize = 1 << kTabSizeLog;  // Hash table size.
  static const int kPartBits = 8;
  static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
  static const int kPartCount =
      1 << kPartBits;  // Number of subparts in the table.
  static const int kPartSize = kTabSize / kPartCount;
  // Ids are packed as: [kReservedBits zero][kPartBits part][seq number],
  // so the per-part sequence number must stay below kMaxId.
  static const int kMaxId = 1 << kPartShift;

  atomic_uintptr_t tab[kTabSize];   // Hash table of Node's.
  atomic_uint32_t seq[kPartCount];  // Unique id generators.

  StackDepotStats stats;

  friend class StackDepotReverseMap;
};
     59 
     60 template <class Node, int kReservedBits, int kTabSizeLog>
     61 Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
     62                                                              args_type args,
     63                                                              u32 hash) {
     64   // Searches linked list s for the stack, returns its id.
     65   for (; s; s = s->link) {
     66     if (s->eq(hash, args)) {
     67       return s;
     68     }
     69   }
     70   return nullptr;
     71 }
     72 
     73 template <class Node, int kReservedBits, int kTabSizeLog>
     74 Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
     75     atomic_uintptr_t *p) {
     76   // Uses the pointer lsb as mutex.
     77   for (int i = 0;; i++) {
     78     uptr cmp = atomic_load(p, memory_order_relaxed);
     79     if ((cmp & 1) == 0 &&
     80         atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
     81       return (Node *)cmp;
     82     if (i < 10)
     83       proc_yield(10);
     84     else
     85       internal_sched_yield();
     86   }
     87 }
     88 
template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
    atomic_uintptr_t *p, Node *s) {
  // Publishes s as the new list head and drops the lock bit in a single
  // release store, making any nodes written before this call visible to
  // lock-free readers. s must be 2-byte aligned (low bit clear).
  DCHECK_EQ((uptr)s & 1, 0);
  atomic_store(p, (uptr)s, memory_order_release);
}
     95 
template <class Node, int kReservedBits, int kTabSizeLog>
typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
                                                      bool *inserted) {
  // Inserts args into the depot (unless already present) and returns a
  // handle to the stored copy; sets *inserted to true only when a new node
  // was allocated. NOTE(review): the statement order below (store args,
  // set link, then unlock) is the publication protocol for lock-free
  // readers in Get() - keep it intact.
  if (inserted) *inserted = false;
  if (!Node::is_valid(args)) return handle_type();
  uptr h = Node::hash(args);
  atomic_uintptr_t *p = &tab[h % kTabSize];
  uptr v = atomic_load(p, memory_order_consume);
  Node *s = (Node *)(v & ~1);  // Strip the lock bit to get the list head.
  // First, try to find the existing stack.
  Node *node = find(s, args, h);
  if (node) return node->get_handle();
  // If failed, lock, retry and insert new.
  Node *s2 = lock(p);
  if (s2 != s) {
    // The list changed while we were unlocked; re-scan under the lock.
    node = find(s2, args, h);
    if (node) {
      unlock(p, s2);
      return node->get_handle();
    }
  }
  // Build the id: per-part sequence number in the low kPartShift bits,
  // part index in the bits above; kReservedBits top bits stay zero.
  uptr part = (h % kTabSize) / kPartSize;
  u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
  stats.n_uniq_ids++;
  CHECK_LT(id, kMaxId);
  id |= part << kPartShift;
  CHECK_NE(id, 0);
  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
  // Nodes are allocated from a persistent arena and never freed.
  uptr memsz = Node::storage_size(args);
  s = (Node *)PersistentAlloc(memsz);
  stats.allocated += memsz;
  s->id = id;
  s->store(args, h);
  s->link = s2;  // Prepend to the bucket list.
  // unlock() release-stores the new head, publishing the node to readers.
  unlock(p, s);
  if (inserted) *inserted = true;
  return s->get_handle();
}
    135 
    136 template <class Node, int kReservedBits, int kTabSizeLog>
    137 typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
    138 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
    139   if (id == 0) {
    140     return args_type();
    141   }
    142   CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
    143   // High kPartBits contain part id, so we need to scan at most kPartSize lists.
    144   uptr part = id >> kPartShift;
    145   for (int i = 0; i != kPartSize; i++) {
    146     uptr idx = part * kPartSize + i;
    147     CHECK_LT(idx, kTabSize);
    148     atomic_uintptr_t *p = &tab[idx];
    149     uptr v = atomic_load(p, memory_order_consume);
    150     Node *s = (Node *)(v & ~1);
    151     for (; s; s = s->link) {
    152       if (s->id == id) {
    153         return s->load();
    154       }
    155     }
    156   }
    157   return args_type();
    158 }
    159 
    160 template <class Node, int kReservedBits, int kTabSizeLog>
    161 void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
    162   for (int i = 0; i < kTabSize; ++i) {
    163     lock(&tab[i]);
    164   }
    165 }
    166 
    167 template <class Node, int kReservedBits, int kTabSizeLog>
    168 void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
    169   for (int i = 0; i < kTabSize; ++i) {
    170     atomic_uintptr_t *p = &tab[i];
    171     uptr s = atomic_load(p, memory_order_relaxed);
    172     unlock(p, (Node *)(s & ~1UL));
    173   }
    174 }
    175 
    176 } // namespace __sanitizer
    177 
    178 #endif // SANITIZER_STACKDEPOTBASE_H
    179