// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/stats_table.h"

#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/shared_memory.h"
#include "base/process/process_handle.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local_storage.h"

namespace base {

// The StatsTable uses a shared memory segment that is laid out as follows:
//
// +-------------------------------------------+
// | Version | Size | MaxCounters | MaxThreads |
// +-------------------------------------------+
// | Thread names table                        |
// +-------------------------------------------+
// | Thread TID table                          |
// +-------------------------------------------+
// | Thread PID table                          |
// +-------------------------------------------+
// | Counter names table                       |
// +-------------------------------------------+
// | Data                                      |
// +-------------------------------------------+
//
// The data layout is a grid, where the columns are the thread_ids and the
// rows are the counter_ids.
//
// If the first character of a thread_name is '\0', then that column is
// empty.
// If the first character of a counter_name is '\0', then that row is
// empty.
//
// About locking:
// This class is designed to be both multi-thread and multi-process safe.
// Aside from initialization, this is achieved by partitioning the data so
// that each thread writes only into its own column; steady-state updates
// therefore require no locking.  Locking is required only to allocate the
// rows and columns of the table to particular counters and threads.
//
// At the shared-memory level, we have a lock.  This lock protects the
// shared-memory table only, and is used when we create new counters (i.e.
// claim rows) or when we register new threads (i.e. claim columns).
// Reading data from the table does not require any locking at the
// shared-memory level.
//
// Each process which accesses the table creates a StatsTable object.  The
// StatsTable maintains a hash table of the existing counters for faster
// lookup.  Since the hash table is process-specific, each process maintains
// its own cache.  We avoid complexity here by never de-allocating from the
// hash table.  (Counters are dynamically added, but never removed.)
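//
// A minimal usage sketch (illustrative only, not part of this file's
// implementation); the counter name "c:my_stat" and the thread name
// "render" below are hypothetical:
//
//   StatsTable* table = StatsTable::current();      // process-wide table
//   int slot = table->GetSlot();                    // this thread's column
//   if (!slot)
//     slot = table->RegisterThread("render");       // claim a column
//   int counter = table->FindCounter("c:my_stat");  // find or claim a row
//   int* cell = table->GetLocation(counter, slot);  // cell in the grid
//   if (cell)
//     ++(*cell);                                    // lock-free update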

// In order for external viewers to be able to read our shared memory,
// we all need to use the same size ints.
COMPILE_ASSERT(sizeof(int) == 4, expect_4_byte_ints);

namespace {

// An internal version in case we ever change the format of this
// file, and so that we can identify our table.
const int kTableVersion = 0x13131313;

// The name for un-named counters and threads in the table.
const char kUnknownName[] = "<unknown>";

// Calculates the delta needed to align an offset to the size of an int.
inline int AlignOffset(int offset) {
  return (sizeof(int) - (offset % sizeof(int))) % sizeof(int);
}

inline int AlignedSize(int size) {
  return size + AlignOffset(size);
}

}  // namespace

// The StatsTable::Internal maintains convenience pointers into the
// shared memory segment.  Use this class to keep the data structure
// clean and accessible.
class StatsTable::Internal {
 public:
  // Various header information contained in the memory mapped segment.
  struct TableHeader {
    int version;
    int size;
    int max_counters;
    int max_threads;
  };

  // Construct a new Internal based on expected size parameters, or
  // return NULL on failure.
  static Internal* New(const StatsTable::TableIdentifier& table,
                       int size,
                       int max_threads,
                       int max_counters);

  SharedMemory* shared_memory() { return shared_memory_.get(); }

  // Accessors for our header pointers.
  TableHeader* table_header() const { return table_header_; }
  int version() const { return table_header_->version; }
  int size() const { return table_header_->size; }
  int max_counters() const { return table_header_->max_counters; }
  int max_threads() const { return table_header_->max_threads; }

  // Accessors for our tables.
  char* thread_name(int slot_id) const {
    return &thread_names_table_[
        (slot_id - 1) * (StatsTable::kMaxThreadNameLength)];
  }
  PlatformThreadId* thread_tid(int slot_id) const {
    return &(thread_tid_table_[slot_id - 1]);
  }
  int* thread_pid(int slot_id) const {
    return &(thread_pid_table_[slot_id - 1]);
  }
  char* counter_name(int counter_id) const {
    return &counter_names_table_[
        (counter_id - 1) * (StatsTable::kMaxCounterNameLength)];
  }
  int* row(int counter_id) const {
    return &data_table_[(counter_id - 1) * max_threads()];
  }

 private:
  // Constructor is private because you should use New() instead.
  explicit Internal(SharedMemory* shared_memory)
      : shared_memory_(shared_memory),
        table_header_(NULL),
        thread_names_table_(NULL),
        thread_tid_table_(NULL),
        thread_pid_table_(NULL),
        counter_names_table_(NULL),
        data_table_(NULL) {
  }

  // Create or open the SharedMemory used by the stats table.
  static SharedMemory* CreateSharedMemory(
      const StatsTable::TableIdentifier& table,
      int size);

  // Initializes the table on first access.  Sets header values
  // appropriately and zeroes all counters.
  void InitializeTable(void* memory, int size, int max_counters,
                       int max_threads);

  // Initializes our in-memory pointers into a pre-created StatsTable.
  void ComputeMappedPointers(void* memory);

  scoped_ptr<SharedMemory> shared_memory_;
  TableHeader* table_header_;
  char* thread_names_table_;
  PlatformThreadId* thread_tid_table_;
  int* thread_pid_table_;
  char* counter_names_table_;
  int* data_table_;

  DISALLOW_COPY_AND_ASSIGN(Internal);
};
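
// Slot and counter ids handed out by the public API are 1-based so that 0
// can mean "not found" or "unset"; the arrays above are 0-based.  An
// illustration of how a grid cell is addressed (this mirrors GetLocation()
// below and is not additional API):
//
//   int* cell = internal->row(counter_id) + (slot_id - 1);
//   // i.e. &data_table_[(counter_id - 1) * max_threads() + (slot_id - 1)]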

// static
StatsTable::Internal* StatsTable::Internal::New(
    const StatsTable::TableIdentifier& table,
    int size,
    int max_threads,
    int max_counters) {
  scoped_ptr<SharedMemory> shared_memory(CreateSharedMemory(table, size));
  if (!shared_memory.get())
    return NULL;
  if (!shared_memory->Map(size))
    return NULL;
  void* memory = shared_memory->memory();

  scoped_ptr<Internal> internal(new Internal(shared_memory.release()));
  TableHeader* header = static_cast<TableHeader*>(memory);

  // If the version does not match, then assume the table needs
  // to be initialized.
  if (header->version != kTableVersion)
    internal->InitializeTable(memory, size, max_counters, max_threads);

  // We have a valid table, so compute our pointers.
  internal->ComputeMappedPointers(memory);

  return internal.release();
}

// static
SharedMemory* StatsTable::Internal::CreateSharedMemory(
    const StatsTable::TableIdentifier& table,
    int size) {
#if defined(OS_POSIX)
  // Check for an existing table.
  if (table.fd != -1)
    return new SharedMemory(table, false);

  // Otherwise we need to create it.
  scoped_ptr<SharedMemory> shared_memory(new SharedMemory());
  if (!shared_memory->CreateAnonymous(size))
    return NULL;
  return shared_memory.release();
#elif defined(OS_WIN)
  scoped_ptr<SharedMemory> shared_memory(new SharedMemory());
  if (table.empty()) {
    // Create an anonymous table.
    if (!shared_memory->CreateAnonymous(size))
      return NULL;
  } else {
    // Create a named table for sharing between processes.
    if (!shared_memory->CreateNamedDeprecated(table, true, size))
      return NULL;
  }
  return shared_memory.release();
#endif
}

void StatsTable::Internal::InitializeTable(void* memory, int size,
                                           int max_counters,
                                           int max_threads) {
  // Zero everything.
  memset(memory, 0, size);

  // Initialize the header.
  TableHeader* header = static_cast<TableHeader*>(memory);
  header->version = kTableVersion;
  header->size = size;
  header->max_counters = max_counters;
  header->max_threads = max_threads;
}

void StatsTable::Internal::ComputeMappedPointers(void* memory) {
  char* data = static_cast<char*>(memory);
  int offset = 0;

  table_header_ = reinterpret_cast<TableHeader*>(data);
  offset += sizeof(*table_header_);
  offset += AlignOffset(offset);

  // Verify we're looking at a valid StatsTable.
  DCHECK_EQ(table_header_->version, kTableVersion);

  thread_names_table_ = reinterpret_cast<char*>(data + offset);
  offset += sizeof(char) *
      max_threads() * StatsTable::kMaxThreadNameLength;
  offset += AlignOffset(offset);

  thread_tid_table_ = reinterpret_cast<PlatformThreadId*>(data + offset);
  offset += sizeof(int) * max_threads();
  offset += AlignOffset(offset);

  thread_pid_table_ = reinterpret_cast<int*>(data + offset);
  offset += sizeof(int) * max_threads();
  offset += AlignOffset(offset);

  counter_names_table_ = reinterpret_cast<char*>(data + offset);
  offset += sizeof(char) *
      max_counters() * StatsTable::kMaxCounterNameLength;
  offset += AlignOffset(offset);

  data_table_ = reinterpret_cast<int*>(data + offset);
  offset += sizeof(int) * max_threads() * max_counters();

  DCHECK_EQ(offset, size());
}
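
// A worked example of the layout walk above (illustrative only): assuming
// 4-byte ints (asserted above) and, hypothetically, max_threads = 2,
// max_counters = 3, kMaxThreadNameLength = 32 and kMaxCounterNameLength = 64,
// ComputeMappedPointers() advances through the segment as:
//
//   header          offset   0, size  16  (4 ints)
//   thread names    offset  16, size  64  (2 * 32 chars)
//   thread TIDs     offset  80, size   8  (2 * 4 bytes)
//   thread PIDs     offset  88, size   8  (2 * 4 bytes)
//   counter names   offset  96, size 192  (3 * 64 chars)
//   data grid       offset 288, size  24  (3 rows * 2 columns * 4 bytes)
//
// for a total of 312 bytes, which matches the table_size computed in the
// StatsTable constructor below; hence the DCHECK_EQ(offset, size()).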

// TLSData carries the data stored in the TLS slots for the
// StatsTable.  This is used so that we can properly cleanup when the
// thread exits and return the table slot.
//
// Each thread that calls RegisterThread in the StatsTable will have
// a TLSData stored in its TLS.
struct StatsTable::TLSData {
  StatsTable* table;
  int slot;
};

// We keep a singleton table which can be easily accessed.
StatsTable* global_table = NULL;

StatsTable::StatsTable(const TableIdentifier& table,
                       int max_threads,
                       int max_counters)
    : internal_(NULL),
      tls_index_(SlotReturnFunction) {
  int table_size =
      AlignedSize(sizeof(Internal::TableHeader)) +
      AlignedSize((max_counters * sizeof(char) * kMaxCounterNameLength)) +
      AlignedSize((max_threads * sizeof(char) * kMaxThreadNameLength)) +
      AlignedSize(max_threads * sizeof(int)) +
      AlignedSize(max_threads * sizeof(int)) +
      AlignedSize((sizeof(int) * (max_counters * max_threads)));

  internal_ = Internal::New(table, table_size, max_threads, max_counters);

  if (!internal_)
    DPLOG(ERROR) << "StatsTable did not initialize";
}

StatsTable::~StatsTable() {
  // Before we tear down our copy of the table, be sure to
  // unregister our thread.
  UnregisterThread();

  // Return ThreadLocalStorage.  At this point, if any registered threads
  // still exist, they cannot Unregister.
  tls_index_.Free();

  // Cleanup our shared memory.
  delete internal_;

  // If we are the global table, unregister ourselves.
  if (global_table == this)
    global_table = NULL;
}

StatsTable* StatsTable::current() {
  return global_table;
}

void StatsTable::set_current(StatsTable* value) {
  global_table = value;
}

int StatsTable::GetSlot() const {
  TLSData* data = GetTLSData();
  if (!data)
    return 0;
  return data->slot;
}

int StatsTable::RegisterThread(const std::string& name) {
  int slot = 0;
  if (!internal_)
    return 0;

  // Registering a thread requires that we lock the shared memory
  // so that two threads don't grab the same slot.  Fortunately,
  // thread creation shouldn't happen in inner loops.
  // TODO(viettrungluu): crbug.com/345734: Use a different locking mechanism.
  {
    SharedMemoryAutoLockDeprecated lock(internal_->shared_memory());
    slot = FindEmptyThread();
    if (!slot) {
      return 0;
    }

    // We have space, so consume a column in the table.
    std::string thread_name = name;
    if (name.empty())
      thread_name = kUnknownName;
    strlcpy(internal_->thread_name(slot), thread_name.c_str(),
            kMaxThreadNameLength);
    *(internal_->thread_tid(slot)) = PlatformThread::CurrentId();
    *(internal_->thread_pid(slot)) = GetCurrentProcId();
  }

  // Set our thread local storage.
  TLSData* data = new TLSData;
  data->table = this;
  data->slot = slot;
  tls_index_.Set(data);
  return slot;
}

int StatsTable::CountThreadsRegistered() const {
  if (!internal_)
    return 0;

  // Loop through the shared memory and count the threads that are active.
  // We intentionally do not lock the table during the operation.
  int count = 0;
  for (int index = 1; index <= internal_->max_threads(); index++) {
    char* name = internal_->thread_name(index);
    if (*name != '\0')
      count++;
  }
  return count;
}

int StatsTable::FindCounter(const std::string& name) {
  // Note: the API returns counters numbered from 1..N, although
  // internally, the array is 0..N-1.  This is so that we can return
  // zero as "not found".
  if (!internal_)
    return 0;

  // Create a scope for our auto-lock.
  {
    AutoLock scoped_lock(counters_lock_);

    // Attempt to find the counter.
    CountersMap::const_iterator iter;
    iter = counters_.find(name);
    if (iter != counters_.end())
      return iter->second;
  }

  // Counter does not exist, so add it.
  return AddCounter(name);
}

int* StatsTable::GetLocation(int counter_id, int slot_id) const {
  if (!internal_)
    return NULL;
  if (slot_id > internal_->max_threads())
    return NULL;

  int* row = internal_->row(counter_id);
  return &(row[slot_id - 1]);
}

const char* StatsTable::GetRowName(int index) const {
  if (!internal_)
    return NULL;

  return internal_->counter_name(index);
}

int StatsTable::GetRowValue(int index) const {
  return GetRowValue(index, 0);
}

int StatsTable::GetRowValue(int index, int pid) const {
  if (!internal_)
    return 0;

  int rv = 0;
  int* row = internal_->row(index);
  for (int slot_id = 1; slot_id <= internal_->max_threads(); slot_id++) {
    if (pid == 0 || *internal_->thread_pid(slot_id) == pid)
      rv += row[slot_id - 1];
  }
  return rv;
}

int StatsTable::GetCounterValue(const std::string& name) {
  return GetCounterValue(name, 0);
}

int StatsTable::GetCounterValue(const std::string& name, int pid) {
  if (!internal_)
    return 0;

  int row = FindCounter(name);
  if (!row)
    return 0;
  return GetRowValue(row, pid);
}
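
// An example of reading aggregates (illustrative only; the counter name
// "c:my_stat" and some_pid are hypothetical): a monitoring process attached
// to the same table might do
//
//   StatsTable* table = StatsTable::current();
//   int total    = table->GetCounterValue("c:my_stat");            // all processes
//   int one_proc = table->GetCounterValue("c:my_stat", some_pid);  // one process
//
// GetRowValue() sums the counter's row across all thread columns, filtered
// by the owning pid when a nonzero pid is given.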

int StatsTable::GetMaxCounters() const {
  if (!internal_)
    return 0;
  return internal_->max_counters();
}

int StatsTable::GetMaxThreads() const {
  if (!internal_)
    return 0;
  return internal_->max_threads();
}

int* StatsTable::FindLocation(const char* name) {
  // Get the static StatsTable.
  StatsTable* table = StatsTable::current();
  if (!table)
    return NULL;

  // Get the slot for this thread.  Try to register
  // it if none exists.
  int slot = table->GetSlot();
  if (!slot)
    slot = table->RegisterThread(std::string());
  if (!slot)
    return NULL;

  // Find the counter id for the counter.
  std::string str_name(name);
  int counter = table->FindCounter(str_name);

  // Now we can find the location in the table.
  return table->GetLocation(counter, slot);
}

void StatsTable::UnregisterThread() {
  UnregisterThread(GetTLSData());
}

void StatsTable::UnregisterThread(TLSData* data) {
  if (!data)
    return;
  DCHECK(internal_);

  // Mark the slot free by zeroing out the thread name.
  char* name = internal_->thread_name(data->slot);
  *name = '\0';

  // Remove the calling thread's TLS so that it cannot use the slot.
  tls_index_.Set(NULL);
  delete data;
}

void StatsTable::SlotReturnFunction(void* data) {
  // This is called by the TLS destructor, which on some platforms has
  // already cleared the TLS info, so use the tls_data argument
  // rather than trying to fetch it ourselves.
  TLSData* tls_data = static_cast<TLSData*>(data);
  if (tls_data) {
    DCHECK(tls_data->table);
    tls_data->table->UnregisterThread(tls_data);
  }
}

int StatsTable::FindEmptyThread() const {
  // Note: the API returns slots numbered from 1..N, although
  // internally, the array is 0..N-1.  This is so that we can return
  // zero as "not found".
  //
  // The reason for doing this is because the thread 'slot' is stored
  // in TLS, which is always initialized to zero, not -1.  If 0 were
  // returned as a valid slot number, it would be confused with the
  // uninitialized state.
  if (!internal_)
    return 0;

  int index = 1;
  for (; index <= internal_->max_threads(); index++) {
    char* name = internal_->thread_name(index);
    if (!*name)
      break;
  }
  if (index > internal_->max_threads())
    return 0;  // The table is full.
  return index;
}

int StatsTable::FindCounterOrEmptyRow(const std::string& name) const {
  // Note: the API returns slots numbered from 1..N, although
  // internally, the array is 0..N-1.  This is so that we can return
  // zero as "not found".
  //
  // There isn't much reason for this other than to be consistent
  // with the way we track columns for thread slots.  (See comments
  // in FindEmptyThread for why it is done this way.)
  if (!internal_)
    return 0;

  int free_slot = 0;
  for (int index = 1; index <= internal_->max_counters(); index++) {
    char* row_name = internal_->counter_name(index);
    if (!*row_name && !free_slot)
      free_slot = index;  // Save that we found a free slot.
    else if (!strncmp(row_name, name.c_str(), kMaxCounterNameLength))
      return index;
  }
  return free_slot;
}

int StatsTable::AddCounter(const std::string& name) {
  if (!internal_)
    return 0;

  int counter_id = 0;
  {
    // To add a counter to the shared memory, we need the
    // shared memory lock.
    SharedMemoryAutoLockDeprecated lock(internal_->shared_memory());

    // Find the counter, or claim an empty row for it.
    counter_id = FindCounterOrEmptyRow(name);
    if (!counter_id)
      return 0;

    std::string counter_name = name;
    if (name.empty())
      counter_name = kUnknownName;
    strlcpy(internal_->counter_name(counter_id), counter_name.c_str(),
            kMaxCounterNameLength);
  }

  // Now add it to our in-memory cache.
  {
    AutoLock lock(counters_lock_);
    counters_[name] = counter_id;
  }
  return counter_id;
}

StatsTable::TLSData* StatsTable::GetTLSData() const {
  TLSData* data =
      static_cast<TLSData*>(tls_index_.Get());
  if (!data)
    return NULL;

  DCHECK(data->slot);
  DCHECK_EQ(data->table, this);
  return data;
}

#if defined(OS_POSIX)
SharedMemoryHandle StatsTable::GetSharedMemoryHandle() const {
  if (!internal_)
    return SharedMemory::NULLHandle();
  return internal_->shared_memory()->handle();
}
#endif

}  // namespace base