Home | History | Annotate | Download | only in src
      1 // Copyright (c) 2005, Google Inc.
      2 // All rights reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions are
      6 // met:
      7 //
      8 //     * Redistributions of source code must retain the above copyright
      9 // notice, this list of conditions and the following disclaimer.
     10 //     * Redistributions in binary form must reproduce the above
     11 // copyright notice, this list of conditions and the following disclaimer
     12 // in the documentation and/or other materials provided with the
     13 // distribution.
     14 //     * Neither the name of Google Inc. nor the names of its
     15 // contributors may be used to endorse or promote products derived from
     16 // this software without specific prior written permission.
     17 //
     18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     29 
     30 // ---
     31 // Author: Sanjay Ghemawat
     32 //
     33 // TODO: Log large allocations
     34 
     35 #include <config.h>
     36 #include <stddef.h>
     37 #include <stdio.h>
     38 #include <stdlib.h>
     39 #ifdef HAVE_UNISTD_H
     40 #include <unistd.h>
     41 #endif
     42 #ifdef HAVE_INTTYPES_H
     43 #include <inttypes.h>
     44 #endif
     45 #ifdef HAVE_FCNTL_H
     46 #include <fcntl.h>    // for open()
     47 #endif
     48 #ifdef HAVE_MMAP
     49 #include <sys/mman.h>
     50 #endif
     51 #include <errno.h>
     52 #include <assert.h>
     53 #include <sys/types.h>
     54 
     55 #include <algorithm>
     56 #include <string>
     57 
     58 #include <gperftools/heap-profiler.h>
     59 
     60 #include "base/logging.h"
     61 #include "base/basictypes.h"   // for PRId64, among other things
     62 #include "base/googleinit.h"
     63 #include "base/commandlineflags.h"
     64 #include "malloc_hook-inl.h"
     65 #include "tcmalloc_guard.h"
     66 #include <gperftools/malloc_hook.h>
     67 #include <gperftools/malloc_extension.h>
     68 #include "base/spinlock.h"
     69 #include "base/low_level_alloc.h"
     70 #include "base/sysinfo.h"      // for GetUniquePathFromEnv()
     71 #include "heap-profile-table.h"
     72 #include "memory_region_map.h"
     73 
     74 
     75 #ifndef	PATH_MAX
     76 #ifdef MAXPATHLEN
     77 #define	PATH_MAX	MAXPATHLEN
     78 #else
     79 #define	PATH_MAX	4096         // seems conservative for max filename len!
     80 #endif
     81 #endif
     82 
     83 using STL_NAMESPACE::string;
     84 using STL_NAMESPACE::sort;
     85 
     86 //----------------------------------------------------------------------
     87 // Flags that control heap-profiling
     88 //
     89 // The thread-safety of the profiler depends on these being immutable
     90 // after main starts, so don't change them.
     91 //----------------------------------------------------------------------
     92 
     93 DEFINE_int64(heap_profile_allocation_interval,
     94              EnvToInt64("HEAP_PROFILE_ALLOCATION_INTERVAL", 1 << 30 /*1GB*/),
     95              "If non-zero, dump heap profiling information once every "
     96              "specified number of bytes allocated by the program since "
     97              "the last dump.");
     98 DEFINE_int64(heap_profile_deallocation_interval,
     99              EnvToInt64("HEAP_PROFILE_DEALLOCATION_INTERVAL", 0),
    100              "If non-zero, dump heap profiling information once every "
    101              "specified number of bytes deallocated by the program "
    102              "since the last dump.");
    103 // We could also add flags that report whenever inuse_bytes changes by
    104 // X or -X, but there hasn't been a need for that yet, so we haven't.
    105 DEFINE_int64(heap_profile_inuse_interval,
    106              EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/),
    107              "If non-zero, dump heap profiling information whenever "
    108              "the high-water memory usage mark increases by the specified "
    109              "number of bytes.");
    110 DEFINE_bool(mmap_log,
    111             EnvToBool("HEAP_PROFILE_MMAP_LOG", false),
    112             "Should mmap/munmap calls be logged?");
    113 DEFINE_bool(mmap_profile,
    114             EnvToBool("HEAP_PROFILE_MMAP", false),
    115             "If heap-profiling is on, also profile mmap, mremap, and sbrk)");
    116 DEFINE_bool(only_mmap_profile,
    117             EnvToBool("HEAP_PROFILE_ONLY_MMAP", false),
    118             "If heap-profiling is on, only profile mmap, mremap, and sbrk; "
    119             "do not profile malloc/new/etc");
    120 
    121 
    122 //----------------------------------------------------------------------
    123 // Locking
    124 //----------------------------------------------------------------------
    125 
// A pthread_mutex has way too much lock contention to be used here.
//
// I would like to use Mutex, but it can call malloc(),
// which can cause us to fall into an infinite recursion.
//
// So we use a simple spinlock.  LINKER_INITIALIZED means the lock is
// usable even before any static constructors have run, which matters
// because malloc hooks can fire arbitrarily early.
static SpinLock heap_lock(SpinLock::LINKER_INITIALIZED);
    133 
    134 //----------------------------------------------------------------------
    135 // Simple allocator for heap profiler's internal memory
    136 //----------------------------------------------------------------------
    137 
// Arena backing all of the profiler's internal allocations.  Created in
// HeapProfilerStart and deleted in HeapProfilerStop; kept separate from
// the malloc being profiled to avoid recursing into our own hooks.
static LowLevelAlloc::Arena *heap_profiler_memory;
    139 
    140 static void* ProfilerMalloc(size_t bytes) {
    141   return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory);
    142 }
// Release a block previously obtained from ProfilerMalloc.
static void ProfilerFree(void* p) {
  LowLevelAlloc::Free(p);
}
    146 
// We use buffers of this size (1MB) in DoGetHeapProfile.
static const int kProfileBufferSize = 1 << 20;

// This is a last-ditch buffer we use in DumpProfileLocked in case we
// can't allocate more memory from ProfilerMalloc.  We expect this
// will be used by HeapProfileEndWriter when the application has to
// exit due to out-of-memory.  This buffer is allocated up front in
// HeapProfilerStart so a profile can still be written after the
// application has exhausted memory.  Access to this must be protected
// by heap_lock.
static char* global_profiler_buffer = NULL;
    156 
    157 
    158 //----------------------------------------------------------------------
    159 // Profiling control/state data
    160 //----------------------------------------------------------------------
    161 
// Access to all of these is protected by heap_lock.
static bool  is_on = false;           // If we are on as a subsystem.
static bool  dumping = false;         // Dumping status to prevent recursion
static char* filename_prefix = NULL;  // Prefix used for profile file names
                                      // (NULL if no need for dumping yet)
static int   dump_count = 0;          // How many dumps so far
static int64 last_dump_alloc = 0;     // alloc_size when did we last dump
static int64 last_dump_free = 0;      // free_size when did we last dump
static int64 high_water_mark = 0;     // In-use-bytes at last high-water dump

static HeapProfileTable* heap_profile = NULL;  // the heap profile table
    173 
    174 //----------------------------------------------------------------------
    175 // Profile generation
    176 //----------------------------------------------------------------------
    177 
    178 // Input must be a buffer of size at least 1MB.
    179 static char* DoGetHeapProfileLocked(char* buf, int buflen) {
    180   // We used to be smarter about estimating the required memory and
    181   // then capping it to 1MB and generating the profile into that.
    182   if (buf == NULL || buflen < 1)
    183     return NULL;
    184 
    185   RAW_DCHECK(heap_lock.IsHeld(), "");
    186   int bytes_written = 0;
    187   if (is_on) {
    188     if (FLAGS_mmap_profile) {
    189       heap_profile->RefreshMMapData();
    190     }
    191     bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1);
    192     if (FLAGS_mmap_profile) {
    193       heap_profile->ClearMMapData();
    194     }
    195   }
    196   buf[bytes_written] = '\0';
    197   RAW_DCHECK(bytes_written == strlen(buf), "");
    198 
    199   return buf;
    200 }
    201 
    202 extern "C" char* GetHeapProfile() {
    203   // Use normal malloc: we return the profile to the user to free it:
    204   char* buffer = reinterpret_cast<char*>(malloc(kProfileBufferSize));
    205   SpinLockHolder l(&heap_lock);
    206   return DoGetHeapProfileLocked(buffer, kProfileBufferSize);
    207 }
    208 
    209 // defined below
    210 static void NewHook(const void* ptr, size_t size);
    211 static void DeleteHook(const void* ptr);
    212 
    213 // Helper for HeapProfilerDump.
    214 static void DumpProfileLocked(const char* reason) {
    215   RAW_DCHECK(heap_lock.IsHeld(), "");
    216   RAW_DCHECK(is_on, "");
    217   RAW_DCHECK(!dumping, "");
    218 
    219   if (filename_prefix == NULL) return;  // we do not yet need dumping
    220 
    221   dumping = true;
    222 
    223   // Make file name
    224   char file_name[1000];
    225   dump_count++;
    226   snprintf(file_name, sizeof(file_name), "%s.%04d%s",
    227            filename_prefix, dump_count, HeapProfileTable::kFileExt);
    228 
    229   // Dump the profile
    230   RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason);
    231   // We must use file routines that don't access memory, since we hold
    232   // a memory lock now.
    233   RawFD fd = RawOpenForWriting(file_name);
    234   if (fd == kIllegalRawFD) {
    235     RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name);
    236     dumping = false;
    237     return;
    238   }
    239 
    240   // This case may be impossible, but it's best to be safe.
    241   // It's safe to use the global buffer: we're protected by heap_lock.
    242   if (global_profiler_buffer == NULL) {
    243     global_profiler_buffer =
    244         reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));
    245   }
    246 
    247   char* profile = DoGetHeapProfileLocked(global_profiler_buffer,
    248                                          kProfileBufferSize);
    249   RawWrite(fd, profile, strlen(profile));
    250   RawClose(fd);
    251 
    252   dumping = false;
    253 }
    254 
    255 //----------------------------------------------------------------------
    256 // Profile collection
    257 //----------------------------------------------------------------------
    258 
// Dump a profile after either an allocation or deallocation, if
// the memory use has changed enough since the last dump.  Requires
// heap_lock held.  The three interval checks are tried in priority
// order (alloc, then dealloc, then in-use high-water); at most one
// dump happens per call.  'dumping' suppresses re-entry, since the
// dump itself may allocate and re-trigger the hooks.
static void MaybeDumpProfileLocked() {
  if (!dumping) {
    const HeapProfileTable::Stats& total = heap_profile->total();
    const int64 inuse_bytes = total.alloc_size - total.free_size;
    bool need_to_dump = false;
    char buf[128];  // reason string passed to DumpProfileLocked
    if (FLAGS_heap_profile_allocation_interval > 0 &&
        total.alloc_size >=
        last_dump_alloc + FLAGS_heap_profile_allocation_interval) {
      snprintf(buf, sizeof(buf), ("%"PRId64" MB allocated cumulatively, "
                                  "%"PRId64" MB currently in use"),
               total.alloc_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_deallocation_interval > 0 &&
               total.free_size >=
               last_dump_free + FLAGS_heap_profile_deallocation_interval) {
      snprintf(buf, sizeof(buf), ("%"PRId64" MB freed cumulatively, "
                                  "%"PRId64" MB currently in use"),
               total.free_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_inuse_interval > 0 &&
               inuse_bytes >
               high_water_mark + FLAGS_heap_profile_inuse_interval) {
      snprintf(buf, sizeof(buf), "%"PRId64" MB currently in use",
               inuse_bytes >> 20);
      need_to_dump = true;
    }
    if (need_to_dump) {
      DumpProfileLocked(buf);

      // Advance the trigger baselines so the next dump requires another
      // full interval of activity.
      last_dump_alloc = total.alloc_size;
      last_dump_free = total.free_size;
      if (inuse_bytes > high_water_mark)
        high_water_mark = inuse_bytes;
    }
  }
}
    298 
    299 // Record an allocation in the profile.
    300 static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) {
    301   // Take the stack trace outside the critical section.
    302   void* stack[HeapProfileTable::kMaxStackDepth];
    303   int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack);
    304   SpinLockHolder l(&heap_lock);
    305   if (is_on) {
    306     heap_profile->RecordAlloc(ptr, bytes, depth, stack);
    307     MaybeDumpProfileLocked();
    308   }
    309 }
    310 
    311 // Record a deallocation in the profile.
    312 static void RecordFree(const void* ptr) {
    313   SpinLockHolder l(&heap_lock);
    314   if (is_on) {
    315     heap_profile->RecordFree(ptr);
    316     MaybeDumpProfileLocked();
    317   }
    318 }
    319 
    320 //----------------------------------------------------------------------
    321 // Allocation/deallocation hooks for MallocHook
    322 //----------------------------------------------------------------------
    323 
    324 // static
    325 void NewHook(const void* ptr, size_t size) {
    326   if (ptr != NULL) RecordAlloc(ptr, size, 0);
    327 }
    328 
    329 // static
    330 void DeleteHook(const void* ptr) {
    331   if (ptr != NULL) RecordFree(ptr);
    332 }
    333 
// TODO(jandrews): Re-enable stack tracing
#ifdef TODO_REENABLE_STACK_TRACING
// Callback for DumpStackTrace: forwards each line to RAW_LOG(INFO).
static void RawInfoStackDumper(const char* message, void*) {
  RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message);
  // -1 is to chop the \n which will be added by RAW_LOG
}
#endif
    341 
// MallocHook callback: logs every mmap call (arguments and result)
// when --mmap_log is on.  Logging only; recording for the profile is
// handled separately by MemoryRegionMap.
static void MmapHook(const void* result, const void* start, size_t size,
                     int prot, int flags, int fd, off_t offset) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxS not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mmap(start=0x%"PRIxPTR", len=%"PRIuS", prot=0x%x, flags=0x%x, "
            "fd=%d, offset=0x%x) = 0x%"PRIxPTR"",
            (uintptr_t) start, size, prot, flags, fd, (unsigned int) offset,
            (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
    358 
// MallocHook callback: logs every mremap call (arguments and result)
// when --mmap_log is on.
static void MremapHook(const void* result, const void* old_addr,
                       size_t old_size, size_t new_size,
                       int flags, const void* new_addr) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxS not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mremap(old_addr=0x%"PRIxPTR", old_size=%"PRIuS", "
            "new_size=%"PRIuS", flags=0x%x, new_addr=0x%"PRIxPTR") = "
            "0x%"PRIxPTR"",
            (uintptr_t) old_addr, old_size, new_size, flags,
            (uintptr_t) new_addr, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
    377 
// MallocHook callback: logs every munmap call when --mmap_log is on.
static void MunmapHook(const void* ptr, size_t size) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxS not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO, "munmap(start=0x%"PRIxPTR", len=%"PRIuS")",
                  (uintptr_t) ptr, size);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
    390 
// MallocHook callback: logs every sbrk call (increment and result)
// when --mmap_log is on.
static void SbrkHook(const void* result, ptrdiff_t increment) {
  if (FLAGS_mmap_log) {  // log it
    RAW_LOG(INFO, "sbrk(inc=%"PRIdS") = 0x%"PRIxPTR"",
                  increment, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
    400 
    401 //----------------------------------------------------------------------
    402 // Starting/stopping/dumping
    403 //----------------------------------------------------------------------
    404 
// Public API: start heap profiling, writing profiles to files named
// "<prefix>.NNNN.heap".  No-op if profiling is already on.  The setup
// order below is deliberate: extensions are initialized before hooks
// so their allocations are attributed correctly, and the output buffer
// is reserved up front so a profile can be written even under OOM.
extern "C" void HeapProfilerStart(const char* prefix) {
  SpinLockHolder l(&heap_lock);

  if (is_on) return;

  is_on = true;

  RAW_VLOG(0, "Starting tracking the heap");

  // This should be done before the hooks are set up, since it should
  // call new, and we want that to be accounted for correctly.
  MallocExtension::Initialize();

  // --only_mmap_profile implies --mmap_profile.
  if (FLAGS_only_mmap_profile) {
    FLAGS_mmap_profile = true;
  }

  if (FLAGS_mmap_profile) {
    // Ask MemoryRegionMap to record all mmap, mremap, and sbrk
    // call stack traces of at least size kMaxStackDepth:
    MemoryRegionMap::Init(HeapProfileTable::kMaxStackDepth);
  }

  if (FLAGS_mmap_log) {
    // Install our hooks to do the logging:
    RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
    RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
    RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
    RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
  }

  // Dedicated arena for all of the profiler's own allocations.
  heap_profiler_memory =
    LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());

  // Reserve space now for the heap profiler, so we can still write a
  // heap profile even if the application runs out of memory.
  global_profiler_buffer =
      reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));

  // Placement-new the profile table into arena memory; torn down by
  // explicit destructor call in HeapProfilerStop.
  heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable)))
                   HeapProfileTable(ProfilerMalloc, ProfilerFree);

  last_dump_alloc = 0;
  last_dump_free = 0;
  high_water_mark = 0;

  // We do not reset dump_count so if the user does a sequence of
  // HeapProfilerStart/HeapProfileStop, we will get a continuous
  // sequence of profiles.

  if (FLAGS_only_mmap_profile == false) {
    // Now set the hooks that capture new/delete and malloc/free.
    RAW_CHECK(MallocHook::AddNewHook(&NewHook), "");
    RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), "");
  }

  // Copy filename prefix into arena memory (the caller's string may
  // not outlive us).
  RAW_DCHECK(filename_prefix == NULL, "");
  const int prefix_length = strlen(prefix);
  filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1));
  memcpy(filename_prefix, prefix, prefix_length);
  filename_prefix[prefix_length] = '\0';
}
    468 
    469 extern "C" int IsHeapProfilerRunning() {
    470   SpinLockHolder l(&heap_lock);
    471   return is_on ? 1 : 0;   // return an int, because C code doesn't have bool
    472 }
    473 
    474 extern "C" void HeapProfilerStop() {
    475   SpinLockHolder l(&heap_lock);
    476 
    477   if (!is_on) return;
    478 
    479   if (FLAGS_only_mmap_profile == false) {
    480     // Unset our new/delete hooks, checking they were set:
    481     RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), "");
    482     RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), "");
    483   }
    484   if (FLAGS_mmap_log) {
    485     // Restore mmap/sbrk hooks, checking that our hooks were set:
    486     RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
    487     RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
    488     RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
    489     RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
    490   }
    491 
    492   // free profile
    493   heap_profile->~HeapProfileTable();
    494   ProfilerFree(heap_profile);
    495   heap_profile = NULL;
    496 
    497   // free output-buffer memory
    498   ProfilerFree(global_profiler_buffer);
    499 
    500   // free prefix
    501   ProfilerFree(filename_prefix);
    502   filename_prefix = NULL;
    503 
    504   if (!LowLevelAlloc::DeleteArena(heap_profiler_memory)) {
    505     RAW_LOG(FATAL, "Memory leak in HeapProfiler:");
    506   }
    507 
    508   if (FLAGS_mmap_profile) {
    509     MemoryRegionMap::Shutdown();
    510   }
    511 
    512   is_on = false;
    513 }
    514 
    515 extern "C" void HeapProfilerDump(const char *reason) {
    516   SpinLockHolder l(&heap_lock);
    517   if (is_on && !dumping) {
    518     DumpProfileLocked(reason);
    519   }
    520 }
    521 
    522 //----------------------------------------------------------------------
    523 // Initialization/finalization code
    524 //----------------------------------------------------------------------
    525 
// Initialization code: starts profiling automatically at module-init
// time if the HEAPPROFILE environment variable names an output path.
static void HeapProfilerInit() {
  // Everything after this point is for setting up the profiler based on envvar
  char fname[PATH_MAX];
  if (!GetUniquePathFromEnv("HEAPPROFILE", fname)) {
    return;
  }
  // We do a uid check so we don't write out files in a setuid executable.
#ifdef HAVE_GETEUID
  if (getuid() != geteuid()) {
    RAW_LOG(WARNING, ("HeapProfiler: ignoring HEAPPROFILE because "
                      "program seems to be setuid\n"));
    return;
  }
#endif

  // Remove "<fname>.NNNN.heap" files left over from a previous run.
  HeapProfileTable::CleanupOldProfiles(fname);

  HeapProfilerStart(fname);
}
    546 
// class used for finalization -- dumps the heap-profile at program exit
// via a file-scope instance whose destructor runs during static
// teardown (HeapProfilerDump is a no-op if profiling is off).
struct HeapProfileEndWriter {
  ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); }
};
    551 
// We want to make sure tcmalloc is up and running before starting the profiler
static const TCMallocGuard tcmalloc_initializer;
// Run HeapProfilerInit at module-initialization time (after the guard above).
REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit());
// Instance whose destructor dumps a final profile at program exit.
static HeapProfileEndWriter heap_profile_end_writer;
    556