/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/gpr/arena.h"

#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>

#include "src/core/lib/gpr/alloc.h"

// Uncomment this to use a simple arena that performs a separate heap
// allocation for each call to gpr_arena_alloc().  This effectively
// eliminates the efficiency gain of using an arena, but it may be
// useful for debugging purposes.
//#define SIMPLE_ARENA_FOR_DEBUGGING

#ifdef SIMPLE_ARENA_FOR_DEBUGGING

struct gpr_arena {
  gpr_mu mu;        // guards ptrs and num_ptrs
  void** ptrs;      // each entry is a separately allocated block
  size_t num_ptrs;  // number of valid entries in ptrs
};

gpr_arena* gpr_arena_create(size_t ignored_initial_size) {
  gpr_arena* arena = (gpr_arena*)gpr_zalloc(sizeof(*arena));
  gpr_mu_init(&arena->mu);
  return arena;
}

size_t gpr_arena_destroy(gpr_arena* arena) {
  gpr_mu_destroy(&arena->mu);
  for (size_t i = 0; i < arena->num_ptrs; ++i) {
    gpr_free(arena->ptrs[i]);
  }
  gpr_free(arena->ptrs);
  gpr_free(arena);
  return 1;  // Value doesn't matter, since it won't be used.
}

void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
  gpr_mu_lock(&arena->mu);
  // Grow the pointer table by one entry and allocate the requested block
  // separately; slow, but acceptable for a debugging build.
  arena->ptrs =
      (void**)gpr_realloc(arena->ptrs, sizeof(void*) * (arena->num_ptrs + 1));
  void* retval = arena->ptrs[arena->num_ptrs++] = gpr_zalloc(size);
  gpr_mu_unlock(&arena->mu);
  return retval;
}

#else  // SIMPLE_ARENA_FOR_DEBUGGING

// TODO(roth): We currently assume that all callers need alignment of 16
// bytes, which may be wrong in some cases.  As part of converting the
// arena API to C++, we should consider replacing gpr_arena_alloc() with a
// template that takes the type of the value being allocated, which
// would allow us to use the alignment actually needed by the caller.

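// A minimal sketch of what such a template might look like (hypothetical;
// gpr_arena_new() is not part of the current API, and this sketch keeps the
// existing 16-byte rounding rather than using alignof(T)):
//
//   template <typename T, typename... Args>
//   T* gpr_arena_new(gpr_arena* arena, Args&&... args) {
//     void* p = gpr_arena_alloc(arena, sizeof(T));
//     return new (p) T(std::forward<Args>(args)...);
//   }
//
// Note that the arena never runs destructors, so T would need to be
// trivially destructible (or be destroyed manually before
// gpr_arena_destroy()).
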
typedef struct zone {
  zone* next;
} zone;

struct gpr_arena {
  // Keep track of the total used size. We use this in our call sizing
  // hysteresis.
  gpr_atm total_used;
  size_t initial_zone_size;
  zone initial_zone;
  zone* last_zone;
  gpr_mu arena_growth_mutex;
};
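
// Design note: gpr_arena_alloc() reserves space with a single atomic
// fetch-add on total_used; arena_growth_mutex is only taken on the slow
// path, when an allocation overflows the initial zone.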

// Allocates |size| bytes, zeroed and aligned to GPR_MAX_ALIGNMENT.
static void* zalloc_aligned(size_t size) {
  void* ptr = gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT);
  memset(ptr, 0, size);
  return ptr;
}
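
// gpr_arena_create() carves the arena header and the initial zone out of a
// single aligned allocation (a sketch; both sizes are rounded up to
// GPR_MAX_ALIGNMENT):
//
//   +-------------------+--------------------------------+
//   | gpr_arena header  | initial zone (initial_size B)  |
//   +-------------------+--------------------------------+
//
// Overflow zones are allocated separately and chained via initial_zone.next.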

gpr_arena* gpr_arena_create(size_t initial_size) {
  initial_size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
  gpr_arena* a = static_cast<gpr_arena*>(zalloc_aligned(
      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size));
  a->initial_zone_size = initial_size;
  a->last_zone = &a->initial_zone;
  gpr_mu_init(&a->arena_growth_mutex);
  return a;
}

size_t gpr_arena_destroy(gpr_arena* arena) {
  gpr_mu_destroy(&arena->arena_growth_mutex);
  gpr_atm size = gpr_atm_no_barrier_load(&arena->total_used);
  // Capture the overflow zone list before freeing the arena, since the
  // list head lives inside the arena itself.
  zone* z = arena->initial_zone.next;
  gpr_free_aligned(arena);
  while (z) {
    zone* next_z = z->next;
    gpr_free_aligned(z);
    z = next_z;
  }
  return static_cast<size_t>(size);
}

void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
  size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(size);
  size_t begin = gpr_atm_no_barrier_fetch_add(&arena->total_used, size);
  if (begin + size <= arena->initial_zone_size) {
    return reinterpret_cast<char*>(arena) +
           GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + begin;
  } else {
    // If the allocation doesn't fit within the initial zone, create a new
    // zone for this allocation, and any unused space in the initial zone is
    // wasted. This overflowing and wasting is uncommon because of our arena
    // sizing hysteresis (that is, most calls should have a large enough
    // initial zone and will not need to grow the arena).
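    // For example (illustrative numbers only): with a 1024-byte initial
    // zone and total_used already at 1000, a 64-byte request gets
    // begin == 1000 and begin + size == 1064 > 1024, so a dedicated
    // 64-byte zone is created and the last 24 bytes of the initial zone
    // are never handed out.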
    gpr_mu_lock(&arena->arena_growth_mutex);
    zone* z = static_cast<zone*>(
        zalloc_aligned(GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + size));
    arena->last_zone->next = z;
    arena->last_zone = z;
    gpr_mu_unlock(&arena->arena_growth_mutex);
    return reinterpret_cast<char*>(z) +
           GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone));
  }
}
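
// Example lifecycle (illustrative sketch; the 1024 and the foo type are
// arbitrary placeholders, not taken from real callers):
//
//   gpr_arena* arena = gpr_arena_create(1024);
//   foo* f = static_cast<foo*>(gpr_arena_alloc(arena, sizeof(foo)));
//   // ... f is zeroed, 16-byte aligned, and lives until destroy ...
//   size_t total = gpr_arena_destroy(arena);  // frees every zone at once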

#endif  // SIMPLE_ARENA_FOR_DEBUGGING