Home | History | Annotate | Download | only in wifi_hal
      1 /* Copyright (c) 2015, The Linux Foundation. All rights reserved.
      2  *
      3  * Redistribution and use in source and binary forms, with or without
      4  * modification, are permitted provided that the following conditions are
      5  * met:
      6  *     * Redistributions of source code must retain the above copyright
      7  *       notice, this list of conditions and the following disclaimer.
      8  *     * Redistributions in binary form must reproduce the above
      9  *       copyright notice, this list of conditions and the following
     10  *       disclaimer in the documentation and/or other materials provided
     11  *       with the distribution.
     12  *     * Neither the name of The Linux Foundation nor the names of its
     13  *       contributors may be used to endorse or promote products derived
     14  *       from this software without specific prior written permission.
     15  *
     16  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
     17  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     18  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
     19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
     20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
     23  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     24  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
     25  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
     26  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27  */
     28 
     29 #include <stdint.h>
     30 #include <stdlib.h>
     31 #include <string.h>
     32 
     33 #define LOG_TAG  "WifiHAL"
     34 
     35 #include <utils/Log.h>
     36 
     37 typedef unsigned char u8;
     38 typedef uint16_t u16;
     39 typedef uint32_t u32;
     40 typedef uint64_t u64;
     41 
     42 #include "ring_buffer.h"
     43 
     44 enum rb_bool {
     45     RB_TRUE = 0,
     46     RB_FALSE = 1
     47 };
     48 
     49 typedef struct rb_entry_s {
     50     u8 *data;
     51     unsigned int last_wr_index;
     52     u8 full;
     53 } rb_entry_t;
     54 
     55 typedef struct ring_buf_cb {
     56     unsigned int rd_buf_no; // Current buffer number to be read from
     57     unsigned int wr_buf_no; // Current buffer number to be written into
     58     unsigned int cur_rd_buf_idx; // Read index within the current read buffer
     59     unsigned int cur_wr_buf_idx; // Write index within the current write buffer
     60     rb_entry_t *bufs; // Array of buffer pointers
     61 
     62     unsigned int max_num_bufs; // Maximum number of buffers that should be used
     63     size_t each_buf_size; // Size of each buffer in bytes
     64 
     65     pthread_mutex_t rb_rw_lock;
     66 
     67     /* Threshold vars */
     68     unsigned int num_min_bytes;
     69     void (*threshold_cb)(void *);
     70     void *cb_ctx;
     71 
     72     u32 total_bytes_written;
     73     u32 total_bytes_read;
     74     u32 total_bytes_overwritten;
     75     u32 cur_valid_bytes;
     76     enum rb_bool threshold_reached;
     77 } rbc_t;
     78 
     79 
     80 #define RB_MIN(x, y) ((x) < (y)?(x):(y))
     81 inline void rb_lock(pthread_mutex_t *lock)
     82 {
     83     int error = pthread_mutex_lock(lock);
     84 
     85     if (error)
     86         ALOGE("Failed to acquire lock with err %d", error);
     87     // TODO Handle the lock failure
     88 }
     89 
     90 inline void rb_unlock(pthread_mutex_t *lock)
     91 {
     92     int error = pthread_mutex_unlock(lock);
     93 
     94     if (error)
     95         ALOGE("Failed to release lock with err %d", error);
     96     // TODO Handle the unlock failure
     97 }
     98 
     99 void * ring_buffer_init(size_t size_of_buf, int num_bufs)
    100 {
    101     struct ring_buf_cb *rbc;
    102     int status;
    103 
    104     rbc = (struct ring_buf_cb *)malloc(sizeof(struct ring_buf_cb));
    105     if (rbc == NULL) {
    106         ALOGE("Failed to alloc rbc");
    107         return NULL;
    108     }
    109     memset(rbc, 0, sizeof(struct ring_buf_cb));
    110 
    111     rbc->bufs = (rb_entry_t *)malloc(num_bufs * sizeof(rb_entry_t));
    112     if (rbc->bufs == NULL) {
    113         free(rbc);
    114         ALOGE("Failed to alloc rbc->bufs");
    115         return NULL;
    116     }
    117     memset(rbc->bufs, 0, (num_bufs * sizeof(rb_entry_t)));
    118 
    119     rbc->each_buf_size = size_of_buf;
    120     rbc->max_num_bufs = num_bufs;
    121 
    122     status = pthread_mutex_init(&rbc->rb_rw_lock, NULL);
    123     if (status != 0) {
    124         ALOGE("Failed to initialize rb_rw_lock");
    125         // TODO handle lock initialization failure
    126     }
    127     rbc->threshold_reached = RB_FALSE;
    128     return rbc;
    129 }
    130 
    131 void ring_buffer_deinit(void *ctx)
    132 {
    133     rbc_t *rbc = (rbc_t *)ctx;
    134     int status;
    135     unsigned int buf_no;
    136 
    137     status = pthread_mutex_destroy(&rbc->rb_rw_lock);
    138     if (status != 0) {
    139         ALOGE("Failed to destroy rb_rw_lock");
    140         // TODO handle the lock destroy failure
    141     }
    142     for (buf_no = 0; buf_no < rbc->max_num_bufs; buf_no++) {
    143         free(rbc->bufs[buf_no].data);
    144     }
    145     free(rbc->bufs);
    146     free(rbc);
    147 }
    148 
    149 /*
    150  * record_length : 0  - byte boundary
    151  *               : >0 - Ensures to write record_length no.of bytes to the same buffer.
    152  */
    153 enum rb_status rb_write (void *ctx, u8 *buf, size_t length, int overwrite,
    154                          size_t record_length)
    155 {
    156     rbc_t *rbc = (rbc_t *)ctx;
    157     unsigned int bytes_written = 0; // bytes written into rb so far
    158     unsigned int push_in_rd_ptr = 0; // push required in read pointer because of
    159                                      // write in current buffer
    160     unsigned int total_push_in_rd_ptr = 0; // Total amount of push in read pointer in this write
    161 
    162     if (record_length > rbc->each_buf_size) {
    163         return RB_FAILURE;
    164     }
    165 
    166     if (overwrite == 0) {
    167         /* Check if the complete RB is full. If the current wr_buf is also
    168          * full, it indicates that the complete RB is full
    169          */
    170         if (rbc->bufs[rbc->wr_buf_no].full == 1)
    171             return RB_FULL;
    172         /* Check whether record fits in current buffer */
    173         if (rbc->wr_buf_no == rbc->rd_buf_no) {
    174             if ((rbc->cur_wr_buf_idx == rbc->cur_rd_buf_idx) &&
    175                 rbc->cur_valid_bytes) {
    176                 return RB_FULL;
    177             } else if (rbc->cur_wr_buf_idx < rbc->cur_rd_buf_idx) {
    178                 if (record_length >
    179                     (rbc->cur_rd_buf_idx - rbc->cur_wr_buf_idx)) {
    180                     return RB_FULL;
    181                 }
    182             } else {
    183                 if (record_length > (rbc->each_buf_size - rbc->cur_wr_buf_idx)) {
    184                     /* Check if the next buffer is not full to write this record into
    185                      * next buffer
    186                      */
    187                     unsigned int next_buf_no = rbc->wr_buf_no + 1;
    188 
    189                     if (next_buf_no >= rbc->max_num_bufs) {
    190                         next_buf_no = 0;
    191                     }
    192                     if (rbc->bufs[next_buf_no].full == 1) {
    193                         return RB_FULL;
    194                     }
    195                 }
    196             }
    197         } else if (record_length > (rbc->each_buf_size - rbc->cur_wr_buf_idx)) {
    198             /* Check if the next buffer is not full to write this record into
    199              * next buffer
    200              */
    201             unsigned int next_buf_no = rbc->wr_buf_no + 1;
    202 
    203             if (next_buf_no >= rbc->max_num_bufs) {
    204                 next_buf_no = 0;
    205             }
    206             if (rbc->bufs[next_buf_no].full == 1) {
    207                 return RB_FULL;
    208             }
    209         }
    210     }
    211 
    212     /* Go to next buffer if the current buffer is not enough to write the
    213      * complete record
    214      */
    215     if (record_length > (rbc->each_buf_size - rbc->cur_wr_buf_idx)) {
    216         rbc->bufs[rbc->wr_buf_no].full = 1;
    217         rbc->bufs[rbc->wr_buf_no].last_wr_index = rbc->cur_wr_buf_idx;
    218         rbc->wr_buf_no++;
    219         if (rbc->wr_buf_no == rbc->max_num_bufs) {
    220             rbc->wr_buf_no = 0;
    221         }
    222         rbc->cur_wr_buf_idx = 0;
    223     }
    224 
    225 
    226     /* In each iteration of below loop, the data that can be fit into
    227      * buffer @wr_buf_no will be copied from input buf */
    228     while (bytes_written < length) {
    229         unsigned int cur_copy_len;
    230 
    231         /* Allocate a buffer if no buf available @ wr_buf_no */
    232         if (rbc->bufs[rbc->wr_buf_no].data == NULL) {
    233             rbc->bufs[rbc->wr_buf_no].data = (u8 *)malloc(rbc->each_buf_size);
    234             if (rbc->bufs[rbc->wr_buf_no].data == NULL) {
    235                 ALOGE("Failed to alloc write buffer");
    236                 return RB_RETRY;
    237             }
    238         }
    239 
    240         /* Take the minimum of the remaining length that needs to be written
    241          * from buf and the maximum length that can be written into current
    242          * buffer in ring buffer
    243          */
    244         cur_copy_len = RB_MIN((rbc->each_buf_size - rbc->cur_wr_buf_idx),
    245                               (length - bytes_written));
    246 
    247         rb_lock(&rbc->rb_rw_lock);
    248 
    249         /* Push the read pointer in case of overrun */
    250         if (rbc->rd_buf_no == rbc->wr_buf_no) {
    251             if ((rbc->cur_rd_buf_idx > rbc->cur_wr_buf_idx) ||
    252                 ((rbc->cur_rd_buf_idx == rbc->cur_wr_buf_idx) &&
    253                  rbc->cur_valid_bytes)) {
    254                 /* If read ptr is ahead of write pointer and if the
    255                  * gap is not enough to fit the cur_copy_len bytes then
    256                  * push the read pointer so that points to the start of
    257                  * old bytes after this write
    258                  */
    259                 if ((rbc->cur_rd_buf_idx - rbc->cur_wr_buf_idx) <
    260                     cur_copy_len) {
    261                     push_in_rd_ptr += cur_copy_len -
    262                                     (rbc->cur_rd_buf_idx - rbc->cur_wr_buf_idx);
    263                     rbc->cur_rd_buf_idx = rbc->cur_wr_buf_idx + cur_copy_len;
    264                     if (rbc->cur_rd_buf_idx >=
    265                         rbc->bufs[rbc->rd_buf_no].last_wr_index) {
    266                         rbc->cur_rd_buf_idx = 0;
    267                         rbc->rd_buf_no++;
    268                         if (rbc->rd_buf_no == rbc->max_num_bufs) {
    269                             rbc->rd_buf_no = 0;
    270                             ALOGV("Pushing read to the start of ring buffer");
    271                         }
    272                         /* the previous buffer might have little more empty room
    273                          * after overwriting the remaining bytes
    274                          */
    275                         rbc->bufs[rbc->wr_buf_no].full = 0;
    276                     }
    277                 }
    278             }
    279         }
    280         rb_unlock(&rbc->rb_rw_lock);
    281 
    282         /* don't use lock while doing memcpy, so that we don't block the read
    283          * context for too long. There is no harm while writing the memory if
    284          * locking is properly done while upgrading the pointers */
    285         memcpy((rbc->bufs[rbc->wr_buf_no].data + rbc->cur_wr_buf_idx),
    286                (buf + bytes_written),
    287                cur_copy_len);
    288 
    289         rb_lock(&rbc->rb_rw_lock);
    290         /* Update the write idx by the amount of write done in this iteration */
    291         rbc->cur_wr_buf_idx += cur_copy_len;
    292         if (rbc->cur_wr_buf_idx == rbc->each_buf_size) {
    293             /* Increment the wr_buf_no as the current buffer is full */
    294             rbc->bufs[rbc->wr_buf_no].full = 1;
    295             rbc->bufs[rbc->wr_buf_no].last_wr_index = rbc->cur_wr_buf_idx;
    296             rbc->wr_buf_no++;
    297             if (rbc->wr_buf_no == rbc->max_num_bufs) {
    298                 ALOGV("Write rolling over to the start of ring buffer");
    299                 rbc->wr_buf_no = 0;
    300             }
    301             /* Reset the write index to zero as this is a new buffer */
    302             rbc->cur_wr_buf_idx = 0;
    303         }
    304 
    305         if ((rbc->cur_valid_bytes + (cur_copy_len - push_in_rd_ptr)) >
    306             (rbc->max_num_bufs * rbc->each_buf_size)) {
    307             /* The below is only a precautionary print and ideally should never
    308              * come */
    309             ALOGE("Something going wrong in ring buffer");
    310         } else {
    311             /* Increase the valid bytes count by number of bytes written without
    312              * overwriting the old bytes */
    313             rbc->cur_valid_bytes += cur_copy_len - push_in_rd_ptr;
    314         }
    315         total_push_in_rd_ptr += push_in_rd_ptr;
    316         push_in_rd_ptr = 0;
    317         rb_unlock(&rbc->rb_rw_lock);
    318         bytes_written += cur_copy_len;
    319     }
    320 
    321     rb_lock(&rbc->rb_rw_lock);
    322     rbc->total_bytes_written += bytes_written - total_push_in_rd_ptr;
    323     rbc->total_bytes_overwritten += total_push_in_rd_ptr;
    324 
    325     /* check if valid bytes is going more than threshold */
    326     if ((rbc->threshold_reached == RB_FALSE) &&
    327         (rbc->cur_valid_bytes >= rbc->num_min_bytes) &&
    328         ((length == record_length) || !record_length) &&
    329         rbc->threshold_cb) {
    330         /* Release the lock before calling threshold_cb as it might call rb_read
    331          * in this same context in order to avoid dead lock
    332          */
    333         rbc->threshold_reached = RB_TRUE;
    334         rb_unlock(&rbc->rb_rw_lock);
    335         rbc->threshold_cb(rbc->cb_ctx);
    336     } else {
    337         rb_unlock(&rbc->rb_rw_lock);
    338     }
    339     return RB_SUCCESS;
    340 }
    341 
    342 size_t rb_read (void *ctx, u8 *buf, size_t max_length)
    343 {
    344     rbc_t *rbc = (rbc_t *)ctx;
    345     unsigned int bytes_read = 0;
    346     unsigned int no_more_bytes_available = 0;
    347 
    348     rb_lock(&rbc->rb_rw_lock);
    349     while (bytes_read < max_length) {
    350         unsigned int cur_cpy_len;
    351 
    352         if (rbc->bufs[rbc->rd_buf_no].data == NULL) {
    353             break;
    354         }
    355 
    356         /* if read and write are on same buffer, work with rd, wr indices */
    357         if (rbc->rd_buf_no == rbc->wr_buf_no) {
    358             if (rbc->cur_rd_buf_idx < rbc->cur_wr_buf_idx) {
    359                 /* Check if all the required bytes are available, if not
    360                  * read only the available bytes in the current buffer and
    361                  * break out after reading current buffer
    362                  */
    363                 if ((rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx) <
    364                         (max_length - bytes_read)) {
    365                     cur_cpy_len = rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx;
    366                     no_more_bytes_available = 1;
    367                 } else {
    368                     cur_cpy_len = max_length - bytes_read;
    369                 }
    370             } else {
    371                 /* When there are no bytes available to read cur_rd_buf_idx
    372                  * will be euqal to cur_wr_buf_idx. Handle this scenario using
    373                  * cur_valid_bytes */
    374                 if (rbc->cur_valid_bytes <= bytes_read) {
    375                     /* Suppress possible static analyzer's warning */
    376                     cur_cpy_len = 0;
    377                     break;
    378                 }
    379                 cur_cpy_len = RB_MIN((rbc->each_buf_size - rbc->cur_rd_buf_idx),
    380                                      (max_length - bytes_read));
    381             }
    382         } else {
    383             /* Check if all remaining_length bytes can be read from this
    384              * buffer, if not read only the available bytes in the current
    385              * buffer and go to next buffer using the while loop.
    386              */
    387             cur_cpy_len = RB_MIN((rbc->each_buf_size - rbc->cur_rd_buf_idx),
    388                                  (max_length - bytes_read));
    389         }
    390 
    391         memcpy((buf + bytes_read),
    392                (rbc->bufs[rbc->rd_buf_no].data + rbc->cur_rd_buf_idx),
    393                cur_cpy_len);
    394 
    395         /* Update the read index */
    396         rbc->cur_rd_buf_idx += cur_cpy_len;
    397         if (rbc->cur_rd_buf_idx == rbc->each_buf_size) {
    398             /* Increment rd_buf_no as the current buffer is completely read */
    399             if (rbc->rd_buf_no != rbc->wr_buf_no) {
    400                 free(rbc->bufs[rbc->rd_buf_no].data);
    401                 rbc->bufs[rbc->rd_buf_no].data = NULL;
    402             }
    403             rbc->rd_buf_no++;
    404             if (rbc->rd_buf_no == rbc->max_num_bufs) {
    405                 ALOGV("Read rolling over to the start of ring buffer");
    406                 rbc->rd_buf_no = 0;
    407             }
    408             /* Reset the read index as this is a new buffer */
    409             rbc->cur_rd_buf_idx = 0;
    410         }
    411 
    412         bytes_read += cur_cpy_len;
    413         if (no_more_bytes_available) {
    414             break;
    415         }
    416     }
    417 
    418     rbc->total_bytes_read += bytes_read;
    419     if (rbc->cur_valid_bytes < bytes_read) {
    420         /* The below is only a precautionary print and ideally should never
    421          * come */
    422         ALOGE("Something going wrong in ring buffer");
    423     } else {
    424         rbc->cur_valid_bytes -= bytes_read;
    425     }
    426 
    427     /* check if valid bytes is going less than threshold */
    428     if (rbc->threshold_reached == RB_TRUE) {
    429         if (rbc->cur_valid_bytes < rbc->num_min_bytes) {
    430             rbc->threshold_reached = RB_FALSE;
    431         }
    432     }
    433     rb_unlock(&rbc->rb_rw_lock);
    434     return bytes_read;
    435 }
    436 
    437 u8 *rb_get_read_buf(void *ctx, size_t *length)
    438 {
    439     rbc_t *rbc = (rbc_t *)ctx;
    440     unsigned int cur_read_len = 0;
    441     u8 *buf;
    442 
    443     /* If no buffer is available for reading */
    444     if (rbc->bufs[rbc->rd_buf_no].data == NULL) {
    445         *length = 0;
    446         return NULL;
    447     }
    448 
    449     rb_lock(&rbc->rb_rw_lock);
    450     if ((rbc->bufs[rbc->rd_buf_no].full == 1) &&
    451         (rbc->cur_rd_buf_idx == rbc->bufs[rbc->rd_buf_no].last_wr_index)) {
    452         if (rbc->wr_buf_no != rbc->rd_buf_no) {
    453             free(rbc->bufs[rbc->rd_buf_no].data);
    454             rbc->bufs[rbc->rd_buf_no].data = NULL;
    455         }
    456         rbc->bufs[rbc->rd_buf_no].full = 0;
    457         rbc->rd_buf_no++;
    458         if (rbc->rd_buf_no == rbc->max_num_bufs) {
    459             rbc->rd_buf_no = 0;
    460         }
    461         rbc->cur_rd_buf_idx = 0;
    462     }
    463 
    464     if (rbc->wr_buf_no == rbc->rd_buf_no) {
    465         /* If read and write are happening on the same buffer currently, use
    466          * rd and wr indices within the buffer */
    467         if ((rbc->cur_rd_buf_idx == rbc->cur_wr_buf_idx) &&
    468             (rbc->cur_valid_bytes == 0)) {
    469             /* No bytes available for reading */
    470             *length = 0;
    471             rb_unlock(&rbc->rb_rw_lock);
    472             return NULL;
    473         } else if (rbc->cur_rd_buf_idx < rbc->cur_wr_buf_idx) {
    474             /* write is just ahead of read in this buffer */
    475             cur_read_len = rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx;
    476         } else {
    477             /* write is rolled over and just behind the read */
    478             cur_read_len = rbc->bufs[rbc->rd_buf_no].last_wr_index - rbc->cur_rd_buf_idx;
    479         }
    480     } else {
    481         if (rbc->cur_rd_buf_idx == 0) {
    482             /* The complete buffer can be read out */
    483             cur_read_len = rbc->bufs[rbc->rd_buf_no].last_wr_index;
    484         } else {
    485             /* Read the remaining bytes in this buffer */
    486             cur_read_len = rbc->bufs[rbc->rd_buf_no].last_wr_index - rbc->cur_rd_buf_idx;
    487         }
    488     }
    489 
    490     if ((rbc->bufs[rbc->rd_buf_no].full == 1) &&
    491          (rbc->cur_rd_buf_idx == 0)) {
    492         /* Pluck out the complete buffer and send it out */
    493         buf = rbc->bufs[rbc->rd_buf_no].data;
    494         rbc->bufs[rbc->rd_buf_no].data = NULL;
    495 
    496         /* Move to the next buffer */
    497         rbc->bufs[rbc->rd_buf_no].full = 0;
    498         rbc->rd_buf_no++;
    499         if (rbc->rd_buf_no == rbc->max_num_bufs) {
    500             ALOGV("Read rolling over to the start of ring buffer");
    501             rbc->rd_buf_no = 0;
    502         }
    503     } else {
    504         /* We cannot give out the complete buffer, so allocate a new memory and
    505          * and copy the data into it.
    506          */
    507         buf = (u8 *)malloc(cur_read_len);
    508         if (buf == NULL) {
    509             ALOGE("Failed to alloc buffer for partial buf read");
    510             *length = 0;
    511             rb_unlock(&rbc->rb_rw_lock);
    512             return NULL;
    513         }
    514         memcpy(buf,
    515                (rbc->bufs[rbc->rd_buf_no].data + rbc->cur_rd_buf_idx),
    516                cur_read_len);
    517 
    518         /* Update the read index */
    519         if (rbc->bufs[rbc->rd_buf_no].full == 1) {
    520             if (rbc->wr_buf_no != rbc->rd_buf_no) {
    521                 free(rbc->bufs[rbc->rd_buf_no].data);
    522                 rbc->bufs[rbc->rd_buf_no].data = NULL;
    523             }
    524             rbc->bufs[rbc->rd_buf_no].full = 0;
    525             rbc->rd_buf_no++;
    526             if (rbc->rd_buf_no == rbc->max_num_bufs) {
    527                 rbc->rd_buf_no = 0;
    528             }
    529             rbc->cur_rd_buf_idx = 0;
    530         } else {
    531             rbc->cur_rd_buf_idx += cur_read_len;
    532         }
    533     }
    534 
    535     rbc->total_bytes_read += cur_read_len;
    536     if (rbc->cur_valid_bytes < cur_read_len) {
    537         /* The below is only a precautionary print and ideally should never
    538          * come */
    539         ALOGE("Something going wrong in ring buffer");
    540     } else {
    541         rbc->cur_valid_bytes -= cur_read_len;
    542     }
    543 
    544     /* check if valid bytes is going less than threshold */
    545     if (rbc->threshold_reached == RB_TRUE) {
    546         if (rbc->cur_valid_bytes < rbc->num_min_bytes) {
    547             rbc->threshold_reached = RB_FALSE;
    548         }
    549     }
    550     rb_unlock(&rbc->rb_rw_lock);
    551 
    552     *length = cur_read_len;
    553     return buf;
    554 }
    555 
    556 void rb_config_threshold(void *ctx,
    557                          unsigned int num_min_bytes,
    558                          threshold_call_back callback,
    559                          void *cb_ctx)
    560 {
    561     rbc_t *rbc = (rbc_t *)ctx;
    562 
    563     rbc->num_min_bytes = num_min_bytes;
    564     rbc->threshold_cb = callback;
    565     rbc->cb_ctx = cb_ctx;
    566 }
    567 
    568 void rb_get_stats(void *ctx, struct rb_stats *rbs)
    569 {
    570     rbc_t *rbc = (rbc_t *)ctx;
    571 
    572     rbs->total_bytes_written = rbc->total_bytes_written;
    573     rbs->total_bytes_read = rbc->total_bytes_read;
    574     rbs->cur_valid_bytes = rbc->cur_valid_bytes;
    575     rbs->each_buf_size = rbc->each_buf_size;
    576     rbs->max_num_bufs = rbc->max_num_bufs;
    577 }
    578