/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h> /* Needed for pthread_mutex_t and the mutex calls below */

#define LOG_TAG  "WifiHAL"

#include <utils/Log.h>

typedef unsigned char u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#include "ring_buffer.h"

/* Note: RB_TRUE is 0 and RB_FALSE is 1, the opposite of the usual
 * convention; the code below always compares explicitly against these
 * values and never truth-tests them. */
enum rb_bool {
    RB_TRUE = 0,
    RB_FALSE = 1
};

typedef struct ring_buf_cb {
    unsigned int rd_buf_no; // Current buffer number to be read from
    unsigned int wr_buf_no; // Current buffer number to be written into
    unsigned int cur_rd_buf_idx; // Read index within the current read buffer
    unsigned int cur_wr_buf_idx; // Write index within the current write buffer
    u8 **bufs; // Array of buffer pointers

    unsigned int max_num_bufs; // Maximum number of buffers that should be used
    size_t each_buf_size; // Size of each buffer in bytes

    pthread_mutex_t rb_rw_lock; // Protects the index/counter state below

    /* Threshold vars */
    unsigned int num_min_bytes; // Valid-byte level at which threshold_cb fires
    void (*threshold_cb)(void *);
    void *cb_ctx; // Opaque context passed back to threshold_cb

    u32 total_bytes_written;
    u32 total_bytes_read;
    u32 total_bytes_overwritten;
    u32 cur_valid_bytes; // Bytes written but not yet read
    enum rb_bool threshold_reached;
} rbc_t;


#define RB_MIN(x, y) ((x) < (y) ? (x) : (y))

static inline void rb_lock(pthread_mutex_t *lock)
{
    int error = pthread_mutex_lock(lock);

    if (error)
        ALOGE("Failed to acquire lock with err %d", error);
    // TODO Handle the lock failure
}

static inline void rb_unlock(pthread_mutex_t *lock)
{
    int error = pthread_mutex_unlock(lock);

    if (error)
        ALOGE("Failed to release lock with err %d", error);
    // TODO Handle the unlock failure
}

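/*
 * Allocates and initializes a ring buffer control block backed by
 * num_bufs buffers of size_of_buf bytes each. The individual buffers
 * are allocated lazily on first write. Returns an opaque handle for
 * the other rb_* calls, or NULL on failure.
 */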
void *ring_buffer_init(size_t size_of_buf, int num_bufs)
{
    struct ring_buf_cb *rbc;
    int status;

    rbc = (struct ring_buf_cb *)malloc(sizeof(struct ring_buf_cb));
    if (rbc == NULL) {
        ALOGE("Failed to alloc rbc");
        return NULL;
    }
    memset(rbc, 0, sizeof(struct ring_buf_cb));

    rbc->bufs = (u8 **)malloc(num_bufs * sizeof(void *));
    if (rbc->bufs == NULL) {
        free(rbc);
        ALOGE("Failed to alloc rbc->bufs");
        return NULL;
    }
    memset(rbc->bufs, 0, (num_bufs * sizeof(void *)));

    rbc->each_buf_size = size_of_buf;
    rbc->max_num_bufs = num_bufs;

    status = pthread_mutex_init(&rbc->rb_rw_lock, NULL);
    if (status != 0) {
        /* A control block with an unusable lock is not safe to hand out */
        ALOGE("Failed to initialize rb_rw_lock");
        free(rbc->bufs);
        free(rbc);
        return NULL;
    }
    rbc->threshold_reached = RB_FALSE;
    return rbc;
}

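/*
 * Destroys the lock and releases all buffers along with the control
 * block itself. The handle must not be used after this call.
 */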
void ring_buffer_deinit(void *ctx)
{
    rbc_t *rbc = (rbc_t *)ctx;
    int status;
    unsigned int buf_no;

    status = pthread_mutex_destroy(&rbc->rb_rw_lock);
    if (status != 0) {
        ALOGE("Failed to destroy rb_rw_lock");
        // TODO handle the lock destroy failure
    }
    /* free(NULL) is a no-op, so lazily unallocated slots are harmless */
    for (buf_no = 0; buf_no < rbc->max_num_bufs; buf_no++) {
        free(rbc->bufs[buf_no]);
    }
    free(rbc->bufs);
    free(rbc);
}

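/*
 * Copies length bytes from buf into the ring buffer. With overwrite == 0
 * the call fails with RB_FAILURE when the data does not fit into the
 * remaining free space; with a non-zero overwrite the oldest unread bytes
 * are overwritten and the read pointer is pushed past them. Invokes the
 * configured threshold callback (with the lock released) the first time
 * the amount of valid data reaches num_min_bytes.
 */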
enum rb_status rb_write(void *ctx, u8 *buf, size_t length, int overwrite)
{
    rbc_t *rbc = (rbc_t *)ctx;
    unsigned int bytes_written = 0; // bytes written into rb so far
    unsigned int push_in_rd_ptr = 0; // push required in the read pointer
                                     // because of the write in the current buffer
    unsigned int total_push_in_rd_ptr = 0; // Total push in the read pointer in this write

    /* Check if this write fits into the remaining ring buffer. The check is
     * done without the lock, so a racing read may make it slightly
     * pessimistic. */
    if ((overwrite == 0) &&
        (length >
         ((rbc->max_num_bufs * rbc->each_buf_size) - rbc->cur_valid_bytes))) {
        return RB_FAILURE;
    }

    /* In each iteration of the loop below, the data that fits into the
     * buffer at wr_buf_no is copied from the input buf */
    while (bytes_written < length) {
        unsigned int cur_copy_len;

        /* Allocate a buffer if none is available at wr_buf_no */
        if (rbc->bufs[rbc->wr_buf_no] == NULL) {
            rbc->bufs[rbc->wr_buf_no] = (u8 *)malloc(rbc->each_buf_size);
            if (rbc->bufs[rbc->wr_buf_no] == NULL) {
                ALOGE("Failed to alloc write buffer");
                return RB_FAILURE;
            }
        }

        /* Take the minimum of the remaining length that needs to be written
         * from buf and the maximum length that can be written into the
         * current buffer of the ring buffer
         */
        cur_copy_len = RB_MIN((rbc->each_buf_size - rbc->cur_wr_buf_idx),
                              (length - bytes_written));

        rb_lock(&rbc->rb_rw_lock);

        /* Push the read pointer in case of overrun */
        if (rbc->rd_buf_no == rbc->wr_buf_no) {
            if ((rbc->cur_rd_buf_idx > rbc->cur_wr_buf_idx) ||
                ((rbc->cur_rd_buf_idx == rbc->cur_wr_buf_idx) &&
                 rbc->cur_valid_bytes)) {
                /* If the read pointer is ahead of the write pointer and the
                 * gap is not enough to fit cur_copy_len bytes, push the read
                 * pointer so that it points to the start of the old bytes
                 * after this write
                 */
                if ((rbc->cur_rd_buf_idx - rbc->cur_wr_buf_idx) <
                    cur_copy_len) {
                    push_in_rd_ptr += cur_copy_len -
                                    (rbc->cur_rd_buf_idx - rbc->cur_wr_buf_idx);
                    rbc->cur_rd_buf_idx = rbc->cur_wr_buf_idx + cur_copy_len;
                    if (rbc->cur_rd_buf_idx == rbc->each_buf_size) {
                        rbc->cur_rd_buf_idx = 0;
                        rbc->rd_buf_no++;
                        if (rbc->rd_buf_no == rbc->max_num_bufs) {
                            rbc->rd_buf_no = 0;
                            ALOGD("Pushing read to the start of ring buffer");
                        }
                    }
                }
            }
        }
        rb_unlock(&rbc->rb_rw_lock);

        /* Don't hold the lock during the memcpy, so that the read context is
         * not blocked for too long. Writing the payload without the lock is
         * harmless as long as the pointer updates are done under the lock */
        memcpy((rbc->bufs[rbc->wr_buf_no] + rbc->cur_wr_buf_idx),
               (buf + bytes_written),
               cur_copy_len);

        rb_lock(&rbc->rb_rw_lock);
        /* Update the write idx by the amount written in this iteration */
        rbc->cur_wr_buf_idx += cur_copy_len;
        if (rbc->cur_wr_buf_idx == rbc->each_buf_size) {
            /* Increment wr_buf_no as the current buffer is full */
            rbc->wr_buf_no++;
            if (rbc->wr_buf_no == rbc->max_num_bufs) {
                ALOGD("Write rolling over to the start of ring buffer");
                rbc->wr_buf_no = 0;
            }
            /* Reset the write index to zero as this is a new buffer */
            rbc->cur_wr_buf_idx = 0;
        }

        if ((rbc->cur_valid_bytes + (cur_copy_len - push_in_rd_ptr)) >
            (rbc->max_num_bufs * rbc->each_buf_size)) {
            /* This is only a precautionary check and should never trigger */
            ALOGE("Something going wrong in ring buffer");
        } else {
            /* Increase the valid-byte count by the number of bytes written
             * without overwriting old bytes */
            rbc->cur_valid_bytes += cur_copy_len - push_in_rd_ptr;
        }
        total_push_in_rd_ptr += push_in_rd_ptr;
        push_in_rd_ptr = 0;
        rb_unlock(&rbc->rb_rw_lock);
        bytes_written += cur_copy_len;
    }

    rb_lock(&rbc->rb_rw_lock);
    rbc->total_bytes_written += bytes_written - total_push_in_rd_ptr;
    rbc->total_bytes_overwritten += total_push_in_rd_ptr;

    /* Check whether the valid-byte count has crossed the threshold */
    if ((rbc->threshold_reached == RB_FALSE) &&
        (rbc->cur_valid_bytes >= rbc->num_min_bytes) &&
        rbc->threshold_cb) {
        /* Release the lock before calling threshold_cb, as it might call
         * rb_read in this same context; invoking it with the lock held
         * would deadlock
         */
        rbc->threshold_reached = RB_TRUE;
        rb_unlock(&rbc->rb_rw_lock);
        rbc->threshold_cb(rbc->cb_ctx);
    } else {
        rb_unlock(&rbc->rb_rw_lock);
    }
    return RB_SUCCESS;
}

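/*
 * Copies up to max_length bytes of unread data into buf and returns the
 * number of bytes actually copied. Buffers that have been completely
 * drained (and are not currently being written) are freed along the way.
 */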
size_t rb_read(void *ctx, u8 *buf, size_t max_length)
{
    rbc_t *rbc = (rbc_t *)ctx;
    unsigned int bytes_read = 0;
    unsigned int no_more_bytes_available = 0;

    rb_lock(&rbc->rb_rw_lock);
    while (bytes_read < max_length) {
        unsigned int cur_cpy_len;

        if (rbc->bufs[rbc->rd_buf_no] == NULL) {
            break;
        }

        /* If read and write are on the same buffer, work with the rd and wr
         * indices within that buffer */
        if (rbc->rd_buf_no == rbc->wr_buf_no) {
            if (rbc->cur_rd_buf_idx < rbc->cur_wr_buf_idx) {
                /* Check if all the required bytes are available; if not,
                 * read only the available bytes in the current buffer and
                 * break out after reading the current buffer
                 */
                if ((rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx) <
                        (max_length - bytes_read)) {
                    cur_cpy_len = rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx;
                    no_more_bytes_available = 1;
                } else {
                    cur_cpy_len = max_length - bytes_read;
                }
            } else {
                /* When there are no bytes available to read, cur_rd_buf_idx
                 * will be equal to cur_wr_buf_idx. Handle this scenario using
                 * cur_valid_bytes */
                if (rbc->cur_valid_bytes <= bytes_read) {
                    /* Suppress a possible static analyzer warning */
                    cur_cpy_len = 0;
                    break;
                }
                cur_cpy_len = RB_MIN((rbc->each_buf_size - rbc->cur_rd_buf_idx),
                                     (max_length - bytes_read));
            }
        } else {
            /* Check if all the remaining bytes can be read from this
             * buffer; if not, read only the available bytes in the current
             * buffer and move to the next buffer via the while loop.
             */
            cur_cpy_len = RB_MIN((rbc->each_buf_size - rbc->cur_rd_buf_idx),
                                 (max_length - bytes_read));
        }

        memcpy((buf + bytes_read),
               (rbc->bufs[rbc->rd_buf_no] + rbc->cur_rd_buf_idx),
               cur_cpy_len);

        /* Update the read index */
        rbc->cur_rd_buf_idx += cur_cpy_len;
        if (rbc->cur_rd_buf_idx == rbc->each_buf_size) {
            /* Increment rd_buf_no as the current buffer is completely read */
            if (rbc->rd_buf_no != rbc->wr_buf_no) {
                free(rbc->bufs[rbc->rd_buf_no]);
                rbc->bufs[rbc->rd_buf_no] = NULL;
            }
            rbc->rd_buf_no++;
            if (rbc->rd_buf_no == rbc->max_num_bufs) {
                ALOGD("Read rolling over to the start of ring buffer");
                rbc->rd_buf_no = 0;
            }
            /* Reset the read index as this is a new buffer */
            rbc->cur_rd_buf_idx = 0;
        }

        bytes_read += cur_cpy_len;
        if (no_more_bytes_available) {
            break;
        }
    }

    rbc->total_bytes_read += bytes_read;
    if (rbc->cur_valid_bytes < bytes_read) {
        /* This is only a precautionary check and should never trigger */
        ALOGE("Something going wrong in ring buffer");
    } else {
        rbc->cur_valid_bytes -= bytes_read;
    }

    /* Check whether the valid-byte count has dropped below the threshold */
    if (rbc->threshold_reached == RB_TRUE) {
        if (rbc->cur_valid_bytes < rbc->num_min_bytes) {
            rbc->threshold_reached = RB_FALSE;
        }
    }
    rb_unlock(&rbc->rb_rw_lock);
    return bytes_read;
}

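/*
 * Hands out up to one internal buffer's worth of unread data, avoiding an
 * extra copy when possible: a completely readable buffer is detached and
 * returned as-is, otherwise the readable tail is copied into freshly
 * allocated memory. The caller owns the returned pointer and must free()
 * it. Returns NULL with *length set to 0 when nothing is readable.
 */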
u8 *rb_get_read_buf(void *ctx, size_t *length)
{
    rbc_t *rbc = (rbc_t *)ctx;
    unsigned int cur_read_len = 0;
    u8 *buf;

    rb_lock(&rbc->rb_rw_lock);

    /* If no buffer is available for reading. The check is done under the
     * lock so it cannot race with a concurrent writer allocating the slot */
    if (rbc->bufs[rbc->rd_buf_no] == NULL) {
        rb_unlock(&rbc->rb_rw_lock);
        *length = 0;
        return NULL;
    }

    if (rbc->wr_buf_no == rbc->rd_buf_no) {
        /* If read and write are currently happening on the same buffer, use
         * the rd and wr indices within that buffer */
        if ((rbc->cur_rd_buf_idx == rbc->cur_wr_buf_idx) &&
            (rbc->cur_valid_bytes == 0)) {
            /* No bytes available for reading */
            *length = 0;
            rb_unlock(&rbc->rb_rw_lock);
            return NULL;
        } else if (rbc->cur_rd_buf_idx < rbc->cur_wr_buf_idx) {
            /* Write is just ahead of read in this buffer */
            cur_read_len = rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx;
        } else {
            /* Write has rolled over and is just behind the read */
            cur_read_len = rbc->each_buf_size - rbc->cur_rd_buf_idx;
        }
    } else {
        if (rbc->cur_rd_buf_idx == 0) {
            /* The complete buffer can be read out */
            cur_read_len = rbc->each_buf_size;
        } else {
            /* Read the remaining bytes in this buffer */
            cur_read_len = rbc->each_buf_size - rbc->cur_rd_buf_idx;
        }
    }

    if (cur_read_len == rbc->each_buf_size) {
        /* Detach the complete buffer and hand it out */
        buf = rbc->bufs[rbc->rd_buf_no];
        rbc->bufs[rbc->rd_buf_no] = NULL;

        /* Move to the next buffer */
        rbc->rd_buf_no++;
        if (rbc->rd_buf_no == rbc->max_num_bufs) {
            ALOGD("Read rolling over to the start of ring buffer");
            rbc->rd_buf_no = 0;
        }
    } else {
        /* We cannot give out the complete buffer, so allocate new memory
         * and copy the data into it.
         */
        buf = (u8 *)malloc(cur_read_len);
        if (buf == NULL) {
            ALOGE("Failed to alloc read buffer");
            rb_unlock(&rbc->rb_rw_lock);
            *length = 0;
            return NULL;
        }
        memcpy(buf, (rbc->bufs[rbc->rd_buf_no] + rbc->cur_rd_buf_idx), cur_read_len);

        /* Update the read index */
        if ((cur_read_len + rbc->cur_rd_buf_idx) == rbc->each_buf_size) {
            if (rbc->wr_buf_no != rbc->rd_buf_no) {
                free(rbc->bufs[rbc->rd_buf_no]);
                rbc->bufs[rbc->rd_buf_no] = NULL;
            }
            rbc->rd_buf_no++;
            if (rbc->rd_buf_no == rbc->max_num_bufs) {
                rbc->rd_buf_no = 0;
            }
            rbc->cur_rd_buf_idx = 0;
        } else {
            rbc->cur_rd_buf_idx += cur_read_len;
        }
    }

    rbc->total_bytes_read += cur_read_len;
    if (rbc->cur_valid_bytes < cur_read_len) {
        /* This is only a precautionary check and should never trigger */
        ALOGE("Something going wrong in ring buffer");
    } else {
        rbc->cur_valid_bytes -= cur_read_len;
    }

    /* Check whether the valid-byte count has dropped below the threshold */
    if (rbc->threshold_reached == RB_TRUE) {
        if (rbc->cur_valid_bytes < rbc->num_min_bytes) {
            rbc->threshold_reached = RB_FALSE;
        }
    }
    rb_unlock(&rbc->rb_rw_lock);

    *length = cur_read_len;
    return buf;
}

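/*
 * Registers a callback that rb_write fires once the amount of unread data
 * first reaches num_min_bytes; the trigger re-arms when a read drops the
 * level back below the threshold. The fields are set without taking the
 * lock, so this should be configured before readers and writers run
 * concurrently.
 */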
void rb_config_threshold(void *ctx,
                         unsigned int num_min_bytes,
                         threshold_call_back callback,
                         void *cb_ctx)
{
    rbc_t *rbc = (rbc_t *)ctx;

    rbc->num_min_bytes = num_min_bytes;
    rbc->threshold_cb = callback;
    rbc->cb_ctx = cb_ctx;
}

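/*
 * Copies the current counters into *rbs. The snapshot is taken without
 * the lock, so the values may be momentarily inconsistent while a read
 * or write is in flight.
 */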
void rb_get_stats(void *ctx, struct rb_stats *rbs)
{
    rbc_t *rbc = (rbc_t *)ctx;

    rbs->total_bytes_written = rbc->total_bytes_written;
    rbs->total_bytes_read = rbc->total_bytes_read;
    rbs->cur_valid_bytes = rbc->cur_valid_bytes;
    rbs->each_buf_size = rbc->each_buf_size;
    rbs->max_num_bufs = rbc->max_num_bufs;
}
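
/*
 * Minimal usage sketch, not part of the original module. It assumes
 * ring_buffer.h declares the entry points above together with
 * enum rb_status, struct rb_stats and a threshold_call_back type that
 * matches void (*)(void *). Guarded by RING_BUFFER_EXAMPLE (a
 * hypothetical build flag) so it never affects the library build.
 */
#ifdef RING_BUFFER_EXAMPLE
static void example_threshold_cb(void *ctx)
{
    (void)ctx; /* Unused in this sketch */
    ALOGD("ring buffer reached its configured threshold");
}

static void ring_buffer_example(void)
{
    /* 4 buffers of 1 KB each, i.e. 4 KB of total ring capacity */
    void *rb = ring_buffer_init(1024, 4);
    if (rb == NULL)
        return;

    /* Fire example_threshold_cb once 2 KB of unread data accumulates */
    rb_config_threshold(rb, 2048, example_threshold_cb, NULL);

    u8 record[128];
    memset(record, 0xab, sizeof(record));
    /* overwrite = 1: push the read pointer instead of failing when full */
    if (rb_write(rb, record, sizeof(record), 1) != RB_SUCCESS)
        ALOGE("rb_write failed");

    u8 out[128];
    size_t got = rb_read(rb, out, sizeof(out));
    ALOGD("read %zu bytes back", got);

    ring_buffer_deinit(rb);
}
#endif /* RING_BUFFER_EXAMPLE */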
    478