      1 /*
      2  * Copyright (C) 2008 The Android Open Source Project
      3  * All rights reserved.
      4  *
      5  * Redistribution and use in source and binary forms, with or without
      6  * modification, are permitted provided that the following conditions
      7  * are met:
      8  *  * Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  *  * Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in
     12  *    the documentation and/or other materials provided with the
     13  *    distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
     22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
     25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26  * SUCH DAMAGE.
     27  */
     28 
     29 #include "resolv_cache.h"
     30 
     31 #include <resolv.h>
     32 #include <stdarg.h>
     33 #include <stdio.h>
     34 #include <stdlib.h>
     35 #include <string.h>
     36 #include <time.h>
      37 #include <pthread.h>
     38 
     39 #include <errno.h>
     40 #include <arpa/nameser.h>
     41 #include <net/if.h>
     42 #include <netdb.h>
     43 #include <linux/if.h>
     44 
     45 #include <arpa/inet.h>
     46 #include "resolv_private.h"
     47 #include "resolv_netid.h"
     48 #include "res_private.h"
     49 
     50 #include <async_safe/log.h>
     51 
     52 /* This code implements a small and *simple* DNS resolver cache.
     53  *
     54  * It is only used to cache DNS answers for a time defined by the smallest TTL
     55  * among the answer records in order to reduce DNS traffic. It is not supposed
     56  * to be a full DNS cache, since we plan to implement that in the future in a
     57  * dedicated process running on the system.
     58  *
     59  * Note that its design is kept simple very intentionally, i.e.:
     60  *
     61  *  - it takes raw DNS query packet data as input, and returns raw DNS
     62  *    answer packet data as output
     63  *
     64  *    (this means that two similar queries that encode the DNS name
     65  *     differently will be treated distinctly).
     66  *
      67  *    the smallest TTL value among the answer records is used as the time
     68  *    to keep an answer in the cache.
     69  *
     70  *    this is bad, but we absolutely want to avoid parsing the answer packets
     71  *    (and should be solved by the later full DNS cache process).
     72  *
     73  *  - the implementation is just a (query-data) => (answer-data) hash table
     74  *    with a trivial least-recently-used expiration policy.
     75  *
      76  * Doing this keeps the code simple and avoids having to deal with a lot of things
     77  * that a full DNS cache is expected to do.
     78  *
     79  * The API is also very simple:
     80  *
     81  *   - the client calls _resolv_cache_get() to obtain a handle to the cache.
     82  *     this will initialize the cache on first usage. the result can be NULL
     83  *     if the cache is disabled.
     84  *
     85  *   - the client calls _resolv_cache_lookup() before performing a query
     86  *
     87  *     if the function returns RESOLV_CACHE_FOUND, a copy of the answer data
     88  *     has been copied into the client-provided answer buffer.
     89  *
     90  *     if the function returns RESOLV_CACHE_NOTFOUND, the client should perform
     91  *     a request normally, *then* call _resolv_cache_add() to add the received
     92  *     answer to the cache.
     93  *
     94  *     if the function returns RESOLV_CACHE_UNSUPPORTED, the client should
     95  *     perform a request normally, and *not* call _resolv_cache_add()
     96  *
     97  *     note that RESOLV_CACHE_UNSUPPORTED is also returned if the answer buffer
      98  *     is too short to accommodate the cached result.
     99  */
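
         /* Illustrative only: a minimal sketch of how a client is expected to drive
          * this API, based on the description above.  send_query() is a hypothetical
          * placeholder for the code that actually talks to the name server, and the
          * parameter order of _resolv_cache_add() is assumed from the text above:
          *
          *     uint8_t answer[1024];
          *     int     answerlen = 0;
          *
          *     ResolvCacheStatus status = _resolv_cache_lookup(netid, query, querylen,
          *                                                     answer, sizeof(answer),
          *                                                     &answerlen);
          *     if (status == RESOLV_CACHE_NOTFOUND) {
          *         answerlen = send_query(query, querylen, answer, sizeof(answer));
          *         if (answerlen > 0)
          *             _resolv_cache_add(netid, query, querylen, answer, answerlen);
          *     } else if (status == RESOLV_CACHE_UNSUPPORTED) {
          *         answerlen = send_query(query, querylen, answer, sizeof(answer));
          *         // do not call _resolv_cache_add() for unsupported queries
          *     }
          *     // RESOLV_CACHE_FOUND: 'answer' already holds the cached packet
          */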
    100 
    101 /* default number of entries kept in the cache. This value has been
    102  * determined by browsing through various sites and counting the number
    103  * of corresponding requests. Keep in mind that our framework is currently
    104  * performing two requests per name lookup (one for IPv4, the other for IPv6)
    105  *
    106  *    www.google.com      4
    107  *    www.ysearch.com     6
    108  *    www.amazon.com      8
    109  *    www.nytimes.com     22
    110  *    www.espn.com        28
    111  *    www.msn.com         28
    112  *    www.lemonde.fr      35
    113  *
     114  * (determined on 2009-02-17 from Paris, France; results may vary depending
    115  *  on location)
    116  *
    117  * most high-level websites use lots of media/ad servers with different names
    118  * but these are generally reused when browsing through the site.
    119  *
    120  * As such, a value of 64 should be relatively comfortable at the moment.
    121  *
    122  * ******************************************
    123  * * NOTE - this has changed.
    124  * * 1) we've added IPv6 support so each dns query results in 2 responses
    125  * * 2) we've made this a system-wide cache, so the cost is less (it's not
    126  * *    duplicated in each process) and the need is greater (more processes
    127  * *    making different requests).
    128  * * Upping by 2x for IPv6
    129  * * Upping by another 5x for the centralized nature
    130  * *****************************************
    131  */
     132 #define  CONFIG_MAX_ENTRIES    (64 * 2 * 5)
    133 
    134 /****************************************************************************/
    135 /****************************************************************************/
    136 /*****                                                                  *****/
    137 /*****                                                                  *****/
    138 /*****                                                                  *****/
    139 /****************************************************************************/
    140 /****************************************************************************/
    141 
    142 /* set to 1 to debug cache operations */
    143 #define  DEBUG       0
    144 
    145 /* set to 1 to debug query data */
    146 #define  DEBUG_DATA  0
    147 
    148 #if DEBUG
    149 #define __DEBUG__
    150 #else
    151 #define __DEBUG__ __attribute__((unused))
    152 #endif
    153 
    154 #undef XLOG
    155 
    156 #define XLOG(...) ({ \
    157     if (DEBUG) { \
    158         async_safe_format_log(ANDROID_LOG_DEBUG,"libc",__VA_ARGS__); \
    159     } else { \
    160         ((void)0); \
    161     } \
    162 })
    163 
    164 /** BOUNDED BUFFER FORMATTING
    165  **/
    166 
    167 /* technical note:
    168  *
    169  *   the following debugging routines are used to append data to a bounded
     170  *   buffer. they take two parameters:
    171  *
    172  *   - p : a pointer to the current cursor position in the buffer
    173  *         this value is initially set to the buffer's address.
    174  *
    175  *   - end : the address of the buffer's limit, i.e. of the first byte
    176  *           after the buffer. this address should never be touched.
    177  *
    178  *           IMPORTANT: it is assumed that end > buffer_address, i.e.
    179  *                      that the buffer is at least one byte.
    180  *
    181  *   the _bprint_() functions return the new value of 'p' after the data
    182  *   has been appended, and also ensure the following:
    183  *
    184  *   - the returned value will never be strictly greater than 'end'
    185  *
     186  *   - a return value equal to 'end' means that truncation occurred
    187  *     (in which case, end[-1] will be set to 0)
    188  *
    189  *   - after returning from a _bprint_() function, the content of the buffer
    190  *     is always 0-terminated, even in the event of truncation.
    191  *
    192  *  these conventions allow you to call _bprint_ functions multiple times and
    193  *  only check for truncation at the end of the sequence, as in:
    194  *
    195  *     char  buff[1000], *p = buff, *end = p + sizeof(buff);
    196  *
    197  *     p = _bprint_c(p, end, '"');
    198  *     p = _bprint_s(p, end, my_string);
    199  *     p = _bprint_c(p, end, '"');
    200  *
    201  *     if (p >= end) {
    202  *        // buffer was too small
    203  *     }
    204  *
    205  *     printf( "%s", buff );
    206  */
    207 
    208 /* add a char to a bounded buffer */
    209 char*
    210 _bprint_c( char*  p, char*  end, int  c )
    211 {
    212     if (p < end) {
    213         if (p+1 == end)
    214             *p++ = 0;
    215         else {
    216             *p++ = (char) c;
    217             *p   = 0;
    218         }
    219     }
    220     return p;
    221 }
    222 
    223 /* add a sequence of bytes to a bounded buffer */
    224 char*
    225 _bprint_b( char*  p, char*  end, const char*  buf, int  len )
    226 {
    227     int  avail = end - p;
    228 
    229     if (avail <= 0 || len <= 0)
    230         return p;
    231 
    232     if (avail > len)
    233         avail = len;
    234 
    235     memcpy( p, buf, avail );
    236     p += avail;
    237 
    238     if (p < end)
    239         p[0] = 0;
    240     else
    241         end[-1] = 0;
    242 
    243     return p;
    244 }
    245 
    246 /* add a string to a bounded buffer */
    247 char*
    248 _bprint_s( char*  p, char*  end, const char*  str )
    249 {
    250     return _bprint_b(p, end, str, strlen(str));
    251 }
    252 
    253 /* add a formatted string to a bounded buffer */
    254 char* _bprint( char*  p, char*  end, const char*  format, ... ) __DEBUG__;
    255 char* _bprint( char*  p, char*  end, const char*  format, ... )
    256 {
    257     int      avail, n;
    258     va_list  args;
    259 
    260     avail = end - p;
    261 
    262     if (avail <= 0)
    263         return p;
    264 
    265     va_start(args, format);
    266     n = vsnprintf( p, avail, format, args);
    267     va_end(args);
    268 
    269     /* certain C libraries return -1 in case of truncation */
    270     if (n < 0 || n > avail)
    271         n = avail;
    272 
    273     p += n;
    274     /* certain C libraries do not zero-terminate in case of truncation */
    275     if (p == end)
    276         p[-1] = 0;
    277 
    278     return p;
    279 }
    280 
    281 /* add a hex value to a bounded buffer, up to 8 digits */
    282 char*
    283 _bprint_hex( char*  p, char*  end, unsigned  value, int  numDigits )
    284 {
    285     char   text[sizeof(unsigned)*2];
    286     int    nn = 0;
    287 
    288     while (numDigits-- > 0) {
    289         text[nn++] = "0123456789abcdef"[(value >> (numDigits*4)) & 15];
    290     }
    291     return _bprint_b(p, end, text, nn);
    292 }
    293 
    294 /* add the hexadecimal dump of some memory area to a bounded buffer */
    295 char*
    296 _bprint_hexdump( char*  p, char*  end, const uint8_t*  data, int  datalen )
    297 {
    298     int   lineSize = 16;
    299 
    300     while (datalen > 0) {
    301         int  avail = datalen;
    302         int  nn;
    303 
    304         if (avail > lineSize)
    305             avail = lineSize;
    306 
    307         for (nn = 0; nn < avail; nn++) {
    308             if (nn > 0)
    309                 p = _bprint_c(p, end, ' ');
    310             p = _bprint_hex(p, end, data[nn], 2);
    311         }
    312         for ( ; nn < lineSize; nn++ ) {
    313             p = _bprint_s(p, end, "   ");
    314         }
    315         p = _bprint_s(p, end, "  ");
    316 
    317         for (nn = 0; nn < avail; nn++) {
    318             int  c = data[nn];
    319 
    320             if (c < 32 || c > 127)
    321                 c = '.';
    322 
    323             p = _bprint_c(p, end, c);
    324         }
    325         p = _bprint_c(p, end, '\n');
    326 
    327         data    += avail;
    328         datalen -= avail;
    329     }
    330     return p;
    331 }
    332 
     333 /* dump the content of a query packet to the log */
    334 void XLOG_BYTES( const void*  base, int  len ) __DEBUG__;
    335 void XLOG_BYTES( const void*  base, int  len )
    336 {
    337     if (DEBUG_DATA) {
    338         char  buff[1024];
    339         char*  p = buff, *end = p + sizeof(buff);
    340 
    341         p = _bprint_hexdump(p, end, base, len);
    342         XLOG("%s",buff);
    343     }
     344 }
    345 
    346 static time_t
    347 _time_now( void )
    348 {
    349     struct timeval  tv;
    350 
    351     gettimeofday( &tv, NULL );
    352     return tv.tv_sec;
    353 }
    354 
    355 /* reminder: the general format of a DNS packet is the following:
    356  *
    357  *    HEADER  (12 bytes)
    358  *    QUESTION  (variable)
    359  *    ANSWER (variable)
    360  *    AUTHORITY (variable)
     361  *    ADDITIONAL (variable)
    362  *
    363  * the HEADER is made of:
    364  *
    365  *   ID     : 16 : 16-bit unique query identification field
    366  *
    367  *   QR     :  1 : set to 0 for queries, and 1 for responses
    368  *   Opcode :  4 : set to 0 for queries
    369  *   AA     :  1 : set to 0 for queries
    370  *   TC     :  1 : truncation flag, will be set to 0 in queries
    371  *   RD     :  1 : recursion desired
    372  *
    373  *   RA     :  1 : recursion available (0 in queries)
    374  *   Z      :  3 : three reserved zero bits
    375  *   RCODE  :  4 : response code (always 0=NOERROR in queries)
    376  *
    377  *   QDCount: 16 : question count
    378  *   ANCount: 16 : Answer count (0 in queries)
     379  *   NSCount: 16 : Authority Record count (0 in queries)
     380  *   ARCount: 16 : Additional Record count (0 in queries)
    381  *
     382  * the QUESTION is made of QDCount Question Records (QRs)
     383  * the ANSWER is made of ANCount RRs
     384  * the AUTHORITY is made of NSCount RRs
     385  * the ADDITIONAL is made of ARCount RRs
    386  *
    387  * Each Question Record (QR) is made of:
    388  *
    389  *   QNAME   : variable : Query DNS NAME
    390  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
    391  *   CLASS   : 16       : class of query (IN=1)
    392  *
    393  * Each Resource Record (RR) is made of:
    394  *
    395  *   NAME    : variable : DNS NAME
    396  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
    397  *   CLASS   : 16       : class of query (IN=1)
    398  *   TTL     : 32       : seconds to cache this RR (0=none)
    399  *   RDLENGTH: 16       : size of RDDATA in bytes
    400  *   RDDATA  : variable : RR data (depends on TYPE)
    401  *
    402  * Each QNAME contains a domain name encoded as a sequence of 'labels'
    403  * terminated by a zero. Each label has the following format:
    404  *
     405  *    LEN  : 8     : length of the label (MUST be < 64)
     406  *    NAME : 8*LEN : the label's characters (dots are not included)
    407  *
    408  * A value of 0 in the encoding is interpreted as the 'root' domain and
    409  * terminates the encoding. So 'www.android.com' will be encoded as:
    410  *
    411  *   <3>www<7>android<3>com<0>
    412  *
    413  * Where <n> represents the byte with value 'n'
    414  *
    415  * Each NAME reflects the QNAME of the question, but has a slightly more
    416  * complex encoding in order to provide message compression. This is achieved
    417  * by using a 2-byte pointer, with format:
    418  *
    419  *    TYPE   : 2  : 0b11 to indicate a pointer, 0b01 and 0b10 are reserved
    420  *    OFFSET : 14 : offset to another part of the DNS packet
    421  *
     422  * The offset is relative to the start of the DNS packet and must point to
     423  * an earlier name in the packet. A pointer terminates the encoding.
    424  *
    425  * The NAME can be encoded in one of the following formats:
    426  *
    427  *   - a sequence of simple labels terminated by 0 (like QNAMEs)
    428  *   - a single pointer
    429  *   - a sequence of simple labels terminated by a pointer
    430  *
     431  * A pointer shall always point to either a pointer or a sequence of
    432  * labels (which can themselves be terminated by either a 0 or a pointer)
    433  *
    434  * The expanded length of a given domain name should not exceed 255 bytes.
    435  *
    436  * NOTE: we don't parse the answer packets, so don't need to deal with NAME
    437  *       records, only QNAMEs.
    438  */
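
         /* Illustrative only: the wire layout of a hypothetical query for the A
          * record of www.android.com, following the format above (hex bytes; the
          * ID 0x1234 is arbitrary, RD is set, QDCOUNT is 1):
          *
          *     12 34 01 00 00 01 00 00 00 00 00 00    header (12 bytes)
          *     03 'w' 'w' 'w'                          label "www"
          *     07 'a' 'n' 'd' 'r' 'o' 'i' 'd'          label "android"
          *     03 'c' 'o' 'm'                          label "com"
          *     00                                      root label, ends the QNAME
          *     00 01                                   TYPE  = A  (DNS_TYPE_A)
          *     00 01                                   CLASS = IN (DNS_CLASS_IN)
          */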
    439 
    440 #define  DNS_HEADER_SIZE  12
    441 
    442 #define  DNS_TYPE_A   "\00\01"   /* big-endian decimal 1 */
    443 #define  DNS_TYPE_PTR "\00\014"  /* big-endian decimal 12 */
    444 #define  DNS_TYPE_MX  "\00\017"  /* big-endian decimal 15 */
    445 #define  DNS_TYPE_AAAA "\00\034" /* big-endian decimal 28 */
     446 #define  DNS_TYPE_ALL "\00\377"  /* big-endian decimal 255 */
    447 
    448 #define  DNS_CLASS_IN "\00\01"   /* big-endian decimal 1 */
    449 
    450 typedef struct {
    451     const uint8_t*  base;
    452     const uint8_t*  end;
    453     const uint8_t*  cursor;
    454 } DnsPacket;
    455 
    456 static void
    457 _dnsPacket_init( DnsPacket*  packet, const uint8_t*  buff, int  bufflen )
    458 {
    459     packet->base   = buff;
    460     packet->end    = buff + bufflen;
    461     packet->cursor = buff;
    462 }
    463 
    464 static void
    465 _dnsPacket_rewind( DnsPacket*  packet )
    466 {
    467     packet->cursor = packet->base;
    468 }
    469 
    470 static void
    471 _dnsPacket_skip( DnsPacket*  packet, int  count )
    472 {
    473     const uint8_t*  p = packet->cursor + count;
    474 
    475     if (p > packet->end)
    476         p = packet->end;
    477 
    478     packet->cursor = p;
    479 }
    480 
    481 static int
    482 _dnsPacket_readInt16( DnsPacket*  packet )
    483 {
    484     const uint8_t*  p = packet->cursor;
    485 
    486     if (p+2 > packet->end)
    487         return -1;
    488 
    489     packet->cursor = p+2;
    490     return (p[0]<< 8) | p[1];
    491 }
    492 
    493 /** QUERY CHECKING
    494  **/
    495 
    496 /* check bytes in a dns packet. returns 1 on success, 0 on failure.
    497  * the cursor is only advanced in the case of success
    498  */
    499 static int
    500 _dnsPacket_checkBytes( DnsPacket*  packet, int  numBytes, const void*  bytes )
    501 {
    502     const uint8_t*  p = packet->cursor;
    503 
    504     if (p + numBytes > packet->end)
    505         return 0;
    506 
    507     if (memcmp(p, bytes, numBytes) != 0)
    508         return 0;
    509 
    510     packet->cursor = p + numBytes;
    511     return 1;
    512 }
    513 
    514 /* parse and skip a given QNAME stored in a query packet,
    515  * from the current cursor position. returns 1 on success,
    516  * or 0 for malformed data.
    517  */
    518 static int
    519 _dnsPacket_checkQName( DnsPacket*  packet )
    520 {
    521     const uint8_t*  p   = packet->cursor;
    522     const uint8_t*  end = packet->end;
    523 
    524     for (;;) {
    525         int  c;
    526 
    527         if (p >= end)
    528             break;
    529 
    530         c = *p++;
    531 
    532         if (c == 0) {
    533             packet->cursor = p;
    534             return 1;
    535         }
    536 
    537         /* we don't expect label compression in QNAMEs */
    538         if (c >= 64)
    539             break;
    540 
    541         p += c;
    542         /* we rely on the bound check at the start
    543          * of the loop here */
    544     }
    545     /* malformed data */
    546     XLOG("malformed QNAME");
    547     return 0;
    548 }
    549 
    550 /* parse and skip a given QR stored in a packet.
    551  * returns 1 on success, and 0 on failure
    552  */
    553 static int
    554 _dnsPacket_checkQR( DnsPacket*  packet )
    555 {
    556     if (!_dnsPacket_checkQName(packet))
    557         return 0;
    558 
    559     /* TYPE must be one of the things we support */
    560     if (!_dnsPacket_checkBytes(packet, 2, DNS_TYPE_A) &&
    561         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_PTR) &&
    562         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_MX) &&
    563         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_AAAA) &&
    564         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_ALL))
    565     {
    566         XLOG("unsupported TYPE");
    567         return 0;
    568     }
    569     /* CLASS must be IN */
    570     if (!_dnsPacket_checkBytes(packet, 2, DNS_CLASS_IN)) {
    571         XLOG("unsupported CLASS");
    572         return 0;
    573     }
    574 
    575     return 1;
    576 }
    577 
    578 /* check the header of a DNS Query packet, return 1 if it is one
    579  * type of query we can cache, or 0 otherwise
    580  */
    581 static int
    582 _dnsPacket_checkQuery( DnsPacket*  packet )
    583 {
    584     const uint8_t*  p = packet->base;
     585     int             qdCount, anCount, nsCount, arCount;
    586 
    587     if (p + DNS_HEADER_SIZE > packet->end) {
    588         XLOG("query packet too small");
    589         return 0;
    590     }
    591 
    592     /* QR must be set to 0, opcode must be 0 and AA must be 0 */
    593     /* RA, Z, and RCODE must be 0 */
    594     if ((p[2] & 0xFC) != 0 || (p[3] & 0xCF) != 0) {
    595         XLOG("query packet flags unsupported");
    596         return 0;
    597     }
    598 
    599     /* Note that we ignore the TC, RD, CD, and AD bits here for the
    600      * following reasons:
    601      *
    602      * - there is no point for a query packet sent to a server
    603      *   to have the TC bit set, but the implementation might
    604      *   set the bit in the query buffer for its own needs
    605      *   between a _resolv_cache_lookup and a
    606      *   _resolv_cache_add. We should not freak out if this
    607      *   is the case.
    608      *
    609      * - we consider that the result from a query might depend on
    610      *   the RD, AD, and CD bits, so these bits
     611  *   should be used to differentiate cached results.
    612      *
    613      *   this implies that these bits are checked when hashing or
    614      *   comparing query packets, but not TC
    615      */
    616 
     617     /* ANCOUNT and NSCOUNT must be 0; ARCOUNT may be 0 or 1 (e.g. an EDNS0 OPT RR) */
     618     qdCount = (p[4] << 8) | p[5];
     619     anCount = (p[6] << 8) | p[7];
     620     nsCount = (p[8] << 8) | p[9];
     621     arCount = (p[10]<< 8) | p[11];
     622 
     623     if (anCount != 0 || nsCount != 0 || arCount > 1) {
    624         XLOG("query packet contains non-query records");
    625         return 0;
    626     }
    627 
    628     if (qdCount == 0) {
    629         XLOG("query packet doesn't contain query record");
    630         return 0;
    631     }
    632 
    633     /* Check QDCOUNT QRs */
    634     packet->cursor = p + DNS_HEADER_SIZE;
    635 
    636     for (;qdCount > 0; qdCount--)
    637         if (!_dnsPacket_checkQR(packet))
    638             return 0;
    639 
    640     return 1;
    641 }
    642 
    643 /** QUERY DEBUGGING
    644  **/
    645 #if DEBUG
    646 static char*
    647 _dnsPacket_bprintQName(DnsPacket*  packet, char*  bp, char*  bend)
    648 {
    649     const uint8_t*  p   = packet->cursor;
    650     const uint8_t*  end = packet->end;
    651     int             first = 1;
    652 
    653     for (;;) {
    654         int  c;
    655 
    656         if (p >= end)
    657             break;
    658 
    659         c = *p++;
    660 
    661         if (c == 0) {
    662             packet->cursor = p;
    663             return bp;
    664         }
    665 
    666         /* we don't expect label compression in QNAMEs */
    667         if (c >= 64)
    668             break;
    669 
    670         if (first)
    671             first = 0;
    672         else
    673             bp = _bprint_c(bp, bend, '.');
    674 
    675         bp = _bprint_b(bp, bend, (const char*)p, c);
    676 
    677         p += c;
    678         /* we rely on the bound check at the start
    679          * of the loop here */
    680     }
    681     /* malformed data */
    682     bp = _bprint_s(bp, bend, "<MALFORMED>");
    683     return bp;
    684 }
    685 
    686 static char*
    687 _dnsPacket_bprintQR(DnsPacket*  packet, char*  p, char*  end)
    688 {
    689 #define  QQ(x)   { DNS_TYPE_##x, #x }
    690     static const struct {
    691         const char*  typeBytes;
    692         const char*  typeString;
    693     } qTypes[] =
    694     {
    695         QQ(A), QQ(PTR), QQ(MX), QQ(AAAA), QQ(ALL),
    696         { NULL, NULL }
    697     };
    698     int          nn;
    699     const char*  typeString = NULL;
    700 
    701     /* dump QNAME */
    702     p = _dnsPacket_bprintQName(packet, p, end);
    703 
    704     /* dump TYPE */
    705     p = _bprint_s(p, end, " (");
    706 
    707     for (nn = 0; qTypes[nn].typeBytes != NULL; nn++) {
    708         if (_dnsPacket_checkBytes(packet, 2, qTypes[nn].typeBytes)) {
    709             typeString = qTypes[nn].typeString;
    710             break;
    711         }
    712     }
    713 
    714     if (typeString != NULL)
    715         p = _bprint_s(p, end, typeString);
    716     else {
    717         int  typeCode = _dnsPacket_readInt16(packet);
    718         p = _bprint(p, end, "UNKNOWN-%d", typeCode);
    719     }
    720 
    721     p = _bprint_c(p, end, ')');
    722 
    723     /* skip CLASS */
    724     _dnsPacket_skip(packet, 2);
    725     return p;
    726 }
    727 
    728 /* this function assumes the packet has already been checked */
    729 static char*
    730 _dnsPacket_bprintQuery( DnsPacket*  packet, char*  p, char*  end )
    731 {
    732     int   qdCount;
    733 
    734     if (packet->base[2] & 0x1) {
    735         p = _bprint_s(p, end, "RECURSIVE ");
    736     }
    737 
    738     _dnsPacket_skip(packet, 4);
    739     qdCount = _dnsPacket_readInt16(packet);
    740     _dnsPacket_skip(packet, 6);
    741 
    742     for ( ; qdCount > 0; qdCount-- ) {
    743         p = _dnsPacket_bprintQR(packet, p, end);
    744     }
    745     return p;
    746 }
    747 #endif
    748 
    749 
    750 /** QUERY HASHING SUPPORT
    751  **
    752  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKET HAS ALREADY
     753  ** BEEN SUCCESSFULLY CHECKED.
    754  **/
    755 
    756 /* use 32-bit FNV hash function */
    757 #define  FNV_MULT   16777619U
    758 #define  FNV_BASIS  2166136261U
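
         /* A minimal sketch of the hashing scheme used below (32-bit FNV-1:
          * multiply, then XOR).  Hashing the two bytes { 0x00, 0x01 }, i.e. the
          * DNS_TYPE_A constant, would go as follows:
          *
          *     unsigned h = FNV_BASIS;
          *     h = h*FNV_MULT ^ 0x00;
          *     h = h*FNV_MULT ^ 0x01;
          */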
    759 
    760 static unsigned
    761 _dnsPacket_hashBytes( DnsPacket*  packet, int  numBytes, unsigned  hash )
    762 {
    763     const uint8_t*  p   = packet->cursor;
    764     const uint8_t*  end = packet->end;
    765 
    766     while (numBytes > 0 && p < end) {
     767         hash = hash*FNV_MULT ^ *p++;
                 numBytes -= 1;
     768     }
    769     packet->cursor = p;
    770     return hash;
    771 }
    772 
    773 
    774 static unsigned
    775 _dnsPacket_hashQName( DnsPacket*  packet, unsigned  hash )
    776 {
    777     const uint8_t*  p   = packet->cursor;
    778     const uint8_t*  end = packet->end;
    779 
    780     for (;;) {
    781         int  c;
    782 
    783         if (p >= end) {  /* should not happen */
    784             XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
    785             break;
    786         }
    787 
    788         c = *p++;
    789 
    790         if (c == 0)
    791             break;
    792 
    793         if (c >= 64) {
    794             XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
    795             break;
    796         }
    797         if (p + c >= end) {
    798             XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
    799                     __FUNCTION__);
    800             break;
    801         }
    802         while (c > 0) {
    803             hash = hash*FNV_MULT ^ *p++;
    804             c   -= 1;
    805         }
    806     }
    807     packet->cursor = p;
    808     return hash;
    809 }
    810 
    811 static unsigned
    812 _dnsPacket_hashQR( DnsPacket*  packet, unsigned  hash )
    813 {
    814     hash = _dnsPacket_hashQName(packet, hash);
    815     hash = _dnsPacket_hashBytes(packet, 4, hash); /* TYPE and CLASS */
    816     return hash;
    817 }
    818 
    819 static unsigned
    820 _dnsPacket_hashRR( DnsPacket*  packet, unsigned  hash )
    821 {
    822     int rdlength;
    823     hash = _dnsPacket_hashQR(packet, hash);
    824     hash = _dnsPacket_hashBytes(packet, 4, hash); /* TTL */
    825     rdlength = _dnsPacket_readInt16(packet);
    826     hash = _dnsPacket_hashBytes(packet, rdlength, hash); /* RDATA */
    827     return hash;
    828 }
    829 
    830 static unsigned
    831 _dnsPacket_hashQuery( DnsPacket*  packet )
    832 {
    833     unsigned  hash = FNV_BASIS;
    834     int       count, arcount;
    835     _dnsPacket_rewind(packet);
    836 
    837     /* ignore the ID */
    838     _dnsPacket_skip(packet, 2);
    839 
    840     /* we ignore the TC bit for reasons explained in
    841      * _dnsPacket_checkQuery().
    842      *
    843      * however we hash the RD bit to differentiate
    844      * between answers for recursive and non-recursive
    845      * queries.
    846      */
    847     hash = hash*FNV_MULT ^ (packet->base[2] & 1);
    848 
    849     /* mark the first header byte as processed */
    850     _dnsPacket_skip(packet, 1);
    851 
    852     /* process the second header byte */
    853     hash = _dnsPacket_hashBytes(packet, 1, hash);
    854 
    855     /* read QDCOUNT */
    856     count = _dnsPacket_readInt16(packet);
    857 
    858     /* assume: ANcount and NScount are 0 */
    859     _dnsPacket_skip(packet, 4);
    860 
    861     /* read ARCOUNT */
    862     arcount = _dnsPacket_readInt16(packet);
    863 
    864     /* hash QDCOUNT QRs */
    865     for ( ; count > 0; count-- )
    866         hash = _dnsPacket_hashQR(packet, hash);
    867 
    868     /* hash ARCOUNT RRs */
    869     for ( ; arcount > 0; arcount-- )
    870         hash = _dnsPacket_hashRR(packet, hash);
    871 
    872     return hash;
    873 }
    874 
    875 
    876 /** QUERY COMPARISON
    877  **
    878  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKETS HAVE ALREADY
     879  ** BEEN SUCCESSFULLY CHECKED.
    880  **/
    881 
    882 static int
    883 _dnsPacket_isEqualDomainName( DnsPacket*  pack1, DnsPacket*  pack2 )
    884 {
    885     const uint8_t*  p1   = pack1->cursor;
    886     const uint8_t*  end1 = pack1->end;
    887     const uint8_t*  p2   = pack2->cursor;
    888     const uint8_t*  end2 = pack2->end;
    889 
    890     for (;;) {
    891         int  c1, c2;
    892 
    893         if (p1 >= end1 || p2 >= end2) {
    894             XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
    895             break;
    896         }
    897         c1 = *p1++;
    898         c2 = *p2++;
    899         if (c1 != c2)
    900             break;
    901 
    902         if (c1 == 0) {
    903             pack1->cursor = p1;
    904             pack2->cursor = p2;
    905             return 1;
    906         }
    907         if (c1 >= 64) {
    908             XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
    909             break;
    910         }
    911         if ((p1+c1 > end1) || (p2+c1 > end2)) {
    912             XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
    913                     __FUNCTION__);
    914             break;
    915         }
    916         if (memcmp(p1, p2, c1) != 0)
    917             break;
    918         p1 += c1;
    919         p2 += c1;
    920         /* we rely on the bound checks at the start of the loop */
    921     }
    922     /* not the same, or one is malformed */
    923     XLOG("different DN");
    924     return 0;
    925 }
    926 
    927 static int
    928 _dnsPacket_isEqualBytes( DnsPacket*  pack1, DnsPacket*  pack2, int  numBytes )
    929 {
    930     const uint8_t*  p1 = pack1->cursor;
    931     const uint8_t*  p2 = pack2->cursor;
    932 
    933     if ( p1 + numBytes > pack1->end || p2 + numBytes > pack2->end )
    934         return 0;
    935 
    936     if ( memcmp(p1, p2, numBytes) != 0 )
    937         return 0;
    938 
    939     pack1->cursor += numBytes;
    940     pack2->cursor += numBytes;
    941     return 1;
    942 }
    943 
    944 static int
    945 _dnsPacket_isEqualQR( DnsPacket*  pack1, DnsPacket*  pack2 )
    946 {
    947     /* compare domain name encoding + TYPE + CLASS */
    948     if ( !_dnsPacket_isEqualDomainName(pack1, pack2) ||
    949          !_dnsPacket_isEqualBytes(pack1, pack2, 2+2) )
    950         return 0;
    951 
    952     return 1;
    953 }
    954 
    955 static int
    956 _dnsPacket_isEqualRR( DnsPacket*  pack1, DnsPacket*  pack2 )
    957 {
    958     int rdlength1, rdlength2;
    959     /* compare query + TTL */
    960     if ( !_dnsPacket_isEqualQR(pack1, pack2) ||
    961          !_dnsPacket_isEqualBytes(pack1, pack2, 4) )
    962         return 0;
    963 
    964     /* compare RDATA */
    965     rdlength1 = _dnsPacket_readInt16(pack1);
    966     rdlength2 = _dnsPacket_readInt16(pack2);
    967     if ( rdlength1 != rdlength2 ||
    968          !_dnsPacket_isEqualBytes(pack1, pack2, rdlength1) )
    969         return 0;
    970 
    971     return 1;
    972 }
    973 
    974 static int
    975 _dnsPacket_isEqualQuery( DnsPacket*  pack1, DnsPacket*  pack2 )
    976 {
    977     int  count1, count2, arcount1, arcount2;
    978 
    979     /* compare the headers, ignore most fields */
    980     _dnsPacket_rewind(pack1);
    981     _dnsPacket_rewind(pack2);
    982 
    983     /* compare RD, ignore TC, see comment in _dnsPacket_checkQuery */
    984     if ((pack1->base[2] & 1) != (pack2->base[2] & 1)) {
    985         XLOG("different RD");
    986         return 0;
    987     }
    988 
    989     if (pack1->base[3] != pack2->base[3]) {
    990         XLOG("different CD or AD");
    991         return 0;
    992     }
    993 
    994     /* mark ID and header bytes as compared */
    995     _dnsPacket_skip(pack1, 4);
    996     _dnsPacket_skip(pack2, 4);
    997 
    998     /* compare QDCOUNT */
    999     count1 = _dnsPacket_readInt16(pack1);
   1000     count2 = _dnsPacket_readInt16(pack2);
   1001     if (count1 != count2 || count1 < 0) {
   1002         XLOG("different QDCOUNT");
   1003         return 0;
   1004     }
   1005 
   1006     /* assume: ANcount and NScount are 0 */
   1007     _dnsPacket_skip(pack1, 4);
   1008     _dnsPacket_skip(pack2, 4);
   1009 
   1010     /* compare ARCOUNT */
   1011     arcount1 = _dnsPacket_readInt16(pack1);
   1012     arcount2 = _dnsPacket_readInt16(pack2);
   1013     if (arcount1 != arcount2 || arcount1 < 0) {
   1014         XLOG("different ARCOUNT");
   1015         return 0;
   1016     }
   1017 
   1018     /* compare the QDCOUNT QRs */
   1019     for ( ; count1 > 0; count1-- ) {
   1020         if (!_dnsPacket_isEqualQR(pack1, pack2)) {
   1021             XLOG("different QR");
   1022             return 0;
   1023         }
   1024     }
   1025 
   1026     /* compare the ARCOUNT RRs */
   1027     for ( ; arcount1 > 0; arcount1-- ) {
   1028         if (!_dnsPacket_isEqualRR(pack1, pack2)) {
   1029             XLOG("different additional RR");
   1030             return 0;
   1031         }
   1032     }
   1033     return 1;
   1034 }
   1035 
   1036 /****************************************************************************/
   1037 /****************************************************************************/
   1038 /*****                                                                  *****/
   1039 /*****                                                                  *****/
   1040 /*****                                                                  *****/
   1041 /****************************************************************************/
   1042 /****************************************************************************/
   1043 
   1044 /* cache entry. for simplicity, 'hash' and 'hlink' are inlined in this
   1045  * structure though they are conceptually part of the hash table.
   1046  *
   1047  * similarly, mru_next and mru_prev are part of the global MRU list
   1048  */
   1049 typedef struct Entry {
   1050     unsigned int     hash;   /* hash value */
   1051     struct Entry*    hlink;  /* next in collision chain */
   1052     struct Entry*    mru_prev;
   1053     struct Entry*    mru_next;
   1054 
   1055     const uint8_t*   query;
   1056     int              querylen;
   1057     const uint8_t*   answer;
   1058     int              answerlen;
   1059     time_t           expires;   /* time_t when the entry isn't valid any more */
   1060     int              id;        /* for debugging purpose */
   1061 } Entry;
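
         /* A sketch of how entry_alloc() below lays out a cache node: the Entry
          * header and the copied query and answer packets live in one calloc'd
          * block, which is why entry_free() is a single free():
          *
          *     +--------------+--------------------+----------------------+
          *     | Entry header | query (querylen)   | answer (answerlen)   |
          *     +--------------+--------------------+----------------------+
          *     ^ e            ^ e->query == (e+1)  ^ e->query + e->querylen
          */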
   1062 
   1063 /**
   1064  * Find the TTL for a negative DNS result.  This is defined as the minimum
    1065  * of the SOA record's TTL and its MINIMUM-TTL field (RFC 2308).
   1066  *
   1067  * Return 0 if not found.
   1068  */
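
         /* For reference, the SOA RDATA walked through below is laid out as
          * follows (RFC 1035); the two leading names are skipped with
          * dn_skipname() and only the trailing MINIMUM field is read:
          *
          *     MNAME   : variable : primary name server
          *     RNAME   : variable : responsible mailbox
          *     SERIAL  : 32
          *     REFRESH : 32
          *     RETRY   : 32
          *     EXPIRE  : 32
          *     MINIMUM : 32       : negative-caching TTL (RFC 2308)
          */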
   1069 static u_long
   1070 answer_getNegativeTTL(ns_msg handle) {
   1071     int n, nscount;
   1072     u_long result = 0;
   1073     ns_rr rr;
   1074 
   1075     nscount = ns_msg_count(handle, ns_s_ns);
   1076     for (n = 0; n < nscount; n++) {
   1077         if ((ns_parserr(&handle, ns_s_ns, n, &rr) == 0) && (ns_rr_type(rr) == ns_t_soa)) {
   1078             const u_char *rdata = ns_rr_rdata(rr); // find the data
   1079             const u_char *edata = rdata + ns_rr_rdlen(rr); // add the len to find the end
   1080             int len;
   1081             u_long ttl, rec_result = ns_rr_ttl(rr);
   1082 
   1083             // find the MINIMUM-TTL field from the blob of binary data for this record
   1084             // skip the server name
   1085             len = dn_skipname(rdata, edata);
   1086             if (len == -1) continue; // error skipping
   1087             rdata += len;
   1088 
   1089             // skip the admin name
   1090             len = dn_skipname(rdata, edata);
   1091             if (len == -1) continue; // error skipping
   1092             rdata += len;
   1093 
   1094             if (edata - rdata != 5*NS_INT32SZ) continue;
   1095             // skip: serial number + refresh interval + retry interval + expiry
   1096             rdata += NS_INT32SZ * 4;
   1097             // finally read the MINIMUM TTL
   1098             ttl = ns_get32(rdata);
   1099             if (ttl < rec_result) {
   1100                 rec_result = ttl;
   1101             }
   1102             // Now that the record is read successfully, apply the new min TTL
    1103             if (result == 0 || rec_result < result) {
   1104                 result = rec_result;
   1105             }
   1106         }
   1107     }
   1108     return result;
   1109 }
   1110 
   1111 /**
    1112  * Parse the answer records and find the smallest TTL
    1113  * among them.  This may come from the answer records
    1114  * themselves or, for a negative result, from the SOA
    1115  * record.
   1116  *
   1117  * The returned TTL is the number of seconds to
   1118  * keep the answer in the cache.
   1119  *
   1120  * In case of parse error zero (0) is returned which
   1121  * indicates that the answer shall not be cached.
   1122  */
   1123 static u_long
   1124 answer_getTTL(const void* answer, int answerlen)
   1125 {
   1126     ns_msg handle;
   1127     int ancount, n;
   1128     u_long result, ttl;
   1129     ns_rr rr;
   1130 
   1131     result = 0;
   1132     if (ns_initparse(answer, answerlen, &handle) >= 0) {
   1133         // get number of answer records
   1134         ancount = ns_msg_count(handle, ns_s_an);
   1135 
   1136         if (ancount == 0) {
   1137             // a response with no answers?  Cache this negative result.
   1138             result = answer_getNegativeTTL(handle);
   1139         } else {
   1140             for (n = 0; n < ancount; n++) {
   1141                 if (ns_parserr(&handle, ns_s_an, n, &rr) == 0) {
   1142                     ttl = ns_rr_ttl(rr);
   1143                     if (n == 0 || ttl < result) {
   1144                         result = ttl;
   1145                     }
   1146                 } else {
   1147                     XLOG("ns_parserr failed ancount no = %d. errno = %s\n", n, strerror(errno));
   1148                 }
   1149             }
   1150         }
   1151     } else {
    1152         XLOG("ns_initparse failed: %s\n", strerror(errno));
   1153     }
   1154 
   1155     XLOG("TTL = %lu\n", result);
   1156 
   1157     return result;
   1158 }
   1159 
   1160 static void
   1161 entry_free( Entry*  e )
   1162 {
   1163     /* everything is allocated in a single memory block */
   1164     if (e) {
   1165         free(e);
   1166     }
   1167 }
   1168 
   1169 static __inline__ void
   1170 entry_mru_remove( Entry*  e )
   1171 {
   1172     e->mru_prev->mru_next = e->mru_next;
   1173     e->mru_next->mru_prev = e->mru_prev;
   1174 }
   1175 
   1176 static __inline__ void
   1177 entry_mru_add( Entry*  e, Entry*  list )
   1178 {
   1179     Entry*  first = list->mru_next;
   1180 
   1181     e->mru_next = first;
   1182     e->mru_prev = list;
   1183 
   1184     list->mru_next  = e;
   1185     first->mru_prev = e;
   1186 }
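
         /* For orientation: the cache's mru_list (defined further down) acts as a
          * circular sentinel node.  entry_mru_add() inserts right after the
          * sentinel, so mru_next is the most recently used entry and mru_prev the
          * least recently used one, i.e. the entry _cache_remove_oldest() evicts:
          *
          *     [sentinel] <-> [most recent] <-> ... <-> [oldest] <-> [sentinel]
          */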
   1187 
   1188 /* compute the hash of a given entry, this is a hash of most
   1189  * data in the query (key) */
   1190 static unsigned
   1191 entry_hash( const Entry*  e )
   1192 {
   1193     DnsPacket  pack[1];
   1194 
   1195     _dnsPacket_init(pack, e->query, e->querylen);
   1196     return _dnsPacket_hashQuery(pack);
   1197 }
   1198 
   1199 /* initialize an Entry as a search key, this also checks the input query packet
   1200  * returns 1 on success, or 0 in case of unsupported/malformed data */
   1201 static int
   1202 entry_init_key( Entry*  e, const void*  query, int  querylen )
   1203 {
   1204     DnsPacket  pack[1];
   1205 
   1206     memset(e, 0, sizeof(*e));
   1207 
   1208     e->query    = query;
   1209     e->querylen = querylen;
   1210     e->hash     = entry_hash(e);
   1211 
   1212     _dnsPacket_init(pack, query, querylen);
   1213 
   1214     return _dnsPacket_checkQuery(pack);
   1215 }
   1216 
   1217 /* allocate a new entry as a cache node */
   1218 static Entry*
   1219 entry_alloc( const Entry*  init, const void*  answer, int  answerlen )
   1220 {
   1221     Entry*  e;
   1222     int     size;
   1223 
   1224     size = sizeof(*e) + init->querylen + answerlen;
   1225     e    = calloc(size, 1);
   1226     if (e == NULL)
   1227         return e;
   1228 
   1229     e->hash     = init->hash;
   1230     e->query    = (const uint8_t*)(e+1);
   1231     e->querylen = init->querylen;
   1232 
   1233     memcpy( (char*)e->query, init->query, e->querylen );
   1234 
   1235     e->answer    = e->query + e->querylen;
   1236     e->answerlen = answerlen;
   1237 
   1238     memcpy( (char*)e->answer, answer, e->answerlen );
   1239 
   1240     return e;
   1241 }
   1242 
   1243 static int
   1244 entry_equals( const Entry*  e1, const Entry*  e2 )
   1245 {
   1246     DnsPacket  pack1[1], pack2[1];
   1247 
   1248     if (e1->querylen != e2->querylen) {
   1249         return 0;
   1250     }
   1251     _dnsPacket_init(pack1, e1->query, e1->querylen);
   1252     _dnsPacket_init(pack2, e2->query, e2->querylen);
   1253 
   1254     return _dnsPacket_isEqualQuery(pack1, pack2);
   1255 }
   1256 
   1257 /****************************************************************************/
   1258 /****************************************************************************/
   1259 /*****                                                                  *****/
   1260 /*****                                                                  *****/
   1261 /*****                                                                  *****/
   1262 /****************************************************************************/
   1263 /****************************************************************************/
   1264 
    1265 /* We use a simple hash table with external collision lists.
    1266  * For simplicity, the hash-table fields 'hash' and 'hlink' are
    1267  * inlined in the Entry structure.
    1268  */
   1269 
    1270 /* Maximum time, in seconds, for a thread to wait for a pending request */
    1271 #define PENDING_REQUEST_TIMEOUT 20
   1272 
   1273 typedef struct pending_req_info {
   1274     unsigned int                hash;
   1275     pthread_cond_t              cond;
   1276     struct pending_req_info*    next;
   1277 } PendingReqInfo;
   1278 
   1279 typedef struct resolv_cache {
   1280     int              max_entries;
   1281     int              num_entries;
   1282     Entry            mru_list;
   1283     int              last_id;
   1284     Entry*           entries;
   1285     PendingReqInfo   pending_requests;
   1286 } Cache;
   1287 
   1288 struct resolv_cache_info {
   1289     unsigned                    netid;
   1290     Cache*                      cache;
   1291     struct resolv_cache_info*   next;
   1292     int                         nscount;
   1293     char*                       nameservers[MAXNS];
   1294     struct addrinfo*            nsaddrinfo[MAXNS];
   1295     int                         revision_id; // # times the nameservers have been replaced
   1296     struct __res_params         params;
   1297     struct __res_stats          nsstats[MAXNS];
   1298     char                        defdname[MAXDNSRCHPATH];
   1299     int                         dnsrch_offset[MAXDNSRCH+1];  // offsets into defdname
   1300 };
   1301 
   1302 #define  HTABLE_VALID(x)  ((x) != NULL && (x) != HTABLE_DELETED)
   1303 
   1304 static pthread_once_t        _res_cache_once = PTHREAD_ONCE_INIT;
   1305 static void _res_cache_init(void);
   1306 
    1307 // lock protecting everything in the resolv_cache_info structs (next ptr, etc)
   1308 static pthread_mutex_t _res_cache_list_lock;
   1309 
   1310 /* gets cache associated with a network, or NULL if none exists */
   1311 static struct resolv_cache* _find_named_cache_locked(unsigned netid);
   1312 
   1313 static void
   1314 _cache_flush_pending_requests_locked( struct resolv_cache* cache )
   1315 {
   1316     struct pending_req_info *ri, *tmp;
   1317     if (cache) {
   1318         ri = cache->pending_requests.next;
   1319 
   1320         while (ri) {
   1321             tmp = ri;
   1322             ri = ri->next;
   1323             pthread_cond_broadcast(&tmp->cond);
   1324 
   1325             pthread_cond_destroy(&tmp->cond);
   1326             free(tmp);
   1327         }
   1328 
   1329         cache->pending_requests.next = NULL;
   1330     }
   1331 }
   1332 
    1333 /* Return 0 if no pending request matches the key; in that case a new
    1334  * pending request is registered for it.  If a matching request is found,
    1335  * the calling thread waits until that request completes (or the wait times
          * out), then updates *cache and returns 1. */
   1336 static int
   1337 _cache_check_pending_request_locked( struct resolv_cache** cache, Entry* key, unsigned netid )
   1338 {
   1339     struct pending_req_info *ri, *prev;
   1340     int exist = 0;
   1341 
   1342     if (*cache && key) {
   1343         ri = (*cache)->pending_requests.next;
   1344         prev = &(*cache)->pending_requests;
   1345         while (ri) {
   1346             if (ri->hash == key->hash) {
   1347                 exist = 1;
   1348                 break;
   1349             }
   1350             prev = ri;
   1351             ri = ri->next;
   1352         }
   1353 
   1354         if (!exist) {
   1355             ri = calloc(1, sizeof(struct pending_req_info));
   1356             if (ri) {
   1357                 ri->hash = key->hash;
   1358                 pthread_cond_init(&ri->cond, NULL);
   1359                 prev->next = ri;
   1360             }
   1361         } else {
   1362             struct timespec ts = {0,0};
   1363             XLOG("Waiting for previous request");
   1364             ts.tv_sec = _time_now() + PENDING_REQUEST_TIMEOUT;
   1365             pthread_cond_timedwait(&ri->cond, &_res_cache_list_lock, &ts);
   1366             /* Must update *cache as it could have been deleted. */
   1367             *cache = _find_named_cache_locked(netid);
   1368         }
   1369     }
   1370 
   1371     return exist;
   1372 }
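
         /* Illustrative only: how the pending-request machinery is expected to be
          * used when two threads miss the cache on the same query (the thread
          * labels are hypothetical, and the add/notify step happens in code that
          * lives further down in this file):
          *
          *     thread A: lookup misses, no pending entry matches
          *               -> registers a pending entry, returns 0, queries the server
          *     thread B: lookup misses, finds A's pending entry
          *               -> waits on its condition variable, for at most
          *                  PENDING_REQUEST_TIMEOUT seconds
          *     thread A: stores the received answer in the cache (or reports failure
          *               via _resolv_cache_query_failed()), which calls
          *               _cache_notify_waiting_tid_locked() and wakes B up
          *     thread B: re-checks the cache, which now holds the answer (or retries
          *               the query itself on failure/timeout)
          */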
   1373 
    1374 /* notify any thread waiting on a request matching the key that an answer
    1375  * has been added to the cache (or that the request has failed) */
   1376 static void
   1377 _cache_notify_waiting_tid_locked( struct resolv_cache* cache, Entry* key )
   1378 {
   1379     struct pending_req_info *ri, *prev;
   1380 
   1381     if (cache && key) {
   1382         ri = cache->pending_requests.next;
   1383         prev = &cache->pending_requests;
   1384         while (ri) {
   1385             if (ri->hash == key->hash) {
   1386                 pthread_cond_broadcast(&ri->cond);
   1387                 break;
   1388             }
   1389             prev = ri;
   1390             ri = ri->next;
   1391         }
   1392 
   1393         // remove item from list and destroy
   1394         if (ri) {
   1395             prev->next = ri->next;
   1396             pthread_cond_destroy(&ri->cond);
   1397             free(ri);
   1398         }
   1399     }
   1400 }
   1401 
   1402 /* notify the cache that the query failed */
   1403 void
   1404 _resolv_cache_query_failed( unsigned    netid,
   1405                    const void* query,
   1406                    int         querylen)
   1407 {
   1408     Entry    key[1];
   1409     Cache*   cache;
   1410 
   1411     if (!entry_init_key(key, query, querylen))
   1412         return;
   1413 
   1414     pthread_mutex_lock(&_res_cache_list_lock);
   1415 
   1416     cache = _find_named_cache_locked(netid);
   1417 
   1418     if (cache) {
   1419         _cache_notify_waiting_tid_locked(cache, key);
   1420     }
   1421 
   1422     pthread_mutex_unlock(&_res_cache_list_lock);
   1423 }
   1424 
   1425 static struct resolv_cache_info* _find_cache_info_locked(unsigned netid);
   1426 
   1427 static void
   1428 _cache_flush_locked( Cache*  cache )
   1429 {
   1430     int     nn;
   1431 
   1432     for (nn = 0; nn < cache->max_entries; nn++)
   1433     {
   1434         Entry**  pnode = (Entry**) &cache->entries[nn];
   1435 
   1436         while (*pnode != NULL) {
   1437             Entry*  node = *pnode;
   1438             *pnode = node->hlink;
   1439             entry_free(node);
   1440         }
   1441     }
   1442 
   1443     // flush pending request
   1444     _cache_flush_pending_requests_locked(cache);
   1445 
   1446     cache->mru_list.mru_next = cache->mru_list.mru_prev = &cache->mru_list;
   1447     cache->num_entries       = 0;
   1448     cache->last_id           = 0;
   1449 
   1450     XLOG("*************************\n"
   1451          "*** DNS CACHE FLUSHED ***\n"
   1452          "*************************");
   1453 }
   1454 
   1455 static int
   1456 _res_cache_get_max_entries( void )
   1457 {
   1458     int cache_size = CONFIG_MAX_ENTRIES;
   1459 
   1460     const char* cache_mode = getenv("ANDROID_DNS_MODE");
   1461     if (cache_mode == NULL || strcmp(cache_mode, "local") != 0) {
   1462         // Don't use the cache in local mode. This is used by the proxy itself.
   1463         cache_size = 0;
   1464     }
   1465 
   1466     XLOG("cache size: %d", cache_size);
   1467     return cache_size;
   1468 }
   1469 
   1470 static struct resolv_cache*
   1471 _resolv_cache_create( void )
   1472 {
   1473     struct resolv_cache*  cache;
   1474 
   1475     cache = calloc(sizeof(*cache), 1);
   1476     if (cache) {
   1477         cache->max_entries = _res_cache_get_max_entries();
   1478         cache->entries = calloc(sizeof(*cache->entries), cache->max_entries);
   1479         if (cache->entries) {
   1480             cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
   1481             XLOG("%s: cache created\n", __FUNCTION__);
   1482         } else {
   1483             free(cache);
   1484             cache = NULL;
   1485         }
   1486     }
   1487     return cache;
   1488 }
   1489 
   1490 
   1491 #if DEBUG
   1492 static void
   1493 _dump_query( const uint8_t*  query, int  querylen )
   1494 {
   1495     char       temp[256], *p=temp, *end=p+sizeof(temp);
   1496     DnsPacket  pack[1];
   1497 
   1498     _dnsPacket_init(pack, query, querylen);
   1499     p = _dnsPacket_bprintQuery(pack, p, end);
   1500     XLOG("QUERY: %s", temp);
   1501 }
   1502 
   1503 static void
   1504 _cache_dump_mru( Cache*  cache )
   1505 {
   1506     char    temp[512], *p=temp, *end=p+sizeof(temp);
   1507     Entry*  e;
   1508 
   1509     p = _bprint(temp, end, "MRU LIST (%2d): ", cache->num_entries);
   1510     for (e = cache->mru_list.mru_next; e != &cache->mru_list; e = e->mru_next)
   1511         p = _bprint(p, end, " %d", e->id);
   1512 
   1513     XLOG("%s", temp);
   1514 }
   1515 
   1516 static void
   1517 _dump_answer(const void* answer, int answerlen)
   1518 {
   1519     res_state statep;
   1520     FILE* fp;
   1521     char* buf;
   1522     int fileLen;
   1523 
   1524     fp = fopen("/data/reslog.txt", "w+e");
   1525     if (fp != NULL) {
   1526         statep = __res_get_state();
   1527 
   1528         res_pquery(statep, answer, answerlen, fp);
   1529 
   1530         //Get file length
   1531         fseek(fp, 0, SEEK_END);
   1532         fileLen=ftell(fp);
   1533         fseek(fp, 0, SEEK_SET);
   1534         buf = (char *)malloc(fileLen+1);
   1535         if (buf != NULL) {
   1536             //Read file contents into buffer
    1537             fread(buf, fileLen, 1, fp);
                     buf[fileLen] = '\0';
    1538             XLOG("%s\n", buf);
   1539             free(buf);
   1540         }
   1541         fclose(fp);
   1542         remove("/data/reslog.txt");
   1543     }
   1544     else {
   1545         errno = 0; // else debug is introducing error signals
   1546         XLOG("%s: can't open file\n", __FUNCTION__);
   1547     }
   1548 }
   1549 #endif
   1550 
   1551 #if DEBUG
   1552 #  define  XLOG_QUERY(q,len)   _dump_query((q), (len))
   1553 #  define  XLOG_ANSWER(a, len) _dump_answer((a), (len))
   1554 #else
   1555 #  define  XLOG_QUERY(q,len)   ((void)0)
   1556 #  define  XLOG_ANSWER(a,len)  ((void)0)
   1557 #endif
   1558 
   1559 /* This function tries to find a key within the hash table
   1560  * In case of success, it will return a *pointer* to the hashed key.
    1561  * In case of failure, it will return a *pointer* to a NULL slot
   1562  *
   1563  * So, the caller must check '*result' to check for success/failure.
   1564  *
   1565  * The main idea is that the result can later be used directly in
    1566  * calls to _cache_add_p or _cache_remove_p as the 'lookup'
   1567  * parameter. This makes the code simpler and avoids re-searching
   1568  * for the key position in the htable.
   1569  *
   1570  * The result of a lookup_p is only valid until you alter the hash
   1571  * table.
   1572  */
   1573 static Entry**
   1574 _cache_lookup_p( Cache*   cache,
   1575                  Entry*   key )
   1576 {
   1577     int      index = key->hash % cache->max_entries;
   1578     Entry**  pnode = (Entry**) &cache->entries[ index ];
   1579 
    while (*pnode != NULL) {
        Entry*  node = *pnode;

        /* no NULL check needed here: the loop condition guarantees node != NULL */
        if (node->hash == key->hash && entry_equals(node, key))
            break;

        pnode = &node->hlink;
    }
   1591     return pnode;
   1592 }
   1593 
/* Add a new entry to the hash table. 'lookup' must be the
 * result of an immediately preceding, failed _cache_lookup_p() call
 * (i.e. with *lookup == NULL), and 'e' is the pointer to the
 * newly created entry.
 */
   1599 static void
   1600 _cache_add_p( Cache*   cache,
   1601               Entry**  lookup,
   1602               Entry*   e )
   1603 {
   1604     *lookup = e;
   1605     e->id = ++cache->last_id;
   1606     entry_mru_add(e, &cache->mru_list);
   1607     cache->num_entries += 1;
   1608 
   1609     XLOG("%s: entry %d added (count=%d)", __FUNCTION__,
   1610          e->id, cache->num_entries);
   1611 }
   1612 
/* Remove an existing entry from the hash table.
 * 'lookup' must be the result of an immediately preceding,
 * successful _cache_lookup_p() call.
 */
   1617 static void
   1618 _cache_remove_p( Cache*   cache,
   1619                  Entry**  lookup )
   1620 {
   1621     Entry*  e  = *lookup;
   1622 
   1623     XLOG("%s: entry %d removed (count=%d)", __FUNCTION__,
   1624          e->id, cache->num_entries-1);
   1625 
   1626     entry_mru_remove(e);
   1627     *lookup = e->hlink;
   1628     entry_free(e);
   1629     cache->num_entries -= 1;
   1630 }
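
/* Illustrative usage sketch (not part of the original implementation): how the
 * 'lookup' slot pointer returned by _cache_lookup_p() is meant to be reused by
 * _cache_add_p() and _cache_remove_p(). It is guarded out of the build and only
 * documents the calling contract described in the comments above.
 */
#if 0
static void example_lookup_contract( Cache*  cache, Entry*  key, Entry*  fresh )
{
    Entry**  lookup = _cache_lookup_p(cache, key);

    if (*lookup != NULL) {
        /* hit: the slot points at the matching entry, which can be removed
         * through the same slot pointer without re-searching the table. */
        _cache_remove_p(cache, lookup);
    } else {
        /* miss: the slot points at NULL and can receive a new entry. */
        _cache_add_p(cache, lookup, fresh);
    }
    /* 'lookup' must not be reused from here on: the add/remove above
     * modified the hash table. */
}
#endif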
   1631 
   1632 /* Remove the oldest entry from the hash table.
   1633  */
   1634 static void
   1635 _cache_remove_oldest( Cache*  cache )
   1636 {
   1637     Entry*   oldest = cache->mru_list.mru_prev;
   1638     Entry**  lookup = _cache_lookup_p(cache, oldest);
   1639 
   1640     if (*lookup == NULL) { /* should not happen */
   1641         XLOG("%s: OLDEST NOT IN HTABLE ?", __FUNCTION__);
   1642         return;
   1643     }
   1644     if (DEBUG) {
   1645         XLOG("Cache full - removing oldest");
   1646         XLOG_QUERY(oldest->query, oldest->querylen);
   1647     }
   1648     _cache_remove_p(cache, lookup);
   1649 }
   1650 
   1651 /* Remove all expired entries from the hash table.
   1652  */
   1653 static void _cache_remove_expired(Cache* cache) {
   1654     Entry* e;
   1655     time_t now = _time_now();
   1656 
   1657     for (e = cache->mru_list.mru_next; e != &cache->mru_list;) {
        // The entry has expired; remove it.
   1659         if (now >= e->expires) {
   1660             Entry** lookup = _cache_lookup_p(cache, e);
   1661             if (*lookup == NULL) { /* should not happen */
   1662                 XLOG("%s: ENTRY NOT IN HTABLE ?", __FUNCTION__);
   1663                 return;
   1664             }
   1665             e = e->mru_next;
   1666             _cache_remove_p(cache, lookup);
   1667         } else {
   1668             e = e->mru_next;
   1669         }
   1670     }
   1671 }
   1672 
   1673 ResolvCacheStatus
   1674 _resolv_cache_lookup( unsigned              netid,
   1675                       const void*           query,
   1676                       int                   querylen,
   1677                       void*                 answer,
   1678                       int                   answersize,
   1679                       int                  *answerlen )
   1680 {
   1681     Entry      key[1];
   1682     Entry**    lookup;
   1683     Entry*     e;
   1684     time_t     now;
   1685     Cache*     cache;
   1686 
   1687     ResolvCacheStatus  result = RESOLV_CACHE_NOTFOUND;
   1688 
   1689     XLOG("%s: lookup", __FUNCTION__);
   1690     XLOG_QUERY(query, querylen);
   1691 
   1692     /* we don't cache malformed queries */
   1693     if (!entry_init_key(key, query, querylen)) {
   1694         XLOG("%s: unsupported query", __FUNCTION__);
   1695         return RESOLV_CACHE_UNSUPPORTED;
   1696     }
   1697     /* lookup cache */
   1698     pthread_once(&_res_cache_once, _res_cache_init);
   1699     pthread_mutex_lock(&_res_cache_list_lock);
   1700 
   1701     cache = _find_named_cache_locked(netid);
   1702     if (cache == NULL) {
   1703         result = RESOLV_CACHE_UNSUPPORTED;
   1704         goto Exit;
   1705     }
   1706 
    /* See the description of _cache_lookup_p() to understand this;
     * the function always returns a non-NULL pointer.
     */
   1710     lookup = _cache_lookup_p(cache, key);
   1711     e      = *lookup;
   1712 
   1713     if (e == NULL) {
   1714         XLOG( "NOT IN CACHE");
        // The calling thread will wait if an outstanding request
        // that matches this query is found.
   1717         if (!_cache_check_pending_request_locked(&cache, key, netid) || cache == NULL) {
   1718             goto Exit;
   1719         } else {
   1720             lookup = _cache_lookup_p(cache, key);
   1721             e = *lookup;
   1722             if (e == NULL) {
   1723                 goto Exit;
   1724             }
   1725         }
   1726     }
   1727 
   1728     now = _time_now();
   1729 
   1730     /* remove stale entries here */
   1731     if (now >= e->expires) {
   1732         XLOG( " NOT IN CACHE (STALE ENTRY %p DISCARDED)", *lookup );
   1733         XLOG_QUERY(e->query, e->querylen);
   1734         _cache_remove_p(cache, lookup);
   1735         goto Exit;
   1736     }
   1737 
   1738     *answerlen = e->answerlen;
   1739     if (e->answerlen > answersize) {
   1740         /* NOTE: we return UNSUPPORTED if the answer buffer is too short */
   1741         result = RESOLV_CACHE_UNSUPPORTED;
   1742         XLOG(" ANSWER TOO LONG");
   1743         goto Exit;
   1744     }
   1745 
   1746     memcpy( answer, e->answer, e->answerlen );
   1747 
   1748     /* bump up this entry to the top of the MRU list */
   1749     if (e != cache->mru_list.mru_next) {
   1750         entry_mru_remove( e );
   1751         entry_mru_add( e, &cache->mru_list );
   1752     }
   1753 
   1754     XLOG( "FOUND IN CACHE entry=%p", e );
   1755     result = RESOLV_CACHE_FOUND;
   1756 
   1757 Exit:
   1758     pthread_mutex_unlock(&_res_cache_list_lock);
   1759     return result;
   1760 }
   1761 
   1762 
   1763 void
   1764 _resolv_cache_add( unsigned              netid,
   1765                    const void*           query,
   1766                    int                   querylen,
   1767                    const void*           answer,
   1768                    int                   answerlen )
   1769 {
   1770     Entry    key[1];
   1771     Entry*   e;
   1772     Entry**  lookup;
   1773     u_long   ttl;
   1774     Cache*   cache = NULL;
   1775 
   1776     /* don't assume that the query has already been cached
   1777      */
   1778     if (!entry_init_key( key, query, querylen )) {
   1779         XLOG( "%s: passed invalid query ?", __FUNCTION__);
   1780         return;
   1781     }
   1782 
   1783     pthread_mutex_lock(&_res_cache_list_lock);
   1784 
   1785     cache = _find_named_cache_locked(netid);
   1786     if (cache == NULL) {
   1787         goto Exit;
   1788     }
   1789 
   1790     XLOG( "%s: query:", __FUNCTION__ );
   1791     XLOG_QUERY(query,querylen);
   1792     XLOG_ANSWER(answer, answerlen);
   1793 #if DEBUG_DATA
   1794     XLOG( "answer:");
   1795     XLOG_BYTES(answer,answerlen);
   1796 #endif
   1797 
   1798     lookup = _cache_lookup_p(cache, key);
   1799     e      = *lookup;
   1800 
   1801     if (e != NULL) { /* should not happen */
   1802         XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
   1803              __FUNCTION__, e);
   1804         goto Exit;
   1805     }
   1806 
   1807     if (cache->num_entries >= cache->max_entries) {
   1808         _cache_remove_expired(cache);
   1809         if (cache->num_entries >= cache->max_entries) {
   1810             _cache_remove_oldest(cache);
   1811         }
   1812         /* need to lookup again */
   1813         lookup = _cache_lookup_p(cache, key);
   1814         e      = *lookup;
   1815         if (e != NULL) {
   1816             XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
   1817                 __FUNCTION__, e);
   1818             goto Exit;
   1819         }
   1820     }
   1821 
   1822     ttl = answer_getTTL(answer, answerlen);
   1823     if (ttl > 0) {
   1824         e = entry_alloc(key, answer, answerlen);
   1825         if (e != NULL) {
   1826             e->expires = ttl + _time_now();
   1827             _cache_add_p(cache, lookup, e);
   1828         }
   1829     }
   1830 #if DEBUG
   1831     _cache_dump_mru(cache);
   1832 #endif
   1833 Exit:
   1834     if (cache != NULL) {
   1835       _cache_notify_waiting_tid_locked(cache, key);
   1836     }
   1837     pthread_mutex_unlock(&_res_cache_list_lock);
   1838 }
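
/* Illustrative caller sketch (not part of this file): how _resolv_cache_lookup()
 * and _resolv_cache_add() are meant to be combined around an actual DNS exchange.
 * send_query_to_server() is hypothetical; the real transport lives elsewhere in
 * the resolver.
 */
#if 0
extern int send_query_to_server(const uint8_t* query, int querylen,
                                uint8_t* answer, int answersize);  /* hypothetical */

static int example_cached_query( unsigned netid,
                                 const uint8_t*  query, int  querylen,
                                 uint8_t*  answer, int  answersize )
{
    int answerlen = 0;
    ResolvCacheStatus  status =
            _resolv_cache_lookup(netid, query, querylen, answer, answersize, &answerlen);

    if (status == RESOLV_CACHE_FOUND)
        return answerlen;

    /* UNSUPPORTED means "do not cache this query/answer"; NOTFOUND means
     * "send the query, then add the answer to the cache". */
    answerlen = send_query_to_server(query, querylen, answer, answersize);
    if (status == RESOLV_CACHE_NOTFOUND && answerlen > 0) {
        _resolv_cache_add(netid, query, querylen, answer, answerlen);
    }
    return answerlen;
}
#endif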
   1839 
   1840 /****************************************************************************/
   1841 /****************************************************************************/
   1842 /*****                                                                  *****/
   1843 /*****                                                                  *****/
   1844 /*****                                                                  *****/
   1845 /****************************************************************************/
   1846 /****************************************************************************/
   1847 
   1848 // Head of the list of caches.  Protected by _res_cache_list_lock.
   1849 static struct resolv_cache_info _res_cache_list;
   1850 
   1851 /* insert resolv_cache_info into the list of resolv_cache_infos */
   1852 static void _insert_cache_info_locked(struct resolv_cache_info* cache_info);
   1853 /* creates a resolv_cache_info */
   1854 static struct resolv_cache_info* _create_cache_info( void );
   1855 /* gets a resolv_cache_info associated with a network, or NULL if not found */
   1856 static struct resolv_cache_info* _find_cache_info_locked(unsigned netid);
/* looks up the named cache, creating one if needed */
   1858 static struct resolv_cache* _get_res_cache_for_net_locked(unsigned netid);
   1859 /* empty the named cache */
   1860 static void _flush_cache_for_net_locked(unsigned netid);
   1861 /* empty the nameservers set for the named cache */
   1862 static void _free_nameservers_locked(struct resolv_cache_info* cache_info);
/* return 1 if the provided list of name servers is identical to the list of name servers
 * currently attached to the provided cache_info */
   1865 static int _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
   1866         const char** servers, int numservers);
/* clears the stats samples contained within the given cache_info */
   1868 static void _res_cache_clear_stats_locked(struct resolv_cache_info* cache_info);
   1869 
   1870 static void
   1871 _res_cache_init(void)
   1872 {
   1873     memset(&_res_cache_list, 0, sizeof(_res_cache_list));
   1874     pthread_mutex_init(&_res_cache_list_lock, NULL);
   1875 }
   1876 
   1877 static struct resolv_cache*
   1878 _get_res_cache_for_net_locked(unsigned netid)
   1879 {
   1880     struct resolv_cache* cache = _find_named_cache_locked(netid);
   1881     if (!cache) {
   1882         struct resolv_cache_info* cache_info = _create_cache_info();
   1883         if (cache_info) {
   1884             cache = _resolv_cache_create();
   1885             if (cache) {
   1886                 cache_info->cache = cache;
   1887                 cache_info->netid = netid;
   1888                 _insert_cache_info_locked(cache_info);
   1889             } else {
   1890                 free(cache_info);
   1891             }
   1892         }
   1893     }
   1894     return cache;
   1895 }
   1896 
   1897 void
   1898 _resolv_flush_cache_for_net(unsigned netid)
   1899 {
   1900     pthread_once(&_res_cache_once, _res_cache_init);
   1901     pthread_mutex_lock(&_res_cache_list_lock);
   1902 
   1903     _flush_cache_for_net_locked(netid);
   1904 
   1905     pthread_mutex_unlock(&_res_cache_list_lock);
   1906 }
   1907 
   1908 static void
   1909 _flush_cache_for_net_locked(unsigned netid)
   1910 {
   1911     struct resolv_cache* cache = _find_named_cache_locked(netid);
   1912     if (cache) {
   1913         _cache_flush_locked(cache);
   1914     }
   1915 
   1916     // Also clear the NS statistics.
   1917     struct resolv_cache_info* cache_info = _find_cache_info_locked(netid);
   1918     _res_cache_clear_stats_locked(cache_info);
   1919 }
   1920 
   1921 void _resolv_delete_cache_for_net(unsigned netid)
   1922 {
   1923     pthread_once(&_res_cache_once, _res_cache_init);
   1924     pthread_mutex_lock(&_res_cache_list_lock);
   1925 
   1926     struct resolv_cache_info* prev_cache_info = &_res_cache_list;
   1927 
   1928     while (prev_cache_info->next) {
   1929         struct resolv_cache_info* cache_info = prev_cache_info->next;
   1930 
   1931         if (cache_info->netid == netid) {
   1932             prev_cache_info->next = cache_info->next;
   1933             _cache_flush_locked(cache_info->cache);
   1934             free(cache_info->cache->entries);
   1935             free(cache_info->cache);
   1936             _free_nameservers_locked(cache_info);
   1937             free(cache_info);
   1938             break;
   1939         }
   1940 
   1941         prev_cache_info = prev_cache_info->next;
   1942     }
   1943 
   1944     pthread_mutex_unlock(&_res_cache_list_lock);
   1945 }
   1946 
   1947 static struct resolv_cache_info*
   1948 _create_cache_info(void)
   1949 {
   1950     struct resolv_cache_info* cache_info;
   1951 
    cache_info = calloc(1, sizeof(*cache_info));
   1953     return cache_info;
   1954 }
   1955 
   1956 static void
   1957 _insert_cache_info_locked(struct resolv_cache_info* cache_info)
   1958 {
   1959     struct resolv_cache_info* last;
   1960 
   1961     for (last = &_res_cache_list; last->next; last = last->next);
   1962 
   1963     last->next = cache_info;
   1964 
   1965 }
   1966 
   1967 static struct resolv_cache*
   1968 _find_named_cache_locked(unsigned netid) {
   1969 
   1970     struct resolv_cache_info* info = _find_cache_info_locked(netid);
   1971 
   1972     if (info != NULL) return info->cache;
   1973 
   1974     return NULL;
   1975 }
   1976 
   1977 static struct resolv_cache_info*
   1978 _find_cache_info_locked(unsigned netid)
   1979 {
   1980     struct resolv_cache_info* cache_info = _res_cache_list.next;
   1981 
   1982     while (cache_info) {
   1983         if (cache_info->netid == netid) {
   1984             break;
   1985         }
   1986 
   1987         cache_info = cache_info->next;
   1988     }
   1989     return cache_info;
   1990 }
   1991 
   1992 void
   1993 _resolv_set_default_params(struct __res_params* params) {
   1994     params->sample_validity = NSSAMPLE_VALIDITY;
   1995     params->success_threshold = SUCCESS_THRESHOLD;
   1996     params->min_samples = 0;
   1997     params->max_samples = 0;
   1998 }
   1999 
   2000 int
   2001 _resolv_set_nameservers_for_net(unsigned netid, const char** servers, unsigned numservers,
   2002         const char *domains, const struct __res_params* params)
   2003 {
   2004     char sbuf[NI_MAXSERV];
   2005     register char *cp;
   2006     int *offset;
   2007     struct addrinfo* nsaddrinfo[MAXNS];
   2008 
   2009     if (numservers > MAXNS) {
   2010         XLOG("%s: numservers=%u, MAXNS=%u", __FUNCTION__, numservers, MAXNS);
   2011         return E2BIG;
   2012     }
   2013 
   2014     // Parse the addresses before actually locking or changing any state, in case there is an error.
   2015     // As a side effect this also reduces the time the lock is kept.
   2016     struct addrinfo hints = {
   2017         .ai_family = AF_UNSPEC,
   2018         .ai_socktype = SOCK_DGRAM,
   2019         .ai_flags = AI_NUMERICHOST
   2020     };
   2021     snprintf(sbuf, sizeof(sbuf), "%u", NAMESERVER_PORT);
   2022     for (unsigned i = 0; i < numservers; i++) {
   2023         // The addrinfo structures allocated here are freed in _free_nameservers_locked().
   2024         int rt = getaddrinfo(servers[i], sbuf, &hints, &nsaddrinfo[i]);
   2025         if (rt != 0) {
   2026             for (unsigned j = 0 ; j < i ; j++) {
   2027                 freeaddrinfo(nsaddrinfo[j]);
   2028                 nsaddrinfo[j] = NULL;
   2029             }
   2030             XLOG("%s: getaddrinfo(%s)=%s", __FUNCTION__, servers[i], gai_strerror(rt));
   2031             return EINVAL;
   2032         }
   2033     }
   2034 
   2035     pthread_once(&_res_cache_once, _res_cache_init);
   2036     pthread_mutex_lock(&_res_cache_list_lock);
   2037 
    // Create the cache for this network if it does not exist yet.
   2039     _get_res_cache_for_net_locked(netid);
   2040 
   2041     struct resolv_cache_info* cache_info = _find_cache_info_locked(netid);
   2042 
   2043     if (cache_info != NULL) {
   2044         uint8_t old_max_samples = cache_info->params.max_samples;
   2045         if (params != NULL) {
   2046             cache_info->params = *params;
   2047         } else {
   2048             _resolv_set_default_params(&cache_info->params);
   2049         }
   2050 
   2051         if (!_resolv_is_nameservers_equal_locked(cache_info, servers, numservers)) {
   2052             // free current before adding new
   2053             _free_nameservers_locked(cache_info);
   2054             unsigned i;
   2055             for (i = 0; i < numservers; i++) {
   2056                 cache_info->nsaddrinfo[i] = nsaddrinfo[i];
   2057                 cache_info->nameservers[i] = strdup(servers[i]);
   2058                 XLOG("%s: netid = %u, addr = %s\n", __FUNCTION__, netid, servers[i]);
   2059             }
   2060             cache_info->nscount = numservers;
   2061 
   2062             // Clear the NS statistics because the mapping to nameservers might have changed.
   2063             _res_cache_clear_stats_locked(cache_info);
   2064 
            // Increment the revision id to ensure that sample state is not written back if the
            // servers change. In theory it would suffice to do so only if the servers or
            // max_samples actually change; in practice the overhead of checking is higher than
            // the cost, and overflows are unlikely.
   2069             ++cache_info->revision_id;
   2070         } else if (cache_info->params.max_samples != old_max_samples) {
   2071             // If the maximum number of samples changes, the overhead of keeping the most recent
   2072             // samples around is not considered worth the effort, so they are cleared instead. All
   2073             // other parameters do not affect shared state: Changing these parameters does not
   2074             // invalidate the samples, as they only affect aggregation and the conditions under
   2075             // which servers are considered usable.
   2076             _res_cache_clear_stats_locked(cache_info);
   2077             ++cache_info->revision_id;
   2078         }
   2079 
        // Always update the search paths, since determining whether they actually changed is
        // complex due to the zero-padding and probably not worth the effort. Flushing the cache,
        // however, is not necessary, since the stored cache entries contain the full domain
        // name, not just the host name.
        // Code moved from res_init.c, load_domain_search_list().
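        // Worked example (illustrative, not from the original source): with
        // domains = "example.com corp.example.com", the loop below rewrites defdname to
        // "example.com\0corp.example.com" and fills dnsrch_offset with {0, 12, -1},
        // i.e. the offset of each now NUL-terminated search domain.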
   2085         strlcpy(cache_info->defdname, domains, sizeof(cache_info->defdname));
   2086         if ((cp = strchr(cache_info->defdname, '\n')) != NULL)
   2087             *cp = '\0';
   2088 
   2089         cp = cache_info->defdname;
   2090         offset = cache_info->dnsrch_offset;
   2091         while (offset < cache_info->dnsrch_offset + MAXDNSRCH) {
   2092             while (*cp == ' ' || *cp == '\t') /* skip leading white space */
   2093                 cp++;
   2094             if (*cp == '\0') /* stop if nothing more to do */
   2095                 break;
   2096             *offset++ = cp - cache_info->defdname; /* record this search domain */
   2097             while (*cp) { /* zero-terminate it */
   2098                 if (*cp == ' '|| *cp == '\t') {
   2099                     *cp++ = '\0';
   2100                     break;
   2101                 }
   2102                 cp++;
   2103             }
   2104         }
   2105         *offset = -1; /* cache_info->dnsrch_offset has MAXDNSRCH+1 items */
   2106     }
   2107 
   2108     pthread_mutex_unlock(&_res_cache_list_lock);
   2109     return 0;
   2110 }
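
/* Illustrative usage sketch (not part of the original file): configuring the
 * nameservers and search domains for one network. The netid and addresses are
 * made up; passing NULL for 'params' falls back to _resolv_set_default_params().
 */
#if 0
static void example_configure_net(void)
{
    /* Addresses must be numeric, since the parser above uses AI_NUMERICHOST. */
    const char* servers[] = { "8.8.8.8", "2001:4860:4860::8888" };

    int rc = _resolv_set_nameservers_for_net(100 /* netid, hypothetical */, servers, 2,
                                             "example.com corp.example.com",
                                             NULL /* use default params */);
    if (rc == E2BIG) {
        /* more than MAXNS servers were passed */
    } else if (rc == EINVAL) {
        /* one of the addresses failed to parse */
    }
}
#endif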
   2111 
   2112 static int
   2113 _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
   2114         const char** servers, int numservers)
   2115 {
   2116     if (cache_info->nscount != numservers) {
   2117         return 0;
   2118     }
   2119 
   2120     // Compare each name server against current name servers.
   2121     // TODO: this is incorrect if the list of current or previous nameservers
   2122     // contains duplicates. This does not really matter because the framework
   2123     // filters out duplicates, but we should probably fix it. It's also
   2124     // insensitive to the order of the nameservers; we should probably fix that
   2125     // too.
   2126     for (int i = 0; i < numservers; i++) {
   2127         for (int j = 0 ; ; j++) {
   2128             if (j >= numservers) {
   2129                 return 0;
   2130             }
   2131             if (strcmp(cache_info->nameservers[i], servers[j]) == 0) {
   2132                 break;
   2133             }
   2134         }
   2135     }
   2136 
   2137     return 1;
   2138 }
   2139 
   2140 static void
   2141 _free_nameservers_locked(struct resolv_cache_info* cache_info)
   2142 {
   2143     int i;
   2144     for (i = 0; i < cache_info->nscount; i++) {
   2145         free(cache_info->nameservers[i]);
   2146         cache_info->nameservers[i] = NULL;
   2147         if (cache_info->nsaddrinfo[i] != NULL) {
   2148             freeaddrinfo(cache_info->nsaddrinfo[i]);
   2149             cache_info->nsaddrinfo[i] = NULL;
   2150         }
   2151         cache_info->nsstats[i].sample_count =
   2152             cache_info->nsstats[i].sample_next = 0;
   2153     }
   2154     cache_info->nscount = 0;
   2155     _res_cache_clear_stats_locked(cache_info);
   2156     ++cache_info->revision_id;
   2157 }
   2158 
   2159 void
   2160 _resolv_populate_res_for_net(res_state statp)
   2161 {
   2162     if (statp == NULL) {
   2163         return;
   2164     }
   2165 
   2166     pthread_once(&_res_cache_once, _res_cache_init);
   2167     pthread_mutex_lock(&_res_cache_list_lock);
   2168 
   2169     struct resolv_cache_info* info = _find_cache_info_locked(statp->netid);
   2170     if (info != NULL) {
   2171         int nserv;
   2172         struct addrinfo* ai;
   2173         XLOG("%s: %u\n", __FUNCTION__, statp->netid);
   2174         for (nserv = 0; nserv < MAXNS; nserv++) {
   2175             ai = info->nsaddrinfo[nserv];
   2176             if (ai == NULL) {
   2177                 break;
   2178             }
   2179 
   2180             if ((size_t) ai->ai_addrlen <= sizeof(statp->_u._ext.ext->nsaddrs[0])) {
   2181                 if (statp->_u._ext.ext != NULL) {
   2182                     memcpy(&statp->_u._ext.ext->nsaddrs[nserv], ai->ai_addr, ai->ai_addrlen);
   2183                     statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
   2184                 } else {
   2185                     if ((size_t) ai->ai_addrlen
   2186                             <= sizeof(statp->nsaddr_list[0])) {
   2187                         memcpy(&statp->nsaddr_list[nserv], ai->ai_addr,
   2188                                 ai->ai_addrlen);
   2189                     } else {
   2190                         statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
   2191                     }
   2192                 }
   2193             } else {
   2194                 XLOG("%s: found too long addrlen", __FUNCTION__);
   2195             }
   2196         }
   2197         statp->nscount = nserv;
        // Now do the search domains. Note that we cache the offsets because this code runs
        // frequently, while the code that sets the domains and computes the offsets only runs
        // when the configuration changes.
        // WARNING: Don't use str*cpy() here, this string contains zeroes.
   2201         memcpy(statp->defdname, info->defdname, sizeof(statp->defdname));
   2202         register char **pp = statp->dnsrch;
   2203         register int *p = info->dnsrch_offset;
   2204         while (pp < statp->dnsrch + MAXDNSRCH && *p != -1) {
   2205             *pp++ = &statp->defdname[0] + *p++;
   2206         }
   2207     }
   2208     pthread_mutex_unlock(&_res_cache_list_lock);
   2209 }
   2210 
   2211 /* Resolver reachability statistics. */
   2212 
   2213 static void
   2214 _res_cache_add_stats_sample_locked(struct __res_stats* stats, const struct __res_sample* sample,
   2215         int max_samples) {
    // Note: this function expects max_samples > 0; otherwise it would (harmlessly) modify the
    // allocated but supposedly unused memory for samples[0].
   2218     XLOG("%s: adding sample to stats, next = %d, count = %d", __FUNCTION__,
   2219             stats->sample_next, stats->sample_count);
   2220     stats->samples[stats->sample_next] = *sample;
   2221     if (stats->sample_count < max_samples) {
   2222         ++stats->sample_count;
   2223     }
   2224     if (++stats->sample_next >= max_samples) {
   2225         stats->sample_next = 0;
   2226     }
   2227 }
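
/* Worked example (illustrative, not from the original source): with max_samples == 3,
 * successive calls write samples[0], samples[1], samples[2], then wrap around to
 * samples[0] again, while sample_count saturates at 3 so that aggregation only ever
 * considers the most recent three samples.
 */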
   2228 
   2229 static void
   2230 _res_cache_clear_stats_locked(struct resolv_cache_info* cache_info) {
   2231     if (cache_info) {
        for (int i = 0 ; i < MAXNS ; ++i) {
            // Clear each per-nameserver stats slot (note the [i] index; without it only
            // nsstats[0] would ever be cleared).
            cache_info->nsstats[i].sample_count = cache_info->nsstats[i].sample_next = 0;
        }
   2235     }
   2236 }
   2237 
   2238 int
   2239 android_net_res_stats_get_info_for_net(unsigned netid, int* nscount,
   2240         struct sockaddr_storage servers[MAXNS], int* dcount, char domains[MAXDNSRCH][MAXDNSRCHPATH],
   2241         struct __res_params* params, struct __res_stats stats[MAXNS]) {
   2242     int revision_id = -1;
   2243     pthread_mutex_lock(&_res_cache_list_lock);
   2244 
   2245     struct resolv_cache_info* info = _find_cache_info_locked(netid);
   2246     if (info) {
   2247         if (info->nscount > MAXNS) {
   2248             pthread_mutex_unlock(&_res_cache_list_lock);
   2249             XLOG("%s: nscount %d > MAXNS %d", __FUNCTION__, info->nscount, MAXNS);
   2250             errno = EFAULT;
   2251             return -1;
   2252         }
   2253         int i;
   2254         for (i = 0; i < info->nscount; i++) {
            // Verify that the following assumptions hold; a failure indicates corruption:
   2256             //  - getaddrinfo() may never return a sockaddr > sockaddr_storage
   2257             //  - all addresses are valid
   2258             //  - there is only one address per addrinfo thanks to numeric resolution
   2259             int addrlen = info->nsaddrinfo[i]->ai_addrlen;
   2260             if (addrlen < (int) sizeof(struct sockaddr) ||
   2261                     addrlen > (int) sizeof(servers[0])) {
   2262                 pthread_mutex_unlock(&_res_cache_list_lock);
   2263                 XLOG("%s: nsaddrinfo[%d].ai_addrlen == %d", __FUNCTION__, i, addrlen);
   2264                 errno = EMSGSIZE;
   2265                 return -1;
   2266             }
   2267             if (info->nsaddrinfo[i]->ai_addr == NULL) {
   2268                 pthread_mutex_unlock(&_res_cache_list_lock);
   2269                 XLOG("%s: nsaddrinfo[%d].ai_addr == NULL", __FUNCTION__, i);
   2270                 errno = ENOENT;
   2271                 return -1;
   2272             }
   2273             if (info->nsaddrinfo[i]->ai_next != NULL) {
   2274                 pthread_mutex_unlock(&_res_cache_list_lock);
   2275                 XLOG("%s: nsaddrinfo[%d].ai_next != NULL", __FUNCTION__, i);
   2276                 errno = ENOTUNIQ;
   2277                 return -1;
   2278             }
   2279         }
   2280         *nscount = info->nscount;
   2281         for (i = 0; i < info->nscount; i++) {
   2282             memcpy(&servers[i], info->nsaddrinfo[i]->ai_addr, info->nsaddrinfo[i]->ai_addrlen);
   2283             stats[i] = info->nsstats[i];
   2284         }
   2285         for (i = 0; i < MAXDNSRCH; i++) {
   2286             const char* cur_domain = info->defdname + info->dnsrch_offset[i];
   2287             // dnsrch_offset[i] can either be -1 or point to an empty string to indicate the end
   2288             // of the search offsets. Checking for < 0 is not strictly necessary, but safer.
   2289             // TODO: Pass in a search domain array instead of a string to
   2290             // _resolv_set_nameservers_for_net() and make this double check unnecessary.
   2291             if (info->dnsrch_offset[i] < 0 ||
   2292                     ((size_t)info->dnsrch_offset[i]) >= sizeof(info->defdname) || !cur_domain[0]) {
   2293                 break;
   2294             }
   2295             strlcpy(domains[i], cur_domain, MAXDNSRCHPATH);
   2296         }
   2297         *dcount = i;
   2298         *params = info->params;
   2299         revision_id = info->revision_id;
   2300     }
   2301 
   2302     pthread_mutex_unlock(&_res_cache_list_lock);
   2303     return revision_id;
   2304 }
   2305 
   2306 int
   2307 _resolv_cache_get_resolver_stats( unsigned netid, struct __res_params* params,
   2308         struct __res_stats stats[MAXNS]) {
   2309     int revision_id = -1;
   2310     pthread_mutex_lock(&_res_cache_list_lock);
   2311 
   2312     struct resolv_cache_info* info = _find_cache_info_locked(netid);
   2313     if (info) {
   2314         memcpy(stats, info->nsstats, sizeof(info->nsstats));
   2315         *params = info->params;
   2316         revision_id = info->revision_id;
   2317     }
   2318 
   2319     pthread_mutex_unlock(&_res_cache_list_lock);
   2320     return revision_id;
   2321 }
   2322 
   2323 void
   2324 _resolv_cache_add_resolver_stats_sample( unsigned netid, int revision_id, int ns,
   2325        const struct __res_sample* sample, int max_samples) {
   2326     if (max_samples <= 0) return;
   2327 
   2328     pthread_mutex_lock(&_res_cache_list_lock);
   2329 
   2330     struct resolv_cache_info* info = _find_cache_info_locked(netid);
   2331 
   2332     if (info && info->revision_id == revision_id) {
   2333         _res_cache_add_stats_sample_locked(&info->nsstats[ns], sample, max_samples);
   2334     }
   2335 
   2336     pthread_mutex_unlock(&_res_cache_list_lock);
   2337 }
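
/* Illustrative sketch (not part of the original file): the intended revision_id
 * handshake. A caller snapshots the current stats and revision, measures a query
 * against nameserver 'ns', and reports the sample back with the same revision so
 * that samples taken against an old server configuration are silently dropped.
 * make_sample() is hypothetical.
 */
#if 0
extern struct __res_sample make_sample(void);  /* hypothetical: fills in time, rtt, rcode */

static void example_report_sample(unsigned netid, int ns)
{
    struct __res_params  params;
    struct __res_stats   stats[MAXNS];

    int revision_id = _resolv_cache_get_resolver_stats(netid, &params, stats);
    if (revision_id < 0) return;

    /* ... issue the query against nameserver 'ns' and measure it ... */

    struct __res_sample sample = make_sample();
    _resolv_cache_add_resolver_stats_sample(netid, revision_id, ns, &sample,
                                            params.max_samples);
}
#endif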
   2338