      1 /*
      2  * Copyright (C) 2008 The Android Open Source Project
      3  * All rights reserved.
      4  *
      5  * Redistribution and use in source and binary forms, with or without
      6  * modification, are permitted provided that the following conditions
      7  * are met:
      8  *  * Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  *  * Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in
     12  *    the documentation and/or other materials provided with the
     13  *    distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
     22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
     25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26  * SUCH DAMAGE.
     27  */
     28 
     29 #include "resolv_cache.h"
     30 #include <resolv.h>
     31 #include <stdlib.h>
     32 #include <string.h>
      33 #include <time.h>
         #include <sys/time.h>   /* for gettimeofday() */
     34 #include "pthread.h"
     35 
     36 #include <errno.h>
     37 #include "arpa_nameser.h"
     38 #include <sys/system_properties.h>
     39 #include <net/if.h>
     40 #include <netdb.h>
     41 #include <linux/if.h>
     42 
     43 #include <arpa/inet.h>
     44 #include "resolv_private.h"
     45 #include "resolv_iface.h"
     46 #include "res_private.h"
     47 
     48 /* This code implements a small and *simple* DNS resolver cache.
     49  *
     50  * It is only used to cache DNS answers for a time defined by the smallest TTL
     51  * among the answer records in order to reduce DNS traffic. It is not supposed
     52  * to be a full DNS cache, since we plan to implement that in the future in a
     53  * dedicated process running on the system.
     54  *
     55  * Note that its design is kept simple very intentionally, i.e.:
     56  *
     57  *  - it takes raw DNS query packet data as input, and returns raw DNS
     58  *    answer packet data as output
     59  *
     60  *    (this means that two similar queries that encode the DNS name
     61  *     differently will be treated distinctly).
     62  *
      63  *    the smallest TTL value among the answer records is used as the time
     64  *    to keep an answer in the cache.
     65  *
     66  *    this is bad, but we absolutely want to avoid parsing the answer packets
     67  *    (and should be solved by the later full DNS cache process).
     68  *
     69  *  - the implementation is just a (query-data) => (answer-data) hash table
     70  *    with a trivial least-recently-used expiration policy.
     71  *
      72  * Doing this keeps the code simple and avoids dealing with a lot of things
     73  * that a full DNS cache is expected to do.
     74  *
     75  * The API is also very simple:
     76  *
     77  *   - the client calls _resolv_cache_get() to obtain a handle to the cache.
     78  *     this will initialize the cache on first usage. the result can be NULL
     79  *     if the cache is disabled.
     80  *
     81  *   - the client calls _resolv_cache_lookup() before performing a query
     82  *
     83  *     if the function returns RESOLV_CACHE_FOUND, a copy of the answer data
     84  *     has been copied into the client-provided answer buffer.
     85  *
     86  *     if the function returns RESOLV_CACHE_NOTFOUND, the client should perform
     87  *     a request normally, *then* call _resolv_cache_add() to add the received
     88  *     answer to the cache.
     89  *
     90  *     if the function returns RESOLV_CACHE_UNSUPPORTED, the client should
     91  *     perform a request normally, and *not* call _resolv_cache_add()
     92  *
     93  *     note that RESOLV_CACHE_UNSUPPORTED is also returned if the answer buffer
      94  *     is too short to accommodate the cached result.
     95  *
     96  *  - when network settings change, the cache must be flushed since the list
     97  *    of DNS servers probably changed. this is done by calling
     98  *    _resolv_cache_reset()
     99  *
    100  *    the parameter to this function must be an ever-increasing generation
    101  *    number corresponding to the current network settings state.
    102  *
    103  *    This is done because several threads could detect the same network
    104  *    settings change (but at different times) and will all end up calling the
    105  *    same function. Comparing with the last used generation number ensures
    106  *    that the cache is only flushed once per network change.
    107  */
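/* the following is an illustrative sketch of that client-side flow, not code
 * used by the resolver itself. 'query', 'querylen', 'answer', 'answersize',
 * 'anslen' and the helper send_query_to_server() are placeholders, and the
 * exact _resolv_cache_add() signature is assumed here rather than taken from
 * this file:
 *
 *     struct resolv_cache*  cache  = _resolv_cache_get();
 *     ResolvCacheStatus     status = RESOLV_CACHE_UNSUPPORTED;
 *     int                   anslen = 0;
 *
 *     if (cache != NULL)
 *         status = _resolv_cache_lookup(cache, query, querylen,
 *                                       answer, answersize, &anslen);
 *
 *     if (status == RESOLV_CACHE_NOTFOUND) {
 *         anslen = send_query_to_server(query, querylen, answer, answersize);
 *         if (anslen > 0)
 *             _resolv_cache_add(cache, query, querylen, answer, anslen);
 *     } else if (status == RESOLV_CACHE_UNSUPPORTED) {
 *         anslen = send_query_to_server(query, querylen, answer, answersize);
 *         // do *not* call _resolv_cache_add() in this case
 *     }
 *     // RESOLV_CACHE_FOUND: 'answer' already holds 'anslen' bytes of response
 */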
    108 
     109 /* the name of an environment variable that will be checked the first time
     110  * this code is called. if its value is "0", the resolver cache is
     111  * disabled.
    112  */
    113 #define  CONFIG_ENV  "BIONIC_DNSCACHE"
    114 
    115 /* entries older than CONFIG_SECONDS seconds are always discarded.
    116  */
    117 #define  CONFIG_SECONDS    (60*10)    /* 10 minutes */
    118 
    119 /* default number of entries kept in the cache. This value has been
    120  * determined by browsing through various sites and counting the number
    121  * of corresponding requests. Keep in mind that our framework is currently
    122  * performing two requests per name lookup (one for IPv4, the other for IPv6)
    123  *
    124  *    www.google.com      4
    125  *    www.ysearch.com     6
    126  *    www.amazon.com      8
    127  *    www.nytimes.com     22
    128  *    www.espn.com        28
    129  *    www.msn.com         28
    130  *    www.lemonde.fr      35
    131  *
     132  * (determined on 2009-2-17 from Paris, France; results may vary depending
    133  *  on location)
    134  *
    135  * most high-level websites use lots of media/ad servers with different names
    136  * but these are generally reused when browsing through the site.
    137  *
    138  * As such, a value of 64 should be relatively comfortable at the moment.
    139  *
    140  * The system property ro.net.dns_cache_size can be used to override the default
     141  * value with a custom value.
    142  *
    143  *
    144  * ******************************************
    145  * * NOTE - this has changed.
    146  * * 1) we've added IPv6 support so each dns query results in 2 responses
    147  * * 2) we've made this a system-wide cache, so the cost is less (it's not
    148  * *    duplicated in each process) and the need is greater (more processes
    149  * *    making different requests).
    150  * * Upping by 2x for IPv6
    151  * * Upping by another 5x for the centralized nature
    152  * *****************************************
    153  */
     154 #define  CONFIG_MAX_ENTRIES    (64 * 2 * 5)
    155 /* name of the system property that can be used to set the cache size */
    156 #define  DNS_CACHE_SIZE_PROP_NAME   "ro.net.dns_cache_size"
    157 
    158 /****************************************************************************/
    159 /****************************************************************************/
    160 /*****                                                                  *****/
    161 /*****                                                                  *****/
    162 /*****                                                                  *****/
    163 /****************************************************************************/
    164 /****************************************************************************/
    165 
    166 /* set to 1 to debug cache operations */
    167 #define  DEBUG       0
    168 
    169 /* set to 1 to debug query data */
    170 #define  DEBUG_DATA  0
    171 
    172 #undef XLOG
    173 #if DEBUG
    174 #  include "libc_logging.h"
    175 #  define XLOG(...)  __libc_format_log(ANDROID_LOG_DEBUG,"libc",__VA_ARGS__)
    176 
    177 #include <stdio.h>
    178 #include <stdarg.h>
    179 
    180 /** BOUNDED BUFFER FORMATTING
    181  **/
    182 
    183 /* technical note:
    184  *
    185  *   the following debugging routines are used to append data to a bounded
     186  *   buffer. they take two parameters:
    187  *
    188  *   - p : a pointer to the current cursor position in the buffer
    189  *         this value is initially set to the buffer's address.
    190  *
    191  *   - end : the address of the buffer's limit, i.e. of the first byte
    192  *           after the buffer. this address should never be touched.
    193  *
    194  *           IMPORTANT: it is assumed that end > buffer_address, i.e.
     195  *                      that the buffer is at least one byte long.
    196  *
    197  *   the _bprint_() functions return the new value of 'p' after the data
    198  *   has been appended, and also ensure the following:
    199  *
    200  *   - the returned value will never be strictly greater than 'end'
    201  *
    202  *   - a return value equal to 'end' means that truncation occured
    203  *     (in which case, end[-1] will be set to 0)
    204  *
    205  *   - after returning from a _bprint_() function, the content of the buffer
    206  *     is always 0-terminated, even in the event of truncation.
    207  *
    208  *  these conventions allow you to call _bprint_ functions multiple times and
    209  *  only check for truncation at the end of the sequence, as in:
    210  *
    211  *     char  buff[1000], *p = buff, *end = p + sizeof(buff);
    212  *
    213  *     p = _bprint_c(p, end, '"');
    214  *     p = _bprint_s(p, end, my_string);
    215  *     p = _bprint_c(p, end, '"');
    216  *
    217  *     if (p >= end) {
    218  *        // buffer was too small
    219  *     }
    220  *
    221  *     printf( "%s", buff );
    222  */
    223 
    224 /* add a char to a bounded buffer */
    225 static char*
    226 _bprint_c( char*  p, char*  end, int  c )
    227 {
    228     if (p < end) {
    229         if (p+1 == end)
    230             *p++ = 0;
    231         else {
    232             *p++ = (char) c;
    233             *p   = 0;
    234         }
    235     }
    236     return p;
    237 }
    238 
    239 /* add a sequence of bytes to a bounded buffer */
    240 static char*
    241 _bprint_b( char*  p, char*  end, const char*  buf, int  len )
    242 {
    243     int  avail = end - p;
    244 
    245     if (avail <= 0 || len <= 0)
    246         return p;
    247 
    248     if (avail > len)
    249         avail = len;
    250 
    251     memcpy( p, buf, avail );
    252     p += avail;
    253 
    254     if (p < end)
    255         p[0] = 0;
    256     else
    257         end[-1] = 0;
    258 
    259     return p;
    260 }
    261 
    262 /* add a string to a bounded buffer */
    263 static char*
    264 _bprint_s( char*  p, char*  end, const char*  str )
    265 {
    266     return _bprint_b(p, end, str, strlen(str));
    267 }
    268 
    269 /* add a formatted string to a bounded buffer */
    270 static char*
    271 _bprint( char*  p, char*  end, const char*  format, ... )
    272 {
    273     int      avail, n;
    274     va_list  args;
    275 
    276     avail = end - p;
    277 
    278     if (avail <= 0)
    279         return p;
    280 
    281     va_start(args, format);
    282     n = vsnprintf( p, avail, format, args);
    283     va_end(args);
    284 
    285     /* certain C libraries return -1 in case of truncation */
    286     if (n < 0 || n > avail)
    287         n = avail;
    288 
    289     p += n;
    290     /* certain C libraries do not zero-terminate in case of truncation */
    291     if (p == end)
    292         p[-1] = 0;
    293 
    294     return p;
    295 }
    296 
    297 /* add a hex value to a bounded buffer, up to 8 digits */
    298 static char*
    299 _bprint_hex( char*  p, char*  end, unsigned  value, int  numDigits )
    300 {
    301     char   text[sizeof(unsigned)*2];
    302     int    nn = 0;
    303 
    304     while (numDigits-- > 0) {
    305         text[nn++] = "0123456789abcdef"[(value >> (numDigits*4)) & 15];
    306     }
    307     return _bprint_b(p, end, text, nn);
    308 }
    309 
    310 /* add the hexadecimal dump of some memory area to a bounded buffer */
    311 static char*
    312 _bprint_hexdump( char*  p, char*  end, const uint8_t*  data, int  datalen )
    313 {
    314     int   lineSize = 16;
    315 
    316     while (datalen > 0) {
    317         int  avail = datalen;
    318         int  nn;
    319 
    320         if (avail > lineSize)
    321             avail = lineSize;
    322 
    323         for (nn = 0; nn < avail; nn++) {
    324             if (nn > 0)
    325                 p = _bprint_c(p, end, ' ');
    326             p = _bprint_hex(p, end, data[nn], 2);
    327         }
    328         for ( ; nn < lineSize; nn++ ) {
    329             p = _bprint_s(p, end, "   ");
    330         }
    331         p = _bprint_s(p, end, "  ");
    332 
    333         for (nn = 0; nn < avail; nn++) {
    334             int  c = data[nn];
    335 
    336             if (c < 32 || c > 127)
    337                 c = '.';
    338 
    339             p = _bprint_c(p, end, c);
    340         }
    341         p = _bprint_c(p, end, '\n');
    342 
    343         data    += avail;
    344         datalen -= avail;
    345     }
    346     return p;
    347 }
    348 
     349 /* dump the content of a query packet to the log */
    350 static void
    351 XLOG_BYTES( const void*  base, int  len )
    352 {
    353     char  buff[1024];
    354     char*  p = buff, *end = p + sizeof(buff);
    355 
    356     p = _bprint_hexdump(p, end, base, len);
    357     XLOG("%s",buff);
    358 }
    359 
    360 #else /* !DEBUG */
    361 #  define  XLOG(...)        ((void)0)
    362 #  define  XLOG_BYTES(a,b)  ((void)0)
    363 #endif
    364 
    365 static time_t
    366 _time_now( void )
    367 {
    368     struct timeval  tv;
    369 
    370     gettimeofday( &tv, NULL );
    371     return tv.tv_sec;
    372 }
    373 
    374 /* reminder: the general format of a DNS packet is the following:
    375  *
    376  *    HEADER  (12 bytes)
    377  *    QUESTION  (variable)
    378  *    ANSWER (variable)
    379  *    AUTHORITY (variable)
     380  *    ADDITIONAL (variable)
    381  *
    382  * the HEADER is made of:
    383  *
    384  *   ID     : 16 : 16-bit unique query identification field
    385  *
    386  *   QR     :  1 : set to 0 for queries, and 1 for responses
    387  *   Opcode :  4 : set to 0 for queries
    388  *   AA     :  1 : set to 0 for queries
    389  *   TC     :  1 : truncation flag, will be set to 0 in queries
    390  *   RD     :  1 : recursion desired
    391  *
    392  *   RA     :  1 : recursion available (0 in queries)
    393  *   Z      :  3 : three reserved zero bits
    394  *   RCODE  :  4 : response code (always 0=NOERROR in queries)
    395  *
    396  *   QDCount: 16 : question count
    397  *   ANCount: 16 : Answer count (0 in queries)
     398  *   NSCount: 16 : Authority Record count (0 in queries)
     399  *   ARCount: 16 : Additional Record count (0 in queries)
    400  *
     401  * the QUESTION is made of QDCount Question Records (QRs)
     402  * the ANSWER is made of ANCount RRs
     403  * the AUTHORITY is made of NSCount RRs
     404  * the ADDITIONAL is made of ARCount RRs
    405  *
    406  * Each Question Record (QR) is made of:
    407  *
    408  *   QNAME   : variable : Query DNS NAME
    409  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
    410  *   CLASS   : 16       : class of query (IN=1)
    411  *
    412  * Each Resource Record (RR) is made of:
    413  *
    414  *   NAME    : variable : DNS NAME
    415  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
    416  *   CLASS   : 16       : class of query (IN=1)
    417  *   TTL     : 32       : seconds to cache this RR (0=none)
    418  *   RDLENGTH: 16       : size of RDDATA in bytes
    419  *   RDDATA  : variable : RR data (depends on TYPE)
    420  *
    421  * Each QNAME contains a domain name encoded as a sequence of 'labels'
    422  * terminated by a zero. Each label has the following format:
    423  *
     424  *    LEN  : 8     : length of label (MUST be < 64)
     425  *    NAME : 8*LEN : label text (must exclude dots)
    426  *
    427  * A value of 0 in the encoding is interpreted as the 'root' domain and
    428  * terminates the encoding. So 'www.android.com' will be encoded as:
    429  *
    430  *   <3>www<7>android<3>com<0>
    431  *
    432  * Where <n> represents the byte with value 'n'
    433  *
    434  * Each NAME reflects the QNAME of the question, but has a slightly more
    435  * complex encoding in order to provide message compression. This is achieved
    436  * by using a 2-byte pointer, with format:
    437  *
    438  *    TYPE   : 2  : 0b11 to indicate a pointer, 0b01 and 0b10 are reserved
    439  *    OFFSET : 14 : offset to another part of the DNS packet
    440  *
     441  * The offset is relative to the start of the DNS packet and must point to
     442  * a prior occurrence of a name in the packet. A pointer terminates the encoding.
    443  *
    444  * The NAME can be encoded in one of the following formats:
    445  *
    446  *   - a sequence of simple labels terminated by 0 (like QNAMEs)
    447  *   - a single pointer
    448  *   - a sequence of simple labels terminated by a pointer
    449  *
     450  * A pointer shall always point to either a pointer or a sequence of
    451  * labels (which can themselves be terminated by either a 0 or a pointer)
    452  *
    453  * The expanded length of a given domain name should not exceed 255 bytes.
    454  *
    455  * NOTE: we don't parse the answer packets, so don't need to deal with NAME
    456  *       records, only QNAMEs.
    457  */
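/* worked example (for illustration only): a recursive query for the A record
 * of 'www.android.com', with an arbitrary ID of 0x1234, is laid out as:
 *
 *    0x12 0x34                               ID
 *    0x01 0x00                               flags: RD=1, everything else 0
 *    0x00 0x01                               QDCOUNT = 1
 *    0x00 0x00  0x00 0x00  0x00 0x00         ANCOUNT, NSCOUNT, ARCOUNT = 0
 *    0x03 'w' 'w' 'w'                        label <3>www
 *    0x07 'a' 'n' 'd' 'r' 'o' 'i' 'd'        label <7>android
 *    0x03 'c' 'o' 'm'                        label <3>com
 *    0x00                                    root label, terminates the QNAME
 *    0x00 0x01                               TYPE  = A  (1)
 *    0x00 0x01                               CLASS = IN (1)
 */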
    458 
    459 #define  DNS_HEADER_SIZE  12
    460 
    461 #define  DNS_TYPE_A   "\00\01"   /* big-endian decimal 1 */
    462 #define  DNS_TYPE_PTR "\00\014"  /* big-endian decimal 12 */
    463 #define  DNS_TYPE_MX  "\00\017"  /* big-endian decimal 15 */
    464 #define  DNS_TYPE_AAAA "\00\034" /* big-endian decimal 28 */
     465 #define  DNS_TYPE_ALL "\00\377"  /* big-endian decimal 255 */
    466 
    467 #define  DNS_CLASS_IN "\00\01"   /* big-endian decimal 1 */
    468 
    469 typedef struct {
    470     const uint8_t*  base;
    471     const uint8_t*  end;
    472     const uint8_t*  cursor;
    473 } DnsPacket;
    474 
    475 static void
    476 _dnsPacket_init( DnsPacket*  packet, const uint8_t*  buff, int  bufflen )
    477 {
    478     packet->base   = buff;
    479     packet->end    = buff + bufflen;
    480     packet->cursor = buff;
    481 }
    482 
    483 static void
    484 _dnsPacket_rewind( DnsPacket*  packet )
    485 {
    486     packet->cursor = packet->base;
    487 }
    488 
    489 static void
    490 _dnsPacket_skip( DnsPacket*  packet, int  count )
    491 {
    492     const uint8_t*  p = packet->cursor + count;
    493 
    494     if (p > packet->end)
    495         p = packet->end;
    496 
    497     packet->cursor = p;
    498 }
    499 
    500 static int
    501 _dnsPacket_readInt16( DnsPacket*  packet )
    502 {
    503     const uint8_t*  p = packet->cursor;
    504 
    505     if (p+2 > packet->end)
    506         return -1;
    507 
    508     packet->cursor = p+2;
    509     return (p[0]<< 8) | p[1];
    510 }
    511 
    512 /** QUERY CHECKING
    513  **/
    514 
    515 /* check bytes in a dns packet. returns 1 on success, 0 on failure.
    516  * the cursor is only advanced in the case of success
    517  */
    518 static int
    519 _dnsPacket_checkBytes( DnsPacket*  packet, int  numBytes, const void*  bytes )
    520 {
    521     const uint8_t*  p = packet->cursor;
    522 
    523     if (p + numBytes > packet->end)
    524         return 0;
    525 
    526     if (memcmp(p, bytes, numBytes) != 0)
    527         return 0;
    528 
    529     packet->cursor = p + numBytes;
    530     return 1;
    531 }
    532 
    533 /* parse and skip a given QNAME stored in a query packet,
    534  * from the current cursor position. returns 1 on success,
    535  * or 0 for malformed data.
    536  */
    537 static int
    538 _dnsPacket_checkQName( DnsPacket*  packet )
    539 {
    540     const uint8_t*  p   = packet->cursor;
    541     const uint8_t*  end = packet->end;
    542 
    543     for (;;) {
    544         int  c;
    545 
    546         if (p >= end)
    547             break;
    548 
    549         c = *p++;
    550 
    551         if (c == 0) {
    552             packet->cursor = p;
    553             return 1;
    554         }
    555 
    556         /* we don't expect label compression in QNAMEs */
    557         if (c >= 64)
    558             break;
    559 
    560         p += c;
    561         /* we rely on the bound check at the start
    562          * of the loop here */
    563     }
    564     /* malformed data */
    565     XLOG("malformed QNAME");
    566     return 0;
    567 }
    568 
    569 /* parse and skip a given QR stored in a packet.
    570  * returns 1 on success, and 0 on failure
    571  */
    572 static int
    573 _dnsPacket_checkQR( DnsPacket*  packet )
    574 {
    575     if (!_dnsPacket_checkQName(packet))
    576         return 0;
    577 
    578     /* TYPE must be one of the things we support */
    579     if (!_dnsPacket_checkBytes(packet, 2, DNS_TYPE_A) &&
    580         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_PTR) &&
    581         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_MX) &&
    582         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_AAAA) &&
    583         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_ALL))
    584     {
    585         XLOG("unsupported TYPE");
    586         return 0;
    587     }
    588     /* CLASS must be IN */
    589     if (!_dnsPacket_checkBytes(packet, 2, DNS_CLASS_IN)) {
    590         XLOG("unsupported CLASS");
    591         return 0;
    592     }
    593 
    594     return 1;
    595 }
    596 
    597 /* check the header of a DNS Query packet, return 1 if it is one
    598  * type of query we can cache, or 0 otherwise
    599  */
    600 static int
    601 _dnsPacket_checkQuery( DnsPacket*  packet )
    602 {
    603     const uint8_t*  p = packet->base;
    604     int             qdCount, anCount, dnCount, arCount;
    605 
    606     if (p + DNS_HEADER_SIZE > packet->end) {
    607         XLOG("query packet too small");
    608         return 0;
    609     }
    610 
    611     /* QR must be set to 0, opcode must be 0 and AA must be 0 */
    612     /* RA, Z, and RCODE must be 0 */
    613     if ((p[2] & 0xFC) != 0 || p[3] != 0) {
    614         XLOG("query packet flags unsupported");
    615         return 0;
    616     }
    617 
    618     /* Note that we ignore the TC and RD bits here for the
    619      * following reasons:
    620      *
    621      * - there is no point for a query packet sent to a server
    622      *   to have the TC bit set, but the implementation might
    623      *   set the bit in the query buffer for its own needs
    624      *   between a _resolv_cache_lookup and a
    625      *   _resolv_cache_add. We should not freak out if this
    626      *   is the case.
    627      *
    628      * - we consider that the result from a RD=0 or a RD=1
    629      *   query might be different, hence that the RD bit
    630      *   should be used to differentiate cached result.
    631      *
    632      *   this implies that RD is checked when hashing or
    633      *   comparing query packets, but not TC
    634      */
    635 
    636     /* ANCOUNT, DNCOUNT and ARCOUNT must be 0 */
    637     qdCount = (p[4] << 8) | p[5];
    638     anCount = (p[6] << 8) | p[7];
    639     dnCount = (p[8] << 8) | p[9];
    640     arCount = (p[10]<< 8) | p[11];
    641 
    642     if (anCount != 0 || dnCount != 0 || arCount != 0) {
    643         XLOG("query packet contains non-query records");
    644         return 0;
    645     }
    646 
    647     if (qdCount == 0) {
    648         XLOG("query packet doesn't contain query record");
    649         return 0;
    650     }
    651 
    652     /* Check QDCOUNT QRs */
    653     packet->cursor = p + DNS_HEADER_SIZE;
    654 
    655     for (;qdCount > 0; qdCount--)
    656         if (!_dnsPacket_checkQR(packet))
    657             return 0;
    658 
    659     return 1;
    660 }
    661 
    662 /** QUERY DEBUGGING
    663  **/
    664 #if DEBUG
    665 static char*
    666 _dnsPacket_bprintQName(DnsPacket*  packet, char*  bp, char*  bend)
    667 {
    668     const uint8_t*  p   = packet->cursor;
    669     const uint8_t*  end = packet->end;
    670     int             first = 1;
    671 
    672     for (;;) {
    673         int  c;
    674 
    675         if (p >= end)
    676             break;
    677 
    678         c = *p++;
    679 
    680         if (c == 0) {
    681             packet->cursor = p;
    682             return bp;
    683         }
    684 
    685         /* we don't expect label compression in QNAMEs */
    686         if (c >= 64)
    687             break;
    688 
    689         if (first)
    690             first = 0;
    691         else
    692             bp = _bprint_c(bp, bend, '.');
    693 
    694         bp = _bprint_b(bp, bend, (const char*)p, c);
    695 
    696         p += c;
    697         /* we rely on the bound check at the start
    698          * of the loop here */
    699     }
    700     /* malformed data */
    701     bp = _bprint_s(bp, bend, "<MALFORMED>");
    702     return bp;
    703 }
    704 
    705 static char*
    706 _dnsPacket_bprintQR(DnsPacket*  packet, char*  p, char*  end)
    707 {
    708 #define  QQ(x)   { DNS_TYPE_##x, #x }
    709     static const struct {
    710         const char*  typeBytes;
    711         const char*  typeString;
    712     } qTypes[] =
    713     {
    714         QQ(A), QQ(PTR), QQ(MX), QQ(AAAA), QQ(ALL),
    715         { NULL, NULL }
    716     };
    717     int          nn;
    718     const char*  typeString = NULL;
    719 
    720     /* dump QNAME */
    721     p = _dnsPacket_bprintQName(packet, p, end);
    722 
    723     /* dump TYPE */
    724     p = _bprint_s(p, end, " (");
    725 
    726     for (nn = 0; qTypes[nn].typeBytes != NULL; nn++) {
    727         if (_dnsPacket_checkBytes(packet, 2, qTypes[nn].typeBytes)) {
    728             typeString = qTypes[nn].typeString;
    729             break;
    730         }
    731     }
    732 
    733     if (typeString != NULL)
    734         p = _bprint_s(p, end, typeString);
    735     else {
    736         int  typeCode = _dnsPacket_readInt16(packet);
    737         p = _bprint(p, end, "UNKNOWN-%d", typeCode);
    738     }
    739 
    740     p = _bprint_c(p, end, ')');
    741 
    742     /* skip CLASS */
    743     _dnsPacket_skip(packet, 2);
    744     return p;
    745 }
    746 
    747 /* this function assumes the packet has already been checked */
    748 static char*
    749 _dnsPacket_bprintQuery( DnsPacket*  packet, char*  p, char*  end )
    750 {
    751     int   qdCount;
    752 
    753     if (packet->base[2] & 0x1) {
    754         p = _bprint_s(p, end, "RECURSIVE ");
    755     }
    756 
    757     _dnsPacket_skip(packet, 4);
    758     qdCount = _dnsPacket_readInt16(packet);
    759     _dnsPacket_skip(packet, 6);
    760 
    761     for ( ; qdCount > 0; qdCount-- ) {
    762         p = _dnsPacket_bprintQR(packet, p, end);
    763     }
    764     return p;
    765 }
    766 #endif
    767 
    768 
    769 /** QUERY HASHING SUPPORT
    770  **
    771  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKET HAS ALREADY
     772  ** BEEN SUCCESSFULLY CHECKED.
    773  **/
    774 
    775 /* use 32-bit FNV hash function */
    776 #define  FNV_MULT   16777619U
    777 #define  FNV_BASIS  2166136261U
    778 
    779 static unsigned
    780 _dnsPacket_hashBytes( DnsPacket*  packet, int  numBytes, unsigned  hash )
    781 {
    782     const uint8_t*  p   = packet->cursor;
    783     const uint8_t*  end = packet->end;
    784 
     785     while (numBytes > 0 && p < end) {
     786         hash = hash*FNV_MULT ^ *p++;
                 numBytes -= 1;
     787     }
    788     packet->cursor = p;
    789     return hash;
    790 }
    791 
    792 
    793 static unsigned
    794 _dnsPacket_hashQName( DnsPacket*  packet, unsigned  hash )
    795 {
    796     const uint8_t*  p   = packet->cursor;
    797     const uint8_t*  end = packet->end;
    798 
    799     for (;;) {
    800         int  c;
    801 
    802         if (p >= end) {  /* should not happen */
    803             XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
    804             break;
    805         }
    806 
    807         c = *p++;
    808 
    809         if (c == 0)
    810             break;
    811 
    812         if (c >= 64) {
    813             XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
    814             break;
    815         }
    816         if (p + c >= end) {
    817             XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
    818                     __FUNCTION__);
    819             break;
    820         }
    821         while (c > 0) {
    822             hash = hash*FNV_MULT ^ *p++;
    823             c   -= 1;
    824         }
    825     }
    826     packet->cursor = p;
    827     return hash;
    828 }
    829 
    830 static unsigned
    831 _dnsPacket_hashQR( DnsPacket*  packet, unsigned  hash )
    832 {
    833     hash = _dnsPacket_hashQName(packet, hash);
    834     hash = _dnsPacket_hashBytes(packet, 4, hash); /* TYPE and CLASS */
    835     return hash;
    836 }
    837 
    838 static unsigned
    839 _dnsPacket_hashQuery( DnsPacket*  packet )
    840 {
    841     unsigned  hash = FNV_BASIS;
    842     int       count;
    843     _dnsPacket_rewind(packet);
    844 
    845     /* we ignore the TC bit for reasons explained in
    846      * _dnsPacket_checkQuery().
    847      *
    848      * however we hash the RD bit to differentiate
    849      * between answers for recursive and non-recursive
    850      * queries.
    851      */
    852     hash = hash*FNV_MULT ^ (packet->base[2] & 1);
    853 
    854     /* assume: other flags are 0 */
    855     _dnsPacket_skip(packet, 4);
    856 
    857     /* read QDCOUNT */
    858     count = _dnsPacket_readInt16(packet);
    859 
    860     /* assume: ANcount, NScount, ARcount are 0 */
    861     _dnsPacket_skip(packet, 6);
    862 
    863     /* hash QDCOUNT QRs */
    864     for ( ; count > 0; count-- )
    865         hash = _dnsPacket_hashQR(packet, hash);
    866 
    867     return hash;
    868 }
    869 
    870 
    871 /** QUERY COMPARISON
    872  **
    873  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKETS HAVE ALREADY
     874  ** BEEN SUCCESSFULLY CHECKED.
    875  **/
    876 
    877 static int
    878 _dnsPacket_isEqualDomainName( DnsPacket*  pack1, DnsPacket*  pack2 )
    879 {
    880     const uint8_t*  p1   = pack1->cursor;
    881     const uint8_t*  end1 = pack1->end;
    882     const uint8_t*  p2   = pack2->cursor;
    883     const uint8_t*  end2 = pack2->end;
    884 
    885     for (;;) {
    886         int  c1, c2;
    887 
    888         if (p1 >= end1 || p2 >= end2) {
    889             XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
    890             break;
    891         }
    892         c1 = *p1++;
    893         c2 = *p2++;
    894         if (c1 != c2)
    895             break;
    896 
    897         if (c1 == 0) {
    898             pack1->cursor = p1;
    899             pack2->cursor = p2;
    900             return 1;
    901         }
    902         if (c1 >= 64) {
    903             XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
    904             break;
    905         }
    906         if ((p1+c1 > end1) || (p2+c1 > end2)) {
    907             XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
    908                     __FUNCTION__);
    909             break;
    910         }
    911         if (memcmp(p1, p2, c1) != 0)
    912             break;
    913         p1 += c1;
    914         p2 += c1;
    915         /* we rely on the bound checks at the start of the loop */
    916     }
    917     /* not the same, or one is malformed */
    918     XLOG("different DN");
    919     return 0;
    920 }
    921 
    922 static int
    923 _dnsPacket_isEqualBytes( DnsPacket*  pack1, DnsPacket*  pack2, int  numBytes )
    924 {
    925     const uint8_t*  p1 = pack1->cursor;
    926     const uint8_t*  p2 = pack2->cursor;
    927 
    928     if ( p1 + numBytes > pack1->end || p2 + numBytes > pack2->end )
    929         return 0;
    930 
    931     if ( memcmp(p1, p2, numBytes) != 0 )
    932         return 0;
    933 
    934     pack1->cursor += numBytes;
    935     pack2->cursor += numBytes;
    936     return 1;
    937 }
    938 
    939 static int
    940 _dnsPacket_isEqualQR( DnsPacket*  pack1, DnsPacket*  pack2 )
    941 {
    942     /* compare domain name encoding + TYPE + CLASS */
    943     if ( !_dnsPacket_isEqualDomainName(pack1, pack2) ||
    944          !_dnsPacket_isEqualBytes(pack1, pack2, 2+2) )
    945         return 0;
    946 
    947     return 1;
    948 }
    949 
    950 static int
    951 _dnsPacket_isEqualQuery( DnsPacket*  pack1, DnsPacket*  pack2 )
    952 {
    953     int  count1, count2;
    954 
    955     /* compare the headers, ignore most fields */
    956     _dnsPacket_rewind(pack1);
    957     _dnsPacket_rewind(pack2);
    958 
    959     /* compare RD, ignore TC, see comment in _dnsPacket_checkQuery */
    960     if ((pack1->base[2] & 1) != (pack2->base[2] & 1)) {
    961         XLOG("different RD");
    962         return 0;
    963     }
    964 
    965     /* assume: other flags are all 0 */
    966     _dnsPacket_skip(pack1, 4);
    967     _dnsPacket_skip(pack2, 4);
    968 
    969     /* compare QDCOUNT */
    970     count1 = _dnsPacket_readInt16(pack1);
    971     count2 = _dnsPacket_readInt16(pack2);
    972     if (count1 != count2 || count1 < 0) {
    973         XLOG("different QDCOUNT");
    974         return 0;
    975     }
    976 
    977     /* assume: ANcount, NScount and ARcount are all 0 */
    978     _dnsPacket_skip(pack1, 6);
    979     _dnsPacket_skip(pack2, 6);
    980 
    981     /* compare the QDCOUNT QRs */
    982     for ( ; count1 > 0; count1-- ) {
    983         if (!_dnsPacket_isEqualQR(pack1, pack2)) {
    984             XLOG("different QR");
    985             return 0;
    986         }
    987     }
    988     return 1;
    989 }
    990 
    991 /****************************************************************************/
    992 /****************************************************************************/
    993 /*****                                                                  *****/
    994 /*****                                                                  *****/
    995 /*****                                                                  *****/
    996 /****************************************************************************/
    997 /****************************************************************************/
    998 
    999 /* cache entry. for simplicity, 'hash' and 'hlink' are inlined in this
   1000  * structure though they are conceptually part of the hash table.
   1001  *
   1002  * similarly, mru_next and mru_prev are part of the global MRU list
   1003  */
   1004 typedef struct Entry {
   1005     unsigned int     hash;   /* hash value */
   1006     struct Entry*    hlink;  /* next in collision chain */
   1007     struct Entry*    mru_prev;
   1008     struct Entry*    mru_next;
   1009 
   1010     const uint8_t*   query;
   1011     int              querylen;
   1012     const uint8_t*   answer;
   1013     int              answerlen;
   1014     time_t           expires;   /* time_t when the entry isn't valid any more */
   1015     int              id;        /* for debugging purpose */
   1016 } Entry;
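/* note (illustration): the MRU list is a doubly-linked list threaded through
 * mru_prev/mru_next, with the cache's 'mru_list' field acting as a sentinel
 * node, so an empty cache satisfies:
 *
 *     cache->mru_list.mru_next == cache->mru_list.mru_prev == &cache->mru_list
 *
 * entry_mru_add() below inserts right after the sentinel (most recently used),
 * which means the least recently used entry is always cache->mru_list.mru_prev,
 * the one evicted by _cache_remove_oldest().
 */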
   1017 
   1018 /**
   1019  * Find the TTL for a negative DNS result.  This is defined as the minimum
    1020  * of the SOA record's TTL and the MINIMUM-TTL field (RFC-2308).
   1021  *
   1022  * Return 0 if not found.
   1023  */
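/* for reference, the SOA RDATA parsed below is laid out as follows
 * (RFC 1035, section 3.3.13):
 *
 *     MNAME   : variable : primary server name  (skipped with dn_skipname)
 *     RNAME   : variable : responsible mailbox  (skipped with dn_skipname)
 *     SERIAL  : 32
 *     REFRESH : 32
 *     RETRY   : 32
 *     EXPIRE  : 32
 *     MINIMUM : 32       : the MINIMUM-TTL field read with ns_get32()
 */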
   1024 static u_long
   1025 answer_getNegativeTTL(ns_msg handle) {
   1026     int n, nscount;
   1027     u_long result = 0;
   1028     ns_rr rr;
   1029 
   1030     nscount = ns_msg_count(handle, ns_s_ns);
   1031     for (n = 0; n < nscount; n++) {
   1032         if ((ns_parserr(&handle, ns_s_ns, n, &rr) == 0) && (ns_rr_type(rr) == ns_t_soa)) {
   1033             const u_char *rdata = ns_rr_rdata(rr); // find the data
   1034             const u_char *edata = rdata + ns_rr_rdlen(rr); // add the len to find the end
   1035             int len;
   1036             u_long ttl, rec_result = ns_rr_ttl(rr);
   1037 
   1038             // find the MINIMUM-TTL field from the blob of binary data for this record
   1039             // skip the server name
   1040             len = dn_skipname(rdata, edata);
   1041             if (len == -1) continue; // error skipping
   1042             rdata += len;
   1043 
   1044             // skip the admin name
   1045             len = dn_skipname(rdata, edata);
   1046             if (len == -1) continue; // error skipping
   1047             rdata += len;
   1048 
   1049             if (edata - rdata != 5*NS_INT32SZ) continue;
   1050             // skip: serial number + refresh interval + retry interval + expiry
   1051             rdata += NS_INT32SZ * 4;
   1052             // finally read the MINIMUM TTL
   1053             ttl = ns_get32(rdata);
   1054             if (ttl < rec_result) {
   1055                 rec_result = ttl;
   1056             }
   1057             // Now that the record is read successfully, apply the new min TTL
    1058             if (result == 0 || rec_result < result) {
   1059                 result = rec_result;
   1060             }
   1061         }
   1062     }
   1063     return result;
   1064 }
   1065 
   1066 /**
   1067  * Parse the answer records and find the appropriate
   1068  * smallest TTL among the records.  This might be from
   1069  * the answer records if found or from the SOA record
   1070  * if it's a negative result.
   1071  *
   1072  * The returned TTL is the number of seconds to
   1073  * keep the answer in the cache.
   1074  *
   1075  * In case of parse error zero (0) is returned which
   1076  * indicates that the answer shall not be cached.
   1077  */
   1078 static u_long
   1079 answer_getTTL(const void* answer, int answerlen)
   1080 {
   1081     ns_msg handle;
   1082     int ancount, n;
   1083     u_long result, ttl;
   1084     ns_rr rr;
   1085 
   1086     result = 0;
   1087     if (ns_initparse(answer, answerlen, &handle) >= 0) {
   1088         // get number of answer records
   1089         ancount = ns_msg_count(handle, ns_s_an);
   1090 
   1091         if (ancount == 0) {
   1092             // a response with no answers?  Cache this negative result.
   1093             result = answer_getNegativeTTL(handle);
   1094         } else {
   1095             for (n = 0; n < ancount; n++) {
   1096                 if (ns_parserr(&handle, ns_s_an, n, &rr) == 0) {
   1097                     ttl = ns_rr_ttl(rr);
   1098                     if (n == 0 || ttl < result) {
   1099                         result = ttl;
   1100                     }
   1101                 } else {
   1102                     XLOG("ns_parserr failed ancount no = %d. errno = %s\n", n, strerror(errno));
   1103                 }
   1104             }
   1105         }
   1106     } else {
   1107         XLOG("ns_parserr failed. %s\n", strerror(errno));
   1108     }
   1109 
   1110     XLOG("TTL = %d\n", result);
   1111 
   1112     return result;
   1113 }
   1114 
   1115 static void
   1116 entry_free( Entry*  e )
   1117 {
   1118     /* everything is allocated in a single memory block */
   1119     if (e) {
   1120         free(e);
   1121     }
   1122 }
   1123 
   1124 static __inline__ void
   1125 entry_mru_remove( Entry*  e )
   1126 {
   1127     e->mru_prev->mru_next = e->mru_next;
   1128     e->mru_next->mru_prev = e->mru_prev;
   1129 }
   1130 
   1131 static __inline__ void
   1132 entry_mru_add( Entry*  e, Entry*  list )
   1133 {
   1134     Entry*  first = list->mru_next;
   1135 
   1136     e->mru_next = first;
   1137     e->mru_prev = list;
   1138 
   1139     list->mru_next  = e;
   1140     first->mru_prev = e;
   1141 }
   1142 
    1143 /* compute the hash of a given entry, this is a hash of most of the
    1144  * data in the query (key) */
   1145 static unsigned
   1146 entry_hash( const Entry*  e )
   1147 {
   1148     DnsPacket  pack[1];
   1149 
   1150     _dnsPacket_init(pack, e->query, e->querylen);
   1151     return _dnsPacket_hashQuery(pack);
   1152 }
   1153 
    1154 /* initialize an Entry as a search key, this also checks the input query packet.
    1155  * returns 1 on success, or 0 in case of unsupported/malformed data */
   1156 static int
   1157 entry_init_key( Entry*  e, const void*  query, int  querylen )
   1158 {
   1159     DnsPacket  pack[1];
   1160 
   1161     memset(e, 0, sizeof(*e));
   1162 
   1163     e->query    = query;
   1164     e->querylen = querylen;
   1165     e->hash     = entry_hash(e);
   1166 
   1167     _dnsPacket_init(pack, query, querylen);
   1168 
   1169     return _dnsPacket_checkQuery(pack);
   1170 }
   1171 
   1172 /* allocate a new entry as a cache node */
   1173 static Entry*
   1174 entry_alloc( const Entry*  init, const void*  answer, int  answerlen )
   1175 {
   1176     Entry*  e;
   1177     int     size;
   1178 
   1179     size = sizeof(*e) + init->querylen + answerlen;
   1180     e    = calloc(size, 1);
   1181     if (e == NULL)
   1182         return e;
   1183 
   1184     e->hash     = init->hash;
   1185     e->query    = (const uint8_t*)(e+1);
   1186     e->querylen = init->querylen;
   1187 
   1188     memcpy( (char*)e->query, init->query, e->querylen );
   1189 
   1190     e->answer    = e->query + e->querylen;
   1191     e->answerlen = answerlen;
   1192 
   1193     memcpy( (char*)e->answer, answer, e->answerlen );
   1194 
   1195     return e;
   1196 }
   1197 
   1198 static int
   1199 entry_equals( const Entry*  e1, const Entry*  e2 )
   1200 {
   1201     DnsPacket  pack1[1], pack2[1];
   1202 
   1203     if (e1->querylen != e2->querylen) {
   1204         return 0;
   1205     }
   1206     _dnsPacket_init(pack1, e1->query, e1->querylen);
   1207     _dnsPacket_init(pack2, e2->query, e2->querylen);
   1208 
   1209     return _dnsPacket_isEqualQuery(pack1, pack2);
   1210 }
   1211 
   1212 /****************************************************************************/
   1213 /****************************************************************************/
   1214 /*****                                                                  *****/
   1215 /*****                                                                  *****/
   1216 /*****                                                                  *****/
   1217 /****************************************************************************/
   1218 /****************************************************************************/
   1219 
    1220 /* We use a simple hash table with external collision lists.
   1221  * for simplicity, the hash-table fields 'hash' and 'hlink' are
   1222  * inlined in the Entry structure.
   1223  */
   1224 
    1225 /* Maximum time (in seconds) for a thread to wait for a pending request */
    1226 #define PENDING_REQUEST_TIMEOUT 20
   1227 
   1228 typedef struct pending_req_info {
   1229     unsigned int                hash;
   1230     pthread_cond_t              cond;
   1231     struct pending_req_info*    next;
   1232 } PendingReqInfo;
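/* illustrative note: pending_requests implements simple request coalescing
 * between threads, using the helpers defined below. roughly:
 *
 *   thread A: cache miss -> _cache_check_pending_request_locked() finds no
 *     matching hash, registers one and returns 0; A then queries the server.
 *   thread B: same query -> the same call finds A's pending entry and blocks
 *     in pthread_cond_timedwait() for up to PENDING_REQUEST_TIMEOUT seconds,
 *     then returns 1, so B can re-check the cache instead of sending a
 *     duplicate request.
 *   thread A: once an answer is cached (or the query fails, see
 *     _resolv_cache_query_failed() below), waiters are woken through
 *     _cache_notify_waiting_tid_locked().
 */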
   1233 
   1234 typedef struct resolv_cache {
   1235     int              max_entries;
   1236     int              num_entries;
   1237     Entry            mru_list;
   1238     pthread_mutex_t  lock;
   1239     unsigned         generation;
   1240     int              last_id;
   1241     Entry*           entries;
   1242     PendingReqInfo   pending_requests;
   1243 } Cache;
   1244 
   1245 typedef struct resolv_cache_info {
   1246     char                        ifname[IF_NAMESIZE + 1];
   1247     struct in_addr              ifaddr;
   1248     Cache*                      cache;
   1249     struct resolv_cache_info*   next;
   1250     char*                       nameservers[MAXNS +1];
   1251     struct addrinfo*            nsaddrinfo[MAXNS + 1];
   1252     char                        defdname[256];
   1253     int                         dnsrch_offset[MAXDNSRCH+1];  // offsets into defdname
   1254 } CacheInfo;
   1255 
   1256 typedef struct resolv_pidiface_info {
   1257     int                             pid;
   1258     char                            ifname[IF_NAMESIZE + 1];
   1259     struct resolv_pidiface_info*    next;
   1260 } PidIfaceInfo;
   1261 typedef struct resolv_uidiface_info {
   1262     int                             uid_start;
   1263     int                             uid_end;
   1264     char                            ifname[IF_NAMESIZE + 1];
   1265     struct resolv_uidiface_info*    next;
   1266 } UidIfaceInfo;
   1267 
   1268 #define  HTABLE_VALID(x)  ((x) != NULL && (x) != HTABLE_DELETED)
   1269 
   1270 static void
   1271 _cache_flush_pending_requests_locked( struct resolv_cache* cache )
   1272 {
   1273     struct pending_req_info *ri, *tmp;
   1274     if (cache) {
   1275         ri = cache->pending_requests.next;
   1276 
   1277         while (ri) {
   1278             tmp = ri;
   1279             ri = ri->next;
   1280             pthread_cond_broadcast(&tmp->cond);
   1281 
   1282             pthread_cond_destroy(&tmp->cond);
   1283             free(tmp);
   1284         }
   1285 
   1286         cache->pending_requests.next = NULL;
   1287     }
   1288 }
   1289 
    1290 /* return 0 if no pending request matching the key is found (and register
    1291  * a new one). if a matching request is found, the calling thread will wait
    1292  * until it is released, then return 1 */
   1293 static int
   1294 _cache_check_pending_request_locked( struct resolv_cache* cache, Entry* key )
   1295 {
   1296     struct pending_req_info *ri, *prev;
   1297     int exist = 0;
   1298 
   1299     if (cache && key) {
   1300         ri = cache->pending_requests.next;
   1301         prev = &cache->pending_requests;
   1302         while (ri) {
   1303             if (ri->hash == key->hash) {
   1304                 exist = 1;
   1305                 break;
   1306             }
   1307             prev = ri;
   1308             ri = ri->next;
   1309         }
   1310 
   1311         if (!exist) {
   1312             ri = calloc(1, sizeof(struct pending_req_info));
   1313             if (ri) {
   1314                 ri->hash = key->hash;
   1315                 pthread_cond_init(&ri->cond, NULL);
   1316                 prev->next = ri;
   1317             }
   1318         } else {
   1319             struct timespec ts = {0,0};
   1320             XLOG("Waiting for previous request");
   1321             ts.tv_sec = _time_now() + PENDING_REQUEST_TIMEOUT;
   1322             pthread_cond_timedwait(&ri->cond, &cache->lock, &ts);
   1323         }
   1324     }
   1325 
   1326     return exist;
   1327 }
   1328 
    1329 /* notify any threads waiting on a request matching the key that
    1330  * the answer has been added to the cache (or that the query failed) */
   1331 static void
   1332 _cache_notify_waiting_tid_locked( struct resolv_cache* cache, Entry* key )
   1333 {
   1334     struct pending_req_info *ri, *prev;
   1335 
   1336     if (cache && key) {
   1337         ri = cache->pending_requests.next;
   1338         prev = &cache->pending_requests;
   1339         while (ri) {
   1340             if (ri->hash == key->hash) {
   1341                 pthread_cond_broadcast(&ri->cond);
   1342                 break;
   1343             }
   1344             prev = ri;
   1345             ri = ri->next;
   1346         }
   1347 
   1348         // remove item from list and destroy
   1349         if (ri) {
   1350             prev->next = ri->next;
   1351             pthread_cond_destroy(&ri->cond);
   1352             free(ri);
   1353         }
   1354     }
   1355 }
   1356 
   1357 /* notify the cache that the query failed */
   1358 void
   1359 _resolv_cache_query_failed( struct resolv_cache* cache,
   1360                    const void* query,
   1361                    int         querylen)
   1362 {
   1363     Entry    key[1];
   1364 
   1365     if (cache && entry_init_key(key, query, querylen)) {
   1366         pthread_mutex_lock(&cache->lock);
   1367         _cache_notify_waiting_tid_locked(cache, key);
   1368         pthread_mutex_unlock(&cache->lock);
   1369     }
   1370 }
   1371 
   1372 static void
   1373 _cache_flush_locked( Cache*  cache )
   1374 {
   1375     int     nn;
   1376 
   1377     for (nn = 0; nn < cache->max_entries; nn++)
   1378     {
   1379         Entry**  pnode = (Entry**) &cache->entries[nn];
   1380 
   1381         while (*pnode != NULL) {
   1382             Entry*  node = *pnode;
   1383             *pnode = node->hlink;
   1384             entry_free(node);
   1385         }
   1386     }
   1387 
   1388     // flush pending request
   1389     _cache_flush_pending_requests_locked(cache);
   1390 
   1391     cache->mru_list.mru_next = cache->mru_list.mru_prev = &cache->mru_list;
   1392     cache->num_entries       = 0;
   1393     cache->last_id           = 0;
   1394 
   1395     XLOG("*************************\n"
   1396          "*** DNS CACHE FLUSHED ***\n"
   1397          "*************************");
   1398 }
   1399 
    1400 /* Return the max number of entries allowed in the cache,
    1401  * i.e. the cache size. The cache size is either defined by
    1402  * the system property ro.net.dns_cache_size or by
    1403  * CONFIG_MAX_ENTRIES if the property is not set or is set
    1404  * to an invalid value. */
   1405 static int
   1406 _res_cache_get_max_entries( void )
   1407 {
   1408     int result = -1;
   1409     char cache_size[PROP_VALUE_MAX];
   1410 
   1411     const char* cache_mode = getenv("ANDROID_DNS_MODE");
   1412 
   1413     if (cache_mode == NULL || strcmp(cache_mode, "local") != 0) {
    1414         // Only use the cache in 'local' mode; this mode is used
    1415         // by the dns proxy itself.
   1416         XLOG("setup cache for non-cache process. size=0, %s", cache_mode);
   1417         return 0;
   1418     }
   1419 
   1420     if (__system_property_get(DNS_CACHE_SIZE_PROP_NAME, cache_size) > 0) {
   1421         result = atoi(cache_size);
   1422     }
   1423 
   1424     // ro.net.dns_cache_size not set or set to negative value
   1425     if (result <= 0) {
   1426         result = CONFIG_MAX_ENTRIES;
   1427     }
   1428 
   1429     XLOG("cache size: %d", result);
   1430     return result;
   1431 }
   1432 
   1433 static struct resolv_cache*
   1434 _resolv_cache_create( void )
   1435 {
   1436     struct resolv_cache*  cache;
   1437 
   1438     cache = calloc(sizeof(*cache), 1);
   1439     if (cache) {
   1440         cache->max_entries = _res_cache_get_max_entries();
   1441         cache->entries = calloc(sizeof(*cache->entries), cache->max_entries);
   1442         if (cache->entries) {
   1443             cache->generation = ~0U;
   1444             pthread_mutex_init( &cache->lock, NULL );
   1445             cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
   1446             XLOG("%s: cache created\n", __FUNCTION__);
   1447         } else {
   1448             free(cache);
   1449             cache = NULL;
   1450         }
   1451     }
   1452     return cache;
   1453 }
   1454 
   1455 
   1456 #if DEBUG
   1457 static void
   1458 _dump_query( const uint8_t*  query, int  querylen )
   1459 {
   1460     char       temp[256], *p=temp, *end=p+sizeof(temp);
   1461     DnsPacket  pack[1];
   1462 
   1463     _dnsPacket_init(pack, query, querylen);
   1464     p = _dnsPacket_bprintQuery(pack, p, end);
   1465     XLOG("QUERY: %s", temp);
   1466 }
   1467 
   1468 static void
   1469 _cache_dump_mru( Cache*  cache )
   1470 {
   1471     char    temp[512], *p=temp, *end=p+sizeof(temp);
   1472     Entry*  e;
   1473 
   1474     p = _bprint(temp, end, "MRU LIST (%2d): ", cache->num_entries);
   1475     for (e = cache->mru_list.mru_next; e != &cache->mru_list; e = e->mru_next)
   1476         p = _bprint(p, end, " %d", e->id);
   1477 
   1478     XLOG("%s", temp);
   1479 }
   1480 
   1481 static void
   1482 _dump_answer(const void* answer, int answerlen)
   1483 {
   1484     res_state statep;
   1485     FILE* fp;
   1486     char* buf;
   1487     int fileLen;
   1488 
   1489     fp = fopen("/data/reslog.txt", "w+");
   1490     if (fp != NULL) {
   1491         statep = __res_get_state();
   1492 
   1493         res_pquery(statep, answer, answerlen, fp);
   1494 
   1495         //Get file length
   1496         fseek(fp, 0, SEEK_END);
   1497         fileLen=ftell(fp);
   1498         fseek(fp, 0, SEEK_SET);
   1499         buf = (char *)malloc(fileLen+1);
   1500         if (buf != NULL) {
   1501             //Read file contents into buffer
   1502             fread(buf, fileLen, 1, fp);
   1503             XLOG("%s\n", buf);
   1504             free(buf);
   1505         }
   1506         fclose(fp);
   1507         remove("/data/reslog.txt");
   1508     }
   1509     else {
   1510         errno = 0; // else debug is introducing error signals
   1511         XLOG("_dump_answer: can't open file\n");
   1512     }
   1513 }
   1514 #endif
   1515 
   1516 #if DEBUG
   1517 #  define  XLOG_QUERY(q,len)   _dump_query((q), (len))
   1518 #  define  XLOG_ANSWER(a, len) _dump_answer((a), (len))
   1519 #else
   1520 #  define  XLOG_QUERY(q,len)   ((void)0)
   1521 #  define  XLOG_ANSWER(a,len)  ((void)0)
   1522 #endif
   1523 
    1524 /* This function tries to find a key within the hash table.
    1525  * In case of success, it will return a *pointer* to the hashed key.
    1526  * In case of failure, it will return a *pointer* to NULL.
   1527  *
   1528  * So, the caller must check '*result' to check for success/failure.
   1529  *
   1530  * The main idea is that the result can later be used directly in
   1531  * calls to _resolv_cache_add or _resolv_cache_remove as the 'lookup'
   1532  * parameter. This makes the code simpler and avoids re-searching
   1533  * for the key position in the htable.
   1534  *
   1535  * The result of a lookup_p is only valid until you alter the hash
   1536  * table.
   1537  */
   1538 static Entry**
   1539 _cache_lookup_p( Cache*   cache,
   1540                  Entry*   key )
   1541 {
   1542     int      index = key->hash % cache->max_entries;
   1543     Entry**  pnode = (Entry**) &cache->entries[ index ];
   1544 
    1545     while (*pnode != NULL) {
    1546         Entry*  node = *pnode;
    1547 
    1551         if (node->hash == key->hash && entry_equals(node, key))
    1552             break;
   1553 
   1554         pnode = &node->hlink;
   1555     }
   1556     return pnode;
   1557 }
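/* illustrative sketch of the lookup/add/remove idiom described above; 'key'
 * and 'new_entry' are placeholders, the real call sites are in the cache
 * lookup and add paths:
 *
 *     Entry**  lookup = _cache_lookup_p(cache, key);
 *
 *     if (*lookup == NULL) {
 *         // miss: a new entry may be inserted at this exact slot
 *         _cache_add_p(cache, lookup, new_entry);
 *     } else {
 *         // hit: *lookup points to the matching entry, which can be read
 *         // or removed without searching the table again
 *         _cache_remove_p(cache, lookup);
 *     }
 */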
   1558 
   1559 /* Add a new entry to the hash table. 'lookup' must be the
   1560  * result of an immediate previous failed _lookup_p() call
   1561  * (i.e. with *lookup == NULL), and 'e' is the pointer to the
   1562  * newly created entry
   1563  */
   1564 static void
   1565 _cache_add_p( Cache*   cache,
   1566               Entry**  lookup,
   1567               Entry*   e )
   1568 {
   1569     *lookup = e;
   1570     e->id = ++cache->last_id;
   1571     entry_mru_add(e, &cache->mru_list);
   1572     cache->num_entries += 1;
   1573 
   1574     XLOG("%s: entry %d added (count=%d)", __FUNCTION__,
   1575          e->id, cache->num_entries);
   1576 }
   1577 
   1578 /* Remove an existing entry from the hash table,
    1579  * 'lookup' must be the result of an immediately preceding
    1580  * and successful _lookup_p() call.
   1581  */
   1582 static void
   1583 _cache_remove_p( Cache*   cache,
   1584                  Entry**  lookup )
   1585 {
   1586     Entry*  e  = *lookup;
   1587 
   1588     XLOG("%s: entry %d removed (count=%d)", __FUNCTION__,
   1589          e->id, cache->num_entries-1);
   1590 
   1591     entry_mru_remove(e);
   1592     *lookup = e->hlink;
   1593     entry_free(e);
   1594     cache->num_entries -= 1;
   1595 }
   1596 
   1597 /* Remove the oldest entry from the hash table.
   1598  */
   1599 static void
   1600 _cache_remove_oldest( Cache*  cache )
   1601 {
   1602     Entry*   oldest = cache->mru_list.mru_prev;
   1603     Entry**  lookup = _cache_lookup_p(cache, oldest);
   1604 
   1605     if (*lookup == NULL) { /* should not happen */
   1606         XLOG("%s: OLDEST NOT IN HTABLE ?", __FUNCTION__);
   1607         return;
   1608     }
   1609     if (DEBUG) {
   1610         XLOG("Cache full - removing oldest");
   1611         XLOG_QUERY(oldest->query, oldest->querylen);
   1612     }
   1613     _cache_remove_p(cache, lookup);
   1614 }
   1615 
   1616 /* Remove all expired entries from the hash table.
   1617  */
   1618 static void _cache_remove_expired(Cache* cache) {
   1619     Entry* e;
   1620     time_t now = _time_now();
   1621 
   1622     for (e = cache->mru_list.mru_next; e != &cache->mru_list;) {
   1623         // Entry is old, remove
   1624         if (now >= e->expires) {
   1625             Entry** lookup = _cache_lookup_p(cache, e);
   1626             if (*lookup == NULL) { /* should not happen */
   1627                 XLOG("%s: ENTRY NOT IN HTABLE ?", __FUNCTION__);
   1628                 return;
   1629             }
   1630             e = e->mru_next;
   1631             _cache_remove_p(cache, lookup);
   1632         } else {
   1633             e = e->mru_next;
   1634         }
   1635     }
   1636 }
   1637 
   1638 ResolvCacheStatus
   1639 _resolv_cache_lookup( struct resolv_cache*  cache,
   1640                       const void*           query,
   1641                       int                   querylen,
   1642                       void*                 answer,
   1643                       int                   answersize,
   1644                       int                  *answerlen )
   1645 {
   1646     Entry      key[1];
   1647     Entry**    lookup;
   1648     Entry*     e;
   1649     time_t     now;
   1650 
   1651     ResolvCacheStatus  result = RESOLV_CACHE_NOTFOUND;
   1652 
   1653     XLOG("%s: lookup", __FUNCTION__);
   1654     XLOG_QUERY(query, querylen);
   1655 
   1656     /* we don't cache malformed queries */
   1657     if (!entry_init_key(key, query, querylen)) {
   1658         XLOG("%s: unsupported query", __FUNCTION__);
   1659         return RESOLV_CACHE_UNSUPPORTED;
   1660     }
   1661     /* lookup cache */
   1662     pthread_mutex_lock( &cache->lock );
   1663 
   1664     /* see the description of _lookup_p to understand this.
    1665      * the function always returns a non-NULL pointer.
   1666      */
   1667     lookup = _cache_lookup_p(cache, key);
   1668     e      = *lookup;
   1669 
   1670     if (e == NULL) {
   1671         XLOG( "NOT IN CACHE");
   1672         // calling thread will wait if an outstanding request is found
    1673         // that matches this query
   1674         if (!_cache_check_pending_request_locked(cache, key)) {
   1675             goto Exit;
   1676         } else {
   1677             lookup = _cache_lookup_p(cache, key);
   1678             e = *lookup;
   1679             if (e == NULL) {
   1680                 goto Exit;
   1681             }
   1682         }
   1683     }
   1684 
   1685     now = _time_now();
   1686 
   1687     /* remove stale entries here */
   1688     if (now >= e->expires) {
   1689         XLOG( " NOT IN CACHE (STALE ENTRY %p DISCARDED)", *lookup );
   1690         XLOG_QUERY(e->query, e->querylen);
   1691         _cache_remove_p(cache, lookup);
   1692         goto Exit;
   1693     }
   1694 
   1695     *answerlen = e->answerlen;
   1696     if (e->answerlen > answersize) {
   1697         /* NOTE: we return UNSUPPORTED if the answer buffer is too short */
   1698         result = RESOLV_CACHE_UNSUPPORTED;
   1699         XLOG(" ANSWER TOO LONG");
   1700         goto Exit;
   1701     }
   1702 
   1703     memcpy( answer, e->answer, e->answerlen );
   1704 
   1705     /* bump up this entry to the top of the MRU list */
   1706     if (e != cache->mru_list.mru_next) {
   1707         entry_mru_remove( e );
   1708         entry_mru_add( e, &cache->mru_list );
   1709     }
   1710 
   1711     XLOG( "FOUND IN CACHE entry=%p", e );
   1712     result = RESOLV_CACHE_FOUND;
   1713 
   1714 Exit:
   1715     pthread_mutex_unlock( &cache->lock );
   1716     return result;
   1717 }
   1718 
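         /* A minimal sketch of the intended calling sequence, assuming the caller
          * performs the actual DNS exchange itself on a cache miss (the 'cache'
          * pointer would typically come from __get_res_cache() below):
          *
          *     int alen = 0;
          *     switch (_resolv_cache_lookup(cache, query, querylen,
          *                                  answer, answersize, &alen)) {
          *     case RESOLV_CACHE_FOUND:
          *         // 'answer' now holds a cached response of 'alen' bytes
          *         break;
          *     case RESOLV_CACHE_NOTFOUND:
          *         // send the query to a name server, fill 'answer'/'alen' with the
          *         // reply, then publish it; this also wakes up any threads blocked
          *         // on the same outstanding query:
          *         _resolv_cache_add(cache, query, querylen, answer, alen);
          *         break;
          *     default: // RESOLV_CACHE_UNSUPPORTED: malformed query or buffer too small
          *         break;
          *     }
          */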
   1719 
   1720 void
   1721 _resolv_cache_add( struct resolv_cache*  cache,
   1722                    const void*           query,
   1723                    int                   querylen,
   1724                    const void*           answer,
   1725                    int                   answerlen )
   1726 {
   1727     Entry    key[1];
   1728     Entry*   e;
   1729     Entry**  lookup;
   1730     u_long   ttl;
   1731 
   1732     /* don't assume that the query has already been cached
   1733      */
   1734     if (!entry_init_key( key, query, querylen )) {
   1735         XLOG( "%s: passed invalid query ?", __FUNCTION__);
   1736         return;
   1737     }
   1738 
   1739     pthread_mutex_lock( &cache->lock );
   1740 
   1741     XLOG( "%s: query:", __FUNCTION__ );
   1742     XLOG_QUERY(query,querylen);
   1743     XLOG_ANSWER(answer, answerlen);
   1744 #if DEBUG_DATA
   1745     XLOG( "answer:");
   1746     XLOG_BYTES(answer,answerlen);
   1747 #endif
   1748 
   1749     lookup = _cache_lookup_p(cache, key);
   1750     e      = *lookup;
   1751 
   1752     if (e != NULL) { /* should not happen */
   1753         XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
   1754              __FUNCTION__, e);
   1755         goto Exit;
   1756     }
   1757 
   1758     if (cache->num_entries >= cache->max_entries) {
   1759         _cache_remove_expired(cache);
   1760         if (cache->num_entries >= cache->max_entries) {
   1761             _cache_remove_oldest(cache);
   1762         }
   1763         /* need to lookup again */
   1764         lookup = _cache_lookup_p(cache, key);
   1765         e      = *lookup;
   1766         if (e != NULL) {
   1767             XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
   1768                 __FUNCTION__, e);
   1769             goto Exit;
   1770         }
   1771     }
   1772 
   1773     ttl = answer_getTTL(answer, answerlen);
   1774     if (ttl > 0) {
   1775         e = entry_alloc(key, answer, answerlen);
   1776         if (e != NULL) {
   1777             e->expires = ttl + _time_now();
   1778             _cache_add_p(cache, lookup, e);
   1779         }
   1780     }
   1781 #if DEBUG
   1782     _cache_dump_mru(cache);
   1783 #endif
   1784 Exit:
   1785     _cache_notify_waiting_tid_locked(cache, key);
   1786     pthread_mutex_unlock( &cache->lock );
   1787 }
   1788 
   1789 /****************************************************************************/
   1790 /****************************************************************************/
   1791 /*****                                                                  *****/
   1792 /*****                                                                  *****/
   1793 /*****                                                                  *****/
   1794 /****************************************************************************/
   1795 /****************************************************************************/
   1796 
   1797 static pthread_once_t        _res_cache_once = PTHREAD_ONCE_INIT;
   1798 
   1799 // Head of the list of caches.  Protected by _res_cache_list_lock.
   1800 static struct resolv_cache_info _res_cache_list;
   1801 
   1802 // List of pid iface pairs
   1803 static struct resolv_pidiface_info _res_pidiface_list;
   1804 
   1805 // List of uid iface pairs
   1806 static struct resolv_uidiface_info _res_uidiface_list;
   1807 
    1808 // name of the current default interface
   1809 static char            _res_default_ifname[IF_NAMESIZE + 1];
   1810 
   1811 // lock protecting everything in the _resolve_cache_info structs (next ptr, etc)
    1812 // lock protecting everything in the resolv_cache_info structs (next ptr, etc)
   1813 
   1814 // lock protecting the _res_pid_iface_list
   1815 static pthread_mutex_t _res_pidiface_list_lock;
   1816 
   1817 // lock protecting the _res_uidiface_list
   1818 static pthread_mutex_t _res_uidiface_list_lock;
   1819 
   1820 /* lookup the default interface name */
   1821 static char *_get_default_iface_locked();
   1822 /* find the first cache that has an associated interface and return the name of the interface */
   1823 static char* _find_any_iface_name_locked( void );
   1824 
   1825 /* insert resolv_cache_info into the list of resolv_cache_infos */
   1826 static void _insert_cache_info_locked(struct resolv_cache_info* cache_info);
   1827 /* creates a resolv_cache_info */
   1828 static struct resolv_cache_info* _create_cache_info( void );
   1829 /* gets cache associated with an interface name, or NULL if none exists */
   1830 static struct resolv_cache* _find_named_cache_locked(const char* ifname);
   1831 /* gets a resolv_cache_info associated with an interface name, or NULL if not found */
   1832 static struct resolv_cache_info* _find_cache_info_locked(const char* ifname);
    1833 /* look up the named cache, creating one if needed */
   1834 static struct resolv_cache* _get_res_cache_for_iface_locked(const char* ifname);
   1835 /* empty the named cache */
   1836 static void _flush_cache_for_iface_locked(const char* ifname);
   1837 /* empty the nameservers set for the named cache */
   1838 static void _free_nameservers_locked(struct resolv_cache_info* cache_info);
    1839 /* lookup the nameserver for the named interface */
   1840 static int _get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen);
   1841 /* lookup the addr of the nameserver for the named interface */
   1842 static struct addrinfo* _get_nameserver_addr_locked(const char* ifname, int n);
    1843 /* lookup the interface's address */
   1844 static struct in_addr* _get_addr_locked(const char * ifname);
    1845 /* return 1 if each of the provided name servers is already attached to the
    1846  * provided cache_info (i.e. nothing needs to change), 0 otherwise */
   1847 static int _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
   1848         const char** servers, int numservers);
   1849 /* remove a resolv_pidiface_info structure from _res_pidiface_list */
   1850 static void _remove_pidiface_info_locked(int pid);
   1851 /* get a resolv_pidiface_info structure from _res_pidiface_list with a certain pid */
   1852 static struct resolv_pidiface_info* _get_pid_iface_info_locked(int pid);
   1853 
   1854 /* remove a resolv_pidiface_info structure from _res_uidiface_list */
   1855 static int _remove_uidiface_info_locked(int uid_start, int uid_end);
   1856 /* check if a range [low,high] overlaps with any already existing ranges in the uid=>iface map*/
    1857 /* check if a range [low,high] overlaps with any already existing ranges in the uid=>iface map */
   1858 /* get a resolv_uidiface_info structure from _res_uidiface_list with a certain uid */
   1859 static struct resolv_uidiface_info* _get_uid_iface_info_locked(int uid);
   1860 
   1861 static void
   1862 _res_cache_init(void)
   1863 {
   1864     const char*  env = getenv(CONFIG_ENV);
   1865 
   1866     if (env && atoi(env) == 0) {
   1867         /* the cache is disabled */
   1868         return;
   1869     }
   1870 
   1871     memset(&_res_default_ifname, 0, sizeof(_res_default_ifname));
   1872     memset(&_res_cache_list, 0, sizeof(_res_cache_list));
   1873     memset(&_res_pidiface_list, 0, sizeof(_res_pidiface_list));
   1874     memset(&_res_uidiface_list, 0, sizeof(_res_uidiface_list));
   1875     pthread_mutex_init(&_res_cache_list_lock, NULL);
   1876     pthread_mutex_init(&_res_pidiface_list_lock, NULL);
   1877     pthread_mutex_init(&_res_uidiface_list_lock, NULL);
   1878 }
   1879 
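         /* Return the cache attached to 'ifname', creating it on first use.  A NULL
          * or empty 'ifname' falls back to the default interface or, failing that,
          * to the first interface that already has a cache attached. */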
   1880 struct resolv_cache*
   1881 __get_res_cache(const char* ifname)
   1882 {
   1883     struct resolv_cache *cache;
   1884 
   1885     pthread_once(&_res_cache_once, _res_cache_init);
   1886     pthread_mutex_lock(&_res_cache_list_lock);
   1887 
   1888     char* iface;
   1889     if (ifname == NULL || ifname[0] == '\0') {
   1890         iface = _get_default_iface_locked();
   1891         if (iface[0] == '\0') {
   1892             char* tmp = _find_any_iface_name_locked();
   1893             if (tmp) {
   1894                 iface = tmp;
   1895             }
   1896         }
   1897     } else {
   1898         iface = (char *) ifname;
   1899     }
   1900 
   1901     cache = _get_res_cache_for_iface_locked(iface);
   1902 
   1903     pthread_mutex_unlock(&_res_cache_list_lock);
   1904     XLOG("_get_res_cache: iface = %s, cache=%p\n", iface, cache);
   1905     return cache;
   1906 }
   1907 
   1908 static struct resolv_cache*
   1909 _get_res_cache_for_iface_locked(const char* ifname)
   1910 {
   1911     if (ifname == NULL)
   1912         return NULL;
   1913 
   1914     struct resolv_cache* cache = _find_named_cache_locked(ifname);
   1915     if (!cache) {
   1916         struct resolv_cache_info* cache_info = _create_cache_info();
   1917         if (cache_info) {
   1918             cache = _resolv_cache_create();
   1919             if (cache) {
   1920                 int len = sizeof(cache_info->ifname);
   1921                 cache_info->cache = cache;
   1922                 strncpy(cache_info->ifname, ifname, len - 1);
   1923                 cache_info->ifname[len - 1] = '\0';
   1924 
   1925                 _insert_cache_info_locked(cache_info);
   1926             } else {
   1927                 free(cache_info);
   1928             }
   1929         }
   1930     }
   1931     return cache;
   1932 }
   1933 
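         /* Flush the default interface's cache (or, when no default interface is
          * set, the first cache attached to any interface) if its stored generation
          * differs from 'generation', then record the new generation. */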
   1934 void
   1935 _resolv_cache_reset(unsigned  generation)
   1936 {
   1937     XLOG("%s: generation=%d", __FUNCTION__, generation);
   1938 
   1939     pthread_once(&_res_cache_once, _res_cache_init);
   1940     pthread_mutex_lock(&_res_cache_list_lock);
   1941 
   1942     char* ifname = _get_default_iface_locked();
    1943     // if the default interface is not set, use the first cache
    1944     // associated with an interface as the default one.
    1945     // Note: this duplicates code from __get_res_cache, since this
    1946     // method will become obsolete once per-interface caching is
    1947     // implemented everywhere
   1948     if (ifname[0] == '\0') {
   1949         struct resolv_cache_info* cache_info = _res_cache_list.next;
   1950         while (cache_info) {
   1951             if (cache_info->ifname[0] != '\0') {
   1952                 ifname = cache_info->ifname;
   1953                 break;
   1954             }
   1955 
   1956             cache_info = cache_info->next;
   1957         }
   1958     }
   1959     struct resolv_cache* cache = _get_res_cache_for_iface_locked(ifname);
   1960 
   1961     if (cache != NULL) {
   1962         pthread_mutex_lock( &cache->lock );
   1963         if (cache->generation != generation) {
   1964             _cache_flush_locked(cache);
   1965             cache->generation = generation;
   1966         }
   1967         pthread_mutex_unlock( &cache->lock );
   1968     }
   1969 
   1970     pthread_mutex_unlock(&_res_cache_list_lock);
   1971 }
   1972 
   1973 void
   1974 _resolv_flush_cache_for_default_iface(void)
   1975 {
   1976     char* ifname;
   1977 
   1978     pthread_once(&_res_cache_once, _res_cache_init);
   1979     pthread_mutex_lock(&_res_cache_list_lock);
   1980 
   1981     ifname = _get_default_iface_locked();
   1982     _flush_cache_for_iface_locked(ifname);
   1983 
   1984     pthread_mutex_unlock(&_res_cache_list_lock);
   1985 }
   1986 
   1987 void
   1988 _resolv_flush_cache_for_iface(const char* ifname)
   1989 {
   1990     pthread_once(&_res_cache_once, _res_cache_init);
   1991     pthread_mutex_lock(&_res_cache_list_lock);
   1992 
   1993     _flush_cache_for_iface_locked(ifname);
   1994 
   1995     pthread_mutex_unlock(&_res_cache_list_lock);
   1996 }
   1997 
   1998 static void
   1999 _flush_cache_for_iface_locked(const char* ifname)
   2000 {
   2001     struct resolv_cache* cache = _find_named_cache_locked(ifname);
   2002     if (cache) {
   2003         pthread_mutex_lock(&cache->lock);
   2004         _cache_flush_locked(cache);
   2005         pthread_mutex_unlock(&cache->lock);
   2006     }
   2007 }
   2008 
   2009 static struct resolv_cache_info*
   2010 _create_cache_info(void)
   2011 {
   2012     struct resolv_cache_info*  cache_info;
   2013 
   2014     cache_info = calloc(sizeof(*cache_info), 1);
   2015     return cache_info;
   2016 }
   2017 
   2018 static void
   2019 _insert_cache_info_locked(struct resolv_cache_info* cache_info)
   2020 {
   2021     struct resolv_cache_info* last;
   2022 
   2023     for (last = &_res_cache_list; last->next; last = last->next);
   2024 
   2025     last->next = cache_info;
   2026 
   2027 }
   2028 
   2029 static struct resolv_cache*
   2030 _find_named_cache_locked(const char* ifname) {
   2031 
   2032     struct resolv_cache_info* info = _find_cache_info_locked(ifname);
   2033 
   2034     if (info != NULL) return info->cache;
   2035 
   2036     return NULL;
   2037 }
   2038 
   2039 static struct resolv_cache_info*
   2040 _find_cache_info_locked(const char* ifname)
   2041 {
   2042     if (ifname == NULL)
   2043         return NULL;
   2044 
   2045     struct resolv_cache_info* cache_info = _res_cache_list.next;
   2046 
   2047     while (cache_info) {
   2048         if (strcmp(cache_info->ifname, ifname) == 0) {
   2049             break;
   2050         }
   2051 
   2052         cache_info = cache_info->next;
   2053     }
   2054     return cache_info;
   2055 }
   2056 
   2057 static char*
   2058 _get_default_iface_locked(void)
   2059 {
   2060 
   2061     char* iface = _res_default_ifname;
   2062 
   2063     return iface;
   2064 }
   2065 
   2066 static char*
   2067 _find_any_iface_name_locked( void ) {
   2068     char* ifname = NULL;
   2069 
   2070     struct resolv_cache_info* cache_info = _res_cache_list.next;
   2071     while (cache_info) {
   2072         if (cache_info->ifname[0] != '\0') {
   2073             ifname = cache_info->ifname;
   2074             break;
   2075         }
   2076 
   2077         cache_info = cache_info->next;
   2078     }
   2079 
   2080     return ifname;
   2081 }
   2082 
   2083 void
   2084 _resolv_set_default_iface(const char* ifname)
   2085 {
    2086     XLOG("_resolv_set_default_iface: ifname %s\n", ifname);
   2087 
   2088     pthread_once(&_res_cache_once, _res_cache_init);
   2089     pthread_mutex_lock(&_res_cache_list_lock);
   2090 
   2091     int size = sizeof(_res_default_ifname);
   2092     memset(_res_default_ifname, 0, size);
   2093     strncpy(_res_default_ifname, ifname, size - 1);
   2094     _res_default_ifname[size - 1] = '\0';
   2095 
   2096     pthread_mutex_unlock(&_res_cache_list_lock);
   2097 }
   2098 
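         /* Attach up to MAXNS numeric name-server addresses and a space/tab separated
          * list of search 'domains' to the named interface, creating its cache if
          * needed.  When the provided servers differ from the ones currently attached,
          * the old ones are freed, the new ones are resolved with getaddrinfo()
          * (AI_NUMERICHOST, port NAMESERVER_PORT), and the interface's cache is
          * flushed.  Illustrative call (names are made up):
          *     _resolv_set_nameservers_for_iface("wlan0", servers, 2, "example.com");
          */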
   2099 void
   2100 _resolv_set_nameservers_for_iface(const char* ifname, const char** servers, int numservers,
   2101         const char *domains)
   2102 {
   2103     int i, rt, index;
   2104     struct addrinfo hints;
   2105     char sbuf[NI_MAXSERV];
   2106     register char *cp;
   2107     int *offset;
   2108 
   2109     pthread_once(&_res_cache_once, _res_cache_init);
   2110     pthread_mutex_lock(&_res_cache_list_lock);
   2111 
   2112     // creates the cache if not created
   2113     _get_res_cache_for_iface_locked(ifname);
   2114 
   2115     struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
   2116 
   2117     if (cache_info != NULL &&
   2118             !_resolv_is_nameservers_equal_locked(cache_info, servers, numservers)) {
   2119         // free current before adding new
   2120         _free_nameservers_locked(cache_info);
   2121 
   2122         memset(&hints, 0, sizeof(hints));
   2123         hints.ai_family = PF_UNSPEC;
   2124         hints.ai_socktype = SOCK_DGRAM; /*dummy*/
   2125         hints.ai_flags = AI_NUMERICHOST;
   2126         sprintf(sbuf, "%u", NAMESERVER_PORT);
   2127 
   2128         index = 0;
   2129         for (i = 0; i < numservers && i < MAXNS; i++) {
   2130             rt = getaddrinfo(servers[i], sbuf, &hints, &cache_info->nsaddrinfo[index]);
   2131             if (rt == 0) {
   2132                 cache_info->nameservers[index] = strdup(servers[i]);
   2133                 index++;
   2134                 XLOG("_resolv_set_nameservers_for_iface: iface = %s, addr = %s\n",
   2135                         ifname, servers[i]);
   2136             } else {
   2137                 cache_info->nsaddrinfo[index] = NULL;
   2138             }
   2139         }
   2140 
   2141         // code moved from res_init.c, load_domain_search_list
   2142         strlcpy(cache_info->defdname, domains, sizeof(cache_info->defdname));
   2143         if ((cp = strchr(cache_info->defdname, '\n')) != NULL)
   2144             *cp = '\0';
   2145         cp = cache_info->defdname;
   2146         offset = cache_info->dnsrch_offset;
   2147         while (offset < cache_info->dnsrch_offset + MAXDNSRCH) {
   2148             while (*cp == ' ' || *cp == '\t') /* skip leading white space */
   2149                 cp++;
   2150             if (*cp == '\0') /* stop if nothing more to do */
   2151                 break;
   2152             *offset++ = cp - cache_info->defdname; /* record this search domain */
   2153             while (*cp) { /* zero-terminate it */
   2154                 if (*cp == ' '|| *cp == '\t') {
   2155                     *cp++ = '\0';
   2156                     break;
   2157                 }
   2158                 cp++;
   2159             }
   2160         }
   2161         *offset = -1; /* cache_info->dnsrch_offset has MAXDNSRCH+1 items */
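                 /* e.g. domains "foo.com bar.com" ends up stored in defdname as
                  * "foo.com\0bar.com" with dnsrch_offset = { 0, 8, -1 }. */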
   2162 
   2163         // flush cache since new settings
   2164         _flush_cache_for_iface_locked(ifname);
   2165 
   2166     }
   2167 
   2168     pthread_mutex_unlock(&_res_cache_list_lock);
   2169 }
   2170 
   2171 static int
   2172 _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
   2173         const char** servers, int numservers)
   2174 {
   2175     int i;
   2176     char** ns;
   2177     int equal = 1;
   2178 
   2179     // compare each name server against current name servers
   2180     if (numservers > MAXNS) numservers = MAXNS;
   2181     for (i = 0; i < numservers && equal; i++) {
   2182         ns = cache_info->nameservers;
   2183         equal = 0;
   2184         while(*ns) {
   2185             if (strcmp(*ns, servers[i]) == 0) {
   2186                 equal = 1;
   2187                 break;
   2188             }
   2189             ns++;
   2190         }
   2191     }
   2192 
   2193     return equal;
   2194 }
   2195 
   2196 static void
   2197 _free_nameservers_locked(struct resolv_cache_info* cache_info)
   2198 {
   2199     int i;
   2200     for (i = 0; i <= MAXNS; i++) {
   2201         free(cache_info->nameservers[i]);
   2202         cache_info->nameservers[i] = NULL;
   2203         if (cache_info->nsaddrinfo[i] != NULL) {
   2204             freeaddrinfo(cache_info->nsaddrinfo[i]);
   2205             cache_info->nsaddrinfo[i] = NULL;
   2206         }
   2207     }
   2208 }
   2209 
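         /* Copy the n-th (1-based, n <= MAXNS) name server of the default interface
          * into 'addr' and return its string length, or 0 if there is no such server
          * or it does not fit into 'addrLen' bytes. */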
   2210 int
   2211 _resolv_cache_get_nameserver(int n, char* addr, int addrLen)
   2212 {
   2213     char *ifname;
   2214     int result = 0;
   2215 
   2216     pthread_once(&_res_cache_once, _res_cache_init);
   2217     pthread_mutex_lock(&_res_cache_list_lock);
   2218 
   2219     ifname = _get_default_iface_locked();
   2220     result = _get_nameserver_locked(ifname, n, addr, addrLen);
   2221 
   2222     pthread_mutex_unlock(&_res_cache_list_lock);
   2223     return result;
   2224 }
   2225 
   2226 static int
   2227 _get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen)
   2228 {
   2229     int len = 0;
   2230     char* ns;
   2231     struct resolv_cache_info* cache_info;
   2232 
   2233     if (n < 1 || n > MAXNS || !addr)
   2234         return 0;
   2235 
   2236     cache_info = _find_cache_info_locked(ifname);
   2237     if (cache_info) {
   2238         ns = cache_info->nameservers[n - 1];
   2239         if (ns) {
   2240             len = strlen(ns);
   2241             if (len < addrLen) {
   2242                 strncpy(addr, ns, len);
   2243                 addr[len] = '\0';
   2244             } else {
   2245                 len = 0;
   2246             }
   2247         }
   2248     }
   2249 
   2250     return len;
   2251 }
   2252 
   2253 struct addrinfo*
   2254 _cache_get_nameserver_addr(int n)
   2255 {
   2256     struct addrinfo *result;
   2257     char* ifname;
   2258 
   2259     pthread_once(&_res_cache_once, _res_cache_init);
   2260     pthread_mutex_lock(&_res_cache_list_lock);
   2261 
   2262     ifname = _get_default_iface_locked();
   2263 
   2264     result = _get_nameserver_addr_locked(ifname, n);
   2265     pthread_mutex_unlock(&_res_cache_list_lock);
   2266     return result;
   2267 }
   2268 
   2269 static struct addrinfo*
   2270 _get_nameserver_addr_locked(const char* ifname, int n)
   2271 {
   2272     struct addrinfo* ai = NULL;
   2273     struct resolv_cache_info* cache_info;
   2274 
   2275     if (n < 1 || n > MAXNS)
   2276         return NULL;
   2277 
   2278     cache_info = _find_cache_info_locked(ifname);
   2279     if (cache_info) {
   2280         ai = cache_info->nsaddrinfo[n - 1];
   2281     }
   2282     return ai;
   2283 }
   2284 
   2285 void
   2286 _resolv_set_addr_of_iface(const char* ifname, struct in_addr* addr)
   2287 {
   2288     pthread_once(&_res_cache_once, _res_cache_init);
   2289     pthread_mutex_lock(&_res_cache_list_lock);
   2290     struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
   2291     if (cache_info) {
   2292         memcpy(&cache_info->ifaddr, addr, sizeof(*addr));
   2293 
   2294         if (DEBUG) {
   2295             XLOG("address of interface %s is %s\n",
   2296                     ifname, inet_ntoa(cache_info->ifaddr));
   2297         }
   2298     }
   2299     pthread_mutex_unlock(&_res_cache_list_lock);
   2300 }
   2301 
   2302 struct in_addr*
   2303 _resolv_get_addr_of_default_iface(void)
   2304 {
   2305     struct in_addr* ai = NULL;
   2306     char* ifname;
   2307 
   2308     pthread_once(&_res_cache_once, _res_cache_init);
   2309     pthread_mutex_lock(&_res_cache_list_lock);
   2310     ifname = _get_default_iface_locked();
   2311     ai = _get_addr_locked(ifname);
   2312     pthread_mutex_unlock(&_res_cache_list_lock);
   2313 
   2314     return ai;
   2315 }
   2316 
   2317 struct in_addr*
   2318 _resolv_get_addr_of_iface(const char* ifname)
   2319 {
   2320     struct in_addr* ai = NULL;
   2321 
   2322     pthread_once(&_res_cache_once, _res_cache_init);
   2323     pthread_mutex_lock(&_res_cache_list_lock);
   2324     ai =_get_addr_locked(ifname);
   2325     pthread_mutex_unlock(&_res_cache_list_lock);
   2326     return ai;
   2327 }
   2328 
   2329 static struct in_addr*
   2330 _get_addr_locked(const char * ifname)
   2331 {
   2332     struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
   2333     if (cache_info) {
   2334         return &cache_info->ifaddr;
   2335     }
   2336     return NULL;
   2337 }
   2338 
   2339 static void
   2340 _remove_pidiface_info_locked(int pid) {
   2341     struct resolv_pidiface_info* result = &_res_pidiface_list;
   2342     struct resolv_pidiface_info* prev = NULL;
   2343 
   2344     while (result != NULL && result->pid != pid) {
   2345         prev = result;
   2346         result = result->next;
   2347     }
   2348     if (prev != NULL && result != NULL) {
   2349         prev->next = result->next;
   2350         free(result);
   2351     }
   2352 }
   2353 
   2354 static struct resolv_pidiface_info*
   2355 _get_pid_iface_info_locked(int pid)
   2356 {
   2357     struct resolv_pidiface_info* result = &_res_pidiface_list;
   2358     while (result != NULL && result->pid != pid) {
   2359         result = result->next;
   2360     }
   2361 
   2362     return result;
   2363 }
   2364 
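         /* Record a pid => interface mapping in _res_pidiface_list.  If the pid
          * already has a mapping, the existing one is left untouched. */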
   2365 void
   2366 _resolv_set_iface_for_pid(const char* ifname, int pid)
   2367 {
   2368     // make sure the pid iface list is created
   2369     pthread_once(&_res_cache_once, _res_cache_init);
   2370     pthread_mutex_lock(&_res_pidiface_list_lock);
   2371 
   2372     struct resolv_pidiface_info* pidiface_info = _get_pid_iface_info_locked(pid);
   2373     if (!pidiface_info) {
   2374         pidiface_info = calloc(sizeof(*pidiface_info), 1);
   2375         if (pidiface_info) {
   2376             pidiface_info->pid = pid;
   2377             int len = sizeof(pidiface_info->ifname);
   2378             strncpy(pidiface_info->ifname, ifname, len - 1);
   2379             pidiface_info->ifname[len - 1] = '\0';
   2380 
   2381             pidiface_info->next = _res_pidiface_list.next;
   2382             _res_pidiface_list.next = pidiface_info;
   2383 
   2384             XLOG("_resolv_set_iface_for_pid: pid %d , iface %s\n", pid, ifname);
   2385         } else {
    2386             XLOG("_resolv_set_iface_for_pid: calloc failed");
   2387         }
   2388     }
   2389 
   2390     pthread_mutex_unlock(&_res_pidiface_list_lock);
   2391 }
   2392 
   2393 void
   2394 _resolv_clear_iface_for_pid(int pid)
   2395 {
   2396     pthread_once(&_res_cache_once, _res_cache_init);
   2397     pthread_mutex_lock(&_res_pidiface_list_lock);
   2398 
   2399     _remove_pidiface_info_locked(pid);
   2400 
   2401     XLOG("_resolv_clear_iface_for_pid: pid %d\n", pid);
   2402 
   2403     pthread_mutex_unlock(&_res_pidiface_list_lock);
   2404 }
   2405 
   2406 int
   2407 _resolv_get_pids_associated_interface(int pid, char* buff, int buffLen)
   2408 {
   2409     int len = 0;
   2410 
   2411     if (!buff) {
   2412         return -1;
   2413     }
   2414 
   2415     pthread_once(&_res_cache_once, _res_cache_init);
   2416     pthread_mutex_lock(&_res_pidiface_list_lock);
   2417 
   2418     struct resolv_pidiface_info* pidiface_info = _get_pid_iface_info_locked(pid);
   2419     buff[0] = '\0';
   2420     if (pidiface_info) {
   2421         len = strlen(pidiface_info->ifname);
   2422         if (len < buffLen) {
   2423             strncpy(buff, pidiface_info->ifname, len);
   2424             buff[len] = '\0';
   2425         }
   2426     }
   2427 
   2428     XLOG("_resolv_get_pids_associated_interface buff: %s\n", buff);
   2429 
   2430     pthread_mutex_unlock(&_res_pidiface_list_lock);
   2431 
   2432     return len;
   2433 }
   2434 
   2435 static int
   2436 _remove_uidiface_info_locked(int uid_start, int uid_end) {
   2437     struct resolv_uidiface_info* result = _res_uidiface_list.next;
   2438     struct resolv_uidiface_info* prev = &_res_uidiface_list;
   2439 
    2440     while (result != NULL && !(result->uid_start == uid_start && result->uid_end == uid_end)) {
   2441         prev = result;
   2442         result = result->next;
   2443     }
   2444     if (prev != NULL && result != NULL) {
   2445         prev->next = result->next;
   2446         free(result);
   2447         return 0;
   2448     }
   2449     errno = EINVAL;
   2450     return -1;
   2451 }
   2452 
   2453 static struct resolv_uidiface_info*
   2454 _get_uid_iface_info_locked(int uid)
   2455 {
   2456     struct resolv_uidiface_info* result = _res_uidiface_list.next;
   2457     while (result != NULL && !(result->uid_start <= uid && result->uid_end >= uid)) {
   2458         result = result->next;
   2459     }
   2460 
   2461     return result;
   2462 }
   2463 
   2464 static int
   2465 _resolv_check_uid_range_overlap_locked(int uid_start, int uid_end)
   2466 {
   2467     struct resolv_uidiface_info* cur = _res_uidiface_list.next;
   2468     while (cur != NULL) {
   2469         if (cur->uid_start <= uid_end && cur->uid_end >= uid_start) {
   2470             return -1;
   2471         }
   2472         cur = cur->next;
   2473     }
   2474     return 0;
   2475 }
   2476 
   2477 void
   2478 _resolv_clear_iface_uid_range_mapping()
   2479 {
   2480     pthread_once(&_res_cache_once, _res_cache_init);
   2481     pthread_mutex_lock(&_res_uidiface_list_lock);
   2482     struct resolv_uidiface_info *current = _res_uidiface_list.next;
   2483     struct resolv_uidiface_info *next;
   2484     while (current != NULL) {
   2485         next = current->next;
   2486         free(current);
   2487         current = next;
   2488     }
   2489     _res_uidiface_list.next = NULL;
   2490     pthread_mutex_unlock(&_res_uidiface_list_lock);
   2491 }
   2492 
   2493 void
   2494 _resolv_clear_iface_pid_mapping()
   2495 {
   2496     pthread_once(&_res_cache_once, _res_cache_init);
   2497     pthread_mutex_lock(&_res_pidiface_list_lock);
   2498     struct resolv_pidiface_info *current = _res_pidiface_list.next;
   2499     struct resolv_pidiface_info *next;
   2500     while (current != NULL) {
   2501         next = current->next;
   2502         free(current);
   2503         current = next;
   2504     }
   2505     _res_pidiface_list.next = NULL;
   2506     pthread_mutex_unlock(&_res_pidiface_list_lock);
   2507 }
   2508 
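         /* Map the uid range [uid_start, uid_end] to 'ifname'.  Returns 0 on success,
          * or -1 with errno set to EINVAL if the range is inverted, overlaps an
          * existing mapping, or the allocation fails. */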
   2509 int
   2510 _resolv_set_iface_for_uid_range(const char* ifname, int uid_start, int uid_end)
   2511 {
   2512     int rv = 0;
   2513     struct resolv_uidiface_info* uidiface_info;
   2514     // make sure the uid iface list is created
   2515     pthread_once(&_res_cache_once, _res_cache_init);
   2516     if (uid_start > uid_end) {
   2517         errno = EINVAL;
   2518         return -1;
   2519     }
   2520     pthread_mutex_lock(&_res_uidiface_list_lock);
   2521     //check that we aren't adding an overlapping range
   2522     if (!_resolv_check_uid_range_overlap_locked(uid_start, uid_end)) {
   2523         uidiface_info = calloc(sizeof(*uidiface_info), 1);
   2524         if (uidiface_info) {
   2525             uidiface_info->uid_start = uid_start;
   2526             uidiface_info->uid_end = uid_end;
   2527             int len = sizeof(uidiface_info->ifname);
   2528             strncpy(uidiface_info->ifname, ifname, len - 1);
   2529             uidiface_info->ifname[len - 1] = '\0';
   2530 
   2531             uidiface_info->next = _res_uidiface_list.next;
   2532             _res_uidiface_list.next = uidiface_info;
   2533 
   2534             XLOG("_resolv_set_iface_for_uid_range: [%d,%d], iface %s\n", uid_start, uid_end,
   2535                     ifname);
   2536         } else {
    2537             XLOG("_resolv_set_iface_for_uid_range: calloc failed\n");
   2538             rv = -1;
   2539             errno = EINVAL;
   2540         }
   2541     } else {
   2542         XLOG("_resolv_set_iface_for_uid_range range [%d,%d] overlaps\n", uid_start, uid_end);
   2543         rv = -1;
   2544         errno = EINVAL;
   2545     }
   2546 
   2547     pthread_mutex_unlock(&_res_uidiface_list_lock);
   2548     return rv;
   2549 }
   2550 
   2551 int
   2552 _resolv_clear_iface_for_uid_range(int uid_start, int uid_end)
   2553 {
   2554     pthread_once(&_res_cache_once, _res_cache_init);
   2555     pthread_mutex_lock(&_res_uidiface_list_lock);
   2556 
   2557     int rv = _remove_uidiface_info_locked(uid_start, uid_end);
   2558 
   2559     XLOG("_resolv_clear_iface_for_uid_range: [%d,%d]\n", uid_start, uid_end);
   2560 
   2561     pthread_mutex_unlock(&_res_uidiface_list_lock);
   2562 
   2563     return rv;
   2564 }
   2565 
   2566 int
   2567 _resolv_get_uids_associated_interface(int uid, char* buff, int buffLen)
   2568 {
   2569     int len = 0;
   2570 
   2571     if (!buff) {
   2572         return -1;
   2573     }
   2574 
   2575     pthread_once(&_res_cache_once, _res_cache_init);
   2576     pthread_mutex_lock(&_res_uidiface_list_lock);
   2577 
   2578     struct resolv_uidiface_info* uidiface_info = _get_uid_iface_info_locked(uid);
   2579     buff[0] = '\0';
   2580     if (uidiface_info) {
   2581         len = strlen(uidiface_info->ifname);
   2582         if (len < buffLen) {
   2583             strncpy(buff, uidiface_info->ifname, len);
   2584             buff[len] = '\0';
   2585         }
   2586     }
   2587 
   2588     XLOG("_resolv_get_uids_associated_interface buff: %s\n", buff);
   2589 
   2590     pthread_mutex_unlock(&_res_uidiface_list_lock);
   2591 
   2592     return len;
   2593 }
   2594 
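         /* Copy the default interface name into 'buff' and return its length, or 0
          * if no default interface is set.  If the name does not fit, 'buff' is set
          * to the empty string but the full length is still returned. */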
   2595 size_t
   2596 _resolv_get_default_iface(char* buff, size_t buffLen)
   2597 {
   2598     if (!buff || buffLen == 0) {
   2599         return 0;
   2600     }
   2601 
   2602     pthread_once(&_res_cache_once, _res_cache_init);
   2603     pthread_mutex_lock(&_res_cache_list_lock);
   2604 
   2605     char* ifname = _get_default_iface_locked(); // never null, but may be empty
   2606 
   2607     // if default interface not set give up.
   2608     if (ifname[0] == '\0') {
   2609         pthread_mutex_unlock(&_res_cache_list_lock);
   2610         return 0;
   2611     }
   2612 
   2613     size_t len = strlen(ifname);
   2614     if (len < buffLen) {
   2615         strncpy(buff, ifname, len);
   2616         buff[len] = '\0';
   2617     } else {
   2618         buff[0] = '\0';
   2619     }
   2620 
   2621     pthread_mutex_unlock(&_res_cache_list_lock);
   2622 
   2623     return len;
   2624 }
   2625 
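         /* Copy the name-server addresses and search domains attached to statp->iface
          * (falling back to the default interface when statp->iface is empty) into the
          * res_state, so that queries issued through 'statp' use that interface's DNS
          * configuration. */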
   2626 void
   2627 _resolv_populate_res_for_iface(res_state statp)
   2628 {
   2629     if (statp == NULL) {
   2630         return;
   2631     }
   2632 
    2633     if (statp->iface[0] == '\0') { // no interface set, use the default
   2634         size_t if_len = _resolv_get_default_iface(statp->iface, sizeof(statp->iface));
   2635         if (if_len + 1 > sizeof(statp->iface)) {
   2636             XLOG("%s: INTERNAL_ERROR: can't fit interface name into statp->iface.\n", __FUNCTION__);
   2637             return;
   2638         }
   2639         if (if_len == 0) {
   2640             XLOG("%s: INTERNAL_ERROR: can't find any suitable interfaces.\n", __FUNCTION__);
   2641             return;
   2642         }
   2643     }
   2644 
   2645     pthread_once(&_res_cache_once, _res_cache_init);
   2646     pthread_mutex_lock(&_res_cache_list_lock);
   2647 
   2648     struct resolv_cache_info* info = _find_cache_info_locked(statp->iface);
   2649     if (info != NULL) {
   2650         int nserv;
   2651         struct addrinfo* ai;
   2652         XLOG("_resolv_populate_res_for_iface: %s\n", statp->iface);
   2653         for (nserv = 0; nserv < MAXNS; nserv++) {
   2654             ai = info->nsaddrinfo[nserv];
   2655             if (ai == NULL) {
   2656                 break;
   2657             }
   2658 
   2659             if ((size_t) ai->ai_addrlen <= sizeof(statp->_u._ext.ext->nsaddrs[0])) {
   2660                 if (statp->_u._ext.ext != NULL) {
   2661                     memcpy(&statp->_u._ext.ext->nsaddrs[nserv], ai->ai_addr, ai->ai_addrlen);
   2662                     statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
   2663                 } else {
   2664                     if ((size_t) ai->ai_addrlen
   2665                             <= sizeof(statp->nsaddr_list[0])) {
   2666                         memcpy(&statp->nsaddr_list[nserv], ai->ai_addr,
   2667                                 ai->ai_addrlen);
   2668                     } else {
   2669                         statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
   2670                     }
   2671                 }
   2672             } else {
   2673                 XLOG("_resolv_populate_res_for_iface found too long addrlen");
   2674             }
   2675         }
   2676         statp->nscount = nserv;
    2677         // now do search domains.  Note that we cache the offsets since this code runs a lot,
    2678         // but the offsets are only recomputed when the settings change
   2679         strlcpy(statp->defdname, info->defdname, sizeof(statp->defdname));
   2680         register char **pp = statp->dnsrch;
   2681         register int *p = info->dnsrch_offset;
   2682         while (pp < statp->dnsrch + MAXDNSRCH && *p != -1) {
    2683             *pp++ = statp->defdname + *p++;
   2684         }
   2685     }
   2686     pthread_mutex_unlock(&_res_cache_list_lock);
   2687 }
   2688