1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include "resolv_cache.h"
30 
31 #include <resolv.h>
32 #include <stdarg.h>
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <string.h>
36 #include <time.h>
37 #include "pthread.h"
38 
39 #include <errno.h>
40 #include <arpa/nameser.h>
41 #include <sys/system_properties.h>
42 #include <net/if.h>
43 #include <netdb.h>
44 #include <linux/if.h>
45 
46 #include <arpa/inet.h>
47 #include "resolv_private.h"
48 #include "resolv_netid.h"
49 #include "res_private.h"
50 
51 #include "private/libc_logging.h"
52 
53 /* This code implements a small and *simple* DNS resolver cache.
54  *
55  * It is only used to cache DNS answers for a time defined by the smallest TTL
56  * among the answer records in order to reduce DNS traffic. It is not supposed
57  * to be a full DNS cache, since we plan to implement that in the future in a
58  * dedicated process running on the system.
59  *
60  * Note that its design is kept simple very intentionally, i.e.:
61  *
62  *  - it takes raw DNS query packet data as input, and returns raw DNS
63  *    answer packet data as output
64  *
65  *    (this means that two similar queries that encode the DNS name
66  *     differently will be treated distinctly).
67  *
 *    the smallest TTL value among the answer records is used as the time
 *    to keep an answer in the cache.
70  *
71  *    this is bad, but we absolutely want to avoid parsing the answer packets
72  *    (and should be solved by the later full DNS cache process).
73  *
74  *  - the implementation is just a (query-data) => (answer-data) hash table
75  *    with a trivial least-recently-used expiration policy.
76  *
77  * Doing this keeps the code simple and avoids to deal with a lot of things
78  * that a full DNS cache is expected to do.
79  *
80  * The API is also very simple:
81  *
82  *   - the client calls _resolv_cache_get() to obtain a handle to the cache.
83  *     this will initialize the cache on first usage. the result can be NULL
84  *     if the cache is disabled.
85  *
86  *   - the client calls _resolv_cache_lookup() before performing a query
87  *
88  *     if the function returns RESOLV_CACHE_FOUND, a copy of the answer data
89  *     has been copied into the client-provided answer buffer.
90  *
91  *     if the function returns RESOLV_CACHE_NOTFOUND, the client should perform
92  *     a request normally, *then* call _resolv_cache_add() to add the received
93  *     answer to the cache.
94  *
95  *     if the function returns RESOLV_CACHE_UNSUPPORTED, the client should
96  *     perform a request normally, and *not* call _resolv_cache_add()
97  *
98  *     note that RESOLV_CACHE_UNSUPPORTED is also returned if the answer buffer
 *     is too short to accommodate the cached result.
100  */
101 
/* the name of an environment variable that will be checked the first time
 * this code is called. If its value is "0", then the resolver cache is
 * disabled.
 */
106 #define  CONFIG_ENV  "BIONIC_DNSCACHE"
107 
108 /* default number of entries kept in the cache. This value has been
109  * determined by browsing through various sites and counting the number
110  * of corresponding requests. Keep in mind that our framework is currently
111  * performing two requests per name lookup (one for IPv4, the other for IPv6)
112  *
113  *    www.google.com      4
114  *    www.ysearch.com     6
115  *    www.amazon.com      8
116  *    www.nytimes.com     22
117  *    www.espn.com        28
118  *    www.msn.com         28
119  *    www.lemonde.fr      35
120  *
121  * (determined in 2009-2-17 from Paris, France, results may vary depending
122  *  on location)
123  *
124  * most high-level websites use lots of media/ad servers with different names
125  * but these are generally reused when browsing through the site.
126  *
127  * As such, a value of 64 should be relatively comfortable at the moment.
128  *
129  * ******************************************
130  * * NOTE - this has changed.
131  * * 1) we've added IPv6 support so each dns query results in 2 responses
132  * * 2) we've made this a system-wide cache, so the cost is less (it's not
133  * *    duplicated in each process) and the need is greater (more processes
134  * *    making different requests).
135  * * Upping by 2x for IPv6
136  * * Upping by another 5x for the centralized nature
137  * *****************************************
138  */
/* default number of cache entries (see the discussion above); the value is
 * parenthesized so the macro expands safely inside larger expressions
 * (e.g. `x % CONFIG_MAX_ENTRIES` would otherwise parse incorrectly) */
#define  CONFIG_MAX_ENTRIES    (64 * 2 * 5)
140 /* name of the system property that can be used to set the cache size */
141 
142 /****************************************************************************/
143 /****************************************************************************/
144 /*****                                                                  *****/
145 /*****                                                                  *****/
146 /*****                                                                  *****/
147 /****************************************************************************/
148 /****************************************************************************/
149 
150 /* set to 1 to debug cache operations */
151 #define  DEBUG       0
152 
153 /* set to 1 to debug query data */
154 #define  DEBUG_DATA  0
155 
156 #if DEBUG
157 #define __DEBUG__
158 #else
159 #define __DEBUG__ __attribute__((unused))
160 #endif
161 
#undef XLOG

/* Log a debug message to the Android log when DEBUG is enabled;
 * otherwise expands to a no-op (the arguments are still compiled,
 * so format-string mistakes are caught in both configurations). */
#define XLOG(...) ({ \
    if (DEBUG) { \
        __libc_format_log(ANDROID_LOG_DEBUG,"libc",__VA_ARGS__); \
    } else { \
        ((void)0); \
    } \
})
171 
172 /** BOUNDED BUFFER FORMATTING
173  **/
174 
175 /* technical note:
176  *
177  *   the following debugging routines are used to append data to a bounded
178  *   buffer they take two parameters that are:
179  *
180  *   - p : a pointer to the current cursor position in the buffer
181  *         this value is initially set to the buffer's address.
182  *
183  *   - end : the address of the buffer's limit, i.e. of the first byte
184  *           after the buffer. this address should never be touched.
185  *
186  *           IMPORTANT: it is assumed that end > buffer_address, i.e.
187  *                      that the buffer is at least one byte.
188  *
189  *   the _bprint_() functions return the new value of 'p' after the data
190  *   has been appended, and also ensure the following:
191  *
192  *   - the returned value will never be strictly greater than 'end'
193  *
 *   - a return value equal to 'end' means that truncation occurred
195  *     (in which case, end[-1] will be set to 0)
196  *
197  *   - after returning from a _bprint_() function, the content of the buffer
198  *     is always 0-terminated, even in the event of truncation.
199  *
200  *  these conventions allow you to call _bprint_ functions multiple times and
201  *  only check for truncation at the end of the sequence, as in:
202  *
203  *     char  buff[1000], *p = buff, *end = p + sizeof(buff);
204  *
205  *     p = _bprint_c(p, end, '"');
206  *     p = _bprint_s(p, end, my_string);
207  *     p = _bprint_c(p, end, '"');
208  *
209  *     if (p >= end) {
210  *        // buffer was too small
211  *     }
212  *
213  *     printf( "%s", buff );
214  */
215 
/* Append one character to a bounded buffer, keeping it 0-terminated.
 * Returns the new cursor (== 'end' once the buffer is full). */
char*
_bprint_c( char*  p, char*  end, int  c )
{
    if (p >= end)            /* buffer already exhausted */
        return p;

    if (p + 1 == end) {      /* only room left for the terminator */
        *p++ = 0;
    } else {
        *p++ = (char) c;
        *p   = 0;
    }
    return p;
}
230 
/* Append 'len' raw bytes from 'buf' to a bounded buffer, truncating as
 * needed and keeping the result 0-terminated. Returns the new cursor. */
char*
_bprint_b( char*  p, char*  end, const char*  buf, int  len )
{
    int  room = end - p;

    if (room <= 0 || len <= 0)
        return p;

    if (len < room)
        room = len;

    memcpy( p, buf, room );
    p += room;

    /* terminate: right after the copy, or by clobbering the last
     * byte when the buffer is now exactly full */
    if (p == end)
        end[-1] = 0;
    else
        *p = 0;

    return p;
}
253 
/* Append a 0-terminated string to a bounded buffer. */
char*
_bprint_s( char*  p, char*  end, const char*  str )
{
    size_t  len = strlen(str);
    return _bprint_b(p, end, str, (int) len);
}
260 
261 /* add a formatted string to a bounded buffer */
262 char* _bprint( char*  p, char*  end, const char*  format, ... ) __DEBUG__;
char* _bprint( char*  p, char*  end, const char*  format, ... )
{
    va_list  args;
    int      room = end - p;
    int      written;

    if (room <= 0)
        return p;

    va_start(args, format);
    written = vsnprintf( p, room, format, args );
    va_end(args);

    /* certain C libraries return -1 on truncation instead of the
     * would-be length; clamp to the available space either way */
    if (written < 0 || written > room)
        written = room;

    p += written;
    /* certain C libraries also fail to 0-terminate on truncation */
    if (p == end)
        p[-1] = 0;

    return p;
}
288 
/* Append the hexadecimal form of 'value' to a bounded buffer, using
 * exactly 'numDigits' lowercase digits (at most 8 are meaningful). */
char*
_bprint_hex( char*  p, char*  end, unsigned  value, int  numDigits )
{
    static const char  kHexDigits[] = "0123456789abcdef";
    char               text[sizeof(unsigned)*2];
    int                count = 0;

    while (numDigits-- > 0)
        text[count++] = kHexDigits[(value >> (numDigits*4)) & 15];

    return _bprint_b(p, end, text, count);
}
301 
/* Append a hex + ASCII dump of 'datalen' bytes at 'data' to a bounded
 * buffer, 16 bytes per line, in the classic "xx xx ...  chars" layout. */
char*
_bprint_hexdump( char*  p, char*  end, const uint8_t*  data, int  datalen )
{
    const int  kLineSize = 16;

    while (datalen > 0) {
        int  chunk = (datalen < kLineSize) ? datalen : kLineSize;
        int  i;

        /* hex bytes, separated by single spaces */
        for (i = 0; i < chunk; i++) {
            if (i > 0)
                p = _bprint_c(p, end, ' ');
            p = _bprint_hex(p, end, data[i], 2);
        }
        /* pad short lines so the ASCII column stays aligned */
        for ( ; i < kLineSize; i++)
            p = _bprint_s(p, end, "   ");

        p = _bprint_s(p, end, "  ");

        /* printable ASCII, '.' for everything else */
        for (i = 0; i < chunk; i++) {
            int  ch = data[i];

            if (ch < 32 || ch > 127)
                ch = '.';

            p = _bprint_c(p, end, ch);
        }
        p = _bprint_c(p, end, '\n');

        data    += chunk;
        datalen -= chunk;
    }
    return p;
}
340 
341 /* dump the content of a query of packet to the log */
342 void XLOG_BYTES( const void*  base, int  len ) __DEBUG__;
XLOG_BYTES(const void * base,int len)343 void XLOG_BYTES( const void*  base, int  len )
344 {
345     if (DEBUG_DATA) {
346         char  buff[1024];
347         char*  p = buff, *end = p + sizeof(buff);
348 
349         p = _bprint_hexdump(p, end, base, len);
350         XLOG("%s",buff);
351     }
352 } __DEBUG__
353 
/* Return the current wall-clock time in whole seconds. */
static time_t
_time_now( void )
{
    struct timeval  now;

    gettimeofday( &now, NULL );
    return now.tv_sec;
}
362 
363 /* reminder: the general format of a DNS packet is the following:
364  *
365  *    HEADER  (12 bytes)
366  *    QUESTION  (variable)
367  *    ANSWER (variable)
368  *    AUTHORITY (variable)
 *    ADDITIONAL (variable)
370  *
371  * the HEADER is made of:
372  *
373  *   ID     : 16 : 16-bit unique query identification field
374  *
375  *   QR     :  1 : set to 0 for queries, and 1 for responses
376  *   Opcode :  4 : set to 0 for queries
377  *   AA     :  1 : set to 0 for queries
378  *   TC     :  1 : truncation flag, will be set to 0 in queries
379  *   RD     :  1 : recursion desired
380  *
381  *   RA     :  1 : recursion available (0 in queries)
382  *   Z      :  3 : three reserved zero bits
383  *   RCODE  :  4 : response code (always 0=NOERROR in queries)
384  *
385  *   QDCount: 16 : question count
386  *   ANCount: 16 : Answer count (0 in queries)
387  *   NSCount: 16: Authority Record count (0 in queries)
 *   ARCount: 16: Additional Record count (0 in queries)
389  *
390  * the QUESTION is made of QDCount Question Record (QRs)
391  * the ANSWER is made of ANCount RRs
392  * the AUTHORITY is made of NSCount RRs
 * the ADDITIONAL is made of ARCount RRs
394  *
395  * Each Question Record (QR) is made of:
396  *
397  *   QNAME   : variable : Query DNS NAME
398  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
399  *   CLASS   : 16       : class of query (IN=1)
400  *
401  * Each Resource Record (RR) is made of:
402  *
403  *   NAME    : variable : DNS NAME
404  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
405  *   CLASS   : 16       : class of query (IN=1)
406  *   TTL     : 32       : seconds to cache this RR (0=none)
407  *   RDLENGTH: 16       : size of RDDATA in bytes
408  *   RDDATA  : variable : RR data (depends on TYPE)
409  *
410  * Each QNAME contains a domain name encoded as a sequence of 'labels'
411  * terminated by a zero. Each label has the following format:
412  *
 *    LEN  : 8     : length of the label (MUST be < 64)
 *    NAME : 8*LEN : the label's bytes (dots are not stored)
415  *
416  * A value of 0 in the encoding is interpreted as the 'root' domain and
417  * terminates the encoding. So 'www.android.com' will be encoded as:
418  *
419  *   <3>www<7>android<3>com<0>
420  *
421  * Where <n> represents the byte with value 'n'
422  *
423  * Each NAME reflects the QNAME of the question, but has a slightly more
424  * complex encoding in order to provide message compression. This is achieved
425  * by using a 2-byte pointer, with format:
426  *
427  *    TYPE   : 2  : 0b11 to indicate a pointer, 0b01 and 0b10 are reserved
428  *    OFFSET : 14 : offset to another part of the DNS packet
429  *
 * The offset is relative to the start of the DNS packet and must point
 * to another part of the packet that encodes the rest of the name.
 * A pointer terminates the encoding.
432  *
433  * The NAME can be encoded in one of the following formats:
434  *
435  *   - a sequence of simple labels terminated by 0 (like QNAMEs)
436  *   - a single pointer
437  *   - a sequence of simple labels terminated by a pointer
438  *
 * A pointer shall always point to either a pointer or a sequence of
440  * labels (which can themselves be terminated by either a 0 or a pointer)
441  *
442  * The expanded length of a given domain name should not exceed 255 bytes.
443  *
444  * NOTE: we don't parse the answer packets, so don't need to deal with NAME
445  *       records, only QNAMEs.
446  */
447 
#define  DNS_HEADER_SIZE  12

/* TYPE and CLASS values, pre-encoded as 2-byte big-endian strings so they
 * can be compared directly against packet bytes (_dnsPacket_checkBytes). */
#define  DNS_TYPE_A   "\00\01"   /* big-endian decimal 1 */
#define  DNS_TYPE_PTR "\00\014"  /* big-endian decimal 12 */
#define  DNS_TYPE_MX  "\00\017"  /* big-endian decimal 15 */
#define  DNS_TYPE_AAAA "\00\034" /* big-endian decimal 28 */
#define  DNS_TYPE_ALL "\00\377"  /* big-endian decimal 255 */
/* bugfix: DNS_TYPE_ALL used to be "\00\0377"; an octal escape consumes at
 * most three digits, so that literal was the THREE bytes {0, 037, '7'} and
 * its first two bytes encoded TYPE 31, not 255 — ALL/ANY never matched. */

#define  DNS_CLASS_IN "\00\01"   /* big-endian decimal 1 */
457 
/* Lightweight read cursor over a raw DNS packet: [base, end) bounds the
 * packet bytes, and 'cursor' is the current parse position within them. */
typedef struct {
    const uint8_t*  base;    /* first byte of the packet */
    const uint8_t*  end;     /* one past the last byte */
    const uint8_t*  cursor;  /* current read position */
} DnsPacket;
463 
464 static void
_dnsPacket_init(DnsPacket * packet,const uint8_t * buff,int bufflen)465 _dnsPacket_init( DnsPacket*  packet, const uint8_t*  buff, int  bufflen )
466 {
467     packet->base   = buff;
468     packet->end    = buff + bufflen;
469     packet->cursor = buff;
470 }
471 
472 static void
_dnsPacket_rewind(DnsPacket * packet)473 _dnsPacket_rewind( DnsPacket*  packet )
474 {
475     packet->cursor = packet->base;
476 }
477 
478 static void
_dnsPacket_skip(DnsPacket * packet,int count)479 _dnsPacket_skip( DnsPacket*  packet, int  count )
480 {
481     const uint8_t*  p = packet->cursor + count;
482 
483     if (p > packet->end)
484         p = packet->end;
485 
486     packet->cursor = p;
487 }
488 
489 static int
_dnsPacket_readInt16(DnsPacket * packet)490 _dnsPacket_readInt16( DnsPacket*  packet )
491 {
492     const uint8_t*  p = packet->cursor;
493 
494     if (p+2 > packet->end)
495         return -1;
496 
497     packet->cursor = p+2;
498     return (p[0]<< 8) | p[1];
499 }
500 
501 /** QUERY CHECKING
502  **/
503 
504 /* check bytes in a dns packet. returns 1 on success, 0 on failure.
505  * the cursor is only advanced in the case of success
506  */
507 static int
_dnsPacket_checkBytes(DnsPacket * packet,int numBytes,const void * bytes)508 _dnsPacket_checkBytes( DnsPacket*  packet, int  numBytes, const void*  bytes )
509 {
510     const uint8_t*  p = packet->cursor;
511 
512     if (p + numBytes > packet->end)
513         return 0;
514 
515     if (memcmp(p, bytes, numBytes) != 0)
516         return 0;
517 
518     packet->cursor = p + numBytes;
519     return 1;
520 }
521 
522 /* parse and skip a given QNAME stored in a query packet,
523  * from the current cursor position. returns 1 on success,
524  * or 0 for malformed data.
525  */
526 static int
_dnsPacket_checkQName(DnsPacket * packet)527 _dnsPacket_checkQName( DnsPacket*  packet )
528 {
529     const uint8_t*  p   = packet->cursor;
530     const uint8_t*  end = packet->end;
531 
532     for (;;) {
533         int  c;
534 
535         if (p >= end)
536             break;
537 
538         c = *p++;
539 
540         if (c == 0) {
541             packet->cursor = p;
542             return 1;
543         }
544 
545         /* we don't expect label compression in QNAMEs */
546         if (c >= 64)
547             break;
548 
549         p += c;
550         /* we rely on the bound check at the start
551          * of the loop here */
552     }
553     /* malformed data */
554     XLOG("malformed QNAME");
555     return 0;
556 }
557 
558 /* parse and skip a given QR stored in a packet.
559  * returns 1 on success, and 0 on failure
560  */
561 static int
_dnsPacket_checkQR(DnsPacket * packet)562 _dnsPacket_checkQR( DnsPacket*  packet )
563 {
564     if (!_dnsPacket_checkQName(packet))
565         return 0;
566 
567     /* TYPE must be one of the things we support */
568     if (!_dnsPacket_checkBytes(packet, 2, DNS_TYPE_A) &&
569         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_PTR) &&
570         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_MX) &&
571         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_AAAA) &&
572         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_ALL))
573     {
574         XLOG("unsupported TYPE");
575         return 0;
576     }
577     /* CLASS must be IN */
578     if (!_dnsPacket_checkBytes(packet, 2, DNS_CLASS_IN)) {
579         XLOG("unsupported CLASS");
580         return 0;
581     }
582 
583     return 1;
584 }
585 
/* check the header of a DNS Query packet, return 1 if it is one
 * type of query we can cache, or 0 otherwise
 *
 * scans the 12-byte header, rejects packets whose flag bits or record
 * counts show they are not plain queries, then validates each Question
 * Record in turn, leaving the cursor just after the last one.
 */
static int
_dnsPacket_checkQuery( DnsPacket*  packet )
{
    const uint8_t*  p = packet->base;
    int             qdCount, anCount, dnCount, arCount;  /* dnCount holds NSCOUNT */

    /* a valid query must at least contain the fixed-size header */
    if (p + DNS_HEADER_SIZE > packet->end) {
        XLOG("query packet too small");
        return 0;
    }

    /* QR must be set to 0, opcode must be 0 and AA must be 0 */
    /* RA, Z, and RCODE must be 0 */
    if ((p[2] & 0xFC) != 0 || p[3] != 0) {
        XLOG("query packet flags unsupported");
        return 0;
    }

    /* Note that we ignore the TC and RD bits here for the
     * following reasons:
     *
     * - there is no point for a query packet sent to a server
     *   to have the TC bit set, but the implementation might
     *   set the bit in the query buffer for its own needs
     *   between a _resolv_cache_lookup and a
     *   _resolv_cache_add. We should not freak out if this
     *   is the case.
     *
     * - we consider that the result from a RD=0 or a RD=1
     *   query might be different, hence that the RD bit
     *   should be used to differentiate cached result.
     *
     *   this implies that RD is checked when hashing or
     *   comparing query packets, but not TC
     */

    /* ANCOUNT, NSCOUNT and ARCOUNT must be 0 */
    qdCount = (p[4] << 8) | p[5];
    anCount = (p[6] << 8) | p[7];
    dnCount = (p[8] << 8) | p[9];
    arCount = (p[10]<< 8) | p[11];

    if (anCount != 0 || dnCount != 0 || arCount != 0) {
        XLOG("query packet contains non-query records");
        return 0;
    }

    if (qdCount == 0) {
        XLOG("query packet doesn't contain query record");
        return 0;
    }

    /* Check QDCOUNT QRs */
    packet->cursor = p + DNS_HEADER_SIZE;

    for (;qdCount > 0; qdCount--)
        if (!_dnsPacket_checkQR(packet))
            return 0;

    return 1;
}
650 
651 /** QUERY DEBUGGING
652  **/
653 #if DEBUG
/* Append the dotted-name form of the QNAME at the cursor to the bounded
 * buffer [bp, bend), advancing the packet cursor past the name.
 * DEBUG builds only. */
static char*
_dnsPacket_bprintQName(DnsPacket*  packet, char*  bp, char*  bend)
{
    const uint8_t*  p   = packet->cursor;
    const uint8_t*  end = packet->end;
    int             first = 1;   /* suppress the dot before the first label */

    for (;;) {
        int  c;

        if (p >= end)
            break;

        c = *p++;

        if (c == 0) {   /* root label: end of the name */
            packet->cursor = p;
            return bp;
        }

        /* we don't expect label compression in QNAMEs */
        if (c >= 64)
            break;

        if (first)
            first = 0;
        else
            bp = _bprint_c(bp, bend, '.');

        bp = _bprint_b(bp, bend, (const char*)p, c);

        p += c;
        /* we rely on the bound check at the start
         * of the loop here */
    }
    /* malformed data */
    bp = _bprint_s(bp, bend, "<MALFORMED>");
    return bp;
}
693 
/* Append a human-readable "name (TYPE)" description of the Question
 * Record at the cursor, advancing the cursor past it. DEBUG builds only. */
static char*
_dnsPacket_bprintQR(DnsPacket*  packet, char*  p, char*  end)
{
#define  QQ(x)   { DNS_TYPE_##x, #x }
    static const struct {
        const char*  typeBytes;    /* 2-byte wire encoding of the TYPE */
        const char*  typeString;   /* symbolic name for display */
    } qTypes[] =
    {
        QQ(A), QQ(PTR), QQ(MX), QQ(AAAA), QQ(ALL),
        { NULL, NULL }
    };
    int          nn;
    const char*  typeString = NULL;

    /* dump QNAME */
    p = _dnsPacket_bprintQName(packet, p, end);

    /* dump TYPE */
    p = _bprint_s(p, end, " (");

    for (nn = 0; qTypes[nn].typeBytes != NULL; nn++) {
        if (_dnsPacket_checkBytes(packet, 2, qTypes[nn].typeBytes)) {
            typeString = qTypes[nn].typeString;
            break;
        }
    }

    if (typeString != NULL)
        p = _bprint_s(p, end, typeString);
    else {
        /* unknown TYPE: print its numeric value instead */
        int  typeCode = _dnsPacket_readInt16(packet);
        p = _bprint(p, end, "UNKNOWN-%d", typeCode);
    }

    p = _bprint_c(p, end, ')');

    /* skip CLASS */
    _dnsPacket_skip(packet, 2);
    return p;
}
735 
/* this function assumes the packet has already been checked.
 * Appends a description of every question record (preceded by
 * "RECURSIVE " when the RD bit is set) to [p, end). DEBUG builds only. */
static char*
_dnsPacket_bprintQuery( DnsPacket*  packet, char*  p, char*  end )
{
    int   qdCount;

    /* the RD bit is the lowest bit of the third header byte */
    if (packet->base[2] & 0x1) {
        p = _bprint_s(p, end, "RECURSIVE ");
    }

    /* skip ID + flags, read QDCOUNT, then skip the
     * ANCOUNT/NSCOUNT/ARCOUNT fields (assumed 0 for queries) */
    _dnsPacket_skip(packet, 4);
    qdCount = _dnsPacket_readInt16(packet);
    _dnsPacket_skip(packet, 6);

    for ( ; qdCount > 0; qdCount-- ) {
        p = _dnsPacket_bprintQR(packet, p, end);
    }
    return p;
}
755 #endif
756 
757 
758 /** QUERY HASHING SUPPORT
759  **
760  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKET HAS ALREADY
 ** BEEN SUCCESSFULLY CHECKED.
762  **/
763 
764 /* use 32-bit FNV hash function */
765 #define  FNV_MULT   16777619U
766 #define  FNV_BASIS  2166136261U
767 
768 static unsigned
_dnsPacket_hashBytes(DnsPacket * packet,int numBytes,unsigned hash)769 _dnsPacket_hashBytes( DnsPacket*  packet, int  numBytes, unsigned  hash )
770 {
771     const uint8_t*  p   = packet->cursor;
772     const uint8_t*  end = packet->end;
773 
774     while (numBytes > 0 && p < end) {
775         hash = hash*FNV_MULT ^ *p++;
776     }
777     packet->cursor = p;
778     return hash;
779 }
780 
781 
/* Hash the QNAME at the cursor into 'hash' (FNV-1 over the label bytes
 * only — length bytes and the final 0 are not folded in), advancing the
 * cursor past the name. The packet is assumed to have been validated
 * already, so the error branches below only log internal errors. */
static unsigned
_dnsPacket_hashQName( DnsPacket*  packet, unsigned  hash )
{
    const uint8_t*  p   = packet->cursor;
    const uint8_t*  end = packet->end;

    for (;;) {
        int  c;

        if (p >= end) {  /* should not happen */
            XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
            break;
        }

        c = *p++;

        if (c == 0)   /* root label: end of the name */
            break;

        if (c >= 64) {   /* compression pointer: not valid in a QNAME */
            XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
            break;
        }
        if (p + c >= end) {
            XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
                    __FUNCTION__);
            break;
        }
        /* fold the label's bytes into the hash */
        while (c > 0) {
            hash = hash*FNV_MULT ^ *p++;
            c   -= 1;
        }
    }
    packet->cursor = p;
    return hash;
}
818 
819 static unsigned
_dnsPacket_hashQR(DnsPacket * packet,unsigned hash)820 _dnsPacket_hashQR( DnsPacket*  packet, unsigned  hash )
821 {
822     hash = _dnsPacket_hashQName(packet, hash);
823     hash = _dnsPacket_hashBytes(packet, 4, hash); /* TYPE and CLASS */
824     return hash;
825 }
826 
827 static unsigned
_dnsPacket_hashQuery(DnsPacket * packet)828 _dnsPacket_hashQuery( DnsPacket*  packet )
829 {
830     unsigned  hash = FNV_BASIS;
831     int       count;
832     _dnsPacket_rewind(packet);
833 
834     /* we ignore the TC bit for reasons explained in
835      * _dnsPacket_checkQuery().
836      *
837      * however we hash the RD bit to differentiate
838      * between answers for recursive and non-recursive
839      * queries.
840      */
841     hash = hash*FNV_MULT ^ (packet->base[2] & 1);
842 
843     /* assume: other flags are 0 */
844     _dnsPacket_skip(packet, 4);
845 
846     /* read QDCOUNT */
847     count = _dnsPacket_readInt16(packet);
848 
849     /* assume: ANcount, NScount, ARcount are 0 */
850     _dnsPacket_skip(packet, 6);
851 
852     /* hash QDCOUNT QRs */
853     for ( ; count > 0; count-- )
854         hash = _dnsPacket_hashQR(packet, hash);
855 
856     return hash;
857 }
858 
859 
860 /** QUERY COMPARISON
861  **
862  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKETS HAVE ALREADY
 ** BEEN SUCCESSFULLY CHECKED.
864  **/
865 
/* Compare the domain-name encodings at both cursors, label by label.
 * On a match, advance both cursors past the name and return 1;
 * otherwise return 0 (cursor positions are then unspecified).
 * Both packets are assumed to have been validated already. */
static int
_dnsPacket_isEqualDomainName( DnsPacket*  pack1, DnsPacket*  pack2 )
{
    const uint8_t*  p1   = pack1->cursor;
    const uint8_t*  end1 = pack1->end;
    const uint8_t*  p2   = pack2->cursor;
    const uint8_t*  end2 = pack2->end;

    for (;;) {
        int  c1, c2;

        if (p1 >= end1 || p2 >= end2) {
            XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
            break;
        }
        c1 = *p1++;
        c2 = *p2++;
        /* the label lengths must match */
        if (c1 != c2)
            break;

        if (c1 == 0) {   /* both names ended together: equal */
            pack1->cursor = p1;
            pack2->cursor = p2;
            return 1;
        }
        if (c1 >= 64) {   /* compression pointer: not valid in a QNAME */
            XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
            break;
        }
        if ((p1+c1 > end1) || (p2+c1 > end2)) {
            XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
                    __FUNCTION__);
            break;
        }
        /* compare the label bytes themselves */
        if (memcmp(p1, p2, c1) != 0)
            break;
        p1 += c1;
        p2 += c1;
        /* we rely on the bound checks at the start of the loop */
    }
    /* not the same, or one is malformed */
    XLOG("different DN");
    return 0;
}
910 
911 static int
_dnsPacket_isEqualBytes(DnsPacket * pack1,DnsPacket * pack2,int numBytes)912 _dnsPacket_isEqualBytes( DnsPacket*  pack1, DnsPacket*  pack2, int  numBytes )
913 {
914     const uint8_t*  p1 = pack1->cursor;
915     const uint8_t*  p2 = pack2->cursor;
916 
917     if ( p1 + numBytes > pack1->end || p2 + numBytes > pack2->end )
918         return 0;
919 
920     if ( memcmp(p1, p2, numBytes) != 0 )
921         return 0;
922 
923     pack1->cursor += numBytes;
924     pack2->cursor += numBytes;
925     return 1;
926 }
927 
928 static int
_dnsPacket_isEqualQR(DnsPacket * pack1,DnsPacket * pack2)929 _dnsPacket_isEqualQR( DnsPacket*  pack1, DnsPacket*  pack2 )
930 {
931     /* compare domain name encoding + TYPE + CLASS */
932     if ( !_dnsPacket_isEqualDomainName(pack1, pack2) ||
933          !_dnsPacket_isEqualBytes(pack1, pack2, 2+2) )
934         return 0;
935 
936     return 1;
937 }
938 
/* Compare two (already validated) query packets for cache equality:
 * same RD bit and same sequence of Question Records. The ID, the TC
 * bit and the remaining header fields are deliberately ignored. */
static int
_dnsPacket_isEqualQuery( DnsPacket*  pack1, DnsPacket*  pack2 )
{
    int  count1, count2;

    /* compare the headers, ignore most fields */
    _dnsPacket_rewind(pack1);
    _dnsPacket_rewind(pack2);

    /* compare RD, ignore TC, see comment in _dnsPacket_checkQuery */
    if ((pack1->base[2] & 1) != (pack2->base[2] & 1)) {
        XLOG("different RD");
        return 0;
    }

    /* assume: other flags are all 0 */
    _dnsPacket_skip(pack1, 4);
    _dnsPacket_skip(pack2, 4);

    /* compare QDCOUNT */
    count1 = _dnsPacket_readInt16(pack1);
    count2 = _dnsPacket_readInt16(pack2);
    if (count1 != count2 || count1 < 0) {
        XLOG("different QDCOUNT");
        return 0;
    }

    /* assume: ANcount, NScount and ARcount are all 0 */
    _dnsPacket_skip(pack1, 6);
    _dnsPacket_skip(pack2, 6);

    /* compare the QDCOUNT QRs */
    for ( ; count1 > 0; count1-- ) {
        if (!_dnsPacket_isEqualQR(pack1, pack2)) {
            XLOG("different QR");
            return 0;
        }
    }
    return 1;
}
979 
980 /****************************************************************************/
981 /****************************************************************************/
982 /*****                                                                  *****/
983 /*****                                                                  *****/
984 /*****                                                                  *****/
985 /****************************************************************************/
986 /****************************************************************************/
987 
988 /* cache entry. for simplicity, 'hash' and 'hlink' are inlined in this
989  * structure though they are conceptually part of the hash table.
990  *
991  * similarly, mru_next and mru_prev are part of the global MRU list
992  */
typedef struct Entry {
    unsigned int     hash;   /* hash value */
    struct Entry*    hlink;  /* next in collision chain */
    struct Entry*    mru_prev;
    struct Entry*    mru_next;

    const uint8_t*   query;     /* key: raw DNS query packet; points into this allocation (see entry_alloc) */
    int              querylen;  /* length of 'query' in bytes */
    const uint8_t*   answer;    /* value: raw DNS answer packet; follows 'query' in the same allocation */
    int              answerlen; /* length of 'answer' in bytes */
    time_t           expires;   /* time_t when the entry isn't valid any more */
    int              id;        /* for debugging purpose */
} Entry;
1006 
1007 /**
1008  * Find the TTL for a negative DNS result.  This is defined as the minimum
1009  * of the SOA records TTL and the MINIMUM-TTL field (RFC-2308).
1010  *
1011  * Return 0 if not found.
1012  */
1013 static u_long
answer_getNegativeTTL(ns_msg handle)1014 answer_getNegativeTTL(ns_msg handle) {
1015     int n, nscount;
1016     u_long result = 0;
1017     ns_rr rr;
1018 
1019     nscount = ns_msg_count(handle, ns_s_ns);
1020     for (n = 0; n < nscount; n++) {
1021         if ((ns_parserr(&handle, ns_s_ns, n, &rr) == 0) && (ns_rr_type(rr) == ns_t_soa)) {
1022             const u_char *rdata = ns_rr_rdata(rr); // find the data
1023             const u_char *edata = rdata + ns_rr_rdlen(rr); // add the len to find the end
1024             int len;
1025             u_long ttl, rec_result = ns_rr_ttl(rr);
1026 
1027             // find the MINIMUM-TTL field from the blob of binary data for this record
1028             // skip the server name
1029             len = dn_skipname(rdata, edata);
1030             if (len == -1) continue; // error skipping
1031             rdata += len;
1032 
1033             // skip the admin name
1034             len = dn_skipname(rdata, edata);
1035             if (len == -1) continue; // error skipping
1036             rdata += len;
1037 
1038             if (edata - rdata != 5*NS_INT32SZ) continue;
1039             // skip: serial number + refresh interval + retry interval + expiry
1040             rdata += NS_INT32SZ * 4;
1041             // finally read the MINIMUM TTL
1042             ttl = ns_get32(rdata);
1043             if (ttl < rec_result) {
1044                 rec_result = ttl;
1045             }
1046             // Now that the record is read successfully, apply the new min TTL
1047             if (n == 0 || rec_result < result) {
1048                 result = rec_result;
1049             }
1050         }
1051     }
1052     return result;
1053 }
1054 
1055 /**
1056  * Parse the answer records and find the appropriate
1057  * smallest TTL among the records.  This might be from
1058  * the answer records if found or from the SOA record
1059  * if it's a negative result.
1060  *
1061  * The returned TTL is the number of seconds to
1062  * keep the answer in the cache.
1063  *
1064  * In case of parse error zero (0) is returned which
1065  * indicates that the answer shall not be cached.
1066  */
1067 static u_long
answer_getTTL(const void * answer,int answerlen)1068 answer_getTTL(const void* answer, int answerlen)
1069 {
1070     ns_msg handle;
1071     int ancount, n;
1072     u_long result, ttl;
1073     ns_rr rr;
1074 
1075     result = 0;
1076     if (ns_initparse(answer, answerlen, &handle) >= 0) {
1077         // get number of answer records
1078         ancount = ns_msg_count(handle, ns_s_an);
1079 
1080         if (ancount == 0) {
1081             // a response with no answers?  Cache this negative result.
1082             result = answer_getNegativeTTL(handle);
1083         } else {
1084             for (n = 0; n < ancount; n++) {
1085                 if (ns_parserr(&handle, ns_s_an, n, &rr) == 0) {
1086                     ttl = ns_rr_ttl(rr);
1087                     if (n == 0 || ttl < result) {
1088                         result = ttl;
1089                     }
1090                 } else {
1091                     XLOG("ns_parserr failed ancount no = %d. errno = %s\n", n, strerror(errno));
1092                 }
1093             }
1094         }
1095     } else {
1096         XLOG("ns_parserr failed. %s\n", strerror(errno));
1097     }
1098 
1099     XLOG("TTL = %lu\n", result);
1100 
1101     return result;
1102 }
1103 
1104 static void
entry_free(Entry * e)1105 entry_free( Entry*  e )
1106 {
1107     /* everything is allocated in a single memory block */
1108     if (e) {
1109         free(e);
1110     }
1111 }
1112 
1113 static __inline__ void
entry_mru_remove(Entry * e)1114 entry_mru_remove( Entry*  e )
1115 {
1116     e->mru_prev->mru_next = e->mru_next;
1117     e->mru_next->mru_prev = e->mru_prev;
1118 }
1119 
1120 static __inline__ void
entry_mru_add(Entry * e,Entry * list)1121 entry_mru_add( Entry*  e, Entry*  list )
1122 {
1123     Entry*  first = list->mru_next;
1124 
1125     e->mru_next = first;
1126     e->mru_prev = list;
1127 
1128     list->mru_next  = e;
1129     first->mru_prev = e;
1130 }
1131 
1132 /* compute the hash of a given entry, this is a hash of most
1133  * data in the query (key) */
1134 static unsigned
entry_hash(const Entry * e)1135 entry_hash( const Entry*  e )
1136 {
1137     DnsPacket  pack[1];
1138 
1139     _dnsPacket_init(pack, e->query, e->querylen);
1140     return _dnsPacket_hashQuery(pack);
1141 }
1142 
1143 /* initialize an Entry as a search key, this also checks the input query packet
1144  * returns 1 on success, or 0 in case of unsupported/malformed data */
1145 static int
entry_init_key(Entry * e,const void * query,int querylen)1146 entry_init_key( Entry*  e, const void*  query, int  querylen )
1147 {
1148     DnsPacket  pack[1];
1149 
1150     memset(e, 0, sizeof(*e));
1151 
1152     e->query    = query;
1153     e->querylen = querylen;
1154     e->hash     = entry_hash(e);
1155 
1156     _dnsPacket_init(pack, query, querylen);
1157 
1158     return _dnsPacket_checkQuery(pack);
1159 }
1160 
1161 /* allocate a new entry as a cache node */
1162 static Entry*
entry_alloc(const Entry * init,const void * answer,int answerlen)1163 entry_alloc( const Entry*  init, const void*  answer, int  answerlen )
1164 {
1165     Entry*  e;
1166     int     size;
1167 
1168     size = sizeof(*e) + init->querylen + answerlen;
1169     e    = calloc(size, 1);
1170     if (e == NULL)
1171         return e;
1172 
1173     e->hash     = init->hash;
1174     e->query    = (const uint8_t*)(e+1);
1175     e->querylen = init->querylen;
1176 
1177     memcpy( (char*)e->query, init->query, e->querylen );
1178 
1179     e->answer    = e->query + e->querylen;
1180     e->answerlen = answerlen;
1181 
1182     memcpy( (char*)e->answer, answer, e->answerlen );
1183 
1184     return e;
1185 }
1186 
1187 static int
entry_equals(const Entry * e1,const Entry * e2)1188 entry_equals( const Entry*  e1, const Entry*  e2 )
1189 {
1190     DnsPacket  pack1[1], pack2[1];
1191 
1192     if (e1->querylen != e2->querylen) {
1193         return 0;
1194     }
1195     _dnsPacket_init(pack1, e1->query, e1->querylen);
1196     _dnsPacket_init(pack2, e2->query, e2->querylen);
1197 
1198     return _dnsPacket_isEqualQuery(pack1, pack2);
1199 }
1200 
1201 /****************************************************************************/
1202 /****************************************************************************/
1203 /*****                                                                  *****/
1204 /*****                                                                  *****/
1205 /*****                                                                  *****/
1206 /****************************************************************************/
1207 /****************************************************************************/
1208 
1209 /* We use a simple hash table with external collision lists
1210  * for simplicity, the hash-table fields 'hash' and 'hlink' are
1211  * inlined in the Entry structure.
1212  */
1213 
/* Maximum time (seconds) for a thread to wait for a pending request.
 * NOTE: no trailing semicolon — the old ';' silently expanded into an
 * extra empty statement at every use site. */
#define PENDING_REQUEST_TIMEOUT 20
1216 
/* A pending request: threads resolving the same query (same hash) wait
 * on 'cond' until the first requester adds the answer to the cache or
 * reports failure. */
typedef struct pending_req_info {
    unsigned int                hash;   /* hash of the query being resolved */
    pthread_cond_t              cond;   /* broadcast when the request completes */
    struct pending_req_info*    next;   /* singly-linked list; head sentinel lives in Cache */
} PendingReqInfo;
1222 
typedef struct resolv_cache {
    int              max_entries;       /* capacity (also the number of hash buckets) */
    int              num_entries;       /* current number of cached entries */
    Entry            mru_list;          /* sentinel of the MRU doubly-linked ring */
    int              last_id;           /* id given to the last entry added (debugging) */
    Entry*           entries;           /* hash table: max_entries bucket heads */
    PendingReqInfo   pending_requests;  /* head sentinel of the pending-request list */
} Cache;
1231 
/* Per-network resolver state: the DNS cache plus nameserver
 * configuration and statistics. Linked into _res_cache_list. */
struct resolv_cache_info {
    unsigned                    netid;       /* network this state belongs to */
    Cache*                      cache;       /* the DNS answer cache for this network */
    struct resolv_cache_info*   next;        /* next element of _res_cache_list */
    int                         nscount;     /* number of configured nameservers */
    char*                       nameservers[MAXNS];   /* nameserver address strings */
    struct addrinfo*            nsaddrinfo[MAXNS];    /* resolved nameserver addresses */
    int                         revision_id; // # times the nameservers have been replaced
    struct __res_params         params;      /* resolver tunables for this network */
    struct __res_stats          nsstats[MAXNS];       /* per-nameserver statistics */
    char                        defdname[MAXDNSRCHPATH];     /* default domain / search path buffer */
    int                         dnsrch_offset[MAXDNSRCH+1];  // offsets into defdname
};
1245 
1246 #define  HTABLE_VALID(x)  ((x) != NULL && (x) != HTABLE_DELETED)
1247 
1248 static pthread_once_t        _res_cache_once = PTHREAD_ONCE_INIT;
1249 static void _res_cache_init(void);
1250 
1251 // lock protecting everything in the _resolve_cache_info structs (next ptr, etc)
1252 static pthread_mutex_t _res_cache_list_lock;
1253 
1254 /* gets cache associated with a network, or NULL if none exists */
1255 static struct resolv_cache* _find_named_cache_locked(unsigned netid);
1256 
1257 static void
_cache_flush_pending_requests_locked(struct resolv_cache * cache)1258 _cache_flush_pending_requests_locked( struct resolv_cache* cache )
1259 {
1260     struct pending_req_info *ri, *tmp;
1261     if (cache) {
1262         ri = cache->pending_requests.next;
1263 
1264         while (ri) {
1265             tmp = ri;
1266             ri = ri->next;
1267             pthread_cond_broadcast(&tmp->cond);
1268 
1269             pthread_cond_destroy(&tmp->cond);
1270             free(tmp);
1271         }
1272 
1273         cache->pending_requests.next = NULL;
1274     }
1275 }
1276 
1277 /* Return 0 if no pending request is found matching the key.
1278  * If a matching request is found the calling thread will wait until
1279  * the matching request completes, then update *cache and return 1. */
1280 static int
_cache_check_pending_request_locked(struct resolv_cache ** cache,Entry * key,unsigned netid)1281 _cache_check_pending_request_locked( struct resolv_cache** cache, Entry* key, unsigned netid )
1282 {
1283     struct pending_req_info *ri, *prev;
1284     int exist = 0;
1285 
1286     if (*cache && key) {
1287         ri = (*cache)->pending_requests.next;
1288         prev = &(*cache)->pending_requests;
1289         while (ri) {
1290             if (ri->hash == key->hash) {
1291                 exist = 1;
1292                 break;
1293             }
1294             prev = ri;
1295             ri = ri->next;
1296         }
1297 
1298         if (!exist) {
1299             ri = calloc(1, sizeof(struct pending_req_info));
1300             if (ri) {
1301                 ri->hash = key->hash;
1302                 pthread_cond_init(&ri->cond, NULL);
1303                 prev->next = ri;
1304             }
1305         } else {
1306             struct timespec ts = {0,0};
1307             XLOG("Waiting for previous request");
1308             ts.tv_sec = _time_now() + PENDING_REQUEST_TIMEOUT;
1309             pthread_cond_timedwait(&ri->cond, &_res_cache_list_lock, &ts);
1310             /* Must update *cache as it could have been deleted. */
1311             *cache = _find_named_cache_locked(netid);
1312         }
1313     }
1314 
1315     return exist;
1316 }
1317 
1318 /* notify any waiting thread that waiting on a request
1319  * matching the key has been added to the cache */
1320 static void
_cache_notify_waiting_tid_locked(struct resolv_cache * cache,Entry * key)1321 _cache_notify_waiting_tid_locked( struct resolv_cache* cache, Entry* key )
1322 {
1323     struct pending_req_info *ri, *prev;
1324 
1325     if (cache && key) {
1326         ri = cache->pending_requests.next;
1327         prev = &cache->pending_requests;
1328         while (ri) {
1329             if (ri->hash == key->hash) {
1330                 pthread_cond_broadcast(&ri->cond);
1331                 break;
1332             }
1333             prev = ri;
1334             ri = ri->next;
1335         }
1336 
1337         // remove item from list and destroy
1338         if (ri) {
1339             prev->next = ri->next;
1340             pthread_cond_destroy(&ri->cond);
1341             free(ri);
1342         }
1343     }
1344 }
1345 
1346 /* notify the cache that the query failed */
1347 void
_resolv_cache_query_failed(unsigned netid,const void * query,int querylen)1348 _resolv_cache_query_failed( unsigned    netid,
1349                    const void* query,
1350                    int         querylen)
1351 {
1352     Entry    key[1];
1353     Cache*   cache;
1354 
1355     if (!entry_init_key(key, query, querylen))
1356         return;
1357 
1358     pthread_mutex_lock(&_res_cache_list_lock);
1359 
1360     cache = _find_named_cache_locked(netid);
1361 
1362     if (cache) {
1363         _cache_notify_waiting_tid_locked(cache, key);
1364     }
1365 
1366     pthread_mutex_unlock(&_res_cache_list_lock);
1367 }
1368 
1369 static struct resolv_cache_info* _find_cache_info_locked(unsigned netid);
1370 
1371 static void
_cache_flush_locked(Cache * cache)1372 _cache_flush_locked( Cache*  cache )
1373 {
1374     int     nn;
1375 
1376     for (nn = 0; nn < cache->max_entries; nn++)
1377     {
1378         Entry**  pnode = (Entry**) &cache->entries[nn];
1379 
1380         while (*pnode != NULL) {
1381             Entry*  node = *pnode;
1382             *pnode = node->hlink;
1383             entry_free(node);
1384         }
1385     }
1386 
1387     // flush pending request
1388     _cache_flush_pending_requests_locked(cache);
1389 
1390     cache->mru_list.mru_next = cache->mru_list.mru_prev = &cache->mru_list;
1391     cache->num_entries       = 0;
1392     cache->last_id           = 0;
1393 
1394     XLOG("*************************\n"
1395          "*** DNS CACHE FLUSHED ***\n"
1396          "*************************");
1397 }
1398 
1399 static int
_res_cache_get_max_entries(void)1400 _res_cache_get_max_entries( void )
1401 {
1402     int cache_size = CONFIG_MAX_ENTRIES;
1403 
1404     const char* cache_mode = getenv("ANDROID_DNS_MODE");
1405     if (cache_mode == NULL || strcmp(cache_mode, "local") != 0) {
1406         // Don't use the cache in local mode. This is used by the proxy itself.
1407         cache_size = 0;
1408     }
1409 
1410     XLOG("cache size: %d", cache_size);
1411     return cache_size;
1412 }
1413 
1414 static struct resolv_cache*
_resolv_cache_create(void)1415 _resolv_cache_create( void )
1416 {
1417     struct resolv_cache*  cache;
1418 
1419     cache = calloc(sizeof(*cache), 1);
1420     if (cache) {
1421         cache->max_entries = _res_cache_get_max_entries();
1422         cache->entries = calloc(sizeof(*cache->entries), cache->max_entries);
1423         if (cache->entries) {
1424             cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
1425             XLOG("%s: cache created\n", __FUNCTION__);
1426         } else {
1427             free(cache);
1428             cache = NULL;
1429         }
1430     }
1431     return cache;
1432 }
1433 
1434 
1435 #if DEBUG
1436 static void
_dump_query(const uint8_t * query,int querylen)1437 _dump_query( const uint8_t*  query, int  querylen )
1438 {
1439     char       temp[256], *p=temp, *end=p+sizeof(temp);
1440     DnsPacket  pack[1];
1441 
1442     _dnsPacket_init(pack, query, querylen);
1443     p = _dnsPacket_bprintQuery(pack, p, end);
1444     XLOG("QUERY: %s", temp);
1445 }
1446 
1447 static void
_cache_dump_mru(Cache * cache)1448 _cache_dump_mru( Cache*  cache )
1449 {
1450     char    temp[512], *p=temp, *end=p+sizeof(temp);
1451     Entry*  e;
1452 
1453     p = _bprint(temp, end, "MRU LIST (%2d): ", cache->num_entries);
1454     for (e = cache->mru_list.mru_next; e != &cache->mru_list; e = e->mru_next)
1455         p = _bprint(p, end, " %d", e->id);
1456 
1457     XLOG("%s", temp);
1458 }
1459 
/* Debug helper: render an answer packet via res_pquery into a temp file,
 * read it back and log it. Best-effort; failures are logged and ignored. */
static void
_dump_answer(const void* answer, int answerlen)
{
    res_state statep;
    FILE* fp;
    char* buf;
    long fileLen;   /* long: matches ftell's return type */

    fp = fopen("/data/reslog.txt", "w+e");
    if (fp != NULL) {
        statep = __res_get_state();

        res_pquery(statep, answer, answerlen, fp);

        //Get file length
        fseek(fp, 0, SEEK_END);
        fileLen = ftell(fp);
        fseek(fp, 0, SEEK_SET);
        if (fileLen > 0) {
            buf = (char *)malloc(fileLen + 1);
            if (buf != NULL) {
                /* BUGFIX: use fread's return value and NUL-terminate
                 * before logging — the old code passed an unterminated
                 * buffer to XLOG("%s", ...) */
                size_t nread = fread(buf, 1, (size_t)fileLen, fp);
                buf[nread] = '\0';
                XLOG("%s\n", buf);
                free(buf);
            }
        }
        fclose(fp);
        remove("/data/reslog.txt");
    }
    else {
        errno = 0; // else debug is introducing error signals
        XLOG("%s: can't open file\n", __FUNCTION__);
    }
}
1493 #endif
1494 
1495 #if DEBUG
1496 #  define  XLOG_QUERY(q,len)   _dump_query((q), (len))
1497 #  define  XLOG_ANSWER(a, len) _dump_answer((a), (len))
1498 #else
1499 #  define  XLOG_QUERY(q,len)   ((void)0)
1500 #  define  XLOG_ANSWER(a,len)  ((void)0)
1501 #endif
1502 
1503 /* This function tries to find a key within the hash table
1504  * In case of success, it will return a *pointer* to the hashed key.
1505  * In case of failure, it will return a *pointer* to NULL
1506  *
1507  * So, the caller must check '*result' to check for success/failure.
1508  *
1509  * The main idea is that the result can later be used directly in
1510  * calls to _resolv_cache_add or _resolv_cache_remove as the 'lookup'
1511  * parameter. This makes the code simpler and avoids re-searching
1512  * for the key position in the htable.
1513  *
1514  * The result of a lookup_p is only valid until you alter the hash
1515  * table.
1516  */
1517 static Entry**
_cache_lookup_p(Cache * cache,Entry * key)1518 _cache_lookup_p( Cache*   cache,
1519                  Entry*   key )
1520 {
1521     int      index = key->hash % cache->max_entries;
1522     Entry**  pnode = (Entry**) &cache->entries[ index ];
1523 
1524     while (*pnode != NULL) {
1525         Entry*  node = *pnode;
1526 
1527         if (node == NULL)
1528             break;
1529 
1530         if (node->hash == key->hash && entry_equals(node, key))
1531             break;
1532 
1533         pnode = &node->hlink;
1534     }
1535     return pnode;
1536 }
1537 
1538 /* Add a new entry to the hash table. 'lookup' must be the
1539  * result of an immediate previous failed _lookup_p() call
1540  * (i.e. with *lookup == NULL), and 'e' is the pointer to the
1541  * newly created entry
1542  */
1543 static void
_cache_add_p(Cache * cache,Entry ** lookup,Entry * e)1544 _cache_add_p( Cache*   cache,
1545               Entry**  lookup,
1546               Entry*   e )
1547 {
1548     *lookup = e;
1549     e->id = ++cache->last_id;
1550     entry_mru_add(e, &cache->mru_list);
1551     cache->num_entries += 1;
1552 
1553     XLOG("%s: entry %d added (count=%d)", __FUNCTION__,
1554          e->id, cache->num_entries);
1555 }
1556 
1557 /* Remove an existing entry from the hash table,
1558  * 'lookup' must be the result of an immediate previous
1559  * and succesful _lookup_p() call.
1560  */
1561 static void
_cache_remove_p(Cache * cache,Entry ** lookup)1562 _cache_remove_p( Cache*   cache,
1563                  Entry**  lookup )
1564 {
1565     Entry*  e  = *lookup;
1566 
1567     XLOG("%s: entry %d removed (count=%d)", __FUNCTION__,
1568          e->id, cache->num_entries-1);
1569 
1570     entry_mru_remove(e);
1571     *lookup = e->hlink;
1572     entry_free(e);
1573     cache->num_entries -= 1;
1574 }
1575 
1576 /* Remove the oldest entry from the hash table.
1577  */
1578 static void
_cache_remove_oldest(Cache * cache)1579 _cache_remove_oldest( Cache*  cache )
1580 {
1581     Entry*   oldest = cache->mru_list.mru_prev;
1582     Entry**  lookup = _cache_lookup_p(cache, oldest);
1583 
1584     if (*lookup == NULL) { /* should not happen */
1585         XLOG("%s: OLDEST NOT IN HTABLE ?", __FUNCTION__);
1586         return;
1587     }
1588     if (DEBUG) {
1589         XLOG("Cache full - removing oldest");
1590         XLOG_QUERY(oldest->query, oldest->querylen);
1591     }
1592     _cache_remove_p(cache, lookup);
1593 }
1594 
1595 /* Remove all expired entries from the hash table.
1596  */
_cache_remove_expired(Cache * cache)1597 static void _cache_remove_expired(Cache* cache) {
1598     Entry* e;
1599     time_t now = _time_now();
1600 
1601     for (e = cache->mru_list.mru_next; e != &cache->mru_list;) {
1602         // Entry is old, remove
1603         if (now >= e->expires) {
1604             Entry** lookup = _cache_lookup_p(cache, e);
1605             if (*lookup == NULL) { /* should not happen */
1606                 XLOG("%s: ENTRY NOT IN HTABLE ?", __FUNCTION__);
1607                 return;
1608             }
1609             e = e->mru_next;
1610             _cache_remove_p(cache, lookup);
1611         } else {
1612             e = e->mru_next;
1613         }
1614     }
1615 }
1616 
1617 ResolvCacheStatus
_resolv_cache_lookup(unsigned netid,const void * query,int querylen,void * answer,int answersize,int * answerlen)1618 _resolv_cache_lookup( unsigned              netid,
1619                       const void*           query,
1620                       int                   querylen,
1621                       void*                 answer,
1622                       int                   answersize,
1623                       int                  *answerlen )
1624 {
1625     Entry      key[1];
1626     Entry**    lookup;
1627     Entry*     e;
1628     time_t     now;
1629     Cache*     cache;
1630 
1631     ResolvCacheStatus  result = RESOLV_CACHE_NOTFOUND;
1632 
1633     XLOG("%s: lookup", __FUNCTION__);
1634     XLOG_QUERY(query, querylen);
1635 
1636     /* we don't cache malformed queries */
1637     if (!entry_init_key(key, query, querylen)) {
1638         XLOG("%s: unsupported query", __FUNCTION__);
1639         return RESOLV_CACHE_UNSUPPORTED;
1640     }
1641     /* lookup cache */
1642     pthread_once(&_res_cache_once, _res_cache_init);
1643     pthread_mutex_lock(&_res_cache_list_lock);
1644 
1645     cache = _find_named_cache_locked(netid);
1646     if (cache == NULL) {
1647         result = RESOLV_CACHE_UNSUPPORTED;
1648         goto Exit;
1649     }
1650 
1651     /* see the description of _lookup_p to understand this.
1652      * the function always return a non-NULL pointer.
1653      */
1654     lookup = _cache_lookup_p(cache, key);
1655     e      = *lookup;
1656 
1657     if (e == NULL) {
1658         XLOG( "NOT IN CACHE");
1659         // calling thread will wait if an outstanding request is found
1660         // that matching this query
1661         if (!_cache_check_pending_request_locked(&cache, key, netid) || cache == NULL) {
1662             goto Exit;
1663         } else {
1664             lookup = _cache_lookup_p(cache, key);
1665             e = *lookup;
1666             if (e == NULL) {
1667                 goto Exit;
1668             }
1669         }
1670     }
1671 
1672     now = _time_now();
1673 
1674     /* remove stale entries here */
1675     if (now >= e->expires) {
1676         XLOG( " NOT IN CACHE (STALE ENTRY %p DISCARDED)", *lookup );
1677         XLOG_QUERY(e->query, e->querylen);
1678         _cache_remove_p(cache, lookup);
1679         goto Exit;
1680     }
1681 
1682     *answerlen = e->answerlen;
1683     if (e->answerlen > answersize) {
1684         /* NOTE: we return UNSUPPORTED if the answer buffer is too short */
1685         result = RESOLV_CACHE_UNSUPPORTED;
1686         XLOG(" ANSWER TOO LONG");
1687         goto Exit;
1688     }
1689 
1690     memcpy( answer, e->answer, e->answerlen );
1691 
1692     /* bump up this entry to the top of the MRU list */
1693     if (e != cache->mru_list.mru_next) {
1694         entry_mru_remove( e );
1695         entry_mru_add( e, &cache->mru_list );
1696     }
1697 
1698     XLOG( "FOUND IN CACHE entry=%p", e );
1699     result = RESOLV_CACHE_FOUND;
1700 
1701 Exit:
1702     pthread_mutex_unlock(&_res_cache_list_lock);
1703     return result;
1704 }
1705 
1706 
1707 void
_resolv_cache_add(unsigned netid,const void * query,int querylen,const void * answer,int answerlen)1708 _resolv_cache_add( unsigned              netid,
1709                    const void*           query,
1710                    int                   querylen,
1711                    const void*           answer,
1712                    int                   answerlen )
1713 {
1714     Entry    key[1];
1715     Entry*   e;
1716     Entry**  lookup;
1717     u_long   ttl;
1718     Cache*   cache = NULL;
1719 
1720     /* don't assume that the query has already been cached
1721      */
1722     if (!entry_init_key( key, query, querylen )) {
1723         XLOG( "%s: passed invalid query ?", __FUNCTION__);
1724         return;
1725     }
1726 
1727     pthread_mutex_lock(&_res_cache_list_lock);
1728 
1729     cache = _find_named_cache_locked(netid);
1730     if (cache == NULL) {
1731         goto Exit;
1732     }
1733 
1734     XLOG( "%s: query:", __FUNCTION__ );
1735     XLOG_QUERY(query,querylen);
1736     XLOG_ANSWER(answer, answerlen);
1737 #if DEBUG_DATA
1738     XLOG( "answer:");
1739     XLOG_BYTES(answer,answerlen);
1740 #endif
1741 
1742     lookup = _cache_lookup_p(cache, key);
1743     e      = *lookup;
1744 
1745     if (e != NULL) { /* should not happen */
1746         XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
1747              __FUNCTION__, e);
1748         goto Exit;
1749     }
1750 
1751     if (cache->num_entries >= cache->max_entries) {
1752         _cache_remove_expired(cache);
1753         if (cache->num_entries >= cache->max_entries) {
1754             _cache_remove_oldest(cache);
1755         }
1756         /* need to lookup again */
1757         lookup = _cache_lookup_p(cache, key);
1758         e      = *lookup;
1759         if (e != NULL) {
1760             XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
1761                 __FUNCTION__, e);
1762             goto Exit;
1763         }
1764     }
1765 
1766     ttl = answer_getTTL(answer, answerlen);
1767     if (ttl > 0) {
1768         e = entry_alloc(key, answer, answerlen);
1769         if (e != NULL) {
1770             e->expires = ttl + _time_now();
1771             _cache_add_p(cache, lookup, e);
1772         }
1773     }
1774 #if DEBUG
1775     _cache_dump_mru(cache);
1776 #endif
1777 Exit:
1778     if (cache != NULL) {
1779       _cache_notify_waiting_tid_locked(cache, key);
1780     }
1781     pthread_mutex_unlock(&_res_cache_list_lock);
1782 }
1783 
1784 /****************************************************************************/
1785 /****************************************************************************/
1786 /*****                                                                  *****/
1787 /*****                                                                  *****/
1788 /*****                                                                  *****/
1789 /****************************************************************************/
1790 /****************************************************************************/
1791 
1792 // Head of the list of caches.  Protected by _res_cache_list_lock.
1793 static struct resolv_cache_info _res_cache_list;
1794 
1795 /* insert resolv_cache_info into the list of resolv_cache_infos */
1796 static void _insert_cache_info_locked(struct resolv_cache_info* cache_info);
1797 /* creates a resolv_cache_info */
1798 static struct resolv_cache_info* _create_cache_info( void );
1799 /* gets a resolv_cache_info associated with a network, or NULL if not found */
1800 static struct resolv_cache_info* _find_cache_info_locked(unsigned netid);
1801 /* look up the named cache, and creates one if needed */
1802 static struct resolv_cache* _get_res_cache_for_net_locked(unsigned netid);
1803 /* empty the named cache */
1804 static void _flush_cache_for_net_locked(unsigned netid);
1805 /* empty the nameservers set for the named cache */
1806 static void _free_nameservers_locked(struct resolv_cache_info* cache_info);
1807 /* return 1 if the provided list of name servers differs from the list of name servers
1808  * currently attached to the provided cache_info */
1809 static int _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
1810         const char** servers, int numservers);
/* clears the stats samples contained within the given cache_info */
1812 static void _res_cache_clear_stats_locked(struct resolv_cache_info* cache_info);
1813 
1814 static void
_res_cache_init(void)1815 _res_cache_init(void)
1816 {
1817     const char*  env = getenv(CONFIG_ENV);
1818 
1819     if (env && atoi(env) == 0) {
1820         /* the cache is disabled */
1821         return;
1822     }
1823 
1824     memset(&_res_cache_list, 0, sizeof(_res_cache_list));
1825     pthread_mutex_init(&_res_cache_list_lock, NULL);
1826 }
1827 
1828 static struct resolv_cache*
_get_res_cache_for_net_locked(unsigned netid)1829 _get_res_cache_for_net_locked(unsigned netid)
1830 {
1831     struct resolv_cache* cache = _find_named_cache_locked(netid);
1832     if (!cache) {
1833         struct resolv_cache_info* cache_info = _create_cache_info();
1834         if (cache_info) {
1835             cache = _resolv_cache_create();
1836             if (cache) {
1837                 cache_info->cache = cache;
1838                 cache_info->netid = netid;
1839                 _insert_cache_info_locked(cache_info);
1840             } else {
1841                 free(cache_info);
1842             }
1843         }
1844     }
1845     return cache;
1846 }
1847 
1848 void
_resolv_flush_cache_for_net(unsigned netid)1849 _resolv_flush_cache_for_net(unsigned netid)
1850 {
1851     pthread_once(&_res_cache_once, _res_cache_init);
1852     pthread_mutex_lock(&_res_cache_list_lock);
1853 
1854     _flush_cache_for_net_locked(netid);
1855 
1856     pthread_mutex_unlock(&_res_cache_list_lock);
1857 }
1858 
1859 static void
_flush_cache_for_net_locked(unsigned netid)1860 _flush_cache_for_net_locked(unsigned netid)
1861 {
1862     struct resolv_cache* cache = _find_named_cache_locked(netid);
1863     if (cache) {
1864         _cache_flush_locked(cache);
1865     }
1866 
1867     // Also clear the NS statistics.
1868     struct resolv_cache_info* cache_info = _find_cache_info_locked(netid);
1869     _res_cache_clear_stats_locked(cache_info);
1870 }
1871 
_resolv_delete_cache_for_net(unsigned netid)1872 void _resolv_delete_cache_for_net(unsigned netid)
1873 {
1874     pthread_once(&_res_cache_once, _res_cache_init);
1875     pthread_mutex_lock(&_res_cache_list_lock);
1876 
1877     struct resolv_cache_info* prev_cache_info = &_res_cache_list;
1878 
1879     while (prev_cache_info->next) {
1880         struct resolv_cache_info* cache_info = prev_cache_info->next;
1881 
1882         if (cache_info->netid == netid) {
1883             prev_cache_info->next = cache_info->next;
1884             _cache_flush_locked(cache_info->cache);
1885             free(cache_info->cache->entries);
1886             free(cache_info->cache);
1887             _free_nameservers_locked(cache_info);
1888             free(cache_info);
1889             break;
1890         }
1891 
1892         prev_cache_info = prev_cache_info->next;
1893     }
1894 
1895     pthread_mutex_unlock(&_res_cache_list_lock);
1896 }
1897 
1898 static struct resolv_cache_info*
_create_cache_info(void)1899 _create_cache_info(void)
1900 {
1901     struct resolv_cache_info* cache_info;
1902 
1903     cache_info = calloc(sizeof(*cache_info), 1);
1904     return cache_info;
1905 }
1906 
1907 static void
_insert_cache_info_locked(struct resolv_cache_info * cache_info)1908 _insert_cache_info_locked(struct resolv_cache_info* cache_info)
1909 {
1910     struct resolv_cache_info* last;
1911 
1912     for (last = &_res_cache_list; last->next; last = last->next);
1913 
1914     last->next = cache_info;
1915 
1916 }
1917 
1918 static struct resolv_cache*
_find_named_cache_locked(unsigned netid)1919 _find_named_cache_locked(unsigned netid) {
1920 
1921     struct resolv_cache_info* info = _find_cache_info_locked(netid);
1922 
1923     if (info != NULL) return info->cache;
1924 
1925     return NULL;
1926 }
1927 
1928 static struct resolv_cache_info*
_find_cache_info_locked(unsigned netid)1929 _find_cache_info_locked(unsigned netid)
1930 {
1931     struct resolv_cache_info* cache_info = _res_cache_list.next;
1932 
1933     while (cache_info) {
1934         if (cache_info->netid == netid) {
1935             break;
1936         }
1937 
1938         cache_info = cache_info->next;
1939     }
1940     return cache_info;
1941 }
1942 
void
_resolv_set_default_params(struct __res_params* params) {
    /* Fill |params| with the compile-time defaults. min/max_samples of 0
     * disable sample collection (see the max_samples <= 0 early return in
     * _resolv_cache_add_resolver_stats_sample). */
    params->sample_validity = NSSAMPLE_VALIDITY;
    params->success_threshold = SUCCESS_THRESHOLD;
    params->min_samples = 0;
    params->max_samples = 0;
}
1950 
/*
 * Install the nameservers, search-domain string and resolver params for
 * |netid|, creating the per-network cache on first use.
 * Returns 0 on success, E2BIG if numservers > MAXNS, EINVAL if any server
 * string fails numeric parsing.
 *
 * Fix: on the paths where the parsed addrinfo structures were NOT handed
 * over to the cache_info (server list unchanged, or cache creation failed),
 * the original code leaked one addrinfo set per call; they are now freed.
 */
int
_resolv_set_nameservers_for_net(unsigned netid, const char** servers, unsigned numservers,
        const char *domains, const struct __res_params* params)
{
    char sbuf[NI_MAXSERV];
    register char *cp;
    int *offset;
    struct addrinfo* nsaddrinfo[MAXNS];

    if (numservers > MAXNS) {
        XLOG("%s: numservers=%u, MAXNS=%u", __FUNCTION__, numservers, MAXNS);
        return E2BIG;
    }

    // Parse the addresses before actually locking or changing any state, in case there is an error.
    // As a side effect this also reduces the time the lock is kept.
    struct addrinfo hints = {
        .ai_family = AF_UNSPEC,
        .ai_socktype = SOCK_DGRAM,
        .ai_flags = AI_NUMERICHOST
    };
    snprintf(sbuf, sizeof(sbuf), "%u", NAMESERVER_PORT);
    for (unsigned i = 0; i < numservers; i++) {
        // Once stored in cache_info below, these addrinfo structures are owned by it and
        // eventually freed in _free_nameservers_locked().
        int rt = getaddrinfo(servers[i], sbuf, &hints, &nsaddrinfo[i]);
        if (rt != 0) {
            // Roll back everything parsed so far before bailing out.
            for (unsigned j = 0 ; j < i ; j++) {
                freeaddrinfo(nsaddrinfo[j]);
                nsaddrinfo[j] = NULL;
            }
            XLOG("%s: getaddrinfo(%s)=%s", __FUNCTION__, servers[i], gai_strerror(rt));
            return EINVAL;
        }
    }

    pthread_once(&_res_cache_once, _res_cache_init);
    pthread_mutex_lock(&_res_cache_list_lock);

    // creates the cache if not created
    _get_res_cache_for_net_locked(netid);

    struct resolv_cache_info* cache_info = _find_cache_info_locked(netid);

    if (cache_info != NULL) {
        uint8_t old_max_samples = cache_info->params.max_samples;
        if (params != NULL) {
            cache_info->params = *params;
        } else {
            _resolv_set_default_params(&cache_info->params);
        }

        if (!_resolv_is_nameservers_equal_locked(cache_info, servers, numservers)) {
            // free current before adding new
            _free_nameservers_locked(cache_info);
            unsigned i;
            for (i = 0; i < numservers; i++) {
                // Ownership of nsaddrinfo[i] transfers to cache_info here.
                cache_info->nsaddrinfo[i] = nsaddrinfo[i];
                cache_info->nameservers[i] = strdup(servers[i]);
                XLOG("%s: netid = %u, addr = %s\n", __FUNCTION__, netid, servers[i]);
            }
            cache_info->nscount = numservers;

            // Flush the cache and reset the stats.
            _flush_cache_for_net_locked(netid);

            // increment the revision id to ensure that sample state is not written back if the
            // servers change; in theory it would suffice to do so only if the servers or
            // max_samples actually change, in practice the overhead of checking is higher than the
            // cost, and overflows are unlikely
            ++cache_info->revision_id;
        } else {
            // Server list unchanged: nothing above took ownership of the freshly
            // parsed addrinfo structures, so free them here instead of leaking them.
            for (unsigned j = 0; j < numservers; j++) {
                freeaddrinfo(nsaddrinfo[j]);
                nsaddrinfo[j] = NULL;
            }
            if (cache_info->params.max_samples != old_max_samples) {
                // If the maximum number of samples changes, the overhead of keeping the most
                // recent samples around is not considered worth the effort, so they are cleared
                // instead. All other parameters do not affect shared state: Changing these
                // parameters does not invalidate the samples, as they only affect aggregation and
                // the conditions under which servers are considered usable.
                _res_cache_clear_stats_locked(cache_info);
                ++cache_info->revision_id;
            }
        }

        // Always update the search paths, since determining whether they actually changed is
        // complex due to the zero-padding, and probably not worth the effort. Cache-flushing
        // however is not necessary, since the stored cache entries do contain the domain, not
        // just the host name.
        // code moved from res_init.c, load_domain_search_list
        strlcpy(cache_info->defdname, domains, sizeof(cache_info->defdname));
        if ((cp = strchr(cache_info->defdname, '\n')) != NULL)
            *cp = '\0';

        // Tokenize the whitespace-separated domain list in place, recording the
        // offset of each token into dnsrch_offset.
        cp = cache_info->defdname;
        offset = cache_info->dnsrch_offset;
        while (offset < cache_info->dnsrch_offset + MAXDNSRCH) {
            while (*cp == ' ' || *cp == '\t') /* skip leading white space */
                cp++;
            if (*cp == '\0') /* stop if nothing more to do */
                break;
            *offset++ = cp - cache_info->defdname; /* record this search domain */
            while (*cp) { /* zero-terminate it */
                if (*cp == ' '|| *cp == '\t') {
                    *cp++ = '\0';
                    break;
                }
                cp++;
            }
        }
        *offset = -1; /* cache_info->dnsrch_offset has MAXDNSRCH+1 items */
    } else {
        // Cache creation failed (e.g. OOM): the parsed addresses were never
        // stored, so release them before returning.
        for (unsigned j = 0; j < numservers; j++) {
            freeaddrinfo(nsaddrinfo[j]);
            nsaddrinfo[j] = NULL;
        }
    }

    pthread_mutex_unlock(&_res_cache_list_lock);
    return 0;
}
2062 
2063 static int
_resolv_is_nameservers_equal_locked(struct resolv_cache_info * cache_info,const char ** servers,int numservers)2064 _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
2065         const char** servers, int numservers)
2066 {
2067     if (cache_info->nscount != numservers) {
2068         return 0;
2069     }
2070 
2071     // Compare each name server against current name servers.
2072     // TODO: this is incorrect if the list of current or previous nameservers
2073     // contains duplicates. This does not really matter because the framework
2074     // filters out duplicates, but we should probably fix it. It's also
2075     // insensitive to the order of the nameservers; we should probably fix that
2076     // too.
2077     for (int i = 0; i < numservers; i++) {
2078         for (int j = 0 ; ; j++) {
2079             if (j >= numservers) {
2080                 return 0;
2081             }
2082             if (strcmp(cache_info->nameservers[i], servers[j]) == 0) {
2083                 break;
2084             }
2085         }
2086     }
2087 
2088     return 1;
2089 }
2090 
2091 static void
_free_nameservers_locked(struct resolv_cache_info * cache_info)2092 _free_nameservers_locked(struct resolv_cache_info* cache_info)
2093 {
2094     int i;
2095     for (i = 0; i < cache_info->nscount; i++) {
2096         free(cache_info->nameservers[i]);
2097         cache_info->nameservers[i] = NULL;
2098         if (cache_info->nsaddrinfo[i] != NULL) {
2099             freeaddrinfo(cache_info->nsaddrinfo[i]);
2100             cache_info->nsaddrinfo[i] = NULL;
2101         }
2102         cache_info->nsstats[i].sample_count =
2103             cache_info->nsstats[i].sample_next = 0;
2104     }
2105     cache_info->nscount = 0;
2106     _res_cache_clear_stats_locked(cache_info);
2107     ++cache_info->revision_id;
2108 }
2109 
void
_resolv_populate_res_for_net(res_state statp)
{
    /* Copy the cached per-network resolver configuration (nameserver
     * addresses and search domains) for statp->netid into |statp|. */
    if (statp == NULL) {
        return;
    }

    pthread_once(&_res_cache_once, _res_cache_init);
    pthread_mutex_lock(&_res_cache_list_lock);

    struct resolv_cache_info* info = _find_cache_info_locked(statp->netid);
    if (info != NULL) {
        int nserv;
        struct addrinfo* ai;
        XLOG("%s: %u\n", __FUNCTION__, statp->netid);
        for (nserv = 0; nserv < MAXNS; nserv++) {
            ai = info->nsaddrinfo[nserv];
            if (ai == NULL) {
                break;
            }

            /* sizeof() does not evaluate its operand, so this size check is
             * safe even when the extension block (ext) is NULL. */
            if ((size_t) ai->ai_addrlen <= sizeof(statp->_u._ext.ext->nsaddrs[0])) {
                if (statp->_u._ext.ext != NULL) {
                    /* Store the full sockaddr in the extension block;
                     * AF_UNSPEC in nsaddr_list marks the slot as "see ext". */
                    memcpy(&statp->_u._ext.ext->nsaddrs[nserv], ai->ai_addr, ai->ai_addrlen);
                    statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
                } else {
                    /* No extension block: fall back to the legacy
                     * sockaddr_in list when the address fits there. */
                    if ((size_t) ai->ai_addrlen
                            <= sizeof(statp->nsaddr_list[0])) {
                        memcpy(&statp->nsaddr_list[nserv], ai->ai_addr,
                                ai->ai_addrlen);
                    } else {
                        statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
                    }
                }
            } else {
                XLOG("%s: found too long addrlen", __FUNCTION__);
            }
        }
        statp->nscount = nserv;
        // now do search domains.  Note that we cache the offsets as this code runs a lot
        // but the setting/offset-computer only runs when set/changed
        // WARNING: Don't use str*cpy() here, this string contains zeroes.
        memcpy(statp->defdname, info->defdname, sizeof(statp->defdname));
        register char **pp = statp->dnsrch;
        register int *p = info->dnsrch_offset;
        /* Rebuild the dnsrch pointer array from the cached offsets; -1 ends
         * the offset list (see _resolv_set_nameservers_for_net). */
        while (pp < statp->dnsrch + MAXDNSRCH && *p != -1) {
            *pp++ = &statp->defdname[0] + *p++;
        }
    }
    pthread_mutex_unlock(&_res_cache_list_lock);
}
2161 
2162 /* Resolver reachability statistics. */
2163 
2164 static void
_res_cache_add_stats_sample_locked(struct __res_stats * stats,const struct __res_sample * sample,int max_samples)2165 _res_cache_add_stats_sample_locked(struct __res_stats* stats, const struct __res_sample* sample,
2166         int max_samples) {
2167     // Note: This function expects max_samples > 0, otherwise a (harmless) modification of the
2168     // allocated but supposedly unused memory for samples[0] will happen
2169     XLOG("%s: adding sample to stats, next = %d, count = %d", __FUNCTION__,
2170             stats->sample_next, stats->sample_count);
2171     stats->samples[stats->sample_next] = *sample;
2172     if (stats->sample_count < max_samples) {
2173         ++stats->sample_count;
2174     }
2175     if (++stats->sample_next >= max_samples) {
2176         stats->sample_next = 0;
2177     }
2178 }
2179 
2180 static void
_res_cache_clear_stats_locked(struct resolv_cache_info * cache_info)2181 _res_cache_clear_stats_locked(struct resolv_cache_info* cache_info) {
2182     if (cache_info) {
2183         for (int i = 0 ; i < MAXNS ; ++i) {
2184             cache_info->nsstats->sample_count = cache_info->nsstats->sample_next = 0;
2185         }
2186     }
2187 }
2188 
/*
 * Copy out a snapshot of the resolver configuration and statistics for
 * |netid|: server sockaddrs, search domains, params and per-server stats.
 * Returns the cache revision id on success, or -1 with errno set (EFAULT /
 * EMSGSIZE / ENOENT / ENOTUNIQ) when the stored state fails validation,
 * or -1 (errno untouched) when the network is unknown.
 */
int
android_net_res_stats_get_info_for_net(unsigned netid, int* nscount,
        struct sockaddr_storage servers[MAXNS], int* dcount, char domains[MAXDNSRCH][MAXDNSRCHPATH],
        struct __res_params* params, struct __res_stats stats[MAXNS]) {
    int revision_id = -1;
    pthread_mutex_lock(&_res_cache_list_lock);

    struct resolv_cache_info* info = _find_cache_info_locked(netid);
    if (info) {
        /* Validate before copying anything out; each failure path must
         * release the lock it acquired above. */
        if (info->nscount > MAXNS) {
            pthread_mutex_unlock(&_res_cache_list_lock);
            XLOG("%s: nscount %d > MAXNS %d", __FUNCTION__, info->nscount, MAXNS);
            errno = EFAULT;
            return -1;
        }
        int i;
        for (i = 0; i < info->nscount; i++) {
            // Verify that the following assumptions are held, failure indicates corruption:
            //  - getaddrinfo() may never return a sockaddr > sockaddr_storage
            //  - all addresses are valid
            //  - there is only one address per addrinfo thanks to numeric resolution
            int addrlen = info->nsaddrinfo[i]->ai_addrlen;
            if (addrlen < (int) sizeof(struct sockaddr) ||
                    addrlen > (int) sizeof(servers[0])) {
                pthread_mutex_unlock(&_res_cache_list_lock);
                XLOG("%s: nsaddrinfo[%d].ai_addrlen == %d", __FUNCTION__, i, addrlen);
                errno = EMSGSIZE;
                return -1;
            }
            if (info->nsaddrinfo[i]->ai_addr == NULL) {
                pthread_mutex_unlock(&_res_cache_list_lock);
                XLOG("%s: nsaddrinfo[%d].ai_addr == NULL", __FUNCTION__, i);
                errno = ENOENT;
                return -1;
            }
            if (info->nsaddrinfo[i]->ai_next != NULL) {
                pthread_mutex_unlock(&_res_cache_list_lock);
                XLOG("%s: nsaddrinfo[%d].ai_next != NULL", __FUNCTION__, i);
                errno = ENOTUNIQ;
                return -1;
            }
        }
        /* All servers validated; copy addresses and stats. */
        *nscount = info->nscount;
        for (i = 0; i < info->nscount; i++) {
            memcpy(&servers[i], info->nsaddrinfo[i]->ai_addr, info->nsaddrinfo[i]->ai_addrlen);
            stats[i] = info->nsstats[i];
        }
        for (i = 0; i < MAXDNSRCH; i++) {
            const char* cur_domain = info->defdname + info->dnsrch_offset[i];
            // dnsrch_offset[i] can either be -1 or point to an empty string to indicate the end
            // of the search offsets. Checking for < 0 is not strictly necessary, but safer.
            // TODO: Pass in a search domain array instead of a string to
            // _resolv_set_nameservers_for_net() and make this double check unnecessary.
            if (info->dnsrch_offset[i] < 0 ||
                    ((size_t)info->dnsrch_offset[i]) >= sizeof(info->defdname) || !cur_domain[0]) {
                break;
            }
            strlcpy(domains[i], cur_domain, MAXDNSRCHPATH);
        }
        *dcount = i;
        *params = info->params;
        revision_id = info->revision_id;
    }

    pthread_mutex_unlock(&_res_cache_list_lock);
    return revision_id;
}
2256 
2257 int
_resolv_cache_get_resolver_stats(unsigned netid,struct __res_params * params,struct __res_stats stats[MAXNS])2258 _resolv_cache_get_resolver_stats( unsigned netid, struct __res_params* params,
2259         struct __res_stats stats[MAXNS]) {
2260     int revision_id = -1;
2261     pthread_mutex_lock(&_res_cache_list_lock);
2262 
2263     struct resolv_cache_info* info = _find_cache_info_locked(netid);
2264     if (info) {
2265         memcpy(stats, info->nsstats, sizeof(info->nsstats));
2266         *params = info->params;
2267         revision_id = info->revision_id;
2268     }
2269 
2270     pthread_mutex_unlock(&_res_cache_list_lock);
2271     return revision_id;
2272 }
2273 
2274 void
_resolv_cache_add_resolver_stats_sample(unsigned netid,int revision_id,int ns,const struct __res_sample * sample,int max_samples)2275 _resolv_cache_add_resolver_stats_sample( unsigned netid, int revision_id, int ns,
2276        const struct __res_sample* sample, int max_samples) {
2277     if (max_samples <= 0) return;
2278 
2279     pthread_mutex_lock(&_res_cache_list_lock);
2280 
2281     struct resolv_cache_info* info = _find_cache_info_locked(netid);
2282 
2283     if (info && info->revision_id == revision_id) {
2284         _res_cache_add_stats_sample_locked(&info->nsstats[ns], sample, max_samples);
2285     }
2286 
2287     pthread_mutex_unlock(&_res_cache_list_lock);
2288 }
2289 
2290