Modernization: Implement base data-structures and replace usage (#540)

c-ares currently lacks modern data structures that can make coding easier and more efficient. This PR implements a new linked list, skip list (sorted linked list), and hashtable implementation that are easy to use and hard to misuse. Though these implementations use more memory allocations than the prior implementation, the ability to more rapidly iterate on the codebase is a bigger win than any marginal performance difference (which is unlikely to be visible, modern systems are much more powerful than when c-ares was initially created).

The data structure implementation favors readability and audit-ability over performance, however using the algorithmically correct data type for the purpose should offset any perceived losses.

The primary motivation for this PR is to facilitate future implementation for Issues #444, #135, #458, and possibly #301

A couple additional notes:

The ares_timeout() function is now O(1) complexity instead of O(n) due to the use of a skiplist.
Some obscure bugs were uncovered that the existing test cases had been incorrectly validating as correct behavior. These have been addressed in this PR but are not discussed individually.
Fixed some dead code warnings in ares_rand for systems that don't need rc4

Fix By: Brad House (@bradh352)
pull/542/head
Brad House 1 year ago committed by GitHub
parent 39311a6031
commit cf99c025cf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 16
      src/lib/Makefile.inc
  2. 328
      src/lib/ares__htable.c
  3. 154
      src/lib/ares__htable.h
  4. 183
      src/lib/ares__htable_stvp.c
  5. 106
      src/lib/ares__htable_stvp.h
  6. 307
      src/lib/ares__llist.c
  7. 194
      src/lib/ares__llist.h
  8. 493
      src/lib/ares__slist.c
  9. 188
      src/lib/ares__slist.h
  10. 52
      src/lib/ares_cancel.c
  11. 45
      src/lib/ares_destroy.c
  12. 2
      src/lib/ares_fds.c
  13. 8
      src/lib/ares_getaddrinfo.c
  14. 2
      src/lib/ares_getsock.c
  15. 148
      src/lib/ares_init.c
  16. 65
      src/lib/ares_llist.c
  17. 41
      src/lib/ares_llist.h
  18. 10
      src/lib/ares_options.c
  19. 57
      src/lib/ares_private.h
  20. 175
      src/lib/ares_process.c
  21. 25
      src/lib/ares_query.c
  22. 108
      src/lib/ares_rand.c
  23. 23
      src/lib/ares_send.c
  24. 88
      src/lib/ares_timeout.c
  25. 9
      test/ares-test-mock-ai.cc
  26. 9
      test/ares-test-mock.cc

@ -2,11 +2,15 @@
# SPDX-License-Identifier: MIT
CSOURCES = ares__addrinfo2hostent.c \
ares__addrinfo_localhost.c \
ares__close_sockets.c \
ares__addrinfo_localhost.c \
ares__close_sockets.c \
ares__get_hostent.c \
ares__htable.c \
ares__htable_stvp.c \
ares__llist.c \
ares__parse_into_addrinfo.c \
ares__readaddrinfo.c \
ares__slist.c \
ares__sortaddrinfo.c \
ares__read_line.c \
ares__timeval.c \
@ -28,7 +32,6 @@ CSOURCES = ares__addrinfo2hostent.c \
ares_getsock.c \
ares_init.c \
ares_library_init.c \
ares_llist.c \
ares_mkquery.c \
ares_create_query.c \
ares_nowarn.c \
@ -62,13 +65,16 @@ CSOURCES = ares__addrinfo2hostent.c \
inet_ntop.c \
windows_port.c
HHEADERS = ares_android.h \
HHEADERS = ares__htable.h \
ares__htable_stvp.h \
ares__llist.h \
ares__slist.h \
ares_android.h \
ares_data.h \
ares_getenv.h \
ares_inet_net_pton.h \
ares_iphlpapi.h \
ares_ipv6.h \
ares_llist.h \
ares_nowarn.h \
ares_platform.h \
ares_private.h \

@ -0,0 +1,328 @@
/* Copyright (C) 2023 by Brad House
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of M.I.T. not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SPDX-License-Identifier: MIT
*/
#include "ares_setup.h"
#include "ares.h"
#include "ares_private.h"
#include "ares__llist.h"
#include "ares__htable.h"
#define ARES__HTABLE_MAX_BUCKETS (1U<<24)
#define ARES__HTABLE_MIN_BUCKETS (1U<<4)
#define ARES__HTABLE_EXPAND_PERCENT 75
/*! Generic hash table.  Stored entries ("buckets") are opaque; all key
 *  extraction, hashing, comparison and destruction is delegated to the
 *  callbacks supplied at creation time. */
struct ares__htable {
ares__htable_hashfunc_t hash;             /*!< Key hash callback */
ares__htable_bucket_key_t bucket_key;     /*!< Key-from-bucket callback */
ares__htable_bucket_free_t bucket_free;   /*!< Bucket destructor callback */
ares__htable_key_eq_t key_eq;             /*!< Key equality callback */
unsigned int seed;                        /*!< Per-instance random hash seed */
unsigned int size;                        /*!< Bucket count, always a power of 2 */
size_t num_keys;                          /*!< Number of stored entries */
/* NOTE: if we converted buckets into ares__slist_t we could guarantee on
 * hash collisions we would have O(log n) worst case insert and search
 * performance. (We'd also need to make key_eq into a key_cmp to
 * support sort). That said, risk with a random hash seed is near zero,
 * and ares__slist_t is heavier weight so I think using ares__llist_t
 * is an overall win. */
ares__llist_t **buckets;                  /*!< Lazily-allocated collision lists */
};
/*! Generate a quick, non-cryptographic per-table hash seed.
 *
 *  Mixes a heap address, a stack address, and the current time.  The
 *  sources are combined with addition rather than OR: OR-ing three
 *  values saturates bits toward 1, collapsing most of the entropy each
 *  source contributes (the original |= form tended toward ~0).
 *
 *  \param[in] htable  table being initialized (heap address used as entropy)
 *  \return seed value
 */
static unsigned int ares__htable_generate_seed(ares__htable_t *htable)
{
  unsigned int seed = 0;

  seed += (unsigned int)((size_t)htable & 0xFFFFFFFF);
  seed += (unsigned int)((size_t)&seed  & 0xFFFFFFFF);
  seed += (unsigned int)(time(NULL)     & 0xFFFFFFFF);
  return seed;
}
/* Free an array of bucket lists.  When destroy_vals is zero, the stored
 * values are preserved by disabling each list's destructor before the
 * list shell itself is destroyed. */
static void ares__htable_buckets_destroy(ares__llist_t **buckets,
                                         unsigned int size,
                                         unsigned char destroy_vals)
{
  unsigned int i;

  if (buckets == NULL)
    return;

  for (i = 0; i < size; i++) {
    ares__llist_t *list = buckets[i];

    if (list == NULL)
      continue;

    if (!destroy_vals)
      ares__llist_replace_destructor(list, NULL);

    ares__llist_destroy(list);
  }

  ares_free(buckets);
}
void ares__htable_destroy(ares__htable_t *htable)
{
  if (htable == NULL)
    return;

  /* Tears down all bucket lists including the stored entries */
  ares__htable_buckets_destroy(htable->buckets, htable->size, 1);
  ares_free(htable);
}
ares__htable_t *ares__htable_create(ares__htable_hashfunc_t hash_func,
                                    ares__htable_bucket_key_t bucket_key,
                                    ares__htable_bucket_free_t bucket_free,
                                    ares__htable_key_eq_t key_eq)
{
  ares__htable_t *htable = NULL;

  /* Every callback is mandatory; refuse a partially-configured table */
  if (hash_func == NULL || bucket_key == NULL || bucket_free == NULL ||
      key_eq == NULL) {
    goto fail;
  }

  htable = ares_malloc(sizeof(*htable));
  if (htable == NULL)
    goto fail;
  memset(htable, 0, sizeof(*htable));

  htable->hash        = hash_func;
  htable->bucket_key  = bucket_key;
  htable->bucket_free = bucket_free;
  htable->key_eq      = key_eq;
  /* Seed depends on the table's own heap address, so set it after alloc */
  htable->seed        = ares__htable_generate_seed(htable);
  htable->size        = ARES__HTABLE_MIN_BUCKETS;

  htable->buckets = ares_malloc(sizeof(*htable->buckets) * htable->size);
  if (htable->buckets == NULL)
    goto fail;
  memset(htable->buckets, 0, sizeof(*htable->buckets) * htable->size);

  return htable;

fail:
  /* Safe on NULL or partially-initialized table */
  ares__htable_destroy(htable);
  return NULL;
}
/*! Compute the bucket index for a key: the hash reduced to the bucket count.
 * "hash & (size - 1)" is equivalent to "hash % size" because size is
 * guaranteed to be a power of 2, but avoids a division.
 * Fully parenthesized (arguments and expansion) so the macro is safe in any
 * expression context; the unparenthesized form could bind surprisingly
 * against neighboring operators because & has low precedence. */
#define HASH_IDX(h, key) ((h)->hash((key), (h)->seed) & ((h)->size - 1))
/* Locate the list node in bucket list "idx" whose stored entry's key
 * matches "key".  Returns NULL when no match exists (including when the
 * bucket list has not been allocated yet). */
static ares__llist_node_t *ares__htable_find(ares__htable_t *htable,
                                             unsigned int idx,
                                             const void *key)
{
  ares__llist_node_t *cur;

  for (cur = ares__llist_node_first(htable->buckets[idx]); cur != NULL;
       cur = ares__llist_node_next(cur)) {
    const void *bkey = htable->bucket_key(ares__llist_node_val(cur));

    if (htable->key_eq(key, bkey)) {
      return cur;
    }
  }

  return NULL;
}
/* Double the bucket count and rehash every stored entry.  Returns 1 on
 * success OR when already at the maximum size (declining to expand is not
 * an error); returns 0 on allocation failure, leaving the table unchanged. */
static unsigned int ares__htable_expand(ares__htable_t *htable)
{
ares__llist_t **buckets = NULL;
unsigned int old_size = htable->size;
size_t i;
/* Not a failure, just won't expand */
if (old_size == ARES__HTABLE_MAX_BUCKETS)
return 1;
/* size is updated before rehashing because HASH_IDX below must reduce
 * hashes by the NEW bucket count; restored on the fail path */
htable->size <<= 1;
/* We must do this in 2 passes as we want it to be non-destructive in case
 * there is a memory allocation failure. So we will actually use more
 * memory doing it this way, but at least we might be able to gracefully
 * recover */
buckets = ares_malloc(sizeof(*buckets) * htable->size);
if (buckets == NULL)
goto fail;
memset(buckets, 0, sizeof(*buckets) * htable->size);
/* Pass 1: link every value into the new bucket lists.  During this pass a
 * value is temporarily referenced by BOTH the old and new lists. */
for (i=0; i<old_size; i++) {
ares__llist_node_t *node;
for (node = ares__llist_node_first(htable->buckets[i]);
node != NULL;
node = ares__llist_node_next(node)) {
void *val = ares__llist_node_val(node);
size_t idx = HASH_IDX(htable, htable->bucket_key(val));
if (buckets[idx] == NULL) {
/* Bucket lists are allocated lazily, same as in insert */
buckets[idx] = ares__llist_create(htable->bucket_free);
if (buckets[idx] == NULL)
goto fail;
}
if (ares__llist_insert_first(buckets[idx], val) == NULL) {
goto fail;
}
}
}
/* Pass 2: swap out buckets.  destroy_vals=0 so the values (now owned by
 * the new lists) survive; only the old list shells are freed. */
ares__htable_buckets_destroy(htable->buckets, old_size, 0);
htable->buckets = buckets;
return 1;
fail:
/* destroy_vals=0: the values are still owned by the old bucket lists */
ares__htable_buckets_destroy(buckets, htable->size, 0);
htable->size = old_size;
return 0;
}
unsigned int ares__htable_insert(ares__htable_t *htable, void *bucket)
{
  unsigned int        idx;
  const void         *key;
  ares__llist_node_t *match;

  if (htable == NULL || bucket == NULL)
    return 0;

  key = htable->bucket_key(bucket);
  idx = HASH_IDX(htable, key);

  /* An existing entry with the same key is replaced in place; the old
   * bucket is destroyed by the list's destructor during replacement */
  match = ares__htable_find(htable, idx, key);
  if (match != NULL) {
    ares__llist_node_replace(match, bucket);
    return 1;
  }

  /* Grow once occupancy crosses the threshold so collisions stay unlikely */
  if (htable->num_keys + 1 >
      (htable->size * ARES__HTABLE_EXPAND_PERCENT) / 100) {
    if (!ares__htable_expand(htable))
      return 0;
    /* The bucket count changed, so the index must be recomputed */
    idx = HASH_IDX(htable, key);
  }

  /* Bucket lists are allocated on first use */
  if (htable->buckets[idx] == NULL) {
    htable->buckets[idx] = ares__llist_create(htable->bucket_free);
    if (htable->buckets[idx] == NULL)
      return 0;
  }

  if (ares__llist_insert_first(htable->buckets[idx], bucket) == NULL)
    return 0;

  htable->num_keys++;
  return 1;
}
void *ares__htable_get(ares__htable_t *htable, const void *key)
{
  ares__llist_node_t *node;

  if (htable == NULL || key == NULL)
    return NULL;

  /* node_val(NULL) safely yields NULL on a miss */
  node = ares__htable_find(htable, HASH_IDX(htable, key), key);
  return ares__llist_node_val(node);
}
unsigned int ares__htable_remove(ares__htable_t *htable, const void *key)
{
  ares__llist_node_t *node;

  if (htable == NULL || key == NULL)
    return 0;

  node = ares__htable_find(htable, HASH_IDX(htable, key), key);
  if (node == NULL)
    return 0;

  htable->num_keys--;
  /* Destroying the node invokes bucket_free via the list's destructor */
  ares__llist_node_destroy(node);
  return 1;
}
size_t ares__htable_num_keys(ares__htable_t *htable)
{
  /* NULL table reads as empty */
  return (htable == NULL) ? 0 : htable->num_keys;
}
unsigned int ares__htable_hash_FNV1a(const void *key, size_t key_len,
                                     unsigned int seed)
{
  const unsigned char *bytes = key;
  /* FNV-1a offset basis is normally 2166136261U; a randomized seed is used
   * instead to resist hash-collision attacks */
  unsigned int         hash  = seed;
  size_t               pos;

  for (pos = 0; pos < key_len; pos++) {
    hash ^= (unsigned int)bytes[pos];
    /* Multiply by the 32-bit FNV prime 0x01000193 via shifts and adds */
    hash += (hash << 1) + (hash << 4) + (hash << 7) + (hash << 8) +
            (hash << 24);
  }

  return hash;
}
/* Case-insensitive FNV-1a variant, meant for string keys */
unsigned int ares__htable_hash_FNV1a_casecmp(const void *key, size_t key_len,
                                             unsigned int seed)
{
  const unsigned char *data = key;
  unsigned int         hv   = seed;
  size_t               i;

  for (i = 0; i < key_len; i++) {
    /* Pass the byte to tolower() as unsigned char.  The previous
     * (char) cast could produce a negative value for bytes >= 0x80 on
     * platforms where char is signed, which is undefined behavior for
     * the <ctype.h> functions. */
    hv ^= (unsigned int)tolower(data[i]);
    /* hv *= 16777619 (32-bit FNV prime), decomposed into shifts/adds */
    hv += (hv<<1) + (hv<<4) + (hv<<7) + (hv<<8) + (hv<<24);
  }
  return hv;
}

@ -0,0 +1,154 @@
/* Copyright (C) 2023 by Brad House
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of M.I.T. not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SPDX-License-Identifier: MIT
*/
#ifndef __ARES__HTABLE_H
#define __ARES__HTABLE_H
/*! \addtogroup ares__htable Base HashTable Data Structure
*
* This is a basic hashtable data structure that is meant to be wrapped
* by a higher level implementation. This data structure is designed to
* be callback-based in order to facilitate wrapping without needing to
* worry about any underlying complexities of the hashtable implementation.
*
* This implementation supports automatic growing by powers of 2 when reaching
* 75% capacity. A rehash will be performed on the expanded bucket list.
*
* Average time complexity:
* - Insert: O(1)
* - Search: O(1)
* - Delete: O(1)
*
* @{
*/
/* Forward declaration: the tag must match the typedef below.  The previous
 * declaration named "struct ares__htable_t", a tag that nothing else uses;
 * the definition in ares__htable.c is "struct ares__htable". */
struct ares__htable;
/*! Opaque data type for generic hash table implementation */
typedef struct ares__htable ares__htable_t;
/*! Callback for generating a hash of the key.
*
* \param[in] key pointer to key to be hashed
* \param[in] seed randomly generated seed used by hash function.
* value is specific to the hashtable instance
* but otherwise will not change between calls.
* \return hash
*/
typedef unsigned int (*ares__htable_hashfunc_t)(const void *key,
unsigned int seed);
/*! Callback to free the bucket
*
* \param[in] bucket user provided bucket
*/
typedef void (*ares__htable_bucket_free_t)(void *bucket);
/*! Callback to extract the key from the user-provided bucket
*
* \param[in] bucket user provided bucket
* \return pointer to key held in bucket
*/
typedef const void *(*ares__htable_bucket_key_t)(const void *bucket);
/*! Callback to compare two keys for equality
*
* \param[in] key1 first key
* \param[in] key2 second key
* \return 1 if equal, 0 if not
*/
typedef unsigned int (*ares__htable_key_eq_t)(const void *key1,
const void *key2);
/*! Destroy the initialized hashtable
*
* \param[in] initialized hashtable
*/
void ares__htable_destroy(ares__htable_t *htable);
/*! Create a new hashtable
*
* \param[in] hash_func Required. Callback for Hash function.
* \param[in] bucket_key Required. Callback to extract key from bucket.
* \param[in] bucket_free Required. Callback to free bucket.
* \param[in] key_eq Required. Callback to check for key equality.
* \return initialized hashtable. NULL if out of memory or misuse.
*/
ares__htable_t *ares__htable_create(ares__htable_hashfunc_t hash_func,
ares__htable_bucket_key_t bucket_key,
ares__htable_bucket_free_t bucket_free,
ares__htable_key_eq_t key_eq);
/*! Count of keys from initialized hashtable
*
* \param[in] htable Initialized hashtable.
* \return count of keys
*/
size_t ares__htable_num_keys(ares__htable_t *htable);
/*! Insert bucket into hashtable
*
* \param[in] htable Initialized hashtable.
* \param[in] bucket User-provided bucket to insert. Takes "ownership". Not
* allowed to be NULL.
* \return 1 on success, 0 if out of memory
*/
unsigned int ares__htable_insert(ares__htable_t *htable, void *bucket);
/*! Retrieve bucket from hashtable based on key.
*
* \param[in] htable Initialized hashtable
* \param[in] key Pointer to key to use for comparison.
* \return matching bucket, or NULL if not found.
*/
void *ares__htable_get(ares__htable_t *htable, const void *key);
/*! Remove bucket from hashtable by key
*
* \param[in] htable Initialized hashtable
* \param[in] key Pointer to key to use for comparison
* \return 1 if found, 0 if not found
*/
unsigned int ares__htable_remove(ares__htable_t *htable, const void *key);
/*! FNV1a hash algorithm. Can be used as underlying primitive for building
* a wrapper hashtable.
*
* \param[in] key pointer to key
* \param[in] key_len Length of key
* \param[in] seed Seed for generating hash
* \return hash value
*/
unsigned int ares__htable_hash_FNV1a(const void *key, size_t key_len,
unsigned int seed);
/*! FNV1a hash algorithm, but converts all characters to lowercase before
* hashing to make the hash case-insensitive. Can be used as underlying
* primitive for building a wrapper hashtable. Used on string-based keys.
*
* \param[in] key pointer to key
* \param[in] key_len Length of key
* \param[in] seed Seed for generating hash
* \return hash value
*/
unsigned int ares__htable_hash_FNV1a_casecmp(const void *key, size_t key_len,
                                             unsigned int seed);
/*! @} */
#endif /* __ARES__HTABLE_H */

@ -0,0 +1,183 @@
/* Copyright (C) 2023 by Brad House
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of M.I.T. not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SPDX-License-Identifier: MIT
*/
#include "ares_setup.h"
#include "ares.h"
#include "ares_private.h"
#include "ares__htable.h"
#include "ares__htable_stvp.h"
/*! size_t-key / void-pointer-value hash table: a thin typed wrapper over
 *  the generic ares__htable. */
struct ares__htable_stvp {
ares__htable_stvp_val_free_t free_val; /*!< Optional value destructor, may be NULL */
ares__htable_t *hash;                  /*!< Underlying generic hashtable */
};
/*! Entry stored in the generic table: key/value pair plus a back-pointer
 *  to the owning table so bucket_free() can reach the value destructor. */
typedef struct {
size_t key;
void *val;
ares__htable_stvp_t *parent; /*!< Owning table; read by bucket_free */
} ares__htable_stvp_bucket_t;
void ares__htable_stvp_destroy(ares__htable_stvp_t *htable)
{
  if (htable == NULL)
    return;

  /* Destroys all stored buckets (and values, via bucket_free) */
  ares__htable_destroy(htable->hash);
  ares_free(htable);
}
/* Hash callback: FNV-1a over the raw bytes of the size_t key */
static unsigned int hash_func(const void *bucket, unsigned int seed)
{
  const ares__htable_stvp_bucket_t *b = bucket;

  return ares__htable_hash_FNV1a(&b->key, sizeof(b->key), seed);
}
/* Key-extraction callback: the key lives inside the bucket */
static const void *bucket_key(const void *bucket)
{
  const ares__htable_stvp_bucket_t *b = bucket;

  return &b->key;
}
/* Bucket destructor: release the user value (when a destructor was
 * supplied at create time), then the bucket shell itself */
static void bucket_free(void *bucket)
{
  ares__htable_stvp_bucket_t *b = bucket;

  if (b->parent->free_val)
    b->parent->free_val(b->val);

  ares_free(b);
}
/* Key equality callback: compares two size_t keys by value */
static unsigned int key_eq(const void *key1, const void *key2)
{
  const size_t *a = key1;
  const size_t *b = key2;

  return (*a == *b) ? 1 : 0;
}
ares__htable_stvp_t *ares__htable_stvp_create(
    ares__htable_stvp_val_free_t val_free)
{
  ares__htable_stvp_t *htable = ares_malloc(sizeof(*htable));

  if (htable == NULL)
    return NULL;

  htable->hash = ares__htable_create(hash_func, bucket_key, bucket_free,
                                     key_eq);
  if (htable->hash == NULL) {
    /* No buckets exist yet, so only the wrapper needs freeing */
    ares_free(htable);
    return NULL;
  }

  htable->free_val = val_free;
  return htable;
}
unsigned int ares__htable_stvp_insert(ares__htable_stvp_t *htable, size_t key,
                                      void *val)
{
  ares__htable_stvp_bucket_t *bucket;

  if (htable == NULL)
    return 0;

  bucket = ares_malloc(sizeof(*bucket));
  if (bucket == NULL)
    return 0;

  bucket->parent = htable;
  bucket->key    = key;
  bucket->val    = val;

  /* On success the table takes ownership of the bucket (and value) */
  if (ares__htable_insert(htable->hash, bucket))
    return 1;

  /* Free only the shell; ownership of val stays with the caller */
  ares_free(bucket);
  return 0;
}
unsigned int ares__htable_stvp_get(ares__htable_stvp_t *htable, size_t key,
                                   void **val)
{
  ares__htable_stvp_bucket_t *bucket;

  /* Output defaults to NULL so callers see a defined value on a miss */
  if (val != NULL)
    *val = NULL;

  if (htable == NULL)
    return 0;

  bucket = ares__htable_get(htable->hash, &key);
  if (bucket == NULL)
    return 0;

  if (val != NULL)
    *val = bucket->val;

  return 1;
}
void *ares__htable_stvp_get_direct(ares__htable_stvp_t *htable, size_t key)
{
  void *val = NULL;

  /* Found/not-found is intentionally discarded; val stays NULL on a miss */
  (void)ares__htable_stvp_get(htable, key, &val);
  return val;
}
unsigned int ares__htable_stvp_remove(ares__htable_stvp_t *htable, size_t key)
{
  /* Removal destroys the bucket and value via the registered callbacks */
  return (htable == NULL) ? 0 : ares__htable_remove(htable->hash, &key);
}
size_t ares__htable_stvp_num_keys(ares__htable_stvp_t *htable)
{
  /* NULL table reads as empty */
  return (htable == NULL) ? 0 : ares__htable_num_keys(htable->hash);
}

@ -0,0 +1,106 @@
/* Copyright (C) 2023 by Brad House
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of M.I.T. not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SPDX-License-Identifier: MIT
*/
#ifndef __ARES__HTABLE_STVP_H
#define __ARES__HTABLE_STVP_H
/*! \addtogroup ares__htable_stvp HashTable with size_t Key and void pointer Value
*
* This data structure wraps the base ares__htable data structure in order to
* split the key and value data types as size_t and void pointer, respectively.
*
* Average time complexity:
* - Insert: O(1)
* - Search: O(1)
* - Delete: O(1)
*
* @{
*/
struct ares__htable_stvp;
/*! Opaque data type for size_t key, void pointer hash table implementation */
typedef struct ares__htable_stvp ares__htable_stvp_t;
/*! Callback to free value stored in hashtable
*
* \param[in] val user-supplied value
*/
typedef void (*ares__htable_stvp_val_free_t)(void *val);
/*! Destroy hashtable
*
* \param[in] htable Initialized hashtable
*/
void ares__htable_stvp_destroy(ares__htable_stvp_t *htable);
/*! Create size_t key, void pointer value hash table
*
* \param[in] val_free Optional. Call back to free user-supplied value. If
* NULL it is expected the caller will clean up any user
* supplied values.
*/
ares__htable_stvp_t *ares__htable_stvp_create(
ares__htable_stvp_val_free_t val_free);
/*! Insert key/value into hash table
*
* \param[in] htable Initialized hash table
* \param[in] key key to associate with value
* \param[in] val value to store (takes ownership). May be NULL.
* \return 1 on success, 0 on out of memory or misuse
*/
unsigned int ares__htable_stvp_insert(ares__htable_stvp_t *htable, size_t key,
void *val);
/*! Retrieve value from hashtable based on key
*
* \param[in] htable Initialized hash table
* \param[in] key key to use to search
* \param[out] val Optional. Pointer to store value.
* \return 1 on success, 0 on failure
*/
unsigned int ares__htable_stvp_get(ares__htable_stvp_t *htable, size_t key,
void **val);
/*! Retrieve value from hashtable directly as return value. Caveat to this
* function over ares__htable_stvp_get() is that if a NULL value is stored
* you cannot determine if the key is not found or the value is NULL.
*
* \param[in] htable Initialized hash table
* \param[in] key key to use to search
* \return value associated with key in hashtable or NULL
*/
void *ares__htable_stvp_get_direct(ares__htable_stvp_t *htable, size_t key);
/*! Remove a value from the hashtable by key
*
* \param[in] htable Initialized hash table
* \param[in] key key to use to search
* \return 1 if found, 0 if not
*/
unsigned int ares__htable_stvp_remove(ares__htable_stvp_t *htable, size_t key);
/*! Retrieve the number of keys stored in the hash table
*
* \param[in] htable Initialized hash table
* \return count
*/
size_t ares__htable_stvp_num_keys(ares__htable_stvp_t *htable);
/*! @} */
#endif /* __ARES__HTABLE_STVP_H */

@ -0,0 +1,307 @@
/* Copyright (C) 2023 by Brad House
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of M.I.T. not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SPDX-License-Identifier: MIT
*/
#include "ares_setup.h"
#include "ares.h"
#include "ares_private.h"
#include "ares__llist.h"
/*! Doubly-linked list handle */
struct ares__llist {
ares__llist_node_t *head;          /*!< First node, NULL when empty */
ares__llist_node_t *tail;          /*!< Last node, NULL when empty */
ares__llist_destructor_t destruct; /*!< Optional value destructor */
size_t cnt;                        /*!< Number of nodes in the list */
};
/*! List node: carries the user value plus chain and ownership links */
struct ares__llist_node {
void *data;               /*!< User-supplied value */
ares__llist_node_t *prev; /*!< Previous node, NULL at head */
ares__llist_node_t *next; /*!< Next node, NULL at tail */
ares__llist_t *parent;    /*!< Owning list */
};
ares__llist_t *ares__llist_create(ares__llist_destructor_t destruct)
{
  ares__llist_t *list;

  list = ares_malloc(sizeof(*list));
  if (list == NULL)
    return NULL;

  /* Zero-fill: empty list, NULL head/tail, zero count */
  memset(list, 0, sizeof(*list));
  list->destruct = destruct;

  return list;
}
void ares__llist_replace_destructor(ares__llist_t *list,
                                    ares__llist_destructor_t destruct)
{
  /* Typically used to disable destruction by passing NULL */
  if (list != NULL)
    list->destruct = destruct;
}
typedef enum {
  ARES__LLIST_INSERT_HEAD,
  ARES__LLIST_INSERT_TAIL,
  ARES__LLIST_INSERT_BEFORE
} ares__llist_insert_type_t;

/* Internal insertion helper shared by all public insert functions.
 * Returns the newly-created node, or NULL on misuse / out of memory. */
static ares__llist_node_t *ares__llist_insert_at(ares__llist_t *list,
                                                 ares__llist_insert_type_t type,
                                                 ares__llist_node_t *at,
                                                 void *val)
{
  ares__llist_node_t *node = NULL;

  if (list == NULL || val == NULL)
    return NULL;

  node = ares_malloc(sizeof(*node));
  if (node == NULL)
    return NULL;

  memset(node, 0, sizeof(*node));
  node->data   = val;
  node->parent = list;

  /* Inserting before the head (or before "nothing") is a head insert */
  if (type == ARES__LLIST_INSERT_BEFORE && (at == list->head || at == NULL)) {
    type = ARES__LLIST_INSERT_HEAD;
  }

  switch (type) {
    case ARES__LLIST_INSERT_HEAD:
      node->next = list->head;
      node->prev = NULL;
      if (list->head)
        list->head->prev = node;
      list->head = node;
      break;
    case ARES__LLIST_INSERT_TAIL:
      node->next = NULL;
      node->prev = list->tail;
      if (list->tail)
        list->tail->next = node;
      list->tail = node;
      break;
    case ARES__LLIST_INSERT_BEFORE:
      /* BUG FIX: the predecessor's next pointer must also be rewired to the
       * new node, otherwise forward traversal skips the insertion.  "at" is
       * guaranteed not to be the head here (converted to a head insert
       * above), so at->prev is non-NULL. */
      node->next     = at;
      node->prev     = at->prev;
      at->prev->next = node;
      at->prev       = node;
      break;
  }

  if (list->tail == NULL)
    list->tail = node;
  if (list->head == NULL)
    list->head = node;

  list->cnt++;

  return node;
}
/*! Prepend val to the list; returns the new node or NULL on failure/misuse */
ares__llist_node_t *ares__llist_insert_first(ares__llist_t *list, void *val)
{
return ares__llist_insert_at(list, ARES__LLIST_INSERT_HEAD, NULL, val);
}
/*! Append val to the list; returns the new node or NULL on failure/misuse */
ares__llist_node_t *ares__llist_insert_last(ares__llist_t *list, void *val)
{
return ares__llist_insert_at(list, ARES__LLIST_INSERT_TAIL, NULL, val);
}
ares__llist_node_t *ares__llist_insert_before(ares__llist_node_t *node,
                                              void *val)
{
  if (node == NULL)
    return NULL;

  /* The owning list is recovered from the reference node */
  return ares__llist_insert_at(node->parent, ARES__LLIST_INSERT_BEFORE,
                               node, val);
}
ares__llist_node_t *ares__llist_insert_after(ares__llist_node_t *node,
                                             void *val)
{
  if (node == NULL)
    return NULL;

  /* After the tail is a plain tail insert; otherwise implemented as an
   * insert before the successor */
  if (node->next == NULL)
    return ares__llist_insert_last(node->parent, val);

  return ares__llist_insert_at(node->parent, ARES__LLIST_INSERT_BEFORE,
                               node->next, val);
}
ares__llist_node_t *ares__llist_node_first(ares__llist_t *list)
{
  /* NULL list reads as empty */
  return (list != NULL) ? list->head : NULL;
}
ares__llist_node_t *ares__llist_node_last(ares__llist_t *list)
{
  /* NULL list reads as empty */
  return (list != NULL) ? list->tail : NULL;
}
ares__llist_node_t *ares__llist_node_next(ares__llist_node_t *node)
{
  /* NULL at end-of-list or on NULL input */
  return (node != NULL) ? node->next : NULL;
}
ares__llist_node_t *ares__llist_node_prev(ares__llist_node_t *node)
{
  /* NULL at start-of-list or on NULL input */
  return (node != NULL) ? node->prev : NULL;
}
void *ares__llist_node_val(ares__llist_node_t *node)
{
  /* Safe on NULL so callers can chain find + val without checks */
  return (node != NULL) ? node->data : NULL;
}
size_t ares__llist_len(ares__llist_t *list)
{
  /* NULL list reads as empty */
  return (list != NULL) ? list->cnt : 0;
}
ares__llist_t *ares__llist_node_parent(ares__llist_node_t *node)
{
  return (node != NULL) ? node->parent : NULL;
}
void *ares__llist_first_val(ares__llist_t *list)
{
  ares__llist_node_t *head = ares__llist_node_first(list);

  return ares__llist_node_val(head);
}
void *ares__llist_last_val(ares__llist_t *list)
{
  ares__llist_node_t *tail = ares__llist_node_last(list);

  return ares__llist_node_val(tail);
}
/* Detach the node from its list and hand the stored value back to the
 * caller WITHOUT invoking the destructor.  Returns NULL on NULL input. */
void *ares__llist_node_claim(ares__llist_node_t *node)
{
void *val;
ares__llist_t *list;
if (node == NULL)
return NULL;
list = node->parent;
val = node->data;
/* Unchain: rewire neighbor links around this node ... */
if (node->prev) {
node->prev->next = node->next;
}
if (node->next) {
node->next->prev = node->prev;
}
/* ... then update head/tail if this node sat at either end */
if (node == list->head) {
list->head = node->next;
}
if (node == list->tail) {
list->tail = node->prev;
}
ares_free(node);
list->cnt--;
return val;
}
void ares__llist_node_destroy(ares__llist_node_t *node)
{
  ares__llist_destructor_t destruct;
  void                    *val;

  if (node == NULL)
    return;

  /* Grab the destructor before claim() frees the node and severs parent */
  destruct = node->parent->destruct;
  val      = ares__llist_node_claim(node);

  if (val != NULL && destruct != NULL)
    destruct(val);
}
void ares__llist_node_replace(ares__llist_node_t *node, void *val)
{
  if (node == NULL)
    return;

  /* Destroy the prior value before taking ownership of the new one */
  if (node->parent->destruct != NULL)
    node->parent->destruct(node->data);

  node->data = val;
}
void ares__llist_destroy(ares__llist_t *list)
{
  ares__llist_node_t *node;

  if (list == NULL)
    return;

  /* Drain from the head, invoking the destructor on each value */
  while ((node = ares__llist_node_first(list)) != NULL)
    ares__llist_node_destroy(node);

  ares_free(list);
}

@ -0,0 +1,194 @@
/* Copyright (C) 2023 by Brad House
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of M.I.T. not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SPDX-License-Identifier: MIT
*/
#ifndef __ARES__LLIST_H
#define __ARES__LLIST_H
/*! \addtogroup ares__llist LinkedList Data Structure
*
* This is a doubly-linked list data structure.
*
* Average time complexity:
* - Insert: O(1) -- head or tail
* - Search: O(n)
* - Delete: O(1) -- delete assumes you hold a node pointer
*
* @{
*/
struct ares__llist;
/*! Opaque data structure for linked list */
typedef struct ares__llist ares__llist_t;
struct ares__llist_node;
/*! Opaque data structure for a node in a linked list */
typedef struct ares__llist_node ares__llist_node_t;
/*! Callback to free user-defined node data
*
* \param[in] data user supplied data
*/
typedef void (*ares__llist_destructor_t)(void *data);
/*! Create a linked list object
*
* \param[in] destruct Optional. Destructor to call on all removed nodes
* \return linked list object or NULL on out of memory
*/
ares__llist_t *ares__llist_create(ares__llist_destructor_t destruct);
/*! Replace destructor for linked list nodes. Typically this is used
* when wanting to disable the destructor by using NULL.
*
* \param[in] list Initialized linked list object
* \param[in] destruct replacement destructor, NULL is allowed
*/
void ares__llist_replace_destructor(ares__llist_t *list,
ares__llist_destructor_t destruct);
/*! Insert value as the first node in the linked list
*
* \param[in] list Initialized linked list object
* \param[in] val user-supplied value.
* \return node object referencing place in list, or null if out of memory or
* misuse
*/
ares__llist_node_t *ares__llist_insert_first(ares__llist_t *list, void *val);
/*! Insert value as the last node in the linked list
*
* \param[in] list Initialized linked list object
* \param[in] val user-supplied value.
* \return node object referencing place in list, or null if out of memory or
* misuse
*/
ares__llist_node_t *ares__llist_insert_last(ares__llist_t *list, void *val);
/*! Insert value before specified node in the linked list
*
* \param[in] node node referenced to insert before
* \param[in] val user-supplied value.
* \return node object referencing place in list, or null if out of memory or
* misuse
*/
ares__llist_node_t *ares__llist_insert_before(ares__llist_node_t *node,
void *val);
/*! Insert value after specified node in the linked list
*
* \param[in] node node referenced to insert after
* \param[in] val user-supplied value.
* \return node object referencing place in list, or null if out of memory or
* misuse
*/
ares__llist_node_t *ares__llist_insert_after(ares__llist_node_t *node,
void *val);
/*! Obtain first node in list
*
* \param[in] list Initialized list object
* \return first node in list or NULL if none
*/
ares__llist_node_t *ares__llist_node_first(ares__llist_t *list);
/*! Obtain last node in list
*
* \param[in] list Initialized list object
* \return last node in list or NULL if none
*/
ares__llist_node_t *ares__llist_node_last(ares__llist_t *list);
/*! Obtain next node in respect to specified node
*
* \param[in] node Node referenced
* \return node or NULL if none
*/
ares__llist_node_t *ares__llist_node_next(ares__llist_node_t *node);
/*! Obtain previous node in respect to specified node
*
* \param[in] node Node referenced
* \return node or NULL if none
*/
ares__llist_node_t *ares__llist_node_prev(ares__llist_node_t *node);
/*! Obtain value from node
*
* \param[in] node Node referenced
* \return user provided value from node
*/
void *ares__llist_node_val(ares__llist_node_t *node);
/*! Obtain the number of entries in the list
*
* \param[in] list Initialized list object
* \return count
*/
size_t ares__llist_len(ares__llist_t *list);
/*! Obtain list object from referenced node
*
* \param[in] node Node referenced
* \return list object node belongs to
*/
ares__llist_t *ares__llist_node_parent(ares__llist_node_t *node);
/*! Obtain the first user-supplied value in the list
*
* \param[in] list Initialized list object
* \return first user supplied value or NULL if none
*/
void *ares__llist_first_val(ares__llist_t *list);
/*! Obtain the last user-supplied value in the list
*
* \param[in] list Initialized list object
* \return last user supplied value or NULL if none
*/
void *ares__llist_last_val(ares__llist_t *list);
/*! Take ownership of user-supplied value in list without calling destructor.
* Will unchain entry from list.
*
* \param[in] node Node referenced
* \return user supplied value
*/
void *ares__llist_node_claim(ares__llist_node_t *node);
/*! Replace user-supplied value for node
*
* \param[in] node Node referenced
* \param[in] val new user-supplied value
*/
void ares__llist_node_replace(ares__llist_node_t *node, void *val);
/*! Destroy the node, removing it from the list and calling destructor.
*
* \param[in] node Node referenced
*/
void ares__llist_node_destroy(ares__llist_node_t *node);
/*! Destroy the list object and all nodes in the list.
*
* \param[in] list Initialized list object
*/
void ares__llist_destroy(ares__llist_t *list);
/*! @} */
#endif /* __ARES__LLIST_H */

@ -0,0 +1,493 @@
/* Copyright (C) 2023 by Brad House
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of M.I.T. not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SPDX-License-Identifier: MIT
*/
#include "ares_setup.h"
#include "ares.h"
#include "ares_private.h"
#include "ares__slist.h"
/* SkipList implementation */
#define ARES__SLIST_START_LEVELS 4
/* Skip list object.  A sorted linked list with multiple levels of linkage;
 * level 0 chains every node in order, higher levels skip progressively more
 * nodes to give O(log n) search/insert on average. */
struct ares__slist {
  /* Source of randomness for level "coin flips", plus a small cache of
   * random bytes consumed one bit at a time */
  ares_rand_state *rand_state;
  unsigned char rand_data[8];
  size_t rand_bits;                  /* unconsumed bits left in rand_data */

  ares__slist_node_t **head;         /* head[i] = first node at level i */
  size_t levels;                     /* number of entries in head[] */
  ares__slist_node_t *tail;          /* last node at level 0 (largest) */

  ares__slist_cmp_t cmp;             /* ordering comparator (required) */
  ares__slist_destructor_t destruct; /* optional node-value destructor */
  size_t cnt;                        /* number of nodes in the list */
};
/* Skip list node.  Participates in `levels` levels of linkage; prev[] and
 * next[] each hold `levels` pointers.  NULL prev[0] means first node in the
 * list, NULL next[0] means last. */
struct ares__slist_node {
  void *data;                /* user-supplied value */
  ares__slist_node_t **prev; /* prev[i] = predecessor at level i, or NULL */
  ares__slist_node_t **next; /* next[i] = successor at level i, or NULL */
  size_t levels;             /* number of levels this node is linked into */
  ares__slist_t *parent;     /* owning list */
};
ares__slist_t *ares__slist_create(ares_rand_state *rand_state,
                                  ares__slist_cmp_t cmp,
                                  ares__slist_destructor_t destruct)
{
  /* Allocate and zero a new skip list.  Both a randomness source and a
   * comparator are mandatory; the destructor may be NULL. */
  ares__slist_t *list = NULL;

  if (rand_state == NULL || cmp == NULL) {
    return NULL;
  }

  list = ares_malloc(sizeof(*list));
  if (list == NULL) {
    return NULL;
  }
  memset(list, 0, sizeof(*list));

  list->rand_state = rand_state;
  list->cmp        = cmp;
  list->destruct   = destruct;

  /* Start with a fixed number of levels; grows as entries are added */
  list->levels = ARES__SLIST_START_LEVELS;
  list->head   = ares_malloc(list->levels * sizeof(*list->head));
  if (list->head == NULL) {
    ares_free(list);
    return NULL;
  }
  memset(list->head, 0, list->levels * sizeof(*list->head));

  return list;
}
/* Return a random bit (0 or 1) used to decide node levels.  Random data is
 * pulled 8 bytes at a time; ares__rand_bytes() has its own caching, so this
 * small per-list cache keeps memory usage low without excessive calls. */
static unsigned int ares__slist_coin_flip(ares__slist_t *list)
{
  size_t nbits = sizeof(list->rand_data) * 8;
  size_t idx;

  /* Refill the bit cache when exhausted */
  if (list->rand_bits == 0) {
    ares__rand_bytes(list->rand_state, list->rand_data,
                     sizeof(list->rand_data));
    list->rand_bits = nbits;
  }

  idx = nbits - list->rand_bits;
  list->rand_bits--;

  return (list->rand_data[idx / 8] >> (idx % 8)) & 1;
}
/* Swap the node-value destructor; passing NULL disables destruction. */
void ares__slist_replace_destructor(ares__slist_t *list,
                                    ares__slist_destructor_t destruct)
{
  if (list == NULL) {
    return;
  }

  list->destruct = destruct;
}
/* Round up to the next power of 2 (a value that is already a power of 2 is
 * returned unchanged; behavior for n == 0 wraps to 0).  Uses public domain
 * code snippets from http://graphics.stanford.edu/~seander/bithacks.html */
static size_t ares__round_up_pow2(size_t n)
{
  n--;
  n |= n >> 1;
  n |= n >> 2;
  n |= n >> 4;
  n |= n >> 8;
  n |= n >> 16;
  /* Smear the upper half on 64-bit size_t.  Split into two 16-bit shifts so
   * the shift count is always < 32: a single `n >> 32` is undefined (and a
   * compiler warning) when size_t is only 32 bits wide.  On 32-bit this is
   * a harmless no-op (the first shift already yields 0). */
  n |= (n >> 16) >> 16;
  n++;
  return n;
}
/* Compute floor(log2(n)) for n > 0.  Callers only pass powers of two
 * (results of ares__round_up_pow2), for which this is the exact base-2
 * logarithm.
 *
 * A plain shift loop replaces the previous de Bruijn multiply/lookup: the
 * 64-bit branch shifted a size_t by 58, which is undefined (and triggers
 * shift-count warnings) on platforms where size_t is 32 bits, and the
 * 64-bit constant lacked a ULL suffix.  This function only runs when the
 * skip list grows a level, so speed is irrelevant here. */
static size_t ares__log2(size_t n)
{
  size_t bits = 0;

  while (n >>= 1) {
    bits++;
  }

  return bits;
}
/* Maximum number of levels a newly inserted node may use, scaling as
 * log2 of the entry count once the list outgrows the initial level count. */
static size_t ares__slist_max_level(ares__slist_t *list)
{
  size_t max_level;

  if (list->cnt + 1 <= (1 << ARES__SLIST_START_LEVELS)) {
    /* Small list: stick with the initial level count */
    max_level = ARES__SLIST_START_LEVELS;
  } else {
    max_level = ares__log2(ares__round_up_pow2(list->cnt + 1));
  }

  /* Never report fewer levels than the list already supports */
  if (list->levels > max_level) {
    max_level = list->levels;
  }

  return max_level;
}
/* Randomly pick a level count for a new node: keep flipping a coin while it
 * comes up heads, capped at the list's current maximum.  Yields the usual
 * 1/2^k level distribution of a skip list. */
static size_t ares__slist_calc_level(ares__slist_t *list)
{
  size_t max_level = ares__slist_max_level(list);
  size_t level     = 1;

  /* NOTE: the coin flip is evaluated before the cap check on purpose so the
   * random-bit consumption matches the original ordering */
  while (ares__slist_coin_flip(list) && level < max_level) {
    level++;
  }

  return level;
}
/* Insert a value into the skip list in sorted order.
 *
 * \param[in] list Initialized skip list object
 * \param[in] val  Value to insert, must not be NULL
 * \return node object referencing the inserted entry, or NULL on misuse or
 *         out of memory (in which case ownership of val is NOT taken)
 */
ares__slist_node_t *ares__slist_insert(ares__slist_t *list, void *val)
{
  ares__slist_node_t *node = NULL;
  ares__slist_node_t *left = NULL;
  size_t i;

  if (list == NULL || val == NULL) {
    return NULL;
  }

  node = ares_malloc(sizeof(*node));
  if (node == NULL) {
    goto fail;
  }
  memset(node, 0, sizeof(*node));

  node->data   = val;
  node->parent = list;

  /* Randomly determine the number of levels we want to use */
  node->levels = ares__slist_calc_level(list);

  /* Allocate arrays of next and prev nodes for linking each level */
  node->next = ares_malloc(sizeof(*node->next) * node->levels);
  if (node->next == NULL) {
    goto fail;
  }
  memset(node->next, 0, sizeof(*node->next) * node->levels);

  node->prev = ares_malloc(sizeof(*node->prev) * node->levels);
  if (node->prev == NULL) {
    goto fail;
  }
  memset(node->prev, 0, sizeof(*node->prev) * node->levels);

  /* If the number of levels is greater than we currently support in the
   * slist, increase the count */
  if (list->levels < node->levels) {
    size_t zero_len = sizeof(*list->head) * (node->levels - list->levels);
    size_t offset   = sizeof(*list->head) * list->levels;
    /* BUG FIX: the head array must grow to the NEW level count
     * (node->levels).  Reallocating with the old list->levels size and then
     * zeroing zero_len bytes past it was a heap buffer overflow. */
    void *ptr = ares_realloc(list->head, sizeof(*list->head) * node->levels);
    if (ptr == NULL) {
      goto fail;
    }

    /* Zero only the newly added slots */
    memset((unsigned char *)ptr + offset, 0, zero_len);
    list->head   = ptr;
    list->levels = node->levels;
  }

  /* Scan from highest level in the slist, even if we're not using that number
   * of levels for this entry, as this is what makes it O(log n) */
  for (i = list->levels; i-- > 0;) {
    /* set left if left is NULL and the current node value is greater than the
     * head at this level */
    if (left == NULL && list->head[i] != NULL &&
        list->cmp(node->data, list->head[i]->data) > 0) {
      left = list->head[i];
    }

    if (left != NULL) {
      /* scan forward to find our insertion point */
      while (left->next[i] != NULL &&
             list->cmp(node->data, left->next[i]->data) > 0) {
        left = left->next[i];
      }
    }

    /* search only, as we didn't randomly select this number of levels */
    if (i >= node->levels) {
      continue;
    }

    if (left == NULL) {
      /* head insertion */
      node->next[i] = list->head[i];
      node->prev[i] = NULL;
      list->head[i] = node;
    } else {
      /* chain after left */
      node->next[i] = left->next[i];
      node->prev[i] = left;
      left->next[i] = node;
    }

    if (node->next[i] != NULL) {
      /* chain prev of successor back to us */
      node->next[i]->prev[i] = node;
    } else if (i == 0) {
      /* no successor at the base level: we're the new tail */
      list->tail = node;
    }
  }

  list->cnt++;

  return node;

fail:
  /* Node was never linked; free partial allocations only */
  if (node) {
    ares_free(node->prev);
    ares_free(node->next);
    ares_free(node);
  }
  return NULL;
}
/* Find the first node whose value compares equal to val, or NULL if none.
 * Standard skip-list search: descend from the highest level, moving forward
 * while the sought value is greater, backing off one node when it overshoots,
 * then dropping a level. */
ares__slist_node_t *ares__slist_node_find(ares__slist_t *list, const void *val)
{
  size_t i;
  ares__slist_node_t *node = NULL;
  int rv = -1;
  if (list == NULL || val == NULL)
    return NULL;
  /* Scan nodes starting at the highest level. For each level scan forward
   * until the value is between the prior and next node, or if equal quit
   * as we found a match */
  for (i=list->levels; i-- > 0; ) {
    /* Only (re)start from the level head if no prior level gave us a
     * position; otherwise continue the descent from where we left off */
    if (node == NULL)
      node = list->head[i];
    if (node == NULL)
      continue;
    do {
      rv = list->cmp(val, node->data);
      if (rv < 0) {
        /* back off, our value is less than the current node's; may become
         * NULL which restarts from the head at the next lower level */
        node = node->prev[i];
      } else if (rv > 0) {
        /* move forward and try again. if it goes past, it will loop again and
         * go to previous entry */
        node = node->next[i];
      }
      /* rv == 0 will terminate loop */
    } while (node != NULL && rv > 0);
    /* Found a match, no need to continue */
    if (rv == 0) {
      break;
    }
  }
  /* no match */
  if (rv != 0) {
    return NULL;
  }
  /* The list may have multiple entries that match. They're guaranteed to be
   * in order, but we're not guaranteed to have selected the _first_ matching
   * node. Lets scan backwards to find the first match */
  while (node->prev[0] != NULL && list->cmp(node->prev[0]->data, val) == 0) {
    node = node->prev[0];
  }
  return node;
}
/* First (smallest) node: head of the base level, which links every node. */
ares__slist_node_t *ares__slist_node_first(ares__slist_t *list)
{
  return (list == NULL) ? NULL : list->head[0];
}
/* Last (largest) node, tracked directly as the tail. */
ares__slist_node_t *ares__slist_node_last(ares__slist_t *list)
{
  return (list == NULL) ? NULL : list->tail;
}
/* Successor at the base level (next node in sorted order). */
ares__slist_node_t *ares__slist_node_next(ares__slist_node_t *node)
{
  return (node == NULL) ? NULL : node->next[0];
}
/* Predecessor at the base level (previous node in sorted order). */
ares__slist_node_t *ares__slist_node_prev(ares__slist_node_t *node)
{
  return (node == NULL) ? NULL : node->prev[0];
}
/* User-supplied value stored in the node. */
void *ares__slist_node_val(ares__slist_node_t *node)
{
  return (node == NULL) ? NULL : node->data;
}
/* Number of entries currently in the list (0 for a NULL list). */
size_t ares__slist_len(ares__slist_t *list)
{
  return (list == NULL) ? 0 : list->cnt;
}
/* List object that owns the given node. */
ares__slist_t *ares__slist_node_parent(ares__slist_node_t *node)
{
  return (node == NULL) ? NULL : node->parent;
}
/* Value stored in the first node, or NULL if the list is empty. */
void *ares__slist_first_val(ares__slist_t *list)
{
  ares__slist_node_t *node = ares__slist_node_first(list);
  return ares__slist_node_val(node);
}
/* Value stored in the last node, or NULL if the list is empty. */
void *ares__slist_last_val(ares__slist_t *list)
{
  ares__slist_node_t *node = ares__slist_node_last(list);
  return ares__slist_node_val(node);
}
/* Unlink a node from its list at every level and free the node itself,
 * returning the user value WITHOUT invoking the destructor (caller takes
 * ownership). */
void *ares__slist_node_claim(ares__slist_node_t *node)
{
  void *val;
  ares__slist_t *list;
  size_t i;
  if (node == NULL)
    return NULL;
  list = node->parent;
  val = node->data;
  /* relink each node at each level */
  for (i=node->levels; i-- > 0; ) {
    if (node->next[i] == NULL) {
      /* no successor; if this is the base level we were the tail */
      if (i == 0) {
        list->tail = node->prev[0];
      }
    } else {
      node->next[i]->prev[i] = node->prev[i];
    }
    if (node->prev[i] == NULL) {
      /* no predecessor: we were the head at this level */
      list->head[i] = node->next[i];
    } else {
      node->prev[i]->next[i] = node->next[i];
    }
  }
  ares_free(node->next);
  ares_free(node->prev);
  ares_free(node);
  list->cnt--;
  return val;
}
/* Remove a node from its list and destroy its value via the list's
 * destructor (if one is set). */
void ares__slist_node_destroy(ares__slist_node_t *node)
{
  ares__slist_destructor_t destruct;
  void *val;

  if (node == NULL) {
    return;
  }

  /* Grab the destructor before claim() frees the node (and its parent link) */
  destruct = node->parent->destruct;
  val      = ares__slist_node_claim(node);

  if (destruct != NULL && val != NULL) {
    destruct(val);
  }
}
/* Destroy the list, destroying every remaining node (and value) first. */
void ares__slist_destroy(ares__slist_t *list)
{
  ares__slist_node_t *node;

  if (list == NULL) {
    return;
  }

  /* Repeatedly destroy the front node until the list is empty */
  for (node = ares__slist_node_first(list); node != NULL;
       node = ares__slist_node_first(list)) {
    ares__slist_node_destroy(node);
  }

  ares_free(list->head);
  ares_free(list);
}

@ -0,0 +1,188 @@
/* Copyright (C) 2023 by Brad House
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of M.I.T. not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SPDX-License-Identifier: MIT
*/
#ifndef __ARES__SLIST_H
#define __ARES__SLIST_H
/*! \addtogroup ares__slist SkipList Data Structure
*
* This data structure is known as a Skip List, which in essence is a sorted
* linked list with multiple levels of linkage to gain some algorithmic
 * advantages. The usage semantics are almost identical to what you'd expect
* with a linked list.
*
* Average time complexity:
* - Insert: O(log n)
* - Search: O(log n)
* - Delete: O(1) -- delete assumes you hold a node pointer
*
* It should be noted, however, that "effort" involved with an insert or
* remove operation is higher than a normal linked list. For very small
* lists this may be less efficient, but for any list with a moderate number
* of entries this will prove much more efficient.
*
* This data structure is often compared with a Binary Search Tree in
* functionality and usage.
*
* @{
*/
struct ares__slist;
/*! SkipList Object, opaque */
typedef struct ares__slist ares__slist_t;
struct ares__slist_node;
/*! SkipList Node Object, opaque */
typedef struct ares__slist_node ares__slist_node_t;
/*! SkipList Node Value destructor callback
*
* \param[in] data User-defined data to destroy
*/
typedef void (*ares__slist_destructor_t)(void *data);
/*! SkipList comparison function
*
* \param[in] data1 First user-defined data object
* \param[in] data2 Second user-defined data object
 * \return < 0 if data1 < data2, > 0 if data1 > data2, 0 if data1 == data2
*/
typedef int (*ares__slist_cmp_t)(const void *data1, const void *data2);
/*! Create SkipList
*
* \param[in] rand_state Initialized ares random state.
* \param[in] cmp SkipList comparison function
* \param[in] destruct SkipList Node Value Destructor. Optional, use NULL.
* \return Initialized SkipList Object or NULL on misuse or ENOMEM
*/
ares__slist_t *ares__slist_create(ares_rand_state *rand_state,
ares__slist_cmp_t cmp,
ares__slist_destructor_t destruct);
/*! Replace SkipList Node Value Destructor
*
* \param[in] list Initialized SkipList Object
* \param[in] destruct Replacement destructor. May be NULL.
*/
void ares__slist_replace_destructor(ares__slist_t *list,
ares__slist_destructor_t destruct);
/*! Insert Value into SkipList
*
* \param[in] list Initialized SkipList Object
* \param[in] val Node Value. Must not be NULL. Function takes ownership
* and will have destructor called.
* \return SkipList Node Object or NULL on misuse or ENOMEM
*/
ares__slist_node_t *ares__slist_insert(ares__slist_t *list, void *val);
/*! Fetch first node in SkipList
*
* \param[in] list Initialized SkipList Object
* \return SkipList Node Object or NULL if none
*/
ares__slist_node_t *ares__slist_node_first(ares__slist_t *list);
/*! Fetch last node in SkipList
*
* \param[in] list Initialized SkipList Object
* \return SkipList Node Object or NULL if none
*/
ares__slist_node_t *ares__slist_node_last(ares__slist_t *list);
/*! Fetch next node in SkipList
*
* \param[in] node SkipList Node Object
* \return SkipList Node Object or NULL if none
*/
ares__slist_node_t *ares__slist_node_next(ares__slist_node_t *node);
/*! Fetch previous node in SkipList
*
* \param[in] node SkipList Node Object
* \return SkipList Node Object or NULL if none
*/
ares__slist_node_t *ares__slist_node_prev(ares__slist_node_t *node);
/*! Fetch SkipList Node Object by Value
*
* \param[in] list Initialized SkipList Object
* \param[in] val Object to use for comparison
* \return SkipList Node Object or NULL if not found
*/
ares__slist_node_t *ares__slist_node_find(ares__slist_t *list, const void *val);
/*! Fetch Node Value
*
* \param[in] node SkipList Node Object
* \return user defined node value
*/
void *ares__slist_node_val(ares__slist_node_t *node);
/*! Fetch number of entries in SkipList Object
*
* \param[in] list Initialized SkipList Object
* \return number of entries
*/
size_t ares__slist_len(ares__slist_t *list);
/*! Fetch SkipList Object from SkipList Node
*
* \param[in] node SkipList Node Object
* \return SkipList Object
*/
ares__slist_t *ares__slist_node_parent(ares__slist_node_t *node);
/*! Fetch first Node Value in SkipList
*
* \param[in] list Initialized SkipList Object
* \return user defined node value or NULL if none
*/
void *ares__slist_first_val(ares__slist_t *list);
/*! Fetch last Node Value in SkipList
*
* \param[in] list Initialized SkipList Object
* \return user defined node value or NULL if none
*/
void *ares__slist_last_val(ares__slist_t *list);
/*! Take back ownership of Node Value in SkipList, remove from SkipList.
*
* \param[in] node SkipList Node Object
* \return user defined node value
*/
void *ares__slist_node_claim(ares__slist_node_t *node);
/*! Remove Node from SkipList, calling destructor for Node Value.
*
* \param[in] node SkipList Node Object
*/
void ares__slist_node_destroy(ares__slist_node_t *node);
/*! Destroy SkipList Object. If there are any nodes, they will be destroyed.
*
* \param[in] list Initialized SkipList Object
*/
void ares__slist_destroy(ares__slist_t *list);
/*! @} */
#endif /* __ARES__SLIST_H */

@ -27,37 +27,51 @@
*/
void ares_cancel(ares_channel channel)
{
struct query *query;
struct list_node list_head_copy;
struct list_node* list_head;
struct list_node* list_node;
int i;
if (!ares__is_list_empty(&(channel->all_queries)))
if (ares__llist_len(channel->all_queries) > 0)
{
ares__llist_node_t *node = NULL;
ares__llist_node_t *next = NULL;
/* Swap list heads, so that only those queries which were present on entry
* into this function are cancelled. New queries added by callbacks of
* queries being cancelled will not be cancelled themselves.
*/
list_head = &(channel->all_queries);
list_head_copy.prev = list_head->prev;
list_head_copy.next = list_head->next;
list_head_copy.prev->next = &list_head_copy;
list_head_copy.next->prev = &list_head_copy;
list_head->prev = list_head;
list_head->next = list_head;
for (list_node = list_head_copy.next; list_node != &list_head_copy; )
{
query = list_node->data;
list_node = list_node->next; /* since we're deleting the query */
ares__llist_t *list_copy = channel->all_queries;
channel->all_queries = ares__llist_create(NULL);
/* Out of memory, this function doesn't return a result code though so we
* can't report to caller */
if (channel->all_queries == NULL) {
channel->all_queries = list_copy;
return;
}
node = ares__llist_node_first(list_copy);
while (node != NULL) {
struct query *query;
/* Cache next since this node is being deleted */
next = ares__llist_node_next(node);
query = ares__llist_node_claim(node);
query->node_all_queries = NULL;
/* NOTE: its possible this may enqueue new queries */
query->callback(query->arg, ARES_ECANCELLED, 0, NULL, 0);
ares__free_query(query);
node = next;
}
ares__llist_destroy(list_copy);
}
if (!(channel->flags & ARES_FLAG_STAYOPEN) && ares__is_list_empty(&(channel->all_queries)))
if (!(channel->flags & ARES_FLAG_STAYOPEN) && ares__llist_len(channel->all_queries) == 0)
{
if (channel->servers)
{
int i;
for (i = 0; i < channel->nservers; i++)
ares__close_sockets(channel, &channel->servers[i]);
}

@ -46,35 +46,31 @@ void ares_destroy_options(struct ares_options *options)
void ares_destroy(ares_channel channel)
{
int i;
struct query *query;
struct list_node* list_head;
struct list_node* list_node;
int i;
ares__llist_node_t *node = NULL;
if (!channel)
return;
list_head = &(channel->all_queries);
for (list_node = list_head->next; list_node != list_head; )
{
query = list_node->data;
list_node = list_node->next; /* since we're deleting the query */
query->callback(query->arg, ARES_EDESTRUCTION, 0, NULL, 0);
ares__free_query(query);
}
node = ares__llist_node_first(channel->all_queries);
while (node != NULL) {
ares__llist_node_t *next = ares__llist_node_next(node);
struct query *query = ares__llist_node_claim(node);
query->node_all_queries = NULL;
query->callback(query->arg, ARES_EDESTRUCTION, 0, NULL, 0);
ares__free_query(query);
node = next;
}
#ifndef NDEBUG
/* Freeing the query should remove it from all the lists in which it sits,
* so all query lists should be empty now.
*/
assert(ares__is_list_empty(&(channel->all_queries)));
for (i = 0; i < ARES_QID_TABLE_SIZE; i++)
{
assert(ares__is_list_empty(&(channel->queries_by_qid[i])));
}
for (i = 0; i < ARES_TIMEOUT_TABLE_SIZE; i++)
{
assert(ares__is_list_empty(&(channel->queries_by_timeout[i])));
}
assert(ares__llist_len(channel->all_queries) == 0);
assert(ares__htable_stvp_num_keys(channel->queries_by_qid) == 0);
assert(ares__slist_len(channel->queries_by_timeout) == 0);
#endif
ares__destroy_servers_state(channel);
@ -85,6 +81,10 @@ void ares_destroy(ares_channel channel)
ares_free(channel->domains);
}
ares__llist_destroy(channel->all_queries);
ares__slist_destroy(channel->queries_by_timeout);
ares__htable_stvp_destroy(channel->queries_by_qid);
if(channel->sortlist)
ares_free(channel->sortlist);
@ -114,7 +114,8 @@ void ares__destroy_servers_state(ares_channel channel)
{
server = &channel->servers[i];
ares__close_sockets(channel, server);
assert(ares__is_list_empty(&server->queries_to_server));
assert(ares__llist_len(server->queries_to_server) == 0);
ares__llist_destroy(server->queries_to_server);
}
ares_free(channel->servers);
channel->servers = NULL;

@ -29,7 +29,7 @@ int ares_fds(ares_channel channel, fd_set *read_fds, fd_set *write_fds)
int i;
/* Are there any active queries? */
int active_queries = !ares__is_list_empty(&(channel->all_queries));
size_t active_queries = ares__llist_len(channel->all_queries);
nfds = 0;
for (i = 0; i < channel->nservers; i++)

@ -115,7 +115,7 @@ static int as_is_first(const struct host_query *hquery);
static int as_is_only(const struct host_query* hquery);
static int next_dns_lookup(struct host_query *hquery);
struct ares_addrinfo_cname *ares__malloc_addrinfo_cname()
static struct ares_addrinfo_cname *ares__malloc_addrinfo_cname(void)
{
struct ares_addrinfo_cname *cname = ares_malloc(sizeof(struct ares_addrinfo_cname));
if (!cname)
@ -162,7 +162,7 @@ void ares__addrinfo_cat_cnames(struct ares_addrinfo_cname **head,
last->next = tail;
}
struct ares_addrinfo *ares__malloc_addrinfo()
static struct ares_addrinfo *ares__malloc_addrinfo(void)
{
struct ares_addrinfo *ai = ares_malloc(sizeof(struct ares_addrinfo));
if (!ai)
@ -172,10 +172,10 @@ struct ares_addrinfo *ares__malloc_addrinfo()
return ai;
}
struct ares_addrinfo_node *ares__malloc_addrinfo_node()
static struct ares_addrinfo_node *ares__malloc_addrinfo_node(void)
{
struct ares_addrinfo_node *node =
ares_malloc(sizeof(struct ares_addrinfo_node));
ares_malloc(sizeof(*node));
if (!node)
return NULL;

@ -30,7 +30,7 @@ int ares_getsock(ares_channel channel,
unsigned int setbits = 0xffffffff;
/* Are there any active queries? */
int active_queries = !ares__is_list_empty(&(channel->all_queries));
size_t active_queries = ares__llist_len(channel->all_queries);
for (i = 0; i < channel->nservers; i++)
{

@ -105,24 +105,41 @@ int ares_init(ares_channel *channelptr)
return ares_init_options(channelptr, NULL, 0);
}
static int ares_query_timeout_cmp_cb(const void *arg1, const void *arg2)
{
const struct query *q1 = arg1;
const struct query *q2 = arg2;
if (q1->timeout.tv_sec > q2->timeout.tv_sec)
return 1;
if (q1->timeout.tv_sec < q2->timeout.tv_sec)
return -1;
if (q1->timeout.tv_usec > q2->timeout.tv_usec)
return 1;
if (q1->timeout.tv_usec < q2->timeout.tv_usec)
return -1;
return 0;
}
int ares_init_options(ares_channel *channelptr, struct ares_options *options,
int optmask)
{
ares_channel channel;
int i;
int status = ARES_SUCCESS;
struct timeval now;
if (ares_library_initialized() != ARES_SUCCESS)
return ARES_ENOTINITIALIZED; /* LCOV_EXCL_LINE: n/a on non-WinSock */
channel = ares_malloc(sizeof(struct ares_channeldata));
channel = ares_malloc(sizeof(*channel));
if (!channel) {
*channelptr = NULL;
return ARES_ENOMEM;
}
now = ares__tvnow();
memset(channel, 0, sizeof(*channel));
/* Set everything to distinguished values so we know they haven't
* been set yet.
@ -140,40 +157,37 @@ int ares_init_options(ares_channel *channelptr, struct ares_options *options,
channel->nservers = -1;
channel->ndomains = -1;
channel->nsort = -1;
channel->tcp_connection_generation = 0;
channel->lookups = NULL;
channel->domains = NULL;
channel->sortlist = NULL;
channel->servers = NULL;
channel->sock_state_cb = NULL;
channel->sock_state_cb_data = NULL;
channel->sock_create_cb = NULL;
channel->sock_create_cb_data = NULL;
channel->sock_config_cb = NULL;
channel->sock_config_cb_data = NULL;
channel->sock_funcs = NULL;
channel->sock_func_cb_data = NULL;
channel->resolvconf_path = NULL;
channel->hosts_path = NULL;
channel->rand_state = NULL;
channel->last_server = 0;
channel->last_timeout_processed = (time_t)now.tv_sec;
memset(&channel->local_dev_name, 0, sizeof(channel->local_dev_name));
channel->local_ip4 = 0;
memset(&channel->local_ip6, 0, sizeof(channel->local_ip6));
/* Generate random key */
channel->rand_state = ares__init_rand_state();
if (channel->rand_state == NULL) {
status = ARES_ENOMEM;
DEBUGF(fprintf(stderr, "Error: init_id_key failed: %s\n",
ares_strerror(status)));
goto done;
}
/* Initialize our lists of queries */
ares__init_list_head(&(channel->all_queries));
for (i = 0; i < ARES_QID_TABLE_SIZE; i++)
{
ares__init_list_head(&(channel->queries_by_qid[i]));
}
for (i = 0; i < ARES_TIMEOUT_TABLE_SIZE; i++)
{
ares__init_list_head(&(channel->queries_by_timeout[i]));
}
channel->all_queries = ares__llist_create(NULL);
if (channel->all_queries == NULL) {
status = ARES_ENOMEM;
goto done;
}
channel->queries_by_qid = ares__htable_stvp_create(NULL);
if (channel->queries_by_qid == NULL) {
status = ARES_ENOMEM;
goto done;
}
channel->queries_by_timeout = ares__slist_create(channel->rand_state,
ares_query_timeout_cmp_cb,
NULL);
if (channel->queries_by_timeout == NULL) {
status = ARES_ENOMEM;
goto done;
}
/* Initialize configuration by each of the four sources, from highest
* precedence to lowest.
@ -206,27 +220,25 @@ int ares_init_options(ares_channel *channelptr, struct ares_options *options,
DEBUGF(fprintf(stderr, "Error: init_by_defaults failed: %s\n",
ares_strerror(status)));
/* Generate random key */
if (status == ARES_SUCCESS) {
channel->rand_state = ares__init_rand_state();
if (channel->rand_state == NULL) {
status = ARES_ENOMEM;
}
/* Trim to one server if ARES_FLAG_PRIMARY is set. */
if ((channel->flags & ARES_FLAG_PRIMARY) && channel->nservers > 1)
channel->nservers = 1;
if (status == ARES_SUCCESS)
channel->next_id = ares__generate_new_id(channel->rand_state);
else
DEBUGF(fprintf(stderr, "Error: init_id_key failed: %s\n",
ares_strerror(status)));
status = ares__init_servers_state(channel);
if (status != ARES_SUCCESS) {
goto done;
}
done:
if (status != ARES_SUCCESS)
{
/* Something failed; clean up memory we may have allocated. */
if (channel->servers)
if (channel->servers) {
for (i = 0; i < channel->nservers; i++) {
ares__llist_destroy(channel->servers[i].queries_to_server);
}
ares_free(channel->servers);
}
if (channel->ndomains != -1)
ares__strsplit_free(channel->domains, channel->ndomains);
if (channel->sortlist)
@ -239,16 +251,14 @@ done:
ares_free(channel->hosts_path);
if (channel->rand_state)
ares__destroy_rand_state(channel->rand_state);
ares__htable_stvp_destroy(channel->queries_by_qid);
ares__llist_destroy(channel->all_queries);
ares__slist_destroy(channel->queries_by_timeout);
ares_free(channel);
return status;
}
/* Trim to one server if ARES_FLAG_PRIMARY is set. */
if ((channel->flags & ARES_FLAG_PRIMARY) && channel->nservers > 1)
channel->nservers = 1;
ares__init_servers_state(channel);
*channelptr = channel;
return ARES_SUCCESS;
}
@ -379,6 +389,7 @@ int ares_save_options(ares_channel channel, struct ares_options *options,
options->servers = ares_malloc(ipv4_nservers * sizeof(struct in_addr));
if (!options->servers)
return ARES_ENOMEM;
for (i = j = 0; i < channel->nservers; i++)
{
if ((channel->servers[i].addr.family == AF_INET) &&
@ -489,9 +500,10 @@ static int init_by_options(ares_channel channel,
if (options->nservers > 0)
{
channel->servers =
ares_malloc(options->nservers * sizeof(struct server_state));
ares_malloc(options->nservers * sizeof(*channel->servers));
if (!channel->servers)
return ARES_ENOMEM;
memset(channel->servers, 0, options->nservers * sizeof(*channel->servers));
for (i = 0; i < options->nservers; i++)
{
channel->servers[i].addr.family = AF_INET;
@ -1170,11 +1182,11 @@ static int init_by_resolv_conf(ares_channel channel)
}
nservers = count4 + count6;
servers = ares_malloc(nservers * sizeof(struct server_state));
servers = ares_malloc(nservers * sizeof(*servers));
if (!servers)
return ARES_ENOMEM;
memset(servers, 0, nservers * sizeof(struct server_state));
memset(servers, 0, nservers * sizeof(*servers));
pserver = servers;
for (int i = 0; i < count4; ++i, ++pserver) {
@ -1236,10 +1248,10 @@ static int init_by_resolv_conf(ares_channel channel)
return ARES_SUCCESS; /* use localhost DNS server */
nservers = i;
servers = ares_malloc(sizeof(struct server_state));
servers = ares_malloc(sizeof(*servers));
if (!servers)
return ARES_ENOMEM;
memset(servers, 0, sizeof(struct server_state));
memset(servers, 0, sizeof(*servers));
for (i = 0; def_nameservers[i]; i++)
{
@ -1312,8 +1324,9 @@ static int init_by_resolv_conf(ares_channel channel)
# endif /* HAVE___SYSTEM_PROPERTY_GET */
#elif defined(CARES_USE_LIBRESOLV)
struct __res_state res;
int result;
memset(&res, 0, sizeof(res));
int result = res_ninit(&res);
result = res_ninit(&res);
if (result == 0 && (res.options & RES_INIT)) {
status = ARES_EOF;
@ -1599,11 +1612,12 @@ static int init_by_defaults(ares_channel channel)
if (channel->nservers == -1) {
/* If nobody specified servers, try a local named. */
channel->servers = ares_malloc(sizeof(struct server_state));
channel->servers = ares_malloc(sizeof(*channel->servers));
if (!channel->servers) {
rc = ARES_ENOMEM;
goto error;
}
memset(channel->servers, 0, sizeof(*channel->servers));
channel->servers[0].addr.family = AF_INET;
channel->servers[0].addr.addrV4.s_addr = htonl(INADDR_LOOPBACK);
channel->servers[0].addr.udp_port = 0;
@ -1697,6 +1711,7 @@ static int init_by_defaults(ares_channel channel)
ares_free(channel->servers);
channel->servers = NULL;
}
channel->nservers = 0;
if(channel->domains && channel->domains[0])
ares_free(channel->domains[0]);
@ -1996,10 +2011,12 @@ static int config_nameserver(struct server_state **servers, int *nservers,
/* Resize servers state array. */
newserv = ares_realloc(*servers, (*nservers + 1) *
sizeof(struct server_state));
sizeof(*newserv));
if (!newserv)
return ARES_ENOMEM;
memset(((unsigned char *)newserv) + ((*nservers) * sizeof(*newserv)), 0, sizeof(*newserv));
/* Store address data. */
newserv[*nservers].addr.family = host.family;
newserv[*nservers].addr.udp_port = htons(port);
@ -2357,7 +2374,7 @@ int ares_set_sortlist(ares_channel channel, const char *sortstr)
return status;
}
void ares__init_servers_state(ares_channel channel)
int ares__init_servers_state(ares_channel channel)
{
struct server_state *server;
int i;
@ -2374,8 +2391,11 @@ void ares__init_servers_state(ares_channel channel)
server->tcp_length = 0;
server->qhead = NULL;
server->qtail = NULL;
ares__init_list_head(&server->queries_to_server);
server->channel = channel;
server->is_broken = 0;
server->queries_to_server = ares__llist_create(NULL);
if (server->queries_to_server == NULL)
return ARES_ENOMEM;
}
return ARES_SUCCESS;
}

@ -1,65 +0,0 @@
/* Copyright 1998 by the Massachusetts Institute of Technology.
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of M.I.T. not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SPDX-License-Identifier: MIT
*/
#include "ares_setup.h"
#include "ares.h"
#include "ares_private.h"
/* Routines for managing doubly-linked circular linked lists with a
* dummy head.
*/
/* Turn 'head' into an empty circular list: a dummy head node whose prev and
 * next links both point back at itself and which carries no payload. */
void ares__init_list_head(struct list_node* head) {
  head->data = NULL;
  head->next = head;
  head->prev = head;
}
/* Prepare a detached node carrying payload 'd'; the node belongs to no list
 * until explicitly inserted (prev/next are NULL while detached). */
void ares__init_list_node(struct list_node* node, void* d) {
  node->data = d;
  node->next = NULL;
  node->prev = NULL;
}
/* A dummy-headed circular list is empty exactly when the head links to
 * itself in both directions. Returns nonzero iff empty. */
int ares__is_list_empty(struct list_node* head) {
  if (head->next != head)
    return 0;
  return head->prev == head;
}
/* Splice new_node into the list immediately before old_node. */
void ares__insert_in_list(struct list_node* new_node,
                          struct list_node* old_node) {
  struct list_node* before = old_node->prev;  /* node that will precede new_node */

  before->next   = new_node;
  new_node->prev = before;
  new_node->next = old_node;
  old_node->prev = new_node;
}
/* Detach 'node' from whatever list currently holds it. Safe to call on a
 * node that was never inserted (next == NULL): a no-op in that case. */
void ares__remove_from_list(struct list_node* node) {
  if (node->next == NULL)
    return;

  node->next->prev = node->prev;
  node->prev->next = node->next;

  /* Mark detached so a second removal is harmless */
  node->next = NULL;
  node->prev = NULL;
}

@ -1,41 +0,0 @@
#ifndef __ARES_LLIST_H
#define __ARES_LLIST_H
/* Copyright 1998 by the Massachusetts Institute of Technology.
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of M.I.T. not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SPDX-License-Identifier: MIT
*/
/* Node definition for circular, doubly-linked list */
struct list_node {
  struct list_node *prev;  /* previous node; a dummy head points at itself when
                            * the list is empty, NULL when the node is detached */
  struct list_node *next;  /* next node; same self/NULL conventions as prev */
  void* data;              /* caller-owned payload; NULL on dummy head nodes */
};
void ares__init_list_head(struct list_node* head);
void ares__init_list_node(struct list_node* node, void* d);
int ares__is_list_empty(struct list_node* head);
void ares__insert_in_list(struct list_node* new_node,
struct list_node* old_node);
void ares__remove_from_list(struct list_node* node);
#endif /* __ARES_LLIST_H */

@ -155,7 +155,7 @@ int ares_set_servers(ares_channel channel,
if (!channel)
return ARES_ENODATA;
if (!ares__is_list_empty(&channel->all_queries))
if (ares__llist_len(channel->all_queries) != 0)
return ARES_ENOTIMP;
ares__destroy_servers_state(channel);
@ -168,11 +168,12 @@ int ares_set_servers(ares_channel channel,
if (num_srvrs > 0)
{
/* Allocate storage for servers state */
channel->servers = ares_malloc(num_srvrs * sizeof(struct server_state));
channel->servers = ares_malloc(num_srvrs * sizeof(*channel->servers));
if (!channel->servers)
{
return ARES_ENOMEM;
}
memset(channel->servers, 0, num_srvrs * sizeof(*channel->servers));
channel->nservers = num_srvrs;
/* Fill servers state address data */
for (i = 0, srvr = servers; srvr; i++, srvr = srvr->next)
@ -207,7 +208,7 @@ int ares_set_servers_ports(ares_channel channel,
if (!channel)
return ARES_ENODATA;
if (!ares__is_list_empty(&channel->all_queries))
if (ares__llist_len(channel->all_queries) != 0)
return ARES_ENOTIMP;
ares__destroy_servers_state(channel);
@ -220,11 +221,12 @@ int ares_set_servers_ports(ares_channel channel,
if (num_srvrs > 0)
{
/* Allocate storage for servers state */
channel->servers = ares_malloc(num_srvrs * sizeof(struct server_state));
channel->servers = ares_malloc(num_srvrs * sizeof(*channel->servers));
if (!channel->servers)
{
return ARES_ENOMEM;
}
memset(channel->servers, 0, num_srvrs * sizeof(*channel->servers));
channel->nservers = num_srvrs;
/* Fill servers state address data */
for (i = 0, srvr = servers; srvr; i++, srvr = srvr->next)

@ -104,7 +104,13 @@ W32_FUNC const char *_w32_GetHostsFile (void);
#endif
#include "ares_ipv6.h"
#include "ares_llist.h"
struct ares_rand_state;
typedef struct ares_rand_state ares_rand_state;
#include "ares__llist.h"
#include "ares__slist.h"
#include "ares__htable_stvp.h"
#ifndef HAVE_GETENV
# include "ares_getenv.h"
@ -188,8 +194,8 @@ struct server_state {
* re-send. */
int tcp_connection_generation;
/* Circular, doubly-linked list of outstanding queries to this server */
struct list_node queries_to_server;
/* list of outstanding queries to this server */
ares__llist_t *queries_to_server;
/* Link back to owning channel */
ares_channel channel;
@ -203,19 +209,17 @@ struct server_state {
/* State to represent a DNS query */
struct query {
/* Query ID from qbuf, for faster lookup, and current timeout */
unsigned short qid;
unsigned short qid; /* host byte order */
struct timeval timeout;
ares_channel channel;
/*
* Links for the doubly-linked lists in which we insert a query.
* These circular, doubly-linked lists that are hash-bucketed based
* the attributes we care about, help making most important
* operations O(1).
* Node object for each list entry the query belongs to in order to
* make removal operations O(1).
*/
struct list_node queries_by_qid; /* hopefully in same cache line as qid */
struct list_node queries_by_timeout;
struct list_node queries_to_server;
struct list_node all_queries;
ares__slist_node_t *node_queries_by_timeout;
ares__llist_node_t *node_queries_to_server;
ares__llist_node_t *node_all_queries;
/* Query buf with length at beginning, for TCP transmission */
unsigned char *tcpbuf;
@ -262,9 +266,6 @@ struct apattern {
unsigned short type;
};
struct ares_rand_state;
typedef struct ares_rand_state ares_rand_state;
struct ares_channeldata {
/* Configuration data */
int flags;
@ -296,30 +297,22 @@ struct ares_channeldata {
struct server_state *servers;
int nservers;
/* ID to use for next query */
unsigned short next_id;
/* random state to use when generating new ids */
ares_rand_state *rand_state;
/* Generation number to use for the next TCP socket open/close */
int tcp_connection_generation;
/* The time at which we last called process_timeouts(). Uses integer seconds
just to draw the line somewhere. */
time_t last_timeout_processed;
/* Last server we sent a query to. */
int last_server;
/* Circular, doubly-linked list of queries, bucketed various ways.... */
/* All active queries in a single list: */
struct list_node all_queries;
/* All active queries in a single list */
ares__llist_t *all_queries;
/* Queries bucketed by qid, for quickly dispatching DNS responses: */
#define ARES_QID_TABLE_SIZE 2048
struct list_node queries_by_qid[ARES_QID_TABLE_SIZE];
ares__htable_stvp_t *queries_by_qid;
/* Queries bucketed by timeout, for quickly handling timeouts: */
#define ARES_TIMEOUT_TABLE_SIZE 1024
struct list_node queries_by_timeout[ARES_TIMEOUT_TABLE_SIZE];
ares__slist_t *queries_by_timeout;
ares_sock_state_cb sock_state_cb;
void *sock_state_cb_data;
@ -361,6 +354,8 @@ void ares__free_query(struct query *query);
ares_rand_state *ares__init_rand_state(void);
void ares__destroy_rand_state(ares_rand_state *state);
void ares__rand_bytes(ares_rand_state *state, unsigned char *buf, size_t len);
unsigned short ares__generate_new_id(ares_rand_state *state);
struct timeval ares__tvnow(void);
int ares__expand_name_validated(const unsigned char *encoded,
@ -370,7 +365,7 @@ int ares__expand_name_validated(const unsigned char *encoded,
int ares__expand_name_for_response(const unsigned char *encoded,
const unsigned char *abuf, int alen,
char **s, long *enclen, int is_hostname);
void ares__init_servers_state(ares_channel channel);
int ares__init_servers_state(ares_channel channel);
void ares__destroy_servers_state(ares_channel channel);
int ares__parse_qtype_reply(const unsigned char* abuf, int alen, int* qtype);
int ares__single_domain(ares_channel channel, const char *name, char **s);
@ -380,16 +375,12 @@ int ares__readaddrinfo(FILE *fp, const char *name, unsigned short port,
const struct ares_addrinfo_hints *hints,
struct ares_addrinfo *ai);
struct ares_addrinfo *ares__malloc_addrinfo(void);
struct ares_addrinfo_node *ares__malloc_addrinfo_node(void);
void ares__freeaddrinfo_nodes(struct ares_addrinfo_node *ai_node);
struct ares_addrinfo_node *ares__append_addrinfo_node(struct ares_addrinfo_node **ai_node);
void ares__addrinfo_cat_nodes(struct ares_addrinfo_node **head,
struct ares_addrinfo_node *tail);
struct ares_addrinfo_cname *ares__malloc_addrinfo_cname(void);
void ares__freeaddrinfo_cnames(struct ares_addrinfo_cname *ai_cname);
struct ares_addrinfo_cname *ares__append_addrinfo_cname(struct ares_addrinfo_cname **ai_cname);

@ -556,32 +556,24 @@ static void read_udp_packets(ares_channel channel, fd_set *read_fds,
/* If any queries have timed out, note the timeout and move them on. */
static void process_timeouts(ares_channel channel, struct timeval *now)
{
time_t t; /* the time of the timeouts we're processing */
struct query *query;
struct list_node* list_head;
struct list_node* list_node;
ares__slist_node_t *node = ares__slist_node_first(channel->queries_by_timeout);
/* Process all the timeouts that have fired since the last time we processed
* timeouts. If things are going well, then we'll have hundreds/thousands of
* queries that fall into future buckets, and only a handful of requests
* that fall into the "now" bucket, so this should be quite quick.
*/
for (t = channel->last_timeout_processed; t <= now->tv_sec; t++)
{
list_head = &(channel->queries_by_timeout[t % ARES_TIMEOUT_TABLE_SIZE]);
for (list_node = list_head->next; list_node != list_head; )
{
query = list_node->data;
list_node = list_node->next; /* in case the query gets deleted */
if (query->timeout.tv_sec && ares__timedout(now, &query->timeout))
{
query->error_status = ARES_ETIMEOUT;
++query->timeouts;
next_server(channel, query, now);
}
}
}
channel->last_timeout_processed = now->tv_sec;
while (node != NULL) {
struct query *query = ares__slist_node_val(node);
/* Node might be removed, cache next */
ares__slist_node_t *next = ares__slist_node_next(node);
/* Since this is sorted, as soon as we hit a query that isn't timed out, break */
if (!ares__timedout(now, &query->timeout)) {
break;
}
query->error_status = ARES_ETIMEOUT;
query->timeouts++;
next_server(channel, query, now);
node = next;
}
}
/* Handle an answer from a server. */
@ -592,8 +584,6 @@ static void process_answer(ares_channel channel, unsigned char *abuf,
int tc, rcode, packetsz;
unsigned short id;
struct query *query;
struct list_node* list_head;
struct list_node* list_node;
/* If there's no room in the answer for a header, we can't do much
* with it. */
@ -601,31 +591,22 @@ static void process_answer(ares_channel channel, unsigned char *abuf,
return;
/* Grab the query ID, truncate bit, and response code from the packet. */
id = DNS_HEADER_QID(abuf);
id = DNS_HEADER_QID(abuf); /* Converts to host byte order */
tc = DNS_HEADER_TC(abuf);
rcode = DNS_HEADER_RCODE(abuf);
/* Find the query corresponding to this packet. The queries are
* hashed/bucketed by query id, so this lookup should be quick. Note that
* both the query id and the questions must be the same; when the query id
* wraps around we can have multiple outstanding queries with the same query
* id, so we need to check both the id and question.
* hashed/bucketed by query id, so this lookup should be quick.
*/
query = NULL;
list_head = &(channel->queries_by_qid[id % ARES_QID_TABLE_SIZE]);
for (list_node = list_head->next; list_node != list_head;
list_node = list_node->next)
{
struct query *q = list_node->data;
if ((q->qid == id) && same_questions(q->qbuf, q->qlen, abuf, alen))
{
query = q;
break;
}
}
query = ares__htable_stvp_get_direct(channel->queries_by_qid, id);
if (!query)
return;
/* Both the query id and the questions must be the same. We will drop any
* replies that aren't for the same query as this is considered invalid. */
if (!same_questions(query->qbuf, query->qlen, abuf, alen))
return;
packetsz = PACKETSZ;
/* If we use EDNS and server answers with FORMERR without an OPT RR, the protocol
* extension is not understood by the responder. We must retry the query
@ -676,6 +657,17 @@ static void process_answer(ares_channel channel, unsigned char *abuf,
{
if (rcode == SERVFAIL || rcode == NOTIMP || rcode == REFUSED)
{
switch (rcode) {
case SERVFAIL:
query->error_status = ARES_ESERVFAIL;
break;
case NOTIMP:
query->error_status = ARES_ENOTIMP;
break;
case REFUSED:
query->error_status = ARES_EREFUSED;
break;
}
skip_server(channel, query, whichserver);
if (query->server == whichserver)
next_server(channel, query, now);
@ -701,38 +693,13 @@ static void process_broken_connections(ares_channel channel,
}
}
/* Swap the contents of two lists */
static void swap_lists(struct list_node* head_a,
struct list_node* head_b)
{
int is_a_empty = ares__is_list_empty(head_a);
int is_b_empty = ares__is_list_empty(head_b);
struct list_node old_a = *head_a;
struct list_node old_b = *head_b;
if (is_a_empty) {
ares__init_list_head(head_b);
} else {
*head_b = old_a;
old_a.next->prev = head_b;
old_a.prev->next = head_b;
}
if (is_b_empty) {
ares__init_list_head(head_a);
} else {
*head_a = old_b;
old_b.next->prev = head_a;
old_b.prev->next = head_a;
}
}
static void handle_error(ares_channel channel, int whichserver,
struct timeval *now)
{
struct server_state *server;
struct query *query;
struct list_node list_head;
struct list_node* list_node;
ares__llist_t *list_copy;
ares__llist_node_t *node;
server = &channel->servers[whichserver];
@ -745,20 +712,32 @@ static void handle_error(ares_channel channel, int whichserver,
* be re-sent to this server, which will re-insert these queries in that
* same server->queries_to_server list.
*/
ares__init_list_head(&list_head);
swap_lists(&list_head, &(server->queries_to_server));
for (list_node = list_head.next; list_node != &list_head; )
{
query = list_node->data;
list_node = list_node->next; /* in case the query gets deleted */
assert(query->server == whichserver);
skip_server(channel, query, whichserver);
next_server(channel, query, now);
}
list_copy = server->queries_to_server;
server->queries_to_server = ares__llist_create(NULL);
if (server->queries_to_server == NULL) {
/* No way to recover from this type of out of memory, just restore the list.
* Timeouts should handle this condition. */
server->queries_to_server = list_copy;
return;
}
node = ares__llist_node_first(list_copy);
while (node != NULL) {
ares__llist_node_t *next = ares__llist_node_next(node);
struct query *query = ares__llist_node_val(node);
assert(query->server == whichserver);
skip_server(channel, query, whichserver);
next_server(channel, query, now);
node = next;
}
/* Each query should have removed itself from our temporary list as
* it re-sent itself or finished up...
*/
assert(ares__is_list_empty(&list_head));
assert(ares__llist_len(list_copy) == 0);
ares__llist_destroy(list_copy);
}
static void skip_server(ares_channel channel, struct query *query,
@ -916,23 +895,25 @@ void ares__send_query(ares_channel channel, struct query *query,
}
}
query->timeout = *now;
timeadd(&query->timeout, timeplus);
/* Keep track of queries bucketed by timeout, so we can process
* timeout events quickly.
*/
ares__remove_from_list(&(query->queries_by_timeout));
ares__insert_in_list(
&(query->queries_by_timeout),
&(channel->queries_by_timeout[query->timeout.tv_sec %
ARES_TIMEOUT_TABLE_SIZE]));
ares__slist_node_destroy(query->node_queries_by_timeout);
query->timeout = *now;
timeadd(&query->timeout, timeplus);
query->node_queries_by_timeout = ares__slist_insert(channel->queries_by_timeout, query);
if (!query->node_queries_by_timeout) {
end_query(channel, query, ARES_ENOMEM, NULL, 0);
return;
}
/* Keep track of queries bucketed by server, so we can process server
* errors quickly.
*/
ares__remove_from_list(&(query->queries_to_server));
ares__insert_in_list(&(query->queries_to_server),
&(server->queries_to_server));
ares__llist_node_destroy(query->node_queries_to_server);
query->node_queries_to_server =
ares__llist_insert_last(server->queries_to_server, query);
}
/*
@ -1531,7 +1512,7 @@ static void end_query (ares_channel channel, struct query *query, int status,
* sockets unless STAYOPEN is set.
*/
if (!(channel->flags & ARES_FLAG_STAYOPEN) &&
ares__is_list_empty(&(channel->all_queries)))
ares__llist_len(channel->all_queries) == 0)
{
for (i = 0; i < channel->nservers; i++)
ares__close_sockets(channel, &channel->servers[i]);
@ -1541,10 +1522,10 @@ static void end_query (ares_channel channel, struct query *query, int status,
void ares__free_query(struct query *query)
{
/* Remove the query from all the lists in which it is linked */
ares__remove_from_list(&(query->queries_by_qid));
ares__remove_from_list(&(query->queries_by_timeout));
ares__remove_from_list(&(query->queries_to_server));
ares__remove_from_list(&(query->all_queries));
ares__htable_stvp_remove(query->channel->queries_by_qid, query->qid);
ares__slist_node_destroy(query->node_queries_by_timeout);
ares__llist_node_destroy(query->node_queries_to_server);
ares__llist_node_destroy(query->node_all_queries);
/* Zero out some important stuff, to help catch bugs */
query->callback = NULL;
query->arg = NULL;

@ -35,24 +35,6 @@ struct qquery {
static void qcallback(void *arg, int status, int timeouts, unsigned char *abuf, int alen);
/* Look up an outstanding query by its DNS id. Scans only the hash bucket the
 * id maps to, so the common case is a short walk. Returns the matching query,
 * or NULL if no outstanding query uses this id. */
static struct query* find_query_by_id(ares_channel channel, unsigned short id)
{
  unsigned short qid;
  struct list_node* list_head;
  struct list_node* list_node;

  /* Serialize 'id' through the DNS-header macro so 'qid' has the same byte
   * representation as the qid stored on queries.
   * NOTE(review): presumably this yields network byte order -- confirm
   * against the definition of DNS_HEADER_SET_QID. */
  DNS_HEADER_SET_QID(((unsigned char*)&qid), id);

  /* Find the query corresponding to this packet. */
  list_head = &(channel->queries_by_qid[qid % ARES_QID_TABLE_SIZE]);
  for (list_node = list_head->next; list_node != list_head;
       list_node = list_node->next)
    {
      struct query *q = list_node->data;
      if (q->qid == qid)
        return q;
    }
  return NULL;
}
/* a unique query id is generated using an rc4 key. Since the id may already
be used by a running query (as infrequent as it may be), a lookup is
@ -65,7 +47,7 @@ static unsigned short generate_unique_id(ares_channel channel)
do {
id = ares__generate_new_id(channel->rand_state);
} while (find_query_by_id(channel, id));
} while (ares__htable_stvp_get(channel->queries_by_qid, id, NULL));
return (unsigned short)id;
}
@ -76,10 +58,11 @@ void ares_query(ares_channel channel, const char *name, int dnsclass,
struct qquery *qquery;
unsigned char *qbuf;
int qlen, rd, status;
unsigned short id = generate_unique_id(channel);
/* Compose the query. */
rd = !(channel->flags & ARES_FLAG_NORECURSE);
status = ares_create_query(name, dnsclass, type, channel->next_id, rd, &qbuf,
status = ares_create_query(name, dnsclass, type, id, rd, &qbuf,
&qlen, (channel->flags & ARES_FLAG_EDNS) ? channel->ednspsz : 0);
if (status != ARES_SUCCESS)
{
@ -88,8 +71,6 @@ void ares_query(ares_channel channel, const char *name, int dnsclass,
return;
}
channel->next_id = generate_unique_id(channel);
/* Allocate and fill in the query structure. */
qquery = ares_malloc(sizeof(struct qquery));
if (!qquery)

@ -22,12 +22,24 @@
#include "ares_nowarn.h"
#include <stdlib.h>
#if !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_GETRANDOM) && !defined(_WIN32)
# define ARES_NEEDS_RC4 1
#endif
typedef enum {
ARES_RAND_OS = 1, /* OS-provided such as RtlGenRandom or arc4random */
ARES_RAND_FILE = 2, /* OS file-backed random number generator */
#ifdef ARES_NEEDS_RC4
ARES_RAND_RC4 = 3 /* Internal RC4 based PRNG */
#endif
} ares_rand_backend;
/* Don't build RC4 code if it goes unused as it will generate dead code
* warnings */
#ifdef ARES_NEEDS_RC4
# define ARES_RC4_KEY_LEN 32 /* 256 bits */
typedef struct ares_rand_rc4
{
unsigned char S[256];
@ -35,30 +47,6 @@ typedef struct ares_rand_rc4
size_t j;
} ares_rand_rc4;
struct ares_rand_state
{
ares_rand_backend type;
union {
FILE *rand_file;
ares_rand_rc4 rc4;
} state;
};
/* Define RtlGenRandom = SystemFunction036. This is in advapi32.dll. There is
* no need to dynamically load this, other software used widely does not.
* http://blogs.msdn.com/michael_howard/archive/2005/01/14/353379.aspx
* https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom
*/
#ifdef _WIN32
BOOLEAN WINAPI SystemFunction036(PVOID RandomBuffer, ULONG RandomBufferLength);
# ifndef RtlGenRandom
# define RtlGenRandom(a,b) SystemFunction036(a,b)
# endif
#endif
#define ARES_RC4_KEY_LEN 32 /* 256 bits */
#ifdef _MSC_VER
typedef unsigned __int64 cares_u64;
@ -66,6 +54,7 @@ typedef unsigned __int64 cares_u64;
typedef unsigned long long cares_u64;
#endif
static unsigned int ares_u32_from_ptr(void *addr)
{
if (sizeof(void *) == 8) {
@ -132,6 +121,7 @@ static void ares_rc4_init(ares_rand_rc4 *rc4_state)
rc4_state->j = 0;
}
/* Just outputs the key schedule, no need to XOR with any data since we have none */
static void ares_rc4_prng(ares_rand_rc4 *rc4_state, unsigned char *buf, size_t len)
{
@ -152,6 +142,41 @@ static void ares_rc4_prng(ares_rand_rc4 *rc4_state, unsigned char *buf, size_t l
rc4_state->j = j;
}
#endif /* ARES_NEEDS_RC4 */
/* Opaque random-number-generator state (public alias: ares_rand_state). */
struct ares_rand_state
{
  ares_rand_backend type;   /* which backend is active: OS, file, or RC4 */
  union {
    FILE *rand_file;        /* ARES_RAND_FILE: open handle to the OS random device */
#ifdef ARES_NEEDS_RC4
    ares_rand_rc4 rc4;      /* ARES_RAND_RC4: internal RC4 PRNG state */
#endif
  } state;
  /* Since except for RC4, random data will likely result in a syscall, lets
   * pre-pull 256 bytes at a time. Every query will pull 2 bytes off this so
   * that means we should only need a syscall every 128 queries. 256bytes
   * appears to be a sweet spot that may be able to be served without
   * interruption */
  unsigned char cache[256]; /* buffered random bytes; unconsumed data sits at
                             * the tail (see ares__rand_bytes) */
  size_t cache_remaining;   /* number of unconsumed bytes at the end of cache */
};
/* Define RtlGenRandom = SystemFunction036. This is in advapi32.dll. There is
* no need to dynamically load this, other software used widely does not.
* http://blogs.msdn.com/michael_howard/archive/2005/01/14/353379.aspx
* https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom
*/
#ifdef _WIN32
BOOLEAN WINAPI SystemFunction036(PVOID RandomBuffer, ULONG RandomBufferLength);
# ifndef RtlGenRandom
# define RtlGenRandom(a,b) SystemFunction036(a,b)
# endif
#endif
static int ares__init_rand_engine(ares_rand_state *state)
{
@ -170,15 +195,17 @@ static int ares__init_rand_engine(ares_rand_state *state)
/* Fall-Thru on failure to RC4 */
#endif
#ifdef ARES_NEEDS_RC4
state->type = ARES_RAND_RC4;
ares_rc4_init(&state->state.rc4);
/* Currently cannot fail */
return 1;
#endif
}
ares_rand_state *ares__init_rand_state()
ares_rand_state *ares__init_rand_state(void)
{
ares_rand_state *state = NULL;
@ -206,8 +233,10 @@ static void ares__clear_rand_state(ares_rand_state *state)
case ARES_RAND_FILE:
fclose(state->state.rand_file);
break;
#ifdef ARES_NEEDS_RC4
case ARES_RAND_RC4:
break;
#endif
}
}
@ -229,7 +258,8 @@ void ares__destroy_rand_state(ares_rand_state *state)
}
static void ares__rand_bytes(ares_rand_state *state, unsigned char *buf, size_t len)
static void ares__rand_bytes_fetch(ares_rand_state *state, unsigned char *buf,
size_t len)
{
while (1) {
@ -275,9 +305,11 @@ static void ares__rand_bytes(ares_rand_state *state, unsigned char *buf, size_t
}
break;
#ifdef ARES_NEEDS_RC4
case ARES_RAND_RC4:
ares_rc4_prng(&state->state.rc4, buf, len);
return;
#endif
}
/* If we didn't return before we got here, that means we had a critical rand
@ -286,6 +318,30 @@ static void ares__rand_bytes(ares_rand_state *state, unsigned char *buf, size_t
}
}
/* Fill 'buf' with 'len' random bytes. Small requests are served from the
 * pre-fetched cache (refilled on demand, so most calls avoid a syscall);
 * requests of at least the cache size bypass the cache entirely. */
void ares__rand_bytes(ares_rand_state *state, unsigned char *buf, size_t len)
{
  size_t cache_size = sizeof(state->cache);

  /* Top off the cache when it can't satisfy a cacheable-size request. The
   * unconsumed bytes live at the tail, so the fetch fills only the already
   * consumed space at the front. */
  if (len < cache_size && len > state->cache_remaining) {
    ares__rand_bytes_fetch(state, state->cache,
                           cache_size - state->cache_remaining);
    state->cache_remaining = cache_size;
  }

  if (state->cache_remaining >= len) {
    /* Consume from the front of the valid (tail) region */
    memcpy(buf, state->cache + (cache_size - state->cache_remaining), len);
    state->cache_remaining -= len;
  } else {
    /* Oversized request: fetch directly, leave the cache untouched */
    ares__rand_bytes_fetch(state, buf, len);
  }
}
unsigned short ares__generate_new_id(ares_rand_state *state)
{
unsigned short r=0;

@ -53,6 +53,7 @@ void ares_send(ares_channel channel, const unsigned char *qbuf, int qlen,
callback(arg, ARES_ENOMEM, 0, NULL, 0);
return;
}
query->channel = channel;
query->tcpbuf = ares_malloc(qlen + 2);
if (!query->tcpbuf)
{
@ -111,19 +112,25 @@ void ares_send(ares_channel channel, const unsigned char *qbuf, int qlen,
query->timeouts = 0;
/* Initialize our list nodes. */
ares__init_list_node(&(query->queries_by_qid), query);
ares__init_list_node(&(query->queries_by_timeout), query);
ares__init_list_node(&(query->queries_to_server), query);
ares__init_list_node(&(query->all_queries), query);
query->node_queries_by_timeout = NULL;
query->node_queries_to_server = NULL;
/* Chain the query into the list of all queries. */
ares__insert_in_list(&(query->all_queries), &(channel->all_queries));
query->node_all_queries = ares__llist_insert_last(channel->all_queries, query);
if (query->node_all_queries == NULL) {
callback(arg, ARES_ENOMEM, 0, NULL, 0);
ares__free_query(query);
return;
}
/* Keep track of queries bucketed by qid, so we can process DNS
* responses quickly.
*/
ares__insert_in_list(
&(query->queries_by_qid),
&(channel->queries_by_qid[query->qid % ARES_QID_TABLE_SIZE]));
if (!ares__htable_stvp_insert(channel->queries_by_qid, query->qid, query)) {
callback(arg, ARES_ENOMEM, 0, NULL, 0);
ares__free_query(query);
return;
}
/* Perform the first query action. */
now = ares__tvnow();

@ -32,59 +32,47 @@ static long timeoffset(struct timeval *now, struct timeval *check)
(check->tv_usec - now->tv_usec)/1000;
}
/* WARNING: Beware that this is linear in the number of outstanding
* requests! You are probably far better off just calling ares_process()
* once per second, rather than calling ares_timeout() to figure out
* when to next call ares_process().
*/
/* NOTE(review): as rendered, this function interleaves the OLD O(n)
 * implementation (scan of channel->all_queries computing min_offset) with
 * the NEW O(1) skiplist implementation: local declarations are duplicated
 * and everything after the first unconditional 'return maxtv;' is
 * unreachable. Only one variant should survive -- confirm against the
 * upstream commit before compiling. */
struct timeval *ares_timeout(ares_channel channel, struct timeval *maxtv,
                             struct timeval *tvbuf)
{
  /* Old implementation's locals */
  struct query *query;
  struct list_node* list_head;
  struct list_node* list_node;
  struct timeval now;
  struct timeval nextstop;
  long offset, min_offset;

  /* No queries, no timeout (and no fetch of the current time). */
  if (ares__is_list_empty(&(channel->all_queries)))
    return maxtv;

  /* New implementation's locals (duplicated declarations as rendered) */
  struct query *query;
  ares__slist_node_t *node;
  struct timeval now;
  long offset;

  /* The minimum timeout of all queries is always the first entry in
   * channel->queries_by_timeout */
  node = ares__slist_node_first(channel->queries_by_timeout);
  /* no queries/timeout */
  if (node == NULL) {
    return maxtv; /* <-- maxtv can be null though, hrm */
  }
  query = ares__slist_node_val(node);

  /* Find the minimum timeout for the current set of queries. */
  now = ares__tvnow();
  min_offset = -1;
  list_head = &(channel->all_queries);
  for (list_node = list_head->next; list_node != list_head;
       list_node = list_node->next)
    {
      query = list_node->data;
      if (query->timeout.tv_sec == 0)
        continue;
      offset = timeoffset(&now, &query->timeout);
      if (offset < 0)
        offset = 0;
      if (min_offset == -1 || offset < min_offset)
        min_offset = offset;
    }

  /* If we found a minimum timeout and it's sooner than the one specified in
   * maxtv (if any), return it. Otherwise go with maxtv.
   */
  if (min_offset != -1)
    {
      int ioffset = (min_offset > (long)INT_MAX) ? INT_MAX : (int)min_offset;
      nextstop.tv_sec = ioffset/1000;
      nextstop.tv_usec = (ioffset%1000)*1000;
      if (!maxtv || ares__timedout(maxtv, &nextstop))
        {
          *tvbuf = nextstop;
          return tvbuf;
        }
    }
  return maxtv;

  /* New implementation continues below (unreachable as rendered) */
  offset = timeoffset(&now, &query->timeout);
  if (offset < 0)
    offset = 0;
  if (offset > (long)INT_MAX)
    offset = INT_MAX;
  tvbuf->tv_sec = offset / 1000;
  tvbuf->tv_usec = (offset % 1000) * 1000;

  if (maxtv == NULL)
    return tvbuf;

  /* Return the minimum time between maxtv and tvbuf */
  if (tvbuf->tv_sec > maxtv->tv_sec)
    return maxtv;
  if (tvbuf->tv_sec < maxtv->tv_sec)
    return tvbuf;
  if (tvbuf->tv_usec > maxtv->tv_usec)
    return maxtv;
  return tvbuf;
}

@ -194,8 +194,7 @@ TEST_P(MockTCPChannelTestAI, ServFailResponse) {
ares_getaddrinfo(channel_, "www.google.com.", NULL, &hints, AddrInfoCallback, &result);
Process();
EXPECT_TRUE(result.done_);
// ARES_FLAG_NOCHECKRESP not set, so SERVFAIL consumed
EXPECT_EQ(ARES_ECONNREFUSED, result.status_);
EXPECT_EQ(ARES_ESERVFAIL, result.status_);
}
TEST_P(MockTCPChannelTestAI, NotImplResponse) {
@ -213,8 +212,7 @@ TEST_P(MockTCPChannelTestAI, NotImplResponse) {
ares_getaddrinfo(channel_, "www.google.com.", NULL, &hints, AddrInfoCallback, &result);
Process();
EXPECT_TRUE(result.done_);
// ARES_FLAG_NOCHECKRESP not set, so NOTIMP consumed
EXPECT_EQ(ARES_ECONNREFUSED, result.status_);
EXPECT_EQ(ARES_ENOTIMP, result.status_);
}
TEST_P(MockTCPChannelTestAI, RefusedResponse) {
@ -232,8 +230,7 @@ TEST_P(MockTCPChannelTestAI, RefusedResponse) {
ares_getaddrinfo(channel_, "www.google.com.", NULL, &hints, AddrInfoCallback, &result);
Process();
EXPECT_TRUE(result.done_);
// ARES_FLAG_NOCHECKRESP not set, so REFUSED consumed
EXPECT_EQ(ARES_ECONNREFUSED, result.status_);
EXPECT_EQ(ARES_EREFUSED, result.status_);
}
TEST_P(MockTCPChannelTestAI, YXDomainResponse) {

@ -257,8 +257,7 @@ TEST_P(MockTCPChannelTest, ServFailResponse) {
ares_gethostbyname(channel_, "www.google.com.", AF_INET, HostCallback, &result);
Process();
EXPECT_TRUE(result.done_);
// ARES_FLAG_NOCHECKRESP not set, so SERVFAIL consumed
EXPECT_EQ(ARES_ECONNREFUSED, result.status_);
EXPECT_EQ(ARES_ESERVFAIL, result.status_);
}
TEST_P(MockTCPChannelTest, NotImplResponse) {
@ -272,8 +271,7 @@ TEST_P(MockTCPChannelTest, NotImplResponse) {
ares_gethostbyname(channel_, "www.google.com.", AF_INET, HostCallback, &result);
Process();
EXPECT_TRUE(result.done_);
// ARES_FLAG_NOCHECKRESP not set, so NOTIMP consumed
EXPECT_EQ(ARES_ECONNREFUSED, result.status_);
EXPECT_EQ(ARES_ENOTIMP, result.status_);
}
TEST_P(MockTCPChannelTest, RefusedResponse) {
@ -287,8 +285,7 @@ TEST_P(MockTCPChannelTest, RefusedResponse) {
ares_gethostbyname(channel_, "www.google.com.", AF_INET, HostCallback, &result);
Process();
EXPECT_TRUE(result.done_);
// ARES_FLAG_NOCHECKRESP not set, so REFUSED consumed
EXPECT_EQ(ARES_ECONNREFUSED, result.status_);
EXPECT_EQ(ARES_EREFUSED, result.status_);
}
TEST_P(MockTCPChannelTest, YXDomainResponse) {

Loading…
Cancel
Save