Added unbound to external dependencies

This commit is contained in:
Riccardo Spagni
2014-10-05 23:44:31 +02:00
parent 732493c5cb
commit 9ef094b356
394 changed files with 199264 additions and 0 deletions

642
external/unbound/util/alloc.c vendored Normal file
View File

@@ -0,0 +1,642 @@
/*
* util/alloc.c - memory allocation service.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains memory allocation functions.
*/
#include "config.h"
#include "util/alloc.h"
#include "util/regional.h"
#include "util/data/packed_rrset.h"
#include "util/fptr_wlist.h"
/** custom size of cached regional blocks */
#define ALLOC_REG_SIZE 16384
/** number of bits for ID part of uint64, rest for number of threads. */
#define THRNUM_SHIFT 48 /* for 65k threads, 2^48 rrsets per thr. */
/** setup new special type: zero the whole entry, initialise its
 * rwlock, and point the hash-entry key back at the structure itself. */
static void
alloc_setup_special(alloc_special_t* t)
{
	memset(t, 0, sizeof(*t));
	lock_rw_init(&t->entry.lock);
	t->entry.key = t;
}
/** prealloc some entries in the cache. To minimize contention.
 * Result is 1 lock per alloc_max newly created entries.
 * Stops early (after logging an error) if malloc fails.
 * @param alloc: the structure to fill up.
 */
static void
prealloc(struct alloc_cache* alloc)
{
	alloc_special_t* p;
	int i;
	for(i=0; i<ALLOC_SPECIAL_MAX; i++) {
		if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t)))) {
			log_err("prealloc: out of memory");
			return;
		}
		alloc_setup_special(p);
		/* push onto the local quarantine (free) list */
		alloc_set_special_next(p, alloc->quar);
		alloc->quar = p;
		alloc->num_quar++;
	}
}
/** prealloc a number of regional blocks for the cache.
 * Stops early (after logging an error) if allocation fails.
 * @param alloc: cache whose reg_list is filled.
 * @param num: how many blocks to create.
 */
static void
prealloc_blocks(struct alloc_cache* alloc, size_t num)
{
	size_t done;
	struct regional* block;
	for(done = 0; done < num; done++) {
		block = regional_create_custom(ALLOC_REG_SIZE);
		if(!block) {
			log_err("prealloc blocks: out of memory");
			return;
		}
		/* push the fresh block onto the cached free list */
		block->next = (char*)alloc->reg_list;
		alloc->reg_list = block;
		alloc->num_reg_blocks++;
	}
}
/**
 * Initialise an alloc cache (zeroed first).
 * @param alloc: caller-allocated structure to set up.
 * @param super: global alloc above this one, NULL for the toplevel.
 * @param thread_num: becomes the high bits of every id handed out.
 */
void
alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
	int thread_num)
{
	memset(alloc, 0, sizeof(*alloc));
	alloc->super = super;
	alloc->thread_num = thread_num;
	/* ids are 64bit: [16 bits thread_num | 48 bits counter],
	 * built up in steps to avoid 64bit constants and implicit
	 * 'int' operations, for compiler portability. */
	alloc->next_id = (uint64_t)thread_num;	/* in steps, so that type */
	alloc->next_id <<= THRNUM_SHIFT;	/* of *_id is used. */
	alloc->last_id = 1;			/* so no 64bit constants, */
	alloc->last_id <<= THRNUM_SHIFT;	/* or implicit 'int' ops. */
	alloc->last_id -= 1;			/* for compiler portability. */
	alloc->last_id |= alloc->next_id;
	alloc->next_id += 1;		/* because id=0 is special. */
	alloc->max_reg_blocks = 100;
	alloc->num_reg_blocks = 0;
	alloc->reg_list = NULL;
	alloc->cleanup = NULL;
	alloc->cleanup_arg = NULL;
	/* per-thread allocs (those with a super) stash regional blocks */
	if(alloc->super)
		prealloc_blocks(alloc, alloc->max_reg_blocks);
	/* only the toplevel alloc is shared between threads; it alone
	 * gets a lock */
	if(!alloc->super) {
		lock_quick_init(&alloc->lock);
		lock_protect(&alloc->lock, alloc, sizeof(*alloc));
	}
}
/**
 * Clear the alloc cache. If there is a super, the quarantine list is
 * pushed into it in one lock grab; otherwise the entries are freed.
 * Cached regional blocks are always freed. Does not free the struct.
 * @param alloc: cache to clear; left almost zeroed on exit.
 */
void
alloc_clear(struct alloc_cache* alloc)
{
	alloc_special_t* p, *np;
	struct regional* r, *nr;
	if(!alloc)
		return;
	if(!alloc->super) {
		/* this is the toplevel alloc; its lock goes away too */
		lock_quick_destroy(&alloc->lock);
	}
	if(alloc->super && alloc->quar) {
		/* push entire list into super */
		p = alloc->quar;
		while(alloc_special_next(p)) /* find last */
			p = alloc_special_next(p);
		lock_quick_lock(&alloc->super->lock);
		alloc_set_special_next(p, alloc->super->quar);
		alloc->super->quar = alloc->quar;
		alloc->super->num_quar += alloc->num_quar;
		lock_quick_unlock(&alloc->super->lock);
	} else {
		/* free */
		p = alloc->quar;
		while(p) {
			np = alloc_special_next(p);
			/* deinit special type */
			lock_rw_destroy(&p->entry.lock);
			free(p);
			p = np;
		}
	}
	alloc->quar = 0;
	alloc->num_quar = 0;
	/* free the cached regional blocks */
	r = alloc->reg_list;
	while(r) {
		nr = (struct regional*)r->next;
		free(r);
		r = nr;
	}
	alloc->reg_list = NULL;
	alloc->num_reg_blocks = 0;
}
/**
 * Hand out a fresh id. On wraparound of this thread's 48bit id space,
 * the registered cleanup callback is invoked (it must drop all old id
 * references, e.g. clear the rrset cache) and the counter restarts.
 * @param alloc: the (per-thread) alloc cache.
 * @return fresh id; never 0, because id=0 is the 'invalid' marker.
 */
uint64_t
alloc_get_id(struct alloc_cache* alloc)
{
	uint64_t id = alloc->next_id++;
	if(id == alloc->last_id) {
		log_warn("rrset alloc: out of 64bit ids. Clearing cache.");
		fptr_ok(fptr_whitelist_alloc_cleanup(alloc->cleanup));
		(*alloc->cleanup)(alloc->cleanup_arg);
		/* start back at first number */ /* like in alloc_init*/
		alloc->next_id = (uint64_t)alloc->thread_num;
		alloc->next_id <<= THRNUM_SHIFT; /* in steps for comp. */
		alloc->next_id += 1; /* portability. */
		/* and generate new and safe id */
		id = alloc->next_id++;
	}
	return id;
}
/**
 * Get a special_t (packed rrset key) for use, with a fresh id assigned.
 * Tries in order: the local quarantine list (no lock), the super's list
 * (one lock grab), and finally malloc (topping up the local list first
 * to reduce future lock contention).
 * @param alloc: where to get it.
 * @return the item, or NULL on malloc failure (error is logged).
 */
alloc_special_t*
alloc_special_obtain(struct alloc_cache* alloc)
{
	alloc_special_t* p;
	log_assert(alloc);
	/* see if in local cache */
	if(alloc->quar) {
		p = alloc->quar;
		alloc->quar = alloc_special_next(p);
		alloc->num_quar--;
		p->id = alloc_get_id(alloc);
		return p;
	}
	/* see if in global cache */
	if(alloc->super) {
		/* could maybe grab alloc_max/2 entries in one go,
		 * but really, isn't that just as fast as this code? */
		lock_quick_lock(&alloc->super->lock);
		if((p = alloc->super->quar)) {
			alloc->super->quar = alloc_special_next(p);
			alloc->super->num_quar--;
		}
		lock_quick_unlock(&alloc->super->lock);
		if(p) {
			p->id = alloc_get_id(alloc);
			return p;
		}
	}
	/* allocate new; prealloc fills the local cache so subsequent
	 * obtains do not hit the super lock */
	prealloc(alloc);
	if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t)))) {
		log_err("alloc_special_obtain: out of memory");
		return NULL;
	}
	alloc_setup_special(p);
	p->id = alloc_get_id(alloc);
	return p;
}
/** push mem and some more items to the super.
 * Called when the local quarantine is full: moves mem plus
 * ALLOC_SPECIAL_MAX/2 local entries to the super in a single lock
 * grab, so the super lock is taken once per alloc_max/2 releases.
 * @param alloc: local cache, must have a super and a full quarantine.
 * @param mem: item being released; becomes head of the moved sublist.
 */
static void
pushintosuper(struct alloc_cache* alloc, alloc_special_t* mem)
{
	int i;
	alloc_special_t *p = alloc->quar;
	log_assert(p);
	log_assert(alloc && alloc->super &&
		alloc->num_quar >= ALLOC_SPECIAL_MAX);
	/* push ALLOC_SPECIAL_MAX/2 after mem */
	alloc_set_special_next(mem, alloc->quar);
	for(i=1; i<ALLOC_SPECIAL_MAX/2; i++) {
		p = alloc_special_next(p);
	}
	/* p is now the last entry that moves; detach the remainder
	 * back onto the local list */
	alloc->quar = alloc_special_next(p);
	alloc->num_quar -= ALLOC_SPECIAL_MAX/2;
	/* dump mem+list into the super quar list */
	lock_quick_lock(&alloc->super->lock);
	alloc_set_special_next(p, alloc->super->quar);
	alloc->super->quar = mem;
	alloc->super->num_quar += ALLOC_SPECIAL_MAX/2 + 1;
	lock_quick_unlock(&alloc->super->lock);
	/* so 1 lock per mem+alloc/2 deletes */
}
/**
 * Return a special_t to the pool; the item is cleaned (id zeroed).
 * The toplevel alloc locks itself; a per-thread alloc pushes half its
 * quarantine to the super when the local list is full.
 * @param alloc: where to return it.
 * @param mem: item to return; may be NULL (no-op).
 */
void
alloc_special_release(struct alloc_cache* alloc, alloc_special_t* mem)
{
	log_assert(alloc);
	if(!mem)
		return;
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}
	alloc_special_clean(mem);
	if(alloc->super && alloc->num_quar >= ALLOC_SPECIAL_MAX) {
		/* push it to the super structure.
		 * no unlock needed here: the local lock was only taken
		 * when alloc->super is NULL, and this branch requires
		 * alloc->super; pushintosuper locks the super itself. */
		pushintosuper(alloc, mem);
		return;
	}
	alloc_set_special_next(mem, alloc->quar);
	alloc->quar = mem;
	alloc->num_quar++;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
}
/**
 * Log statistics for this alloc cache: cached special entries and
 * cached regional blocks. The toplevel alloc is prefixed "sup".
 * @param alloc: on what alloc.
 */
void
alloc_stats(struct alloc_cache* alloc)
{
	const char* prefix = alloc->super ? "" : "sup";
	log_info("%salloc: %d in cache, %d blocks.", prefix,
		(int)alloc->num_quar, (int)alloc->num_reg_blocks);
}
/**
 * Get memory size of the alloc cache: the struct itself, the cached
 * special entries (plus their locks), and the cached regional blocks.
 * The toplevel alloc is locked while its list is walked.
 * @param alloc: on what alloc.
 * @return size in bytes.
 */
size_t alloc_get_mem(struct alloc_cache* alloc)
{
	alloc_special_t* p;
	size_t s = sizeof(*alloc);
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}
	s += sizeof(alloc_special_t) * alloc->num_quar;
	for(p = alloc->quar; p; p = alloc_special_next(p)) {
		s += lock_get_mem(&p->entry.lock);
	}
	/* each cached regional counts for its full custom block size */
	s += alloc->num_reg_blocks * ALLOC_REG_SIZE;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
	return s;
}
/**
 * Get a regional for query state use: pop one off the local cache if
 * available, otherwise create a fresh custom-sized regional.
 * @param alloc: where to get it.
 * @return regional for use, or NULL on allocation failure.
 */
struct regional*
alloc_reg_obtain(struct alloc_cache* alloc)
{
	struct regional* r;
	if(alloc->num_reg_blocks == 0)
		return regional_create_custom(ALLOC_REG_SIZE);
	/* pop a cached block off the free list */
	r = alloc->reg_list;
	alloc->reg_list = (struct regional*)r->next;
	r->next = NULL;
	alloc->num_reg_blocks--;
	return r;
}
/**
 * Put a regional for query states back into the alloc cache, or
 * destroy it when the cache is already at its maximum.
 * @param alloc: where to return it.
 * @param r: regional to put back; may be NULL (no-op).
 */
void
alloc_reg_release(struct alloc_cache* alloc, struct regional* r)
{
	/* fix: check for NULL before the destroy branch, so we never
	 * hand a NULL to regional_destroy (the old code only checked
	 * after it). */
	if(!r) return;
	if(alloc->num_reg_blocks >= alloc->max_reg_blocks) {
		regional_destroy(r);
		return;
	}
	regional_free_all(r);
	log_assert(r->next == NULL);
	/* push onto the cached free list */
	r->next = (char*)alloc->reg_list;
	alloc->reg_list = r;
	alloc->num_reg_blocks++;
}
/**
 * Register the callback run when the id space wraps (see alloc_get_id).
 * The callback must remove all rrset id references, e.g. clear caches.
 * @param alloc: the alloc cache.
 * @param cleanup: called as cleanup(arg) on id overflow.
 * @param arg: user argument passed to the callback.
 */
void
alloc_set_id_cleanup(struct alloc_cache* alloc, void (*cleanup)(void*),
	void* arg)
{
	alloc->cleanup = cleanup;
	alloc->cleanup_arg = arg;
}
/** global debug value to keep track of total memory mallocs */
size_t unbound_mem_alloc = 0;
/** global debug value to keep track of total memory frees */
size_t unbound_mem_freed = 0;
#ifdef UNBOUND_ALLOC_STATS
/** magic marker stored at offset 8 of every tracked allocation, so
 * free/realloc can tell tracked blocks apart from untracked ones */
uint64_t mem_special = (uint64_t)0xfeed43327766abcdLL;
#ifdef malloc
#undef malloc
#endif
/** malloc with stats. Allocates 16 extra header bytes: the requested
 * size is stored at offset 0 and the mem_special magic at offset 8;
 * the caller receives base+16.
 * NOTE(review): arithmetic on void* is a GCC extension, not ISO C. */
void *unbound_stat_malloc(size_t size)
{
	void* res;
	if(size == 0) size = 1;
	res = malloc(size+16);
	if(!res) return NULL;
	unbound_mem_alloc += size;
	log_info("stat %p=malloc(%u)", res+16, (unsigned)size);
	memcpy(res, &size, sizeof(size));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
#ifdef calloc
#undef calloc
#endif
/** calloc with stats: zeroed allocation with the same 16-byte header
 * as unbound_stat_malloc (size at offset 0, magic at offset 8).
 * NOTE(review): nmemb*size can overflow size_t here; debug-only code,
 * but verify against callers with attacker-controlled counts. */
void *unbound_stat_calloc(size_t nmemb, size_t size)
{
	size_t s = (nmemb*size==0)?(size_t)1:nmemb*size;
	void* res = calloc(1, s+16);
	if(!res) return NULL;
	log_info("stat %p=calloc(%u, %u)", res+16, (unsigned)nmemb, (unsigned)size);
	unbound_mem_alloc += s;
	memcpy(res, &s, sizeof(s));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
#ifdef free
#undef free
#endif
/** free with stats: if ptr carries the mem_special magic at offset -8,
 * strip the 16-byte header, add the tracked size to unbound_mem_freed
 * and wipe the magic (so a later double free is detected as untracked);
 * pointers without the magic go straight to the real free(). */
void unbound_stat_free(void *ptr)
{
	size_t s;
	if(!ptr) return;
	if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) {
		free(ptr);
		return;
	}
	ptr-=16;
	memcpy(&s, ptr, sizeof(s));
	log_info("stat free(%p) size %u", ptr+16, (unsigned)s);
	/* clear the magic before freeing */
	memset(ptr+8, 0, 8);
	unbound_mem_freed += s;
	free(ptr);
}
#ifdef realloc
#undef realloc
#endif
/** realloc with stats. Tracked blocks carry a 16-byte header: the size
 * at offset 0 and the mem_special magic at offset 8; the user pointer
 * is base+16. Untracked pointers pass through to the real realloc. */
void *unbound_stat_realloc(void *ptr, size_t size)
{
	size_t cursz;
	void* res;
	if(!ptr) return unbound_stat_malloc(size);
	if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) {
		/* not one of ours; hand through to the real realloc */
		return realloc(ptr, size);
	}
	if(size==0) {
		unbound_stat_free(ptr);
		return NULL;
	}
	ptr -= 16;
	memcpy(&cursz, ptr, sizeof(cursz));
	if(cursz == size) {
		/* nothing changes; return the user pointer.
		 * fix: the old code returned ptr after ptr -= 16, which
		 * handed the caller the header base instead of the data. */
		return ptr+16;
	}
	res = malloc(size+16);
	if(!res) return NULL;
	unbound_mem_alloc += size;
	unbound_mem_freed += cursz;
	log_info("stat realloc(%p, %u) from %u", ptr+16, (unsigned)size, (unsigned)cursz);
	/* copy min(cursz, size) bytes of user data (cursz==size cannot
	 * reach here) */
	if(cursz > size) {
		memcpy(res+16, ptr+16, size);
	} else if(size > cursz) {
		memcpy(res+16, ptr+16, cursz);
	}
	/* wipe the old magic so stale pointers are detected, then free */
	memset(ptr+8, 0, 8);
	free(ptr);
	memcpy(res, &size, sizeof(size));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
/** log caller location, then do the stat-tracked malloc */
void *unbound_stat_malloc_log(size_t size, const char* file, int line,
	const char* func)
{
	log_info("%s:%d %s malloc(%u)", file, line, func, (unsigned)size);
	return unbound_stat_malloc(size);
}
/** log caller location, then do the stat-tracked calloc */
void *unbound_stat_calloc_log(size_t nmemb, size_t size, const char* file,
	int line, const char* func)
{
	log_info("%s:%d %s calloc(%u, %u)", file, line, func,
		(unsigned) nmemb, (unsigned)size);
	return unbound_stat_calloc(nmemb, size);
}
/** log caller location and, if the block carries the mem_special magic,
 * its tracked size; then free via unbound_stat_free */
void unbound_stat_free_log(void *ptr, const char* file, int line,
	const char* func)
{
	if(ptr && memcmp(ptr-8, &mem_special, sizeof(mem_special)) == 0) {
		size_t s;
		memcpy(&s, ptr-16, sizeof(s));
		log_info("%s:%d %s free(%p) size %u",
			file, line, func, ptr, (unsigned)s);
	} else
		log_info("%s:%d %s unmatched free(%p)", file, line, func, ptr);
	unbound_stat_free(ptr);
}
/** log caller location, then do the stat-tracked realloc */
void *unbound_stat_realloc_log(void *ptr, size_t size, const char* file,
	int line, const char* func)
{
	log_info("%s:%d %s realloc(%p, %u)", file, line, func,
		ptr, (unsigned)size);
	return unbound_stat_realloc(ptr, size);
}
#endif /* UNBOUND_ALLOC_STATS */
#ifdef UNBOUND_ALLOC_LITE
#undef malloc
#undef calloc
#undef free
#undef realloc
/** length of prefix and suffix */
static size_t lite_pad = 16;
/** prefix value to check; must be exactly lite_pad bytes long */
static char* lite_pre = "checkfront123456";
/** suffix value to check; must be exactly lite_pad bytes long */
static char* lite_post= "checkafter123456";
/** lite malloc: layout is [prefix pad | size_t len | data | suffix pad].
 * The data area is filled with 0x1a so use of uninitialised memory is
 * visible. file/line/func are unused here (kept for the macro shape). */
void *unbound_stat_malloc_lite(size_t size, const char* file, int line,
	const char* func)
{
	/* [prefix .. len .. actual data .. suffix] */
	void* res = malloc(size+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &size, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0x1a, size); /* init the memory */
	memmove(res+lite_pad+size+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}
/** lite calloc: zeroed allocation with the same guarded layout as
 * unbound_stat_malloc_lite. file/line/func are unused here (kept for
 * the macro shape). */
void *unbound_stat_calloc_lite(size_t nmemb, size_t size, const char* file,
	int line, const char* func)
{
	size_t req;
	void* res;
	/* fix: guard nmemb*size against size_t overflow, as the real
	 * calloc does; otherwise a wrapped (too small) block would be
	 * returned for a huge request */
	if(size != 0 && nmemb > (size_t)-1 / size)
		return NULL;
	req = nmemb * size;
	res = malloc(req+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &req, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0, req);
	memmove(res+lite_pad+req+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}
/** lite free: verify the prefix and suffix guard pads around the
 * allocation (fatal_exit on corruption), fill the whole block with
 * 0xdd to catch use-after-free, then free the real base pointer. */
void unbound_stat_free_lite(void *ptr, const char* file, int line,
	const char* func)
{
	void* real;
	size_t orig = 0;
	if(!ptr) return;
	real = ptr-lite_pad-sizeof(size_t);
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("free(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex(" should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t));
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("free(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex(" should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
}
/** lite realloc: always allocates a fresh guarded block and copies,
 * never grows in place. Both guard pads of the old block are verified
 * (fatal_exit on corruption); the old block is poisoned and freed. */
void *unbound_stat_realloc_lite(void *ptr, size_t size, const char* file,
	int line, const char* func)
{
	/* always free and realloc (no growing) */
	void* real, *newa;
	size_t orig = 0;
	if(!ptr) {
		/* like malloc() */
		return unbound_stat_malloc_lite(size, file, line, func);
	}
	if(!size) {
		/* like free() */
		unbound_stat_free_lite(ptr, file, line, func);
		return NULL;
	}
	/* change allocation size and copy */
	real = ptr-lite_pad-sizeof(size_t);
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("realloc(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex(" should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t));
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("realloc(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex(" should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	/* new alloc and copy over min(orig, size) bytes of user data */
	newa = unbound_stat_malloc_lite(size, file, line, func);
	if(!newa)
		return NULL;
	if(orig < size)
		memmove(newa, ptr, orig);
	else memmove(newa, ptr, size);
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
	return newa;
}
/** strdup that allocates through the lite allocator, so the duplicate
 * carries the guard pads and is accounted like other lite blocks. */
char* unbound_strdup_lite(const char* s, const char* file, int line,
	const char* func)
{
	/* this routine is made to make sure strdup() uses the malloc_lite */
	size_t len = strlen(s)+1;
	char* copy = (char*)unbound_stat_malloc_lite(len, file, line, func);
	if(!copy)
		return NULL;
	memmove(copy, s, len);
	return copy;
}
/** take ownership of a string that was allocated with the real malloc:
 * duplicate it into a lite-tracked block and free the original. */
char* unbound_lite_wrapstr(char* s)
{
	char* n = unbound_strdup_lite(s, __FILE__, __LINE__, __func__);
	free(s);
	return n;
}
#undef sldns_pkt2wire
/** wrapper so pkt2wire output is owned by the lite allocator: convert,
 * copy the wire data into a lite-tracked buffer, and free the buffer
 * that sldns allocated with the real malloc. */
sldns_status unbound_lite_pkt2wire(uint8_t **dest, const sldns_pkt *p,
	size_t *size)
{
	uint8_t* md = NULL;
	size_t ms = 0;
	sldns_status s = sldns_pkt2wire(&md, p, &ms);
	if(md) {
		*dest = unbound_stat_malloc_lite(ms, __FILE__, __LINE__,
			__func__);
		*size = ms;
		if(!*dest) { free(md); return LDNS_STATUS_MEM_ERR; }
		memcpy(*dest, md, ms);
		free(md);
	} else {
		*dest = NULL;
		*size = 0;
	}
	return s;
}
#undef i2d_DSA_SIG
/** wrapper so i2d_DSA_SIG output is owned by the lite allocator:
 * copy the DER data into a lite-tracked buffer and free the buffer
 * that OpenSSL allocated. Returns the DER length, or -1 on failure. */
int unbound_lite_i2d_DSA_SIG(DSA_SIG* dsasig, unsigned char** sig)
{
	unsigned char* n = NULL;
	int r= i2d_DSA_SIG(dsasig, &n);
	if(n) {
		*sig = unbound_stat_malloc_lite((size_t)r, __FILE__, __LINE__,
			__func__);
		if(!*sig) {
			/* fix: free the OpenSSL-allocated buffer on the
			 * out-of-memory path; it was leaked before */
			free(n);
			return -1;
		}
		memcpy(*sig, n, (size_t)r);
		free(n);
		return r;
	}
	*sig = NULL;
	return r;
}
#endif /* UNBOUND_ALLOC_LITE */

217
external/unbound/util/alloc.h vendored Normal file
View File

@@ -0,0 +1,217 @@
/*
* util/alloc.h - memory allocation service.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains memory allocation functions.
*
* The reasons for this service are:
* o Avoid locking costs of getting global lock to call malloc().
* o The packed rrset type needs to be kept on special freelists,
* so that they are reused for other packet rrset allocations.
*
*/
#ifndef UTIL_ALLOC_H
#define UTIL_ALLOC_H
#include "util/locks.h"
struct ub_packed_rrset_key;
struct regional;
/** The special type, packed rrset. Not allowed to be used for other memory */
typedef struct ub_packed_rrset_key alloc_special_t;
/** clean the special type. Pass pointer.
 * NOTE(review): expands to a bare statement with a trailing ';' - use
 * only as a full statement, not in an unbraced if/else. */
#define alloc_special_clean(x) (x)->id = 0;
/** access next pointer. (in available spot). Pass pointer.
 * The free list is threaded through the lruhash entry overflow_next. */
#define alloc_special_next(x) ((alloc_special_t*)((x)->entry.overflow_next))
/** set next pointer. (in available spot). Pass pointers. */
#define alloc_set_special_next(x, y) \
((x)->entry.overflow_next) = (struct lruhash_entry*)(y);
/** how many blocks to cache locally. */
#define ALLOC_SPECIAL_MAX 10
/**
 * Structure that provides allocation. Use one per thread.
 * The one on top has a NULL super pointer.
 */
struct alloc_cache {
	/** lock, only used for the super (the toplevel alloc). */
	lock_quick_t lock;
	/** global allocator above this one. NULL for none (malloc/free) */
	struct alloc_cache* super;
	/** singly linked list of special type entries that are free for
	 * use; linked through the lruhash entry overflow_next pointer. */
	alloc_special_t* quar;
	/** number of items in quarantine. */
	size_t num_quar;
	/** thread number for id creation; becomes the high bits of ids */
	int thread_num;
	/** next id number to pass out */
	uint64_t next_id;
	/** last id number possible */
	uint64_t last_id;
	/** what function to call to cleanup when last id is reached */
	void (*cleanup)(void*);
	/** user arg for cleanup */
	void* cleanup_arg;
	/** how many regional blocks to keep back max */
	size_t max_reg_blocks;
	/** how many regional blocks are kept now */
	size_t num_reg_blocks;
	/** linked list of regional blocks, using regional->next */
	struct regional* reg_list;
};
/**
* Init alloc (zeroes the struct).
* @param alloc: this parameter is allocated by the caller.
* @param super: super to use (init that before with super_init).
* Pass this argument NULL to init the toplevel alloc structure.
* @param thread_num: thread number for id creation of special type.
*/
void alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
int thread_num);
/**
* Free the alloc. Pushes all the cached items into the super structure.
* Or deletes them if alloc->super is NULL.
* Does not free the alloc struct itself (it was also allocated by caller).
* @param alloc: is almost zeroed on exit (except some stats).
*/
void alloc_clear(struct alloc_cache* alloc);
/**
* Get a new special_t element.
* @param alloc: where to alloc it.
* @return: memory block, or NULL on malloc failure (an error is logged).
* The block is cleaned (id invalidated) and given a fresh id.
*/
alloc_special_t* alloc_special_obtain(struct alloc_cache* alloc);
/**
* Return special_t back to pool.
* The block is cleaned up (zeroed) which also invalidates the ID inside.
* @param alloc: where to alloc it.
* @param mem: block to free.
*/
void alloc_special_release(struct alloc_cache* alloc, alloc_special_t* mem);
/**
* Set ID number of special type to a fresh new ID number.
* In case of ID number overflow, the rrset cache has to be cleared.
* @param alloc: the alloc cache
* @return: fresh id is returned.
*/
uint64_t alloc_get_id(struct alloc_cache* alloc);
/**
* Get memory size of alloc cache, alloc structure including special types.
* @param alloc: on what alloc.
* @return size in bytes.
*/
size_t alloc_get_mem(struct alloc_cache* alloc);
/**
* Print debug information (statistics).
* @param alloc: on what alloc.
*/
void alloc_stats(struct alloc_cache* alloc);
/**
* Get a new regional for query states
* @param alloc: where to alloc it.
* @return regional for use or NULL on alloc failure.
*/
struct regional* alloc_reg_obtain(struct alloc_cache* alloc);
/**
* Put regional for query states back into alloc cache.
* @param alloc: where to alloc it.
* @param r: regional to put back.
*/
void alloc_reg_release(struct alloc_cache* alloc, struct regional* r);
/**
* Set cleanup on ID overflow callback function. This should remove all
* RRset ID references from the program. Clear the caches.
* @param alloc: the alloc
* @param cleanup: the callback function, called as cleanup(arg).
* @param arg: user argument to callback function.
*/
void alloc_set_id_cleanup(struct alloc_cache* alloc, void (*cleanup)(void*),
void* arg);
#ifdef UNBOUND_ALLOC_LITE
# include <ldns/ldns.h>
# include <ldns/packet.h>
# ifdef HAVE_OPENSSL_SSL_H
# include <openssl/ssl.h>
# endif
# define malloc(s) unbound_stat_malloc_lite(s, __FILE__, __LINE__, __func__)
# define calloc(n,s) unbound_stat_calloc_lite(n, s, __FILE__, __LINE__, __func__)
# define free(p) unbound_stat_free_lite(p, __FILE__, __LINE__, __func__)
# define realloc(p,s) unbound_stat_realloc_lite(p, s, __FILE__, __LINE__, __func__)
void *unbound_stat_malloc_lite(size_t size, const char* file, int line,
const char* func);
void *unbound_stat_calloc_lite(size_t nmemb, size_t size, const char* file,
int line, const char* func);
void unbound_stat_free_lite(void *ptr, const char* file, int line,
const char* func);
void *unbound_stat_realloc_lite(void *ptr, size_t size, const char* file,
int line, const char* func);
# ifdef strdup
# undef strdup
# endif
# define strdup(s) unbound_strdup_lite(s, __FILE__, __LINE__, __func__)
char* unbound_strdup_lite(const char* s, const char* file, int line,
const char* func);
char* unbound_lite_wrapstr(char* s);
# define sldns_rr2str(rr) unbound_lite_wrapstr(sldns_rr2str(rr))
# define sldns_rdf2str(rdf) unbound_lite_wrapstr(sldns_rdf2str(rdf))
# define sldns_rr_type2str(t) unbound_lite_wrapstr(sldns_rr_type2str(t))
# define sldns_rr_class2str(c) unbound_lite_wrapstr(sldns_rr_class2str(c))
# define sldns_rr_list2str(r) unbound_lite_wrapstr(sldns_rr_list2str(r))
# define sldns_pkt2str(p) unbound_lite_wrapstr(sldns_pkt2str(p))
# define sldns_pkt_rcode2str(r) unbound_lite_wrapstr(sldns_pkt_rcode2str(r))
# define sldns_pkt2wire(a, r, s) unbound_lite_pkt2wire(a, r, s)
sldns_status unbound_lite_pkt2wire(uint8_t **dest, const sldns_pkt *p, size_t *size);
# define i2d_DSA_SIG(d, s) unbound_lite_i2d_DSA_SIG(d, s)
int unbound_lite_i2d_DSA_SIG(DSA_SIG* dsasig, unsigned char** sig);
#endif /* UNBOUND_ALLOC_LITE */
#endif /* UTIL_ALLOC_H */

1571
external/unbound/util/config_file.c vendored Normal file

File diff suppressed because it is too large Load Diff

704
external/unbound/util/config_file.h vendored Normal file
View File

@@ -0,0 +1,704 @@
/*
* util/config_file.h - reads and stores the config file for unbound.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains functions for the config file.
*/
#ifndef UTIL_CONFIG_FILE_H
#define UTIL_CONFIG_FILE_H
struct config_stub;
struct config_strlist;
struct config_str2list;
struct module_qstate;
struct sock_list;
struct ub_packed_rrset_key;
/**
* The configuration options.
* Strings are malloced.
*/
struct config_file {
/** verbosity level as specified in the config file */
int verbosity;
/** statistics interval (in seconds) */
int stat_interval;
/** if false, statistics values are reset after printing them */
int stat_cumulative;
/** if true, the statistics are kept in greater detail */
int stat_extended;
/** number of threads to create */
int num_threads;
/** port on which queries are answered. */
int port;
/** do ip4 query support. */
int do_ip4;
/** do ip6 query support. */
int do_ip6;
/** do udp query support. */
int do_udp;
/** do tcp query support. */
int do_tcp;
/** tcp upstream queries (no UDP upstream queries) */
int tcp_upstream;
/** private key file for dnstcp-ssl service (enabled if not NULL) */
char* ssl_service_key;
/** public key file for dnstcp-ssl service */
char* ssl_service_pem;
/** port on which to provide ssl service */
int ssl_port;
/** if outgoing tcp connections use SSL */
int ssl_upstream;
/** outgoing port range number of ports (per thread) */
int outgoing_num_ports;
/** number of outgoing tcp buffers per (per thread) */
size_t outgoing_num_tcp;
/** number of incoming tcp buffers per (per thread) */
size_t incoming_num_tcp;
/** allowed udp port numbers, array with 0 if not allowed */
int* outgoing_avail_ports;
/** EDNS buffer size to use */
size_t edns_buffer_size;
/** number of bytes buffer size for DNS messages */
size_t msg_buffer_size;
/** size of the message cache */
size_t msg_cache_size;
/** slabs in the message cache. */
size_t msg_cache_slabs;
/** number of queries every thread can service */
size_t num_queries_per_thread;
/** number of msec to wait before items can be jostled out */
size_t jostle_time;
/** size of the rrset cache */
size_t rrset_cache_size;
/** slabs in the rrset cache */
size_t rrset_cache_slabs;
/** host cache ttl in seconds */
int host_ttl;
/** number of slabs in the infra host cache */
size_t infra_cache_slabs;
/** max number of hosts in the infra cache */
size_t infra_cache_numhosts;
/** delay close of udp-timeouted ports, if 0 no delayclose. in msec */
int delay_close;
/** the target fetch policy for the iterator */
char* target_fetch_policy;
/** automatic interface for incoming messages. Uses ipv6 remapping,
* and recvmsg/sendmsg ancillary data to detect interfaces, boolean */
int if_automatic;
/** SO_RCVBUF size to set on port 53 UDP socket */
size_t so_rcvbuf;
/** SO_SNDBUF size to set on port 53 UDP socket */
size_t so_sndbuf;
/** SO_REUSEPORT requested on port 53 sockets */
int so_reuseport;
/** number of interfaces to open. If 0 default all interfaces. */
int num_ifs;
/** interface description strings (IP addresses) */
char **ifs;
/** number of outgoing interfaces to open.
* If 0 default all interfaces. */
int num_out_ifs;
/** outgoing interface description strings (IP addresses) */
char **out_ifs;
/** the root hints */
struct config_strlist* root_hints;
/** the stub definitions, linked list */
struct config_stub* stubs;
/** the forward zone definitions, linked list */
struct config_stub* forwards;
/** list of donotquery addresses, linked list */
struct config_strlist* donotqueryaddrs;
/** list of access control entries, linked list */
struct config_str2list* acls;
/** use default localhost donotqueryaddr entries */
int donotquery_localhost;
/** harden against very small edns buffer sizes */
int harden_short_bufsize;
/** harden against very large query sizes */
int harden_large_queries;
/** harden against spoofed glue (out of zone data) */
int harden_glue;
/** harden against receiving no DNSSEC data for trust anchor */
int harden_dnssec_stripped;
/** harden against queries that fall under known nxdomain names */
int harden_below_nxdomain;
/** harden the referral path, query for NS,A,AAAA and validate */
int harden_referral_path;
/** use 0x20 bits in query as random ID bits */
int use_caps_bits_for_id;
/** strip away these private addrs from answers, no DNS Rebinding */
struct config_strlist* private_address;
/** allow domain (and subdomains) to use private address space */
struct config_strlist* private_domain;
/** what threshold for unwanted action. */
size_t unwanted_threshold;
/** the number of seconds maximal TTL used for RRsets and messages */
int max_ttl;
/** the number of seconds minimum TTL used for RRsets and messages */
int min_ttl;
/** if prefetching of messages should be performed. */
int prefetch;
/** if prefetching of DNSKEYs should be performed. */
int prefetch_key;
/** chrootdir, if not "" or chroot will be done */
char* chrootdir;
/** username to change to, if not "". */
char* username;
/** working directory */
char* directory;
/** filename to log to. */
char* logfile;
/** pidfile to write pid to. */
char* pidfile;
/** should log messages be sent to syslogd */
int use_syslog;
/** log timestamp in ascii UTC */
int log_time_ascii;
/** log queries with one line per query */
int log_queries;
/** do not report identity (id.server, hostname.bind) */
int hide_identity;
/** do not report version (version.server, version.bind) */
int hide_version;
/** identity, hostname is returned if "". */
char* identity;
/** version, package version returned if "". */
char* version;
/** the module configuration string */
char* module_conf;
/** files with trusted DS and DNSKEYs in zonefile format, list */
struct config_strlist* trust_anchor_file_list;
/** list of trustanchor keys, linked list */
struct config_strlist* trust_anchor_list;
/** files with 5011 autotrust tracked keys */
struct config_strlist* auto_trust_anchor_file_list;
/** files with trusted DNSKEYs in named.conf format, list */
struct config_strlist* trusted_keys_file_list;
/** DLV anchor file */
char* dlv_anchor_file;
/** DLV anchor inline */
struct config_strlist* dlv_anchor_list;
/** insecure domain list */
struct config_strlist* domain_insecure;
/** if not 0, this value is the validation date for RRSIGs */
int32_t val_date_override;
/** the minimum for signature clock skew */
int32_t val_sig_skew_min;
/** the maximum for signature clock skew */
int32_t val_sig_skew_max;
/** this value sets the number of seconds before revalidating bogus */
int bogus_ttl;
/** should validator clean additional section for secure msgs */
int val_clean_additional;
/** log bogus messages by the validator */
int val_log_level;
/** squelch val_log_level to log - in the library, this output goes to the callback instead */
int val_log_squelch;
/** should validator allow bogus messages to go through */
int val_permissive_mode;
/** ignore the CD flag in incoming queries and refuse them bogus data */
int ignore_cd;
/** nsec3 maximum iterations per key size, string */
char* val_nsec3_key_iterations;
/** autotrust add holddown time, in seconds */
unsigned int add_holddown;
/** autotrust del holddown time, in seconds */
unsigned int del_holddown;
/** autotrust keep_missing time, in seconds. 0 is forever. */
unsigned int keep_missing;
/** size of the key cache */
size_t key_cache_size;
/** slabs in the key cache. */
size_t key_cache_slabs;
/** size of the neg cache */
size_t neg_cache_size;
/** local zones config */
struct config_str2list* local_zones;
/** local zones nodefault list */
struct config_strlist* local_zones_nodefault;
/** local data RRs configged */
struct config_strlist* local_data;
/** unblock lan zones (reverse lookups for 10/8 and so on) */
int unblock_lan_zones;
/** remote control section. enable toggle. */
int remote_control_enable;
/** the interfaces the remote control should listen on */
struct config_strlist* control_ifs;
/** port number for the control port */
int control_port;
/** private key file for server */
char* server_key_file;
/** certificate file for server */
char* server_cert_file;
/** private key file for unbound-control */
char* control_key_file;
/** certificate file for unbound-control */
char* control_cert_file;
/** Python script file */
char* python_script;
/** daemonize, i.e. fork into the background. */
int do_daemonize;
/* minimal response when positive answer */
int minimal_responses;
/* RRSet roundrobin */
int rrset_roundrobin;
/* maximum UDP response size */
size_t max_udp_size;
/* DNS64 prefix */
char* dns64_prefix;
/* Synthesize all AAAA records despite the presence of an authoritative one */
int dns64_synthall;
/** true to enable dnstap support */
int dnstap;
/** dnstap socket path */
char* dnstap_socket_path;
/** true to send "identity" via dnstap */
int dnstap_send_identity;
/** true to send "version" via dnstap */
int dnstap_send_version;
/** dnstap "identity", hostname is used if "". */
char* dnstap_identity;
/** dnstap "version", package version is used if "". */
char* dnstap_version;
/** true to log dnstap RESOLVER_QUERY message events */
int dnstap_log_resolver_query_messages;
/** true to log dnstap RESOLVER_RESPONSE message events */
int dnstap_log_resolver_response_messages;
/** true to log dnstap CLIENT_QUERY message events */
int dnstap_log_client_query_messages;
/** true to log dnstap CLIENT_RESPONSE message events */
int dnstap_log_client_response_messages;
/** true to log dnstap FORWARDER_QUERY message events */
int dnstap_log_forwarder_query_messages;
/** true to log dnstap FORWARDER_RESPONSE message events */
int dnstap_log_forwarder_response_messages;
};
/**
 * Stub zone configuration options. The same structure is reused for both
 * stub-zone: and forward-zone: clauses (see config_file.stubs / .forwards).
 */
struct config_stub {
	/** next stub in the linked list, or NULL at the end */
	struct config_stub* next;
	/** domain name (in text) of the stub apex domain */
	char* name;
	/** list of stub nameserver hosts (domain name) */
	struct config_strlist* hosts;
	/** list of stub nameserver addresses (IP address) */
	struct config_strlist* addrs;
	/** if stub-prime is set */
	int isprime;
	/** if forward-first is set (on failure, retry without the stub/forward) */
	int isfirst;
};
/**
 * Singly linked list of strings, used for config options that may be
 * given multiple times.
 */
struct config_strlist {
	/** next item in list, or NULL at the end */
	struct config_strlist* next;
	/** config option string */
	char* str;
};
/**
 * Linked list of string pairs, for config options that take two values
 * (such as access-control and local-zone).
 */
struct config_str2list {
	/** next item in list, or NULL at the end */
	struct config_str2list* next;
	/** first string of the pair */
	char* str;
	/** second string of the pair */
	char* str2;
};
/** List head for strlist processing; keeps a tail pointer, used for the
 * append operation (cfg_strlist_append). */
struct config_strlist_head {
	/** first in list of text items, or NULL if the list is empty */
	struct config_strlist* first;
	/** last in list of text items, for appending at the end */
	struct config_strlist* last;
};
/**
 * Create config file structure. Filled with default values.
 * @return: the new structure or NULL on memory error.
 */
struct config_file* config_create(void);
/**
 * Create config file structure for library use. Filled with default values.
 * @return: the new structure or NULL on memory error.
 */
struct config_file* config_create_forlib(void);
/**
 * Read the config file from the specified filename.
 * @param config: where options are stored into; must be freshly created.
 * @param filename: name of configfile. If NULL nothing is done.
 * @param chroot: if not NULL, the chroot dir currently in use (for include).
 * @return: false on error; in that case errno is set, ENOENT means
 *	the file was not found.
 */
int config_read(struct config_file* config, const char* filename,
	const char* chroot);
/**
 * Destroy the config file structure.
 * @param config: to delete.
 */
void config_delete(struct config_file* config);
/**
 * Apply config to global constants; this routine is called in single thread.
 * @param config: to apply. Side effect: global constants change.
 */
void config_apply(struct config_file* config);
/**
 * Set the given keyword to the given value.
 * @param config: where to store the config.
 * @param option: option name, including the ':' character.
 * @param value: value; this string is copied if needed, or parsed.
 *	The caller owns the value string.
 * @return 0 on error (malloc or syntax error).
 */
int config_set_option(struct config_file* config, const char* option,
	const char* value);
/**
 * Call print routine for the given option.
 * @param cfg: config.
 * @param opt: option name without trailing ':'.
 *	This is different from config_set_option.
 * @param func: print func, called as (str, arg) for every data element.
 * @param arg: user argument for print func.
 * @return false if the option name is not supported (syntax error).
 */
int config_get_option(struct config_file* cfg, const char* opt,
	void (*func)(char*,void*), void* arg);
/**
 * Get an option and return a strlist.
 * @param cfg: config file.
 * @param opt: option name.
 * @param list: list is returned here; malloced, caller must free it.
 * @return 0=OK, 1=syntax error, 2=malloc failed.
 */
int config_get_option_list(struct config_file* cfg, const char* opt,
	struct config_strlist** list);
/**
 * Get an option and collate the results into one string.
 * @param cfg: config file.
 * @param opt: option name.
 * @param str: string; malloced, caller must free it.
 * @return 0=OK, 1=syntax error, 2=malloc failed.
 */
int config_get_option_collate(struct config_file* cfg, const char* opt,
	char** str);
/**
 * function to print to a file; use as func with config_get_option.
 * @param line: text to print, a newline is appended.
 * @param arg: pass a FILE*, like stdout.
 */
void config_print_func(char* line, void* arg);
/**
 * function to collate the text strings into a strlist_head.
 * @param line: text to append.
 * @param arg: pass a struct config_strlist_head, zeroed at the start.
 */
void config_collate_func(char* line, void* arg);
/**
 * take a strlist and return one malloced string, separated with newlines.
 * @param list: first strlist element to collate; a NULL (empty) list
 *	returns "".
 * @return NULL on malloc failure, or if a malloc failure happened while
 *	building the strlist.
 */
char* config_collate_cat(struct config_strlist* list);
/**
 * Append text at end of list.
 * @param list: list head, zeroed at start.
 * @param item: new item, malloced by caller; if NULL the insertion fails.
 * @return true on success.
 */
int cfg_strlist_append(struct config_strlist_head* list, char* item);
/**
 * Insert string into strlist.
 * @param head: pointer to strlist head variable.
 * @param item: new item, malloced by caller. If NULL the insertion fails.
 * @return: true on success.
 */
int cfg_strlist_insert(struct config_strlist** head, char* item);
/**
 * Insert string pair into str2list.
 * @param head: pointer to str2list head variable.
 * @param item: new item, malloced by caller. If NULL the insertion fails.
 * @param i2: 2nd string, malloced by caller. If NULL the insertion fails.
 * @return: true on success.
 */
int cfg_str2list_insert(struct config_str2list** head, char* item, char* i2);
/**
 * Delete items in config string list (and the strings they hold).
 * @param list: list.
 */
void config_delstrlist(struct config_strlist* list);
/**
 * Delete items in config double string list.
 * @param list: list.
 */
void config_deldblstrlist(struct config_str2list* list);
/**
 * Delete items in config stub list.
 * @param list: list.
 */
void config_delstubs(struct config_stub* list);
/**
 * Convert a 14-digit string to a time value
 * (presumably YYYYMMDDHHMMSS as in RRSIG timestamps -- confirm at impl).
 * @param str: string of 14 digits.
 * @return time value or 0 for error.
 */
time_t cfg_convert_timeval(const char* str);
/**
 * Count number of values in the string.
 * format ::= (sp num)+ sp
 * num ::= [-](0-9)+
 * sp ::= (space|tab)*
 *
 * @param str: string.
 * @return: 0 on parse error or empty string, else
 *	the number of integer values in the string.
 */
int cfg_count_numbers(const char* str);
/**
 * Convert a 'nice' memory or file size into a bytecount.
 * From '100k' to 102400, and so on. Understands kKmMgG:
 * k=1024, m=1024*1024, g=1024*1024*1024.
 * @param str: string.
 * @param res: result is stored here, size in bytes.
 * @return: true if parsed correctly, or 0 on a parse error (and an error
 *	is logged).
 */
int cfg_parse_memsize(const char* str, size_t* res);
/**
 * Parse local-zone directive into two strings and register it in the config.
 * @param cfg: to put it in.
 * @param val: argument strings to local-zone, e.g. "example.com nodefault".
 * @return: false on failure.
 */
int cfg_parse_local_zone(struct config_file* cfg, const char* val);
/**
 * Mark "number" or "low-high" as available or not in the ports array.
 * @param str: string in input.
 * @param allow: give true if this range is permitted.
 * @param avail: the array from cfg.
 * @param num: size of the array (65536).
 * @return: true if parsed correctly, or 0 on a parse error (and an error
 *	is logged).
 */
int cfg_mark_ports(const char* str, int allow, int* avail, int num);
/**
 * Get a condensed list of ports returned. allocated.
 * @param cfg: config file.
 * @param avail: the available ports array is returned here (malloced).
 * @return: number of ports in array or 0 on error.
 */
int cfg_condense_ports(struct config_file* cfg, int** avail);
/**
 * Scan ports available.
 * @param avail: the array from cfg.
 * @param num: size of the array (65536).
 * @return the number of ports available for use.
 */
int cfg_scan_ports(int* avail, int num);
/**
 * Convert a filename to a full pathname in the original filesystem.
 * @param fname: the path name to convert.
 *	Must not be null or empty.
 * @param cfg: config struct for chroot and chdir (if set).
 * @param use_chdir: if false, only chroot is applied.
 * @return pointer to malloced buffer which is: [chroot][chdir]fname
 *	or NULL on malloc failure.
 */
char* fname_after_chroot(const char* fname, struct config_file* cfg,
	int use_chdir);
/**
 * Convert a ptr shorthand into a full reverse-notation PTR record.
 * @param str: input string, "IP name".
 * @return: malloced string "reversed-ip-name PTR name".
 */
char* cfg_ptr_reverse(char* str);
/**
 * Append text to the error info used for validation failures.
 * @param qstate: query state.
 * @param str: copied into the query region and appended.
 *	Failures to allocate are logged.
 */
void errinf(struct module_qstate* qstate, const char* str);
/**
 * Append text to error info: from 1.2.3.4
 * @param qstate: query state.
 * @param origin: sock list with origin of trouble.
 *	Every element is added.
 *	If NULL: nothing is added.
 *	If a zero-length element: 'from cache' is added.
 */
void errinf_origin(struct module_qstate* qstate, struct sock_list *origin);
/**
 * Append text to error info: for RRset name, type and class.
 * @param qstate: query state.
 * @param rr: rrset_key.
 */
void errinf_rrset(struct module_qstate* qstate, struct ub_packed_rrset_key *rr);
/**
 * Append text to error info: str dname.
 * @param qstate: query state.
 * @param str: explanation string.
 * @param dname: the dname (presumably in wireformat -- confirm at caller).
 */
void errinf_dname(struct module_qstate* qstate, const char* str,
	uint8_t* dname);
/**
 * Create the accumulated error info as one string.
 * @param qstate: query state.
 * @return string or NULL on malloc failure (already logged).
 *	This string is malloced and has to be freed by the caller.
 */
char* errinf_to_str(struct module_qstate* qstate);
/**
 * State kept by the config parser while options are being parsed.
 */
struct config_parser_state {
	/** name of the file being parsed */
	char* filename;
	/** line number in the file, starts at 1 */
	int line;
	/** number of errors encountered so far */
	int errors;
	/** the result of parsing is stored here. */
	struct config_file* cfg;
	/** the current chroot dir (or NULL if none) */
	const char* chroot;
};
/** global config parser object used during config parsing */
extern struct config_parser_state* cfg_parser;
/** initialize the lexer state (see util/configlexer.lex) */
void init_cfg_parse(void);
/** input stream for the lexer */
extern FILE* ub_c_in;
/** output stream for the lexer */
extern FILE* ub_c_out;
/** the yacc/bison generated parse function */
int ub_c_parse(void);
/** the flex generated lexer function */
int ub_c_lex(void);
/** lexer wrap function (end-of-input handling) */
int ub_c_wrap(void);
/** parsing helper: print an error with file and line number */
void ub_c_error(const char* msg);
/** parsing helper: printf-style error with file and line number */
void ub_c_error_msg(const char* fmt, ...) ATTR_FORMAT(printf, 1, 2);
#ifdef UB_ON_WINDOWS
/**
 * Obtain a registry string (if it exists).
 * @param key: key string.
 * @param name: name of value to fetch.
 * @return malloced string with the result, or NULL if it did not exist
 *	or an error (logged with log_err) was encountered.
 */
char* w_lookup_reg_str(const char* key, const char* name);
#endif /* UB_ON_WINDOWS */
#endif /* UTIL_CONFIG_FILE_H */

4023
external/unbound/util/configlexer.c vendored Normal file

File diff suppressed because it is too large Load Diff

444
external/unbound/util/configlexer.lex vendored Normal file
View File

@@ -0,0 +1,444 @@
%{
/*
* configlexer.lex - lexical analyzer for unbound config file
*
* Copyright (c) 2001-2006, NLnet Labs. All rights reserved
*
* See LICENSE for the license.
*
*/
#include <ctype.h>
#include <string.h>
#include <strings.h>
#ifdef HAVE_GLOB_H
# include <glob.h>
#endif
#include "util/config_file.h"
#include "util/configparser.h"
void ub_c_error(const char *message);
#if 0
#define LEXOUT(s) printf s /* used ONLY when debugging */
#else
#define LEXOUT(s)
#endif
/** avoid warning in about fwrite return value */
#define ECHO ub_c_error_msg("syntax error at text: %s", yytext)
/** A parser variable, this is a statement in the config file which is
* of the form variable: value1 value2 ... nargs is the number of values. */
#define YDVAR(nargs, var) \
num_args=(nargs); \
LEXOUT(("v(%s%d) ", yytext, num_args)); \
if(num_args > 0) { BEGIN(val); } \
return (var);
struct inc_state {
char* filename;
int line;
YY_BUFFER_STATE buffer;
struct inc_state* next;
};
static struct inc_state* config_include_stack = NULL;
static int inc_depth = 0;
static int inc_prev = 0;
static int num_args = 0;
/**
 * Reset the lexer include machinery to its initial state so a fresh
 * config-file parse can begin: empties the include stack and clears the
 * depth, previous-start-condition and argument counters.
 */
void init_cfg_parse(void)
{
	config_include_stack = NULL;
	inc_depth = 0;
	inc_prev = 0;
	num_args = 0;
}
/**
 * Switch the lexer to read from an include file.
 * Pushes the current file (name, line number, flex buffer) on the include
 * stack and makes the lexer read from the newly opened file. Errors
 * (depth limit, empty name, allocation failure, open failure) are reported
 * through ub_c_error_msg and the current file keeps being read.
 * @param filename: name of the file to include; a leading chroot prefix
 *	(cfg_parser->chroot) is stripped before opening.
 */
static void config_start_include(const char* filename)
{
	FILE *input;
	struct inc_state* s;
	char* nm;
	/* Count depth only for successful pushes. The previous code did
	 * inc_depth++ on every attempt including the error returns below,
	 * while config_end_include only decrements on a real pop, so
	 * repeated failing includes drifted the counter upward and could
	 * eventually trip this limit spuriously. */
	if(inc_depth+1 > 100000) {
		ub_c_error_msg("too many include files");
		return;
	}
	if(strlen(filename) == 0) {
		ub_c_error_msg("empty include file name");
		return;
	}
	s = (struct inc_state*)malloc(sizeof(*s));
	if(!s) {
		ub_c_error_msg("include %s: malloc failure", filename);
		return;
	}
	/* strip the chroot prefix so the path is usable after chroot(2) */
	if(cfg_parser->chroot && strncmp(filename, cfg_parser->chroot,
		strlen(cfg_parser->chroot)) == 0) {
		filename += strlen(cfg_parser->chroot);
	}
	nm = strdup(filename);
	if(!nm) {
		ub_c_error_msg("include %s: strdup failure", filename);
		free(s);
		return;
	}
	input = fopen(filename, "r");
	if(!input) {
		ub_c_error_msg("cannot open include file '%s': %s",
			filename, strerror(errno));
		free(s);
		free(nm);
		return;
	}
	LEXOUT(("switch_to_include_file(%s)\n", filename));
	inc_depth++;	/* balanced by --inc_depth in config_end_include */
	s->filename = cfg_parser->filename;
	s->line = cfg_parser->line;
	s->buffer = YY_CURRENT_BUFFER;
	s->next = config_include_stack;
	config_include_stack = s;
	cfg_parser->filename = nm;
	cfg_parser->line = 1;
	yy_switch_to_buffer(yy_create_buffer(input, YY_BUF_SIZE));
}
/**
 * Include a config file, expanding shell-style wildcards in the name
 * when glob(3) support is compiled in.
 * @param filename: include file name, possibly containing * ? [ { ~ .
 */
static void config_start_include_glob(const char* filename)
{
#ifdef HAVE_GLOB
	/* only run glob when the name actually contains a metacharacter */
	if(strchr(filename, '*') || strchr(filename, '?')
		|| strchr(filename, '[') || strchr(filename, '{')
		|| strchr(filename, '~')) {
		glob_t globbuf;
		size_t idx;
		int rc;
		int flags = 0;
#ifdef GLOB_ERR
		flags |= GLOB_ERR;
#endif
#ifdef GLOB_NOSORT
		flags |= GLOB_NOSORT;
#endif
#ifdef GLOB_BRACE
		flags |= GLOB_BRACE;
#endif
#ifdef GLOB_TILDE
		flags |= GLOB_TILDE;
#endif
		memset(&globbuf, 0, sizeof(globbuf));
		rc = glob(filename, flags, NULL, &globbuf);
		if(rc != 0) {
			globfree(&globbuf);
			if(rc == GLOB_NOMATCH)
				return; /* pattern matched nothing */
			/* glob itself failed: let the plain include
			 * routine report whatever is wrong */
			config_start_include(filename);
			return;
		}
		/* include every expanded pathname, in glob order */
		for(idx = 0; idx < (size_t)globbuf.gl_pathc; idx++)
			config_start_include(globbuf.gl_pathv[idx]);
		globfree(&globbuf);
		return;
	}
#endif /* HAVE_GLOB */
	/* no wildcards (or no glob support): include the name literally */
	config_start_include(filename);
}
/**
 * Pop the include stack when an included file is exhausted: restore the
 * previous file name, line number and flex buffer, and free the entry.
 * NOTE(review): inc_depth is decremented before the empty-stack check,
 * so a call with an empty stack would drive it negative; presumably the
 * lexer only calls this while an include is actually active -- confirm.
 */
static void config_end_include(void)
{
	struct inc_state* s = config_include_stack;
	--inc_depth;
	if(!s) return;
	/* the popped entry owns the current filename string */
	free(cfg_parser->filename);
	cfg_parser->filename = s->filename;
	cfg_parser->line = s->line;
	yy_delete_buffer(YY_CURRENT_BUFFER);
	yy_switch_to_buffer(s->buffer);
	config_include_stack = s->next;
	free(s);
}
#ifndef yy_set_bol /* compat definition, for flex 2.4.6 */
#define yy_set_bol(at_bol) \
{ \
if ( ! yy_current_buffer ) \
yy_current_buffer = yy_create_buffer( yyin, YY_BUF_SIZE ); \
yy_current_buffer->yy_ch_buf[0] = ((at_bol)?'\n':' '); \
}
#endif
%}
%option noinput
%option nounput
%{
#ifndef YY_NO_UNPUT
#define YY_NO_UNPUT 1
#endif
#ifndef YY_NO_INPUT
#define YY_NO_INPUT 1
#endif
%}
SPACE [ \t]
LETTER [a-zA-Z]
UNQUOTEDLETTER [^\'\"\n\r \t\\]|\\.
UNQUOTEDLETTER_NOCOLON [^\:\'\"\n\r \t\\]|\\.
NEWLINE [\r\n]
COMMENT \#
COLON \:
DQANY [^\"\n\r\\]|\\.
SQANY [^\'\n\r\\]|\\.
%x quotedstring singlequotedstr include include_quoted val
%%
<INITIAL,val>{SPACE}* {
LEXOUT(("SP ")); /* ignore */ }
<INITIAL,val>{SPACE}*{COMMENT}.* {
/* note that flex makes the longest match and '.' is any but not nl */
LEXOUT(("comment(%s) ", yytext)); /* ignore */ }
server{COLON} { YDVAR(0, VAR_SERVER) }
num-threads{COLON} { YDVAR(1, VAR_NUM_THREADS) }
verbosity{COLON} { YDVAR(1, VAR_VERBOSITY) }
port{COLON} { YDVAR(1, VAR_PORT) }
outgoing-range{COLON} { YDVAR(1, VAR_OUTGOING_RANGE) }
outgoing-port-permit{COLON} { YDVAR(1, VAR_OUTGOING_PORT_PERMIT) }
outgoing-port-avoid{COLON} { YDVAR(1, VAR_OUTGOING_PORT_AVOID) }
outgoing-num-tcp{COLON} { YDVAR(1, VAR_OUTGOING_NUM_TCP) }
incoming-num-tcp{COLON} { YDVAR(1, VAR_INCOMING_NUM_TCP) }
do-ip4{COLON} { YDVAR(1, VAR_DO_IP4) }
do-ip6{COLON} { YDVAR(1, VAR_DO_IP6) }
do-udp{COLON} { YDVAR(1, VAR_DO_UDP) }
do-tcp{COLON} { YDVAR(1, VAR_DO_TCP) }
tcp-upstream{COLON} { YDVAR(1, VAR_TCP_UPSTREAM) }
ssl-upstream{COLON} { YDVAR(1, VAR_SSL_UPSTREAM) }
ssl-service-key{COLON} { YDVAR(1, VAR_SSL_SERVICE_KEY) }
ssl-service-pem{COLON} { YDVAR(1, VAR_SSL_SERVICE_PEM) }
ssl-port{COLON} { YDVAR(1, VAR_SSL_PORT) }
do-daemonize{COLON} { YDVAR(1, VAR_DO_DAEMONIZE) }
interface{COLON} { YDVAR(1, VAR_INTERFACE) }
ip-address{COLON} { YDVAR(1, VAR_INTERFACE) }
outgoing-interface{COLON} { YDVAR(1, VAR_OUTGOING_INTERFACE) }
interface-automatic{COLON} { YDVAR(1, VAR_INTERFACE_AUTOMATIC) }
so-rcvbuf{COLON} { YDVAR(1, VAR_SO_RCVBUF) }
so-sndbuf{COLON} { YDVAR(1, VAR_SO_SNDBUF) }
so-reuseport{COLON} { YDVAR(1, VAR_SO_REUSEPORT) }
chroot{COLON} { YDVAR(1, VAR_CHROOT) }
username{COLON} { YDVAR(1, VAR_USERNAME) }
directory{COLON} { YDVAR(1, VAR_DIRECTORY) }
logfile{COLON} { YDVAR(1, VAR_LOGFILE) }
pidfile{COLON} { YDVAR(1, VAR_PIDFILE) }
root-hints{COLON} { YDVAR(1, VAR_ROOT_HINTS) }
edns-buffer-size{COLON} { YDVAR(1, VAR_EDNS_BUFFER_SIZE) }
msg-buffer-size{COLON} { YDVAR(1, VAR_MSG_BUFFER_SIZE) }
msg-cache-size{COLON} { YDVAR(1, VAR_MSG_CACHE_SIZE) }
msg-cache-slabs{COLON} { YDVAR(1, VAR_MSG_CACHE_SLABS) }
rrset-cache-size{COLON} { YDVAR(1, VAR_RRSET_CACHE_SIZE) }
rrset-cache-slabs{COLON} { YDVAR(1, VAR_RRSET_CACHE_SLABS) }
cache-max-ttl{COLON} { YDVAR(1, VAR_CACHE_MAX_TTL) }
cache-min-ttl{COLON} { YDVAR(1, VAR_CACHE_MIN_TTL) }
infra-host-ttl{COLON} { YDVAR(1, VAR_INFRA_HOST_TTL) }
infra-lame-ttl{COLON} { YDVAR(1, VAR_INFRA_LAME_TTL) }
infra-cache-slabs{COLON} { YDVAR(1, VAR_INFRA_CACHE_SLABS) }
infra-cache-numhosts{COLON} { YDVAR(1, VAR_INFRA_CACHE_NUMHOSTS) }
infra-cache-lame-size{COLON} { YDVAR(1, VAR_INFRA_CACHE_LAME_SIZE) }
num-queries-per-thread{COLON} { YDVAR(1, VAR_NUM_QUERIES_PER_THREAD) }
jostle-timeout{COLON} { YDVAR(1, VAR_JOSTLE_TIMEOUT) }
delay-close{COLON} { YDVAR(1, VAR_DELAY_CLOSE) }
target-fetch-policy{COLON} { YDVAR(1, VAR_TARGET_FETCH_POLICY) }
harden-short-bufsize{COLON} { YDVAR(1, VAR_HARDEN_SHORT_BUFSIZE) }
harden-large-queries{COLON} { YDVAR(1, VAR_HARDEN_LARGE_QUERIES) }
harden-glue{COLON} { YDVAR(1, VAR_HARDEN_GLUE) }
harden-dnssec-stripped{COLON} { YDVAR(1, VAR_HARDEN_DNSSEC_STRIPPED) }
harden-below-nxdomain{COLON} { YDVAR(1, VAR_HARDEN_BELOW_NXDOMAIN) }
harden-referral-path{COLON} { YDVAR(1, VAR_HARDEN_REFERRAL_PATH) }
use-caps-for-id{COLON} { YDVAR(1, VAR_USE_CAPS_FOR_ID) }
unwanted-reply-threshold{COLON} { YDVAR(1, VAR_UNWANTED_REPLY_THRESHOLD) }
private-address{COLON} { YDVAR(1, VAR_PRIVATE_ADDRESS) }
private-domain{COLON} { YDVAR(1, VAR_PRIVATE_DOMAIN) }
prefetch-key{COLON} { YDVAR(1, VAR_PREFETCH_KEY) }
prefetch{COLON} { YDVAR(1, VAR_PREFETCH) }
stub-zone{COLON} { YDVAR(0, VAR_STUB_ZONE) }
name{COLON} { YDVAR(1, VAR_NAME) }
stub-addr{COLON} { YDVAR(1, VAR_STUB_ADDR) }
stub-host{COLON} { YDVAR(1, VAR_STUB_HOST) }
stub-prime{COLON} { YDVAR(1, VAR_STUB_PRIME) }
stub-first{COLON} { YDVAR(1, VAR_STUB_FIRST) }
forward-zone{COLON} { YDVAR(0, VAR_FORWARD_ZONE) }
forward-addr{COLON} { YDVAR(1, VAR_FORWARD_ADDR) }
forward-host{COLON} { YDVAR(1, VAR_FORWARD_HOST) }
forward-first{COLON} { YDVAR(1, VAR_FORWARD_FIRST) }
do-not-query-address{COLON} { YDVAR(1, VAR_DO_NOT_QUERY_ADDRESS) }
do-not-query-localhost{COLON} { YDVAR(1, VAR_DO_NOT_QUERY_LOCALHOST) }
access-control{COLON} { YDVAR(2, VAR_ACCESS_CONTROL) }
hide-identity{COLON} { YDVAR(1, VAR_HIDE_IDENTITY) }
hide-version{COLON} { YDVAR(1, VAR_HIDE_VERSION) }
identity{COLON} { YDVAR(1, VAR_IDENTITY) }
version{COLON} { YDVAR(1, VAR_VERSION) }
module-config{COLON} { YDVAR(1, VAR_MODULE_CONF) }
dlv-anchor{COLON} { YDVAR(1, VAR_DLV_ANCHOR) }
dlv-anchor-file{COLON} { YDVAR(1, VAR_DLV_ANCHOR_FILE) }
trust-anchor-file{COLON} { YDVAR(1, VAR_TRUST_ANCHOR_FILE) }
auto-trust-anchor-file{COLON} { YDVAR(1, VAR_AUTO_TRUST_ANCHOR_FILE) }
trusted-keys-file{COLON} { YDVAR(1, VAR_TRUSTED_KEYS_FILE) }
trust-anchor{COLON} { YDVAR(1, VAR_TRUST_ANCHOR) }
val-override-date{COLON} { YDVAR(1, VAR_VAL_OVERRIDE_DATE) }
val-sig-skew-min{COLON} { YDVAR(1, VAR_VAL_SIG_SKEW_MIN) }
val-sig-skew-max{COLON} { YDVAR(1, VAR_VAL_SIG_SKEW_MAX) }
val-bogus-ttl{COLON} { YDVAR(1, VAR_BOGUS_TTL) }
val-clean-additional{COLON} { YDVAR(1, VAR_VAL_CLEAN_ADDITIONAL) }
val-permissive-mode{COLON} { YDVAR(1, VAR_VAL_PERMISSIVE_MODE) }
ignore-cd-flag{COLON} { YDVAR(1, VAR_IGNORE_CD_FLAG) }
val-log-level{COLON} { YDVAR(1, VAR_VAL_LOG_LEVEL) }
key-cache-size{COLON} { YDVAR(1, VAR_KEY_CACHE_SIZE) }
key-cache-slabs{COLON} { YDVAR(1, VAR_KEY_CACHE_SLABS) }
neg-cache-size{COLON} { YDVAR(1, VAR_NEG_CACHE_SIZE) }
val-nsec3-keysize-iterations{COLON} {
YDVAR(1, VAR_VAL_NSEC3_KEYSIZE_ITERATIONS) }
add-holddown{COLON} { YDVAR(1, VAR_ADD_HOLDDOWN) }
del-holddown{COLON} { YDVAR(1, VAR_DEL_HOLDDOWN) }
keep-missing{COLON} { YDVAR(1, VAR_KEEP_MISSING) }
use-syslog{COLON} { YDVAR(1, VAR_USE_SYSLOG) }
log-time-ascii{COLON} { YDVAR(1, VAR_LOG_TIME_ASCII) }
log-queries{COLON} { YDVAR(1, VAR_LOG_QUERIES) }
local-zone{COLON} { YDVAR(2, VAR_LOCAL_ZONE) }
local-data{COLON} { YDVAR(1, VAR_LOCAL_DATA) }
local-data-ptr{COLON} { YDVAR(1, VAR_LOCAL_DATA_PTR) }
unblock-lan-zones{COLON} { YDVAR(1, VAR_UNBLOCK_LAN_ZONES) }
statistics-interval{COLON} { YDVAR(1, VAR_STATISTICS_INTERVAL) }
statistics-cumulative{COLON} { YDVAR(1, VAR_STATISTICS_CUMULATIVE) }
extended-statistics{COLON} { YDVAR(1, VAR_EXTENDED_STATISTICS) }
remote-control{COLON} { YDVAR(0, VAR_REMOTE_CONTROL) }
control-enable{COLON} { YDVAR(1, VAR_CONTROL_ENABLE) }
control-interface{COLON} { YDVAR(1, VAR_CONTROL_INTERFACE) }
control-port{COLON} { YDVAR(1, VAR_CONTROL_PORT) }
server-key-file{COLON} { YDVAR(1, VAR_SERVER_KEY_FILE) }
server-cert-file{COLON} { YDVAR(1, VAR_SERVER_CERT_FILE) }
control-key-file{COLON} { YDVAR(1, VAR_CONTROL_KEY_FILE) }
control-cert-file{COLON} { YDVAR(1, VAR_CONTROL_CERT_FILE) }
python-script{COLON} { YDVAR(1, VAR_PYTHON_SCRIPT) }
python{COLON} { YDVAR(0, VAR_PYTHON) }
domain-insecure{COLON} { YDVAR(1, VAR_DOMAIN_INSECURE) }
minimal-responses{COLON} { YDVAR(1, VAR_MINIMAL_RESPONSES) }
rrset-roundrobin{COLON} { YDVAR(1, VAR_RRSET_ROUNDROBIN) }
max-udp-size{COLON} { YDVAR(1, VAR_MAX_UDP_SIZE) }
dns64-prefix{COLON} { YDVAR(1, VAR_DNS64_PREFIX) }
dns64-synthall{COLON} { YDVAR(1, VAR_DNS64_SYNTHALL) }
dnstap{COLON} { YDVAR(0, VAR_DNSTAP) }
dnstap-enable{COLON} { YDVAR(1, VAR_DNSTAP_ENABLE) }
dnstap-socket-path{COLON} { YDVAR(1, VAR_DNSTAP_SOCKET_PATH) }
dnstap-send-identity{COLON} { YDVAR(1, VAR_DNSTAP_SEND_IDENTITY) }
dnstap-send-version{COLON} { YDVAR(1, VAR_DNSTAP_SEND_VERSION) }
dnstap-identity{COLON} { YDVAR(1, VAR_DNSTAP_IDENTITY) }
dnstap-version{COLON} { YDVAR(1, VAR_DNSTAP_VERSION) }
dnstap-log-resolver-query-messages{COLON} {
YDVAR(1, VAR_DNSTAP_LOG_RESOLVER_QUERY_MESSAGES) }
dnstap-log-resolver-response-messages{COLON} {
YDVAR(1, VAR_DNSTAP_LOG_RESOLVER_RESPONSE_MESSAGES) }
dnstap-log-client-query-messages{COLON} {
YDVAR(1, VAR_DNSTAP_LOG_CLIENT_QUERY_MESSAGES) }
dnstap-log-client-response-messages{COLON} {
YDVAR(1, VAR_DNSTAP_LOG_CLIENT_RESPONSE_MESSAGES) }
dnstap-log-forwarder-query-messages{COLON} {
YDVAR(1, VAR_DNSTAP_LOG_FORWARDER_QUERY_MESSAGES) }
dnstap-log-forwarder-response-messages{COLON} {
YDVAR(1, VAR_DNSTAP_LOG_FORWARDER_RESPONSE_MESSAGES) }
<INITIAL,val>{NEWLINE} { LEXOUT(("NL\n")); cfg_parser->line++; }
/* Quoted strings. Strip leading and ending quotes */
<val>\" { BEGIN(quotedstring); LEXOUT(("QS ")); }
<quotedstring><<EOF>> {
yyerror("EOF inside quoted string");
if(--num_args == 0) { BEGIN(INITIAL); }
else { BEGIN(val); }
}
<quotedstring>{DQANY}* { LEXOUT(("STR(%s) ", yytext)); yymore(); }
<quotedstring>{NEWLINE} { yyerror("newline inside quoted string, no end \"");
cfg_parser->line++; BEGIN(INITIAL); }
<quotedstring>\" {
LEXOUT(("QE "));
if(--num_args == 0) { BEGIN(INITIAL); }
else { BEGIN(val); }
yytext[yyleng - 1] = '\0';
yylval.str = strdup(yytext);
if(!yylval.str)
yyerror("out of memory");
return STRING_ARG;
}
/* Single Quoted strings. Strip leading and ending quotes */
<val>\' { BEGIN(singlequotedstr); LEXOUT(("SQS ")); }
<singlequotedstr><<EOF>> {
yyerror("EOF inside quoted string");
if(--num_args == 0) { BEGIN(INITIAL); }
else { BEGIN(val); }
}
<singlequotedstr>{SQANY}* { LEXOUT(("STR(%s) ", yytext)); yymore(); }
<singlequotedstr>{NEWLINE} { yyerror("newline inside quoted string, no end '");
cfg_parser->line++; BEGIN(INITIAL); }
<singlequotedstr>\' {
LEXOUT(("SQE "));
if(--num_args == 0) { BEGIN(INITIAL); }
else { BEGIN(val); }
yytext[yyleng - 1] = '\0';
yylval.str = strdup(yytext);
if(!yylval.str)
yyerror("out of memory");
return STRING_ARG;
}
/* include: directive */
<INITIAL,val>include{COLON} {
LEXOUT(("v(%s) ", yytext)); inc_prev = YYSTATE; BEGIN(include); }
<include><<EOF>> {
yyerror("EOF inside include directive");
BEGIN(inc_prev);
}
<include>{SPACE}* { LEXOUT(("ISP ")); /* ignore */ }
<include>{NEWLINE} { LEXOUT(("NL\n")); cfg_parser->line++;}
<include>\" { LEXOUT(("IQS ")); BEGIN(include_quoted); }
<include>{UNQUOTEDLETTER}* {
LEXOUT(("Iunquotedstr(%s) ", yytext));
config_start_include_glob(yytext);
BEGIN(inc_prev);
}
<include_quoted><<EOF>> {
yyerror("EOF inside quoted string");
BEGIN(inc_prev);
}
<include_quoted>{DQANY}* { LEXOUT(("ISTR(%s) ", yytext)); yymore(); }
<include_quoted>{NEWLINE} { yyerror("newline before \" in include name");
cfg_parser->line++; BEGIN(inc_prev); }
<include_quoted>\" {
LEXOUT(("IQE "));
yytext[yyleng - 1] = '\0';
config_start_include_glob(yytext);
BEGIN(inc_prev);
}
<INITIAL,val><<EOF>> {
LEXOUT(("LEXEOF "));
yy_set_bol(1); /* Set beginning of line, so "^" rules match. */
if (!config_include_stack) {
yyterminate();
} else {
fclose(yyin);
config_end_include();
}
}
<val>{UNQUOTEDLETTER}* { LEXOUT(("unquotedstr(%s) ", yytext));
	if(--num_args == 0) { BEGIN(INITIAL); }
	/* check strdup failure like the quoted-string rules do, instead
	 * of handing the parser a NULL yylval.str */
	yylval.str = strdup(yytext);
	if(!yylval.str)
		yyerror("out of memory");
	return STRING_ARG; }
{UNQUOTEDLETTER_NOCOLON}* {
ub_c_error_msg("unknown keyword '%s'", yytext);
}
<*>. {
ub_c_error_msg("stray '%s'", yytext);
}
%%

4109
external/unbound/util/configparser.c vendored Normal file

File diff suppressed because it is too large Load Diff

380
external/unbound/util/configparser.h vendored Normal file
View File

@@ -0,0 +1,380 @@
/* A Bison parser, made by GNU Bison 2.6.1. */
/* Bison interface for Yacc-like parsers in C
Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
#ifndef YY_UTIL_CONFIGPARSER_H
# define YY_UTIL_CONFIGPARSER_H
/* Enabling traces. */
#ifndef YYDEBUG
# define YYDEBUG 0
#endif
#if YYDEBUG
extern int yydebug;
#endif
/* Tokens. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
/* Put the tokens into the symbol table, so that GDB and other debuggers
know about them. */
enum yytokentype {
SPACE = 258,
LETTER = 259,
NEWLINE = 260,
COMMENT = 261,
COLON = 262,
ANY = 263,
ZONESTR = 264,
STRING_ARG = 265,
VAR_SERVER = 266,
VAR_VERBOSITY = 267,
VAR_NUM_THREADS = 268,
VAR_PORT = 269,
VAR_OUTGOING_RANGE = 270,
VAR_INTERFACE = 271,
VAR_DO_IP4 = 272,
VAR_DO_IP6 = 273,
VAR_DO_UDP = 274,
VAR_DO_TCP = 275,
VAR_CHROOT = 276,
VAR_USERNAME = 277,
VAR_DIRECTORY = 278,
VAR_LOGFILE = 279,
VAR_PIDFILE = 280,
VAR_MSG_CACHE_SIZE = 281,
VAR_MSG_CACHE_SLABS = 282,
VAR_NUM_QUERIES_PER_THREAD = 283,
VAR_RRSET_CACHE_SIZE = 284,
VAR_RRSET_CACHE_SLABS = 285,
VAR_OUTGOING_NUM_TCP = 286,
VAR_INFRA_HOST_TTL = 287,
VAR_INFRA_LAME_TTL = 288,
VAR_INFRA_CACHE_SLABS = 289,
VAR_INFRA_CACHE_NUMHOSTS = 290,
VAR_INFRA_CACHE_LAME_SIZE = 291,
VAR_NAME = 292,
VAR_STUB_ZONE = 293,
VAR_STUB_HOST = 294,
VAR_STUB_ADDR = 295,
VAR_TARGET_FETCH_POLICY = 296,
VAR_HARDEN_SHORT_BUFSIZE = 297,
VAR_HARDEN_LARGE_QUERIES = 298,
VAR_FORWARD_ZONE = 299,
VAR_FORWARD_HOST = 300,
VAR_FORWARD_ADDR = 301,
VAR_DO_NOT_QUERY_ADDRESS = 302,
VAR_HIDE_IDENTITY = 303,
VAR_HIDE_VERSION = 304,
VAR_IDENTITY = 305,
VAR_VERSION = 306,
VAR_HARDEN_GLUE = 307,
VAR_MODULE_CONF = 308,
VAR_TRUST_ANCHOR_FILE = 309,
VAR_TRUST_ANCHOR = 310,
VAR_VAL_OVERRIDE_DATE = 311,
VAR_BOGUS_TTL = 312,
VAR_VAL_CLEAN_ADDITIONAL = 313,
VAR_VAL_PERMISSIVE_MODE = 314,
VAR_INCOMING_NUM_TCP = 315,
VAR_MSG_BUFFER_SIZE = 316,
VAR_KEY_CACHE_SIZE = 317,
VAR_KEY_CACHE_SLABS = 318,
VAR_TRUSTED_KEYS_FILE = 319,
VAR_VAL_NSEC3_KEYSIZE_ITERATIONS = 320,
VAR_USE_SYSLOG = 321,
VAR_OUTGOING_INTERFACE = 322,
VAR_ROOT_HINTS = 323,
VAR_DO_NOT_QUERY_LOCALHOST = 324,
VAR_CACHE_MAX_TTL = 325,
VAR_HARDEN_DNSSEC_STRIPPED = 326,
VAR_ACCESS_CONTROL = 327,
VAR_LOCAL_ZONE = 328,
VAR_LOCAL_DATA = 329,
VAR_INTERFACE_AUTOMATIC = 330,
VAR_STATISTICS_INTERVAL = 331,
VAR_DO_DAEMONIZE = 332,
VAR_USE_CAPS_FOR_ID = 333,
VAR_STATISTICS_CUMULATIVE = 334,
VAR_OUTGOING_PORT_PERMIT = 335,
VAR_OUTGOING_PORT_AVOID = 336,
VAR_DLV_ANCHOR_FILE = 337,
VAR_DLV_ANCHOR = 338,
VAR_NEG_CACHE_SIZE = 339,
VAR_HARDEN_REFERRAL_PATH = 340,
VAR_PRIVATE_ADDRESS = 341,
VAR_PRIVATE_DOMAIN = 342,
VAR_REMOTE_CONTROL = 343,
VAR_CONTROL_ENABLE = 344,
VAR_CONTROL_INTERFACE = 345,
VAR_CONTROL_PORT = 346,
VAR_SERVER_KEY_FILE = 347,
VAR_SERVER_CERT_FILE = 348,
VAR_CONTROL_KEY_FILE = 349,
VAR_CONTROL_CERT_FILE = 350,
VAR_EXTENDED_STATISTICS = 351,
VAR_LOCAL_DATA_PTR = 352,
VAR_JOSTLE_TIMEOUT = 353,
VAR_STUB_PRIME = 354,
VAR_UNWANTED_REPLY_THRESHOLD = 355,
VAR_LOG_TIME_ASCII = 356,
VAR_DOMAIN_INSECURE = 357,
VAR_PYTHON = 358,
VAR_PYTHON_SCRIPT = 359,
VAR_VAL_SIG_SKEW_MIN = 360,
VAR_VAL_SIG_SKEW_MAX = 361,
VAR_CACHE_MIN_TTL = 362,
VAR_VAL_LOG_LEVEL = 363,
VAR_AUTO_TRUST_ANCHOR_FILE = 364,
VAR_KEEP_MISSING = 365,
VAR_ADD_HOLDDOWN = 366,
VAR_DEL_HOLDDOWN = 367,
VAR_SO_RCVBUF = 368,
VAR_EDNS_BUFFER_SIZE = 369,
VAR_PREFETCH = 370,
VAR_PREFETCH_KEY = 371,
VAR_SO_SNDBUF = 372,
VAR_SO_REUSEPORT = 373,
VAR_HARDEN_BELOW_NXDOMAIN = 374,
VAR_IGNORE_CD_FLAG = 375,
VAR_LOG_QUERIES = 376,
VAR_TCP_UPSTREAM = 377,
VAR_SSL_UPSTREAM = 378,
VAR_SSL_SERVICE_KEY = 379,
VAR_SSL_SERVICE_PEM = 380,
VAR_SSL_PORT = 381,
VAR_FORWARD_FIRST = 382,
VAR_STUB_FIRST = 383,
VAR_MINIMAL_RESPONSES = 384,
VAR_RRSET_ROUNDROBIN = 385,
VAR_MAX_UDP_SIZE = 386,
VAR_DELAY_CLOSE = 387,
VAR_UNBLOCK_LAN_ZONES = 388,
VAR_DNS64_PREFIX = 389,
VAR_DNS64_SYNTHALL = 390,
VAR_DNSTAP = 391,
VAR_DNSTAP_ENABLE = 392,
VAR_DNSTAP_SOCKET_PATH = 393,
VAR_DNSTAP_SEND_IDENTITY = 394,
VAR_DNSTAP_SEND_VERSION = 395,
VAR_DNSTAP_IDENTITY = 396,
VAR_DNSTAP_VERSION = 397,
VAR_DNSTAP_LOG_RESOLVER_QUERY_MESSAGES = 398,
VAR_DNSTAP_LOG_RESOLVER_RESPONSE_MESSAGES = 399,
VAR_DNSTAP_LOG_CLIENT_QUERY_MESSAGES = 400,
VAR_DNSTAP_LOG_CLIENT_RESPONSE_MESSAGES = 401,
VAR_DNSTAP_LOG_FORWARDER_QUERY_MESSAGES = 402,
VAR_DNSTAP_LOG_FORWARDER_RESPONSE_MESSAGES = 403
};
#endif
/* Tokens. */
#define SPACE 258
#define LETTER 259
#define NEWLINE 260
#define COMMENT 261
#define COLON 262
#define ANY 263
#define ZONESTR 264
#define STRING_ARG 265
#define VAR_SERVER 266
#define VAR_VERBOSITY 267
#define VAR_NUM_THREADS 268
#define VAR_PORT 269
#define VAR_OUTGOING_RANGE 270
#define VAR_INTERFACE 271
#define VAR_DO_IP4 272
#define VAR_DO_IP6 273
#define VAR_DO_UDP 274
#define VAR_DO_TCP 275
#define VAR_CHROOT 276
#define VAR_USERNAME 277
#define VAR_DIRECTORY 278
#define VAR_LOGFILE 279
#define VAR_PIDFILE 280
#define VAR_MSG_CACHE_SIZE 281
#define VAR_MSG_CACHE_SLABS 282
#define VAR_NUM_QUERIES_PER_THREAD 283
#define VAR_RRSET_CACHE_SIZE 284
#define VAR_RRSET_CACHE_SLABS 285
#define VAR_OUTGOING_NUM_TCP 286
#define VAR_INFRA_HOST_TTL 287
#define VAR_INFRA_LAME_TTL 288
#define VAR_INFRA_CACHE_SLABS 289
#define VAR_INFRA_CACHE_NUMHOSTS 290
#define VAR_INFRA_CACHE_LAME_SIZE 291
#define VAR_NAME 292
#define VAR_STUB_ZONE 293
#define VAR_STUB_HOST 294
#define VAR_STUB_ADDR 295
#define VAR_TARGET_FETCH_POLICY 296
#define VAR_HARDEN_SHORT_BUFSIZE 297
#define VAR_HARDEN_LARGE_QUERIES 298
#define VAR_FORWARD_ZONE 299
#define VAR_FORWARD_HOST 300
#define VAR_FORWARD_ADDR 301
#define VAR_DO_NOT_QUERY_ADDRESS 302
#define VAR_HIDE_IDENTITY 303
#define VAR_HIDE_VERSION 304
#define VAR_IDENTITY 305
#define VAR_VERSION 306
#define VAR_HARDEN_GLUE 307
#define VAR_MODULE_CONF 308
#define VAR_TRUST_ANCHOR_FILE 309
#define VAR_TRUST_ANCHOR 310
#define VAR_VAL_OVERRIDE_DATE 311
#define VAR_BOGUS_TTL 312
#define VAR_VAL_CLEAN_ADDITIONAL 313
#define VAR_VAL_PERMISSIVE_MODE 314
#define VAR_INCOMING_NUM_TCP 315
#define VAR_MSG_BUFFER_SIZE 316
#define VAR_KEY_CACHE_SIZE 317
#define VAR_KEY_CACHE_SLABS 318
#define VAR_TRUSTED_KEYS_FILE 319
#define VAR_VAL_NSEC3_KEYSIZE_ITERATIONS 320
#define VAR_USE_SYSLOG 321
#define VAR_OUTGOING_INTERFACE 322
#define VAR_ROOT_HINTS 323
#define VAR_DO_NOT_QUERY_LOCALHOST 324
#define VAR_CACHE_MAX_TTL 325
#define VAR_HARDEN_DNSSEC_STRIPPED 326
#define VAR_ACCESS_CONTROL 327
#define VAR_LOCAL_ZONE 328
#define VAR_LOCAL_DATA 329
#define VAR_INTERFACE_AUTOMATIC 330
#define VAR_STATISTICS_INTERVAL 331
#define VAR_DO_DAEMONIZE 332
#define VAR_USE_CAPS_FOR_ID 333
#define VAR_STATISTICS_CUMULATIVE 334
#define VAR_OUTGOING_PORT_PERMIT 335
#define VAR_OUTGOING_PORT_AVOID 336
#define VAR_DLV_ANCHOR_FILE 337
#define VAR_DLV_ANCHOR 338
#define VAR_NEG_CACHE_SIZE 339
#define VAR_HARDEN_REFERRAL_PATH 340
#define VAR_PRIVATE_ADDRESS 341
#define VAR_PRIVATE_DOMAIN 342
#define VAR_REMOTE_CONTROL 343
#define VAR_CONTROL_ENABLE 344
#define VAR_CONTROL_INTERFACE 345
#define VAR_CONTROL_PORT 346
#define VAR_SERVER_KEY_FILE 347
#define VAR_SERVER_CERT_FILE 348
#define VAR_CONTROL_KEY_FILE 349
#define VAR_CONTROL_CERT_FILE 350
#define VAR_EXTENDED_STATISTICS 351
#define VAR_LOCAL_DATA_PTR 352
#define VAR_JOSTLE_TIMEOUT 353
#define VAR_STUB_PRIME 354
#define VAR_UNWANTED_REPLY_THRESHOLD 355
#define VAR_LOG_TIME_ASCII 356
#define VAR_DOMAIN_INSECURE 357
#define VAR_PYTHON 358
#define VAR_PYTHON_SCRIPT 359
#define VAR_VAL_SIG_SKEW_MIN 360
#define VAR_VAL_SIG_SKEW_MAX 361
#define VAR_CACHE_MIN_TTL 362
#define VAR_VAL_LOG_LEVEL 363
#define VAR_AUTO_TRUST_ANCHOR_FILE 364
#define VAR_KEEP_MISSING 365
#define VAR_ADD_HOLDDOWN 366
#define VAR_DEL_HOLDDOWN 367
#define VAR_SO_RCVBUF 368
#define VAR_EDNS_BUFFER_SIZE 369
#define VAR_PREFETCH 370
#define VAR_PREFETCH_KEY 371
#define VAR_SO_SNDBUF 372
#define VAR_SO_REUSEPORT 373
#define VAR_HARDEN_BELOW_NXDOMAIN 374
#define VAR_IGNORE_CD_FLAG 375
#define VAR_LOG_QUERIES 376
#define VAR_TCP_UPSTREAM 377
#define VAR_SSL_UPSTREAM 378
#define VAR_SSL_SERVICE_KEY 379
#define VAR_SSL_SERVICE_PEM 380
#define VAR_SSL_PORT 381
#define VAR_FORWARD_FIRST 382
#define VAR_STUB_FIRST 383
#define VAR_MINIMAL_RESPONSES 384
#define VAR_RRSET_ROUNDROBIN 385
#define VAR_MAX_UDP_SIZE 386
#define VAR_DELAY_CLOSE 387
#define VAR_UNBLOCK_LAN_ZONES 388
#define VAR_DNS64_PREFIX 389
#define VAR_DNS64_SYNTHALL 390
#define VAR_DNSTAP 391
#define VAR_DNSTAP_ENABLE 392
#define VAR_DNSTAP_SOCKET_PATH 393
#define VAR_DNSTAP_SEND_IDENTITY 394
#define VAR_DNSTAP_SEND_VERSION 395
#define VAR_DNSTAP_IDENTITY 396
#define VAR_DNSTAP_VERSION 397
#define VAR_DNSTAP_LOG_RESOLVER_QUERY_MESSAGES 398
#define VAR_DNSTAP_LOG_RESOLVER_RESPONSE_MESSAGES 399
#define VAR_DNSTAP_LOG_CLIENT_QUERY_MESSAGES 400
#define VAR_DNSTAP_LOG_CLIENT_RESPONSE_MESSAGES 401
#define VAR_DNSTAP_LOG_FORWARDER_QUERY_MESSAGES 402
#define VAR_DNSTAP_LOG_FORWARDER_RESPONSE_MESSAGES 403
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE
{
/* Line 2049 of yacc.c */
#line 64 "./util/configparser.y"
char* str;
/* Line 2049 of yacc.c */
#line 358 "util/configparser.h"
} YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
#endif
extern YYSTYPE yylval;
#ifdef YYPARSE_PARAM
#if defined __STDC__ || defined __cplusplus
int yyparse (void *YYPARSE_PARAM);
#else
int yyparse ();
#endif
#else /* ! YYPARSE_PARAM */
#if defined __STDC__ || defined __cplusplus
int yyparse (void);
#else
int yyparse ();
#endif
#endif /* ! YYPARSE_PARAM */
#endif /* !YY_UTIL_CONFIGPARSER_H */

1462
external/unbound/util/configparser.y vendored Normal file

File diff suppressed because it is too large Load Diff

88
external/unbound/util/configyyrename.h vendored Normal file
View File

@@ -0,0 +1,88 @@
/*
* configyyrename.h -- renames for config file yy values to avoid conflicts.
*
* Copyright (c) 2001-2006, NLnet Labs. All rights reserved.
*
* See LICENSE for the license.
*
*/
#ifndef UTIL_CONFIGYYRENAME_H
#define UTIL_CONFIGYYRENAME_H
/* defines to change symbols so that no yacc/lex symbols clash */
#define yymaxdepth ub_c_maxdepth
#define yyparse ub_c_parse
#define yylex ub_c_lex
#define yyerror ub_c_error
#define yylval ub_c_lval
#define yychar ub_c_char
#define yydebug ub_c_debug
#define yypact ub_c_pact
#define yyr1 ub_c_r1
#define yyr2 ub_c_r2
#define yydef ub_c_def
#define yychk ub_c_chk
#define yypgo ub_c_pgo
#define yyact ub_c_act
#define yyexca ub_c_exca
#define yyerrflag ub_c_errflag
#define yynerrs ub_c_nerrs
#define yyps ub_c_ps
#define yypv ub_c_pv
#define yys ub_c_s
#define yy_yys ub_c_yys
#define yystate ub_c_state
#define yytmp ub_c_tmp
#define yyv ub_c_v
#define yy_yyv ub_c_yyv
#define yyval ub_c_val
#define yylloc ub_c_lloc
#define yyreds ub_c_reds
#define yytoks ub_c_toks
#define yylhs ub_c_yylhs
#define yylen ub_c_yylen
#define yydefred ub_c_yydefred
#define yydgoto ub_c_yydgoto
#define yysindex ub_c_yysindex
#define yyrindex ub_c_yyrindex
#define yygindex ub_c_yygindex
#define yytable ub_c_yytable
#define yycheck ub_c_yycheck
#define yyname ub_c_yyname
#define yyrule ub_c_yyrule
#define yyin ub_c_in
#define yyout ub_c_out
#define yywrap ub_c_wrap
#define yy_load_buffer_state ub_c_load_buffer_state
#define yy_switch_to_buffer ub_c_switch_to_buffer
#define yy_flush_buffer ub_c_flush_buffer
#define yy_init_buffer ub_c_init_buffer
#define yy_scan_buffer ub_c_scan_buffer
#define yy_scan_bytes ub_c_scan_bytes
#define yy_scan_string ub_c_scan_string
#define yy_create_buffer ub_c_create_buffer
#define yyrestart ub_c_restart
#define yy_delete_buffer ub_c_delete_buffer
#define yypop_buffer_state ub_c_pop_buffer_state
#define yypush_buffer_state ub_c_push_buffer_state
#define yyunput ub_c_unput
#define yyset_in ub_c_set_in
#define yyget_in ub_c_get_in
#define yyset_out ub_c_set_out
#define yyget_out ub_c_get_out
#define yyget_lineno ub_c_get_lineno
#define yyset_lineno ub_c_set_lineno
#define yyset_debug ub_c_set_debug
#define yyget_debug ub_c_get_debug
#define yy_flex_debug ub_c_flex_debug
#define yylex_destroy ub_c_lex_destroy
#define yyfree ub_c_free
#define yyrealloc ub_c_realloc
#define yyalloc ub_c_alloc
#define yymalloc ub_c_malloc
#define yyget_leng ub_c_get_leng
#define yylineno ub_c_lineno
#define yyget_text ub_c_get_text
#endif /* UTIL_CONFIGYYRENAME_H */

782
external/unbound/util/data/dname.c vendored Normal file
View File

@@ -0,0 +1,782 @@
/*
* util/data/dname.h - domain name handling
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains domain name handling functions.
*/
#include "config.h"
#include <ctype.h>
#include "util/data/dname.h"
#include "util/data/msgparse.h"
#include "util/log.h"
#include "util/storage/lookup3.h"
#include "ldns/sbuffer.h"
/* determine length of a dname in buffer, no compression pointers allowed */
size_t
query_dname_len(sldns_buffer* query)
{
	size_t total = 0;
	for(;;) {
		size_t lab;
		if(sldns_buffer_remaining(query) < 1)
			return 0; /* truncated: no room for a length byte */
		lab = sldns_buffer_read_u8(query);
		if(lab & 0xc0)
			return 0; /* compression is not allowed in queries */
		total += lab + 1;
		if(total > LDNS_MAX_DOMAINLEN)
			return 0; /* exceeds maximum wire dname length */
		if(lab == 0)
			return total; /* root label reached: done */
		if(sldns_buffer_remaining(query) < lab)
			return 0; /* truncated: label content missing */
		sldns_buffer_skip(query, (ssize_t)lab);
	}
}
/** check that an uncompressed dname in memory is well-formed;
 * returns its length (including the root byte) or 0 on error. */
size_t
dname_valid(uint8_t* dname, size_t maxlen)
{
	size_t len = 0;
	size_t lab = *dname++;
	while(lab != 0) {
		if(lab & 0xc0)
			return 0; /* compression pointers are not allowed here */
		len += lab + 1;
		if(len >= LDNS_MAX_DOMAINLEN)
			return 0; /* wire name too long */
		if(len > maxlen)
			return 0; /* runs past the given allocation */
		dname += lab;
		lab = *dname++;
	}
	len++; /* account for the terminating root length byte */
	if(len > maxlen)
		return 0; /* root byte falls outside the allocation */
	return len;
}
/** compare uncompressed, noncanonical, registers are hints for speed */
int
query_dname_compare(register uint8_t* d1, register uint8_t* d2)
{
	register uint8_t l1, l2;
	log_assert(d1 && d2);
	/* a dname that ends has label length 0; compare label by label */
	for(l1 = *d1++, l2 = *d2++; l1 != 0 || l2 != 0;
		l1 = *d1++, l2 = *d2++) {
		if(l1 != l2)
			return (l1 < l2) ? -1 : 1;
		log_assert(l1 == l2 && l1 != 0);
		/* equal-length labels: compare content, lowercased */
		for(; l1 != 0; l1--, d1++, d2++) {
			if(*d1 == *d2)
				continue; /* fast path: identical bytes */
			if(tolower((int)*d1) == tolower((int)*d2))
				continue; /* differ only in case */
			return (tolower((int)*d1) < tolower((int)*d2))
				? -1 : 1;
		}
	}
	return 0;
}
/** lowercase an uncompressed dname in place, label by label */
void
query_dname_tolower(uint8_t* dname)
{
	uint8_t lab = *dname;
	while(lab != 0) {
		uint8_t i;
		dname++; /* step past the length byte */
		for(i = 0; i < lab; i++) {
			*dname = (uint8_t)tolower((int)*dname);
			dname++;
		}
		lab = *dname; /* next label's length byte */
	}
}
/** Lowercase a dname that sits inside a packet, following compression
 * pointers.  Bounds-checks every read against the packet buffer and
 * bails out silently on malformed input or too many pointers. */
void
pkt_dname_tolower(sldns_buffer* pkt, uint8_t* dname)
{
	uint8_t lablen;
	int count = 0; /* number of compression pointers followed */
	if(dname >= sldns_buffer_end(pkt))
		return; /* dname starts outside the packet */
	lablen = *dname++;
	while(lablen) {
		if(LABEL_IS_PTR(lablen)) {
			/* jump to the pointer target, if inside the packet */
			if((size_t)PTR_OFFSET(lablen, *dname)
				>= sldns_buffer_limit(pkt))
				return;
			dname = sldns_buffer_at(pkt, PTR_OFFSET(lablen, *dname));
			lablen = *dname++;
			/* guard against compression pointer loops */
			if(count++ > MAX_COMPRESS_PTRS)
				return;
			continue;
		}
		/* label content must fit within the packet */
		if(dname+lablen >= sldns_buffer_end(pkt))
			return;
		while(lablen--) {
			*dname = (uint8_t)tolower((int)*dname);
			dname++;
		}
		if(dname >= sldns_buffer_end(pkt))
			return; /* next length byte out of bounds */
		lablen = *dname++;
	}
}
/** Determine the uncompressed wire length of a (possibly compressed)
 * dname at the current buffer position.  Advances the buffer position
 * to just past the dname; returns 0 on any parse error. */
size_t
pkt_dname_len(sldns_buffer* pkt)
{
	size_t len = 0;
	int ptrcount = 0; /* loop protection: pointers followed so far */
	uint8_t labellen;
	size_t endpos = 0; /* position after first ptr; restored at exit */
	/* read dname and determine length */
	/* check compression pointers, loops, out of bounds */
	while(1) {
		/* read next label */
		if(sldns_buffer_remaining(pkt) < 1)
			return 0;
		labellen = sldns_buffer_read_u8(pkt);
		if(LABEL_IS_PTR(labellen)) {
			/* compression ptr */
			uint16_t ptr;
			if(sldns_buffer_remaining(pkt) < 1)
				return 0;
			ptr = PTR_OFFSET(labellen, sldns_buffer_read_u8(pkt));
			if(ptrcount++ > MAX_COMPRESS_PTRS)
				return 0; /* loop! */
			if(sldns_buffer_limit(pkt) <= ptr)
				return 0; /* out of bounds! */
			/* remember where the dname ends in the packet;
			 * only the first pointer marks that spot */
			if(!endpos)
				endpos = sldns_buffer_position(pkt);
			sldns_buffer_set_position(pkt, ptr);
		} else {
			/* label contents */
			if(labellen > 0x3f)
				return 0; /* label too long */
			len += 1 + labellen;
			if(len > LDNS_MAX_DOMAINLEN)
				return 0;
			if(labellen == 0) {
				/* end of dname */
				break;
			}
			if(sldns_buffer_remaining(pkt) < labellen)
				return 0;
			sldns_buffer_skip(pkt, (ssize_t)labellen);
		}
	}
	/* restore position to just after the compressed dname */
	if(endpos)
		sldns_buffer_set_position(pkt, endpos);
	return len;
}
/** Compare two (possibly compressed) dnames inside a packet,
 * case-insensitively.  Compression pointers are resolved via pkt;
 * the buffer position is not changed.  Returns -1, 0 or +1. */
int
dname_pkt_compare(sldns_buffer* pkt, uint8_t* d1, uint8_t* d2)
{
	uint8_t len1, len2;
	log_assert(pkt && d1 && d2);
	len1 = *d1++;
	len2 = *d2++;
	while( len1 != 0 || len2 != 0 ) {
		/* resolve ptrs */
		if(LABEL_IS_PTR(len1)) {
			d1 = sldns_buffer_at(pkt, PTR_OFFSET(len1, *d1));
			len1 = *d1++;
			continue;
		}
		if(LABEL_IS_PTR(len2)) {
			d2 = sldns_buffer_at(pkt, PTR_OFFSET(len2, *d2));
			len2 = *d2++;
			continue;
		}
		/* check label length; shorter label sorts first */
		log_assert(len1 <= LDNS_MAX_LABELLEN);
		log_assert(len2 <= LDNS_MAX_LABELLEN);
		if(len1 != len2) {
			if(len1 < len2) return -1;
			return 1;
		}
		log_assert(len1 == len2 && len1 != 0);
		/* compare labels byte-for-byte, lowercased */
		while(len1--) {
			if(tolower((int)*d1++) != tolower((int)*d2++)) {
				/* pointers already advanced: look back one */
				if(tolower((int)d1[-1]) < tolower((int)d2[-1]))
					return -1;
				return 1;
			}
		}
		len1 = *d1++;
		len2 = *d2++;
	}
	return 0;
}
/** hash an uncompressed dname label by label, lowercasing each label
 * into a scratch buffer so the hash is case-insensitive while the
 * query itself keeps its original case */
hashvalue_t
dname_query_hash(uint8_t* dname, hashvalue_t h)
{
	uint8_t buf[LDNS_MAX_LABELLEN+1];
	uint8_t lab = *dname++;
	while(lab != 0) {
		int idx;
		log_assert(lab <= LDNS_MAX_LABELLEN);
		buf[0] = lab; /* length byte participates in the hash */
		for(idx = 1; idx <= (int)lab; idx++)
			buf[idx] = (uint8_t)tolower((int)*dname++);
		h = hashlittle(buf, buf[0] + 1, h);
		lab = *dname++;
	}
	return h;
}
/** Hash a (possibly compressed) dname from a packet, label by label,
 * lowercasing.  Produces the same value as dname_query_hash would for
 * the decompressed name. */
hashvalue_t
dname_pkt_hash(sldns_buffer* pkt, uint8_t* dname, hashvalue_t h)
{
	uint8_t labuf[LDNS_MAX_LABELLEN+1]; /* lowercased copy of a label */
	uint8_t lablen;
	int i;
	/* preserve case of query, make hash label by label */
	lablen = *dname++;
	while(lablen) {
		if(LABEL_IS_PTR(lablen)) {
			/* follow pointer */
			dname = sldns_buffer_at(pkt, PTR_OFFSET(lablen, *dname));
			lablen = *dname++;
			continue;
		}
		log_assert(lablen <= LDNS_MAX_LABELLEN);
		labuf[0] = lablen; /* length byte is hashed too */
		i=0;
		while(lablen--)
			labuf[++i] = (uint8_t)tolower((int)*dname++);
		h = hashlittle(labuf, labuf[0] + 1, h);
		lablen = *dname++;
	}
	return h;
}
/** Copy a dname out of a packet into 'to', decompressing it.
 * 'to' must be large enough to hold the uncompressed name (callers
 * size it with pkt_dname_len).  On an overlong name the output is
 * truncated to the root name and an error is logged.
 * NOTE(review): compression pointers are assumed pre-validated here
 * (no bounds/loop checks) — presumably callers ran pkt_dname_len
 * first; confirm at call sites. */
void dname_pkt_copy(sldns_buffer* pkt, uint8_t* to, uint8_t* dname)
{
	/* copy over the dname and decompress it at the same time */
	size_t len = 0;
	uint8_t lablen;
	lablen = *dname++;
	while(lablen) {
		if(LABEL_IS_PTR(lablen)) {
			/* follow pointer */
			dname = sldns_buffer_at(pkt, PTR_OFFSET(lablen, *dname));
			lablen = *dname++;
			continue;
		}
		log_assert(lablen <= LDNS_MAX_LABELLEN);
		len += (size_t)lablen+1;
		if(len >= LDNS_MAX_DOMAINLEN) {
			*to = 0; /* end the result prematurely */
			log_err("bad dname in dname_pkt_copy");
			return;
		}
		*to++ = lablen; /* write length byte, then label content */
		memmove(to, dname, lablen);
		dname += lablen;
		to += lablen;
		lablen = *dname++;
	}
	/* copy last \0 */
	*to = 0;
}
/** print a dname (possibly compressed, resolved via pkt) to 'out'
 * in dotted form; prints placeholder markers for unresolvable or
 * malformed labels */
void dname_print(FILE* out, struct sldns_buffer* pkt, uint8_t* dname)
{
	uint8_t lab;
	if(!out)
		out = stdout; /* default destination */
	if(!dname)
		return;
	lab = *dname++;
	if(lab == 0)
		fputc('.', out); /* the root name prints as one dot */
	while(lab != 0) {
		if(LABEL_IS_PTR(lab)) {
			/* a compression pointer needs the packet to chase */
			if(!pkt) {
				fputs("??compressionptr??", out);
				return;
			}
			dname = sldns_buffer_at(pkt, PTR_OFFSET(lab, *dname));
			lab = *dname++;
			continue;
		}
		if(lab > LDNS_MAX_LABELLEN) {
			fputs("??extendedlabel??", out);
			return;
		}
		for(; lab != 0; lab--)
			fputc((int)*dname++, out);
		fputc('.', out);
		lab = *dname++;
	}
}
/** count the labels in an uncompressed dname; the root label always
 * counts, so the minimum result is 1 */
int
dname_count_labels(uint8_t* dname)
{
	int labs = 1; /* the root label */
	uint8_t lab = *dname++;
	while(lab != 0) {
		labs++;
		dname += lab; /* skip label content */
		lab = *dname++;
	}
	return labs;
}
/** count labels and, via *size, the total wire length of an
 * uncompressed dname (content plus length bytes plus root byte) */
int
dname_count_size_labels(uint8_t* dname, size_t* size)
{
	int labs = 1;       /* root label counted up front */
	size_t wirelen = 1; /* root's zero length byte counted up front */
	uint8_t lab = *dname++;
	while(lab != 0) {
		labs++;
		wirelen += (size_t)lab + 1;
		dname += lab;
		lab = *dname++;
	}
	*size = wirelen;
	return labs;
}
/**
 * Compare labels in memory, lowercase while comparing.
 * @param p1: label 1
 * @param p2: label 2
 * @param len: number of bytes to compare.
 * @return: 0, -1, +1 comparison result.
 */
static int
memlowercmp(uint8_t* p1, uint8_t* p2, uint8_t len)
{
	uint8_t i;
	for(i = 0; i < len; i++, p1++, p2++) {
		int c1, c2;
		if(*p1 == *p2)
			continue; /* fast path: identical bytes */
		c1 = tolower((int)*p1);
		c2 = tolower((int)*p2);
		if(c1 == c2)
			continue; /* differ only in case */
		return (c1 < c2) ? -1 : 1;
	}
	return 0;
}
/** Compare two uncompressed dnames label by label from the right
 * (the root).  labs1/labs2 are the label counts of d1/d2.  On return
 * *mlabs holds how many rightmost labels matched.  Result is -1/0/+1
 * with the FIRST difference (seen from the right) deciding the order;
 * if all compared labels are equal, the name with more labels is
 * greater.  This is not the canonical NSEC order (see
 * dname_canon_lab_cmp for that). */
int
dname_lab_cmp(uint8_t* d1, int labs1, uint8_t* d2, int labs2, int* mlabs)
{
	uint8_t len1, len2;
	int atlabel = labs1;
	int lastmlabs;
	int lastdiff = 0;
	/* first skip so that we compare same label. */
	if(labs1 > labs2) {
		while(atlabel > labs2) {
			len1 = *d1++;
			d1 += len1;
			atlabel--;
		}
		log_assert(atlabel == labs2);
	} else if(labs1 < labs2) {
		atlabel = labs2;
		while(atlabel > labs1) {
			len2 = *d2++;
			d2 += len2;
			atlabel--;
		}
		log_assert(atlabel == labs1);
	}
	lastmlabs = atlabel+1;
	/* now at same label in d1 and d2, atlabel */
	/* www.example.com. */
	/* 4   3       2  1 atlabel number */
	/* repeat until at root label (which is always the same) */
	while(atlabel > 1) {
		len1 = *d1++;
		len2 = *d2++;
		if(len1 != len2) {
			/* different lengths: shorter label sorts first */
			log_assert(len1 != 0 && len2 != 0);
			if(len1<len2)
				lastdiff = -1;
			else lastdiff = 1;
			lastmlabs = atlabel;
			d1 += len1;
			d2 += len2;
		} else {
			/* memlowercmp is inlined here; or just like
			 * if((c=memlowercmp(d1, d2, len1)) != 0) {
			 *	lastdiff = c;
			 *	lastmlabs = atlabel; } apart from d1++,d2++ */
			while(len1) {
				if(*d1 != *d2 && tolower((int)*d1)
					!= tolower((int)*d2)) {
					if(tolower((int)*d1) <
						tolower((int)*d2)) {
						lastdiff = -1;
						lastmlabs = atlabel;
						/* skip rest of this label
						 * on both names */
						d1 += len1;
						d2 += len1;
						break;
					}
					lastdiff = 1;
					lastmlabs = atlabel;
					d1 += len1;
					d2 += len1;
					break; /* out of memlowercmp */
				}
				d1++;
				d2++;
				len1--;
			}
		}
		atlabel--;
	}
	/* last difference atlabel number, so number of labels matching,
	 * at the right side, is one less. */
	*mlabs = lastmlabs-1;
	if(lastdiff == 0) {
		/* all labels compared were equal, check if one has more
		 * labels, so that example.com. > com. */
		if(labs1 > labs2)
			return 1;
		else if(labs1 < labs2)
			return -1;
	}
	return lastdiff;
}
/** write an uncompressed dname into a buffer; returns 0 when the
 * buffer has insufficient room, 1 on success */
int
dname_buffer_write(sldns_buffer* pkt, uint8_t* dname)
{
	uint8_t lab;
	/* first length byte */
	if(sldns_buffer_remaining(pkt) < 1)
		return 0;
	lab = *dname++;
	sldns_buffer_write_u8(pkt, lab);
	/* then alternate label content and the following length byte */
	while(lab != 0) {
		if(sldns_buffer_remaining(pkt) < (size_t)lab + 1)
			return 0;
		sldns_buffer_write(pkt, dname, lab);
		dname += lab;
		lab = *dname++;
		sldns_buffer_write_u8(pkt, lab);
	}
	return 1;
}
/** render an uncompressed dname into str in dotted text form.
 * Unsafe characters become '?'; a bad label length prints '#' and a
 * too-long name prints '&'.  str is assumed large enough for a
 * maximal dname (LDNS_MAX_DOMAINLEN text bytes plus terminator). */
void dname_str(uint8_t* dname, char* str)
{
	size_t wirelen = 0;
	uint8_t lab;
	char* out = str;
	if(!dname || *dname == 0) {
		/* NULL or root: a single dot */
		*out++ = '.';
		*out = 0;
		return;
	}
	lab = *dname++;
	while(lab != 0) {
		if(lab > LDNS_MAX_LABELLEN) {
			*out++ = '#'; /* marker: illegal label length */
			*out = 0;
			return;
		}
		wirelen += lab + 1;
		if(wirelen >= LDNS_MAX_DOMAINLEN-1) {
			*out++ = '&'; /* marker: name too long */
			*out = 0;
			return;
		}
		for(; lab != 0; lab--) {
			uint8_t c = *dname++;
			if(isalnum((int)c) || c == '-' || c == '_'
				|| c == '*')
				*out++ = (char)c;
			else
				*out++ = '?'; /* sanitize odd bytes */
		}
		*out++ = '.';
		lab = *dname++;
	}
	*out = 0;
}
/** test whether d1 is a strict (proper) subdomain of d2,
 * e.g. d1 www.example.com. under d2 example.com. */
int
dname_strict_subdomain(uint8_t* d1, int labs1, uint8_t* d2, int labs2)
{
	int match;
	/* a proper subdomain must have more labels than its parent */
	if(labs2 >= labs1)
		return 0;
	if(dname_lab_cmp(d1, labs1, d2, labs2, &match) <= 0)
		return 0;
	/* subdomain iff every label of d2 matched */
	return match == labs2;
}
/** convenience wrapper: count the labels, then do the strict check */
int
dname_strict_subdomain_c(uint8_t* d1, uint8_t* d2)
{
	int l1 = dname_count_labels(d1);
	int l2 = dname_count_labels(d2);
	return dname_strict_subdomain(d1, l1, d2, l2);
}
/** test whether d1 is a (non-strict) subdomain of d2; a name is a
 * subdomain of itself, so example.com. under example.com. is true */
int
dname_subdomain_c(uint8_t* d1, uint8_t* d2)
{
	int match;
	int l1 = dname_count_labels(d1);
	int l2 = dname_count_labels(d2);
	if(l2 > l1)
		return 0; /* the parent cannot have more labels */
	if(dname_lab_cmp(d1, l1, d2, l2, &match) < 0)
		return 0; /* d1 sorts before d2: different branch */
	/* subdomain iff every label of d2 matched */
	return match == l2;
}
/** true iff dname is the root name (a single zero length byte) */
int
dname_is_root(uint8_t* dname)
{
	uint8_t first;
	log_assert(dname);
	first = dname[0];
	log_assert(!LABEL_IS_PTR(first));
	return first == 0;
}
/** strip the leftmost label from *dname, updating *len; the root
 * label is never removed */
void
dname_remove_label(uint8_t** dname, size_t* len)
{
	size_t lab;
	log_assert(dname && *dname && len);
	lab = (*dname)[0];
	log_assert(!LABEL_IS_PTR(lab));
	log_assert(*len > lab);
	if(lab == 0)
		return; /* do not modify root label */
	/* advance past the length byte and the label content */
	*dname += lab + 1;
	*len -= lab + 1;
}
/** strip the n leftmost labels from *dname (stops at the root) */
void
dname_remove_labels(uint8_t** dname, size_t* len, int n)
{
	int stripped;
	for(stripped = 0; stripped < n; stripped++)
		dname_remove_label(dname, len);
}
/** count the labels in a signer name for DNSSEC, ignoring the root
 * and a leading "*" wildcard label (RFC 4034 style label count) */
int
dname_signame_label_count(uint8_t* dname)
{
	uint8_t lab;
	int count = 0;
	if(*dname == 0)
		return 0; /* the root name counts as zero labels */
	/* a leading single-character "*" label is not counted */
	if(dname[0] == 1 && dname[1] == '*')
		dname += 2;
	for(lab = dname[0]; lab != 0; lab = dname[0]) {
		count++;
		dname += (size_t)lab + 1;
	}
	return count;
}
/** true iff the dname starts with the single-character label "*" */
int
dname_is_wild(uint8_t* dname)
{
	return dname[0] == 1 && dname[1] == '*';
}
/**
 * Compare labels in memory, lowercase while comparing.
 * Returns canonical order for labels. If all is equal, the
 * shortest is first.
 *
 * @param p1: label 1
 * @param len1: length of label 1.
 * @param p2: label 2
 * @param len2: length of label 2.
 * @return: 0, -1, +1 comparison result.
 */
static int
memcanoncmp(uint8_t* p1, uint8_t len1, uint8_t* p2, uint8_t len2)
{
	uint8_t shorter = (len1 < len2) ? len1 : len2;
	int cmp = memlowercmp(p1, p2, shorter);
	if(cmp != 0)
		return cmp;
	/* common prefix equal: canonically the shorter label is first */
	if(len1 == len2)
		return 0;
	return (len1 < len2) ? -1 : 1;
}
/** Like dname_lab_cmp, but using the canonical (RFC 4034) label order
 * where, for labels with an equal common prefix, the shorter label
 * sorts first.  *mlabs receives the number of rightmost matching
 * labels. */
int
dname_canon_lab_cmp(uint8_t* d1, int labs1, uint8_t* d2, int labs2, int* mlabs)
{
	/* like dname_lab_cmp, but with different label comparison,
	 * empty character sorts before \000.
	 * So ylyly is before z. */
	uint8_t len1, len2;
	int atlabel = labs1;
	int lastmlabs;
	int lastdiff = 0;
	int c;
	/* first skip so that we compare same label. */
	if(labs1 > labs2) {
		while(atlabel > labs2) {
			len1 = *d1++;
			d1 += len1;
			atlabel--;
		}
		log_assert(atlabel == labs2);
	} else if(labs1 < labs2) {
		atlabel = labs2;
		while(atlabel > labs1) {
			len2 = *d2++;
			d2 += len2;
			atlabel--;
		}
		log_assert(atlabel == labs1);
	}
	lastmlabs = atlabel+1;
	/* now at same label in d1 and d2, atlabel */
	/* www.example.com. */
	/* 4   3       2  1 atlabel number */
	/* repeat until at root label (which is always the same) */
	while(atlabel > 1) {
		len1 = *d1++;
		len2 = *d2++;
		/* canonical comparison: common prefix first, then the
		 * shorter label wins; remember the latest difference */
		if((c=memcanoncmp(d1, len1, d2, len2)) != 0) {
			if(c<0)
				lastdiff = -1;
			else lastdiff = 1;
			lastmlabs = atlabel;
		}
		d1 += len1;
		d2 += len2;
		atlabel--;
	}
	/* last difference atlabel number, so number of labels matching,
	 * at the right side, is one less. */
	*mlabs = lastmlabs-1;
	if(lastdiff == 0) {
		/* all labels compared were equal, check if one has more
		 * labels, so that example.com. > com. */
		if(labs1 > labs2)
			return 1;
		else if(labs1 < labs2)
			return -1;
	}
	return lastdiff;
}
/** canonical (RFC 4034) order of two uncompressed dnames */
int
dname_canonical_compare(uint8_t* d1, uint8_t* d2)
{
	int match;
	/* delegate to the canonical label compare and discard the
	 * matching-label count */
	return dname_canon_lab_cmp(d1, dname_count_labels(d1),
		d2, dname_count_labels(d2), &match);
}
/** return a pointer into d1 at the topdomain that d1 and d2 share
 * (at worst the root, which every name shares) */
uint8_t* dname_get_shared_topdomain(uint8_t* d1, uint8_t* d2)
{
	int match;
	size_t remain = LDNS_MAX_DOMAINLEN;
	int l1 = dname_count_labels(d1);
	int l2 = dname_count_labels(d2);
	/* determine how many rightmost labels agree */
	(void)dname_lab_cmp(d1, l1, d2, l2, &match);
	/* strip the non-shared leftmost labels off d1 */
	dname_remove_labels(&d1, &remain, l1 - match);
	return d1;
}

304
external/unbound/util/data/dname.h vendored Normal file
View File

@@ -0,0 +1,304 @@
/*
* util/data/dname.h - domain name routines
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains functions to deal with domain names (dnames).
*
* Some of the functions deal with domain names as a wireformat buffer,
* with a length.
*/
#ifndef UTIL_DATA_DNAME_H
#define UTIL_DATA_DNAME_H
#include "util/storage/lruhash.h"
struct sldns_buffer;
/** max number of compression ptrs to follow */
#define MAX_COMPRESS_PTRS 256
/**
* Determine length of dname in buffer, no compression ptrs allowed,
* @param query: the ldns buffer, current position at start of dname.
* at end, position is at end of the dname.
* @return: 0 on parse failure, or length including ending 0 of dname.
*/
size_t query_dname_len(struct sldns_buffer* query);
/**
* Determine if dname in memory is correct. no compression ptrs allowed.
* @param dname: where dname starts in memory.
* @param len: dname is not allowed to exceed this length (i.e. of allocation).
* @return length of dname if dname is ok, 0 on a parse error.
*/
size_t dname_valid(uint8_t* dname, size_t len);
/** lowercase query dname */
void query_dname_tolower(uint8_t* dname);
/**
* lowercase pkt dname (follows compression pointers)
* @param pkt: the packet, used to follow compression pointers. Position
* is unchanged.
* @param dname: start of dname in packet.
*/
void pkt_dname_tolower(struct sldns_buffer* pkt, uint8_t* dname);
/**
* Compare query dnames (uncompressed storage). The Dnames passed do not
* have to be lowercased, comparison routine does this.
*
* This routine is special, in that the comparison that it does corresponds
* with the canonical comparison needed when comparing dnames inside rdata
* for RR types that need canonicalization. That means that the first byte
* that is smaller (possibly after lowercasing) makes an RR smaller, or the
* shortest name makes an RR smaller.
*
* This routine does not compute the canonical order needed for NSEC
* processing.
*
* Dnames have to be valid format.
* @param d1: dname to compare
* @param d2: dname to compare
* @return: -1, 0, or +1 depending on comparison results.
* Sort order is first difference found. not the canonical ordering.
*/
int query_dname_compare(uint8_t* d1, uint8_t* d2);
/**
* Determine correct, compressed, dname present in packet.
* Checks for parse errors.
* @param pkt: packet to read from (from current start position).
* @return: 0 on parse error.
* At exit the position is right after the (compressed) dname.
* Compression pointers are followed and checked for loops.
* The uncompressed wireformat length is returned.
*/
size_t pkt_dname_len(struct sldns_buffer* pkt);
/**
* Compare dnames in packet (compressed). Dnames must be valid.
* routine performs lowercasing, so the packet casing is preserved.
* @param pkt: packet, used to resolve compression pointers.
* @param d1: dname to compare
* @param d2: dname to compare
* @return: -1, 0, or +1 depending on comparison results.
* Sort order is first difference found. not the canonical ordering.
*/
int dname_pkt_compare(struct sldns_buffer* pkt, uint8_t* d1, uint8_t* d2);
/**
* Hash dname, label by label, lowercasing, into hashvalue.
* Dname in query format (not compressed).
* @param dname: dname to hash.
* @param h: initial hash value.
* @return: result hash value.
*/
hashvalue_t dname_query_hash(uint8_t* dname, hashvalue_t h);
/**
* Hash dname, label by label, lowercasing, into hashvalue.
* Dname in pkt format (compressed).
* @param pkt: packet, for resolving compression pointers.
* @param dname: dname to hash, pointer to the pkt buffer.
* Must be valid format. No loops, etc.
* @param h: initial hash value.
* @return: result hash value.
* Result is the same as dname_query_hash, even if compression is used.
*/
hashvalue_t dname_pkt_hash(struct sldns_buffer* pkt, uint8_t* dname, hashvalue_t h);
/**
* Copy over a valid dname and decompress it.
* @param pkt: packet to resolve compression pointers.
* @param to: buffer of size from pkt_len function to hold result.
* @param dname: pointer into packet where dname starts.
*/
void dname_pkt_copy(struct sldns_buffer* pkt, uint8_t* to, uint8_t* dname);
/**
* Copy over a valid dname to a packet.
* @param pkt: packet to copy to.
* @param dname: dname to copy.
* @return: 0 if not enough space in buffer.
*/
int dname_buffer_write(struct sldns_buffer* pkt, uint8_t* dname);
/**
* Count the number of labels in an uncompressed dname in memory.
* @param dname: pointer to uncompressed dname.
* @return: count of labels, including root label, "com." has 2 labels.
*/
int dname_count_labels(uint8_t* dname);
/**
* Count labels and dname length both, for uncompressed dname in memory.
* @param dname: pointer to uncompressed dname.
* @param size: length of dname, including root label.
* @return: count of labels, including root label, "com." has 2 labels.
*/
int dname_count_size_labels(uint8_t* dname, size_t* size);
/**
* Compare dnames, sorted not canonical, but by label.
* Such that zone contents follows zone apex.
* @param d1: first dname. pointer to uncompressed wireformat.
* @param labs1: number of labels in first dname.
* @param d2: second dname. pointer to uncompressed wireformat.
* @param labs2: number of labels in second dname.
* @param mlabs: number of labels that matched exactly (the shared topdomain).
* @return: 0 for equal, -1 smaller, or +1 d1 larger than d2.
*/
int dname_lab_cmp(uint8_t* d1, int labs1, uint8_t* d2, int labs2, int* mlabs);
/**
* See if domain name d1 is a strict subdomain of d2.
* That is a subdomain, but not equal.
* @param d1: domain name, uncompressed wireformat
* @param labs1: number of labels in d1, including root label.
* @param d2: domain name, uncompressed wireformat
* @param labs2: number of labels in d2, including root label.
* @return true if d1 is a subdomain of d2, but not equal to d2.
*/
int dname_strict_subdomain(uint8_t* d1, int labs1, uint8_t* d2, int labs2);
/**
* Like dname_strict_subdomain but counts labels
* @param d1: domain name, uncompressed wireformat
* @param d2: domain name, uncompressed wireformat
* @return true if d1 is a subdomain of d2, but not equal to d2.
*/
int dname_strict_subdomain_c(uint8_t* d1, uint8_t* d2);
/**
 * Counts labels. Tests if d1 is a subdomain of d2.
* @param d1: domain name, uncompressed wireformat
* @param d2: domain name, uncompressed wireformat
* @return true if d1 is a subdomain of d2.
*/
int dname_subdomain_c(uint8_t* d1, uint8_t* d2);
/**
* Debug helper. Print wireformat dname to output.
* @param out: like stdout or a file.
* @param pkt: if not NULL, the packet for resolving compression ptrs.
* @param dname: pointer to (start of) dname.
*/
void dname_print(FILE* out, struct sldns_buffer* pkt, uint8_t* dname);
/**
 * Debug helper. Print dname to given string buffer (string buffer must
 * be at least 255 chars + 1 for the 0), in printable form.
* This may lose information (? for nonprintable characters, or & if
* the name is too long, # for a bad label length).
* @param dname: uncompressed wireformat.
* @param str: buffer of 255+1 length.
*/
void dname_str(uint8_t* dname, char* str);
/**
* Returns true if the uncompressed wireformat dname is the root "."
* @param dname: the dname to check
* @return true if ".", false if not.
*/
int dname_is_root(uint8_t* dname);
/**
* Snip off first label from a dname, returning the parent zone.
* @param dname: from what to strip off. uncompressed wireformat.
* @param len: length, adjusted to become less.
* @return stripped off, or "." if input was ".".
*/
void dname_remove_label(uint8_t** dname, size_t* len);
/**
* Snip off first N labels from a dname, returning the parent zone.
* @param dname: from what to strip off. uncompressed wireformat.
* @param len: length, adjusted to become less.
* @param n: number of labels to strip off (from the left).
* if 0, nothing happens.
* @return stripped off, or "." if input was ".".
*/
void dname_remove_labels(uint8_t** dname, size_t* len, int n);
/**
* Count labels for the RRSIG signature label field.
* Like a normal labelcount, but "*" wildcard and "." root are not counted.
* @param dname: valid uncompressed wireformat.
* @return number of labels like in RRSIG; '*' and '.' are not counted.
*/
int dname_signame_label_count(uint8_t* dname);
/**
* Return true if the label is a wildcard, *.example.com.
* @param dname: valid uncompressed wireformat.
* @return true if wildcard, or false.
*/
int dname_is_wild(uint8_t* dname);
/**
* Compare dnames, Canonical in rfc4034 sense, but by label.
* Such that zone contents follows zone apex.
*
* @param d1: first dname. pointer to uncompressed wireformat.
* @param labs1: number of labels in first dname.
* @param d2: second dname. pointer to uncompressed wireformat.
* @param labs2: number of labels in second dname.
* @param mlabs: number of labels that matched exactly (the shared topdomain).
* @return: 0 for equal, -1 smaller, or +1 d1 larger than d2.
*/
int dname_canon_lab_cmp(uint8_t* d1, int labs1, uint8_t* d2, int labs2,
int* mlabs);
/**
* Canonical dname compare. Takes care of counting labels.
* Per rfc 4034 canonical order.
*
* @param d1: first dname. pointer to uncompressed wireformat.
* @param d2: second dname. pointer to uncompressed wireformat.
* @return: 0 for equal, -1 smaller, or +1 d1 larger than d2.
*/
int dname_canonical_compare(uint8_t* d1, uint8_t* d2);
/**
* Get the shared topdomain between two names. Root "." or longer.
* @param d1: first dname. pointer to uncompressed wireformat.
* @param d2: second dname. pointer to uncompressed wireformat.
* @return pointer to shared topdomain. Ptr to a part of d1.
*/
uint8_t* dname_get_shared_topdomain(uint8_t* d1, uint8_t* d2);
#endif /* UTIL_DATA_DNAME_H */

841
external/unbound/util/data/msgencode.c vendored Normal file
View File

@@ -0,0 +1,841 @@
/*
* util/data/msgencode.c - Encode DNS messages, queries and replies.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains a routines to encode DNS messages.
*/
#include "config.h"
#include "util/data/msgencode.h"
#include "util/data/msgreply.h"
#include "util/data/msgparse.h"
#include "util/data/dname.h"
#include "util/log.h"
#include "util/regional.h"
#include "util/net_help.h"
#include "ldns/sbuffer.h"
/** return code that means the function ran out of memory. negative so it does
* not conflict with DNS rcodes. */
#define RETVAL_OUTMEM -2
/** return code that means the data did not fit (completely) in the packet */
#define RETVAL_TRUNC -4
/** return code that means all is peachy keen. Equal to DNS rcode NOERROR */
#define RETVAL_OK 0
/**
* Data structure to help domain name compression in outgoing messages.
* A tree of dnames and their offsets in the packet is kept.
* It is kept sorted, not canonical, but by label at least, so that after
* a lookup of a name you know its closest match, and the parent from that
* closest match. These are possible compression targets.
*
* It is a binary tree, not a rbtree or balanced tree, as the effort
* of keeping it balanced probably outweighs usefulness (given typical
* DNS packet size).
*/
struct compress_tree_node {
/** left node in tree, all smaller to this */
struct compress_tree_node* left;
/** right node in tree, all larger than this */
struct compress_tree_node* right;
/** the parent node - not for tree, but zone parent. One less label */
struct compress_tree_node* parent;
/** the domain name for this node. Pointer to uncompressed memory. */
uint8_t* dname;
/** number of labels in domain name, kept to help compare func. */
int labs;
/** offset in packet that points to this dname */
size_t offset;
};
/**
 * Find domain name in tree, returns exact and closest match.
 * The tree is sorted by dname_lab_cmp (label-wise, not canonical).
 * @param tree: root of tree.
 * @param dname: pointer to uncompressed dname.
 * @param labs: number of labels in domain name.
 * @param match: closest or exact match.
 *	guaranteed to be smaller or equal to the sought dname.
 *	can be null if the tree is empty.
 * @param matchlabels: number of labels that match with closest match.
 *	can be zero if there is no match.
 * @param insertpt: insert location for dname, if not found.
 *	NOTE: only written on the not-found (return 0) path.
 * @return: 0 if no exact match.
 */
static int
compress_tree_search(struct compress_tree_node** tree, uint8_t* dname,
	int labs, struct compress_tree_node** match, int* matchlabels,
	struct compress_tree_node*** insertpt)
{
	int c, n, closen=0;
	struct compress_tree_node* p = *tree;
	/* best smaller-than-dname node seen so far (closest match) */
	struct compress_tree_node* close = 0;
	/* link field that would be patched to insert the new dname */
	struct compress_tree_node** prev = tree;
	while(p) {
		if((c = dname_lab_cmp(dname, labs, p->dname, p->labs, &n))
			== 0) {
			/* exact match in tree */
			*matchlabels = n;
			*match = p;
			return 1;
		}
		if(c<0) {
			/* dname sorts before p: descend left */
			prev = &p->left;
			p = p->left;
		} else {
			/* dname sorts after p: p is a candidate closest
			 * match, remember it and descend right */
			closen = n;
			close = p; /* p->dname is smaller than dname */
			prev = &p->right;
			p = p->right;
		}
	}
	/* not found: report closest match and where to insert */
	*insertpt = prev;
	*matchlabels = closen;
	*match = close;
	return 0;
}
/**
 * Lookup a domain name in compression tree.
 * @param tree: root of tree (not the node with '.').
 * @param dname: pointer to uncompressed dname.
 * @param labs: number of labels in domain name.
 * @param insertpt: insert location for dname, if not found.
 *	only set when the search actually runs (labs > 1).
 * @return: 0 if not found or compress treenode with best compression.
 */
static struct compress_tree_node*
compress_tree_lookup(struct compress_tree_node** tree, uint8_t* dname,
	int labs, struct compress_tree_node*** insertpt)
{
	struct compress_tree_node* found;
	int matched;
	if(labs <= 1)
		return 0; /* do not compress root node */
	if(compress_tree_search(tree, dname, labs, &found, &matched,
		insertpt)) {
		/* exact match */
		return found;
	}
	/* no exact match; with only the root label shared there is no
	 * useful compression target */
	if(matched <= 1)
		return 0;
	/* e.g. www.example.com. (labs=4) closest-matched
	 * foo.example.com. (labs=4) with matched=3: walk up to the
	 * ancestor that is the shared topdomain (example.com.) */
	while(found && found->labs > matched)
		found = found->parent;
	return found;
}
/**
 * Create node for domain name compression tree.
 * @param dname: pointer to uncompressed dname (stored in tree).
 * @param labs: number of labels in dname.
 * @param offset: offset into packet for dname.
 * @param region: how to allocate memory for new node.
 * @return new node or 0 on malloc failure.
 */
static struct compress_tree_node*
compress_tree_newnode(uint8_t* dname, int labs, size_t offset,
	struct regional* region)
{
	struct compress_tree_node* node = (struct compress_tree_node*)
		regional_alloc(region, sizeof(struct compress_tree_node));
	if(!node)
		return 0;
	/* fresh node: no tree links, no zone parent yet */
	node->left = 0;
	node->right = 0;
	node->parent = 0;
	node->dname = dname;
	node->labs = labs;
	node->offset = offset;
	return node;
}
/**
 * Store domain name and ancestors into compression tree.
 * @param dname: pointer to uncompressed dname (stored in tree).
 * @param labs: number of labels in dname.
 * @param offset: offset into packet for dname.
 * @param region: how to allocate memory for new node.
 * @param closest: match from previous lookup, used to compress dname.
 *	may be NULL if no previous match.
 *	if the tree has an ancestor of dname already, this must be it.
 * @param insertpt: where to insert the dname in tree.
 *	only dereferenced when at least one node is created.
 * @return: 0 on memory error.
 */
static int
compress_tree_store(uint8_t* dname, int labs, size_t offset,
	struct regional* region, struct compress_tree_node* closest,
	struct compress_tree_node** insertpt)
{
	uint8_t lablen;
	struct compress_tree_node* newnode;
	struct compress_tree_node* prevnode = NULL;
	/* number of labels of dname not yet in the tree */
	int uplabs = labs-1; /* does not store root in tree */
	if(closest) uplabs = labs - closest->labs;
	log_assert(uplabs >= 0);
	/* algorithms builds up a vine of dname-labels to hang into tree */
	while(uplabs--) {
		if(offset > PTR_MAX_OFFSET) {
			/* offset no longer representable in a 14-bit
			 * compression pointer; stop building the vine */
			/* insertion failed, drop vine */
			return 1; /* compression pointer no longer useful */
		}
		if(!(newnode = compress_tree_newnode(dname, labs, offset,
			region))) {
			/* insertion failed, drop vine */
			return 0;
		}
		if(prevnode) {
			/* chain nodes together, last one has one label more,
			 * so is larger than newnode, thus goes right. */
			newnode->right = prevnode;
			prevnode->parent = newnode;
		}
		/* next label: advance past length byte + label data,
		 * one fewer label remains */
		lablen = *dname++;
		dname += lablen;
		offset += lablen+1;
		prevnode = newnode;
		labs--;
	}
	/* if we have a vine, hang the vine into the tree */
	if(prevnode) {
		*insertpt = prevnode;
		prevnode->parent = closest;
	}
	return 1;
}
/**
 * Write a dname to the packet, compressed against tree node p.
 * Labels above p are copied literally, then a compression pointer to
 * p's offset is emitted.
 * @param pkt: packet to write to, at current position.
 * @param dname: uncompressed wireformat dname to write.
 * @param labs: number of labels in dname.
 * @param p: compression target; p's labels are replaced by a pointer.
 * @return 0 if the packet buffer ran out of space.
 */
static int
write_compressed_dname(sldns_buffer* pkt, uint8_t* dname, int labs,
	struct compress_tree_node* p)
{
	/* compress it */
	int labcopy = labs - p->labs; /* labels to copy literally */
	uint8_t lablen;
	uint16_t ptr;
	if(labs == 1) {
		/* write root label */
		if(sldns_buffer_remaining(pkt) < 1)
			return 0;
		sldns_buffer_write_u8(pkt, 0);
		return 1;
	}
	/* copy the first couple of labels */
	while(labcopy--) {
		lablen = *dname++;
		if(sldns_buffer_remaining(pkt) < (size_t)lablen+1)
			return 0;
		sldns_buffer_write_u8(pkt, lablen);
		sldns_buffer_write(pkt, dname, lablen);
		dname += lablen;
	}
	/* insert compression ptr */
	if(sldns_buffer_remaining(pkt) < 2)
		return 0;
	ptr = PTR_CREATE(p->offset);
	sldns_buffer_write_u16(pkt, ptr);
	return 1;
}
/**
 * Compress owner name of RR and write it to the packet.
 * Also verifies that type+class+ttl+rdatalen will fit afterwards.
 * @param key: rrset key with the owner dname.
 * @param pkt: packet to write to, at current position.
 * @param region: for allocating compression tree nodes.
 * @param tree: compression tree root.
 * @param owner_pos: packet offset where the owner starts.
 * @param owner_ptr: in/out; 0 on first RR, then set to the (network
 *	order) compression pointer reused for subsequent RRs in the set.
 * @param owner_labs: number of labels in the owner dname.
 * @return RETVAL_OK, RETVAL_OUTMEM or RETVAL_TRUNC.
 */
static int
compress_owner(struct ub_packed_rrset_key* key, sldns_buffer* pkt,
	struct regional* region, struct compress_tree_node** tree,
	size_t owner_pos, uint16_t* owner_ptr, int owner_labs)
{
	struct compress_tree_node* p;
	/* initialize to NULL: compress_tree_lookup returns early without
	 * touching insertpt when owner_labs <= 1, and passing the previous
	 * indeterminate value to compress_tree_store read uninitialized
	 * memory (undefined behavior, even though unused there). */
	struct compress_tree_node** insertpt = NULL;
	if(!*owner_ptr) {
		/* compress first time dname */
		if((p = compress_tree_lookup(tree, key->rk.dname,
			owner_labs, &insertpt))) {
			if(p->labs == owner_labs)
				/* avoid ptr chains, since some software is
				 * not capable of decoding ptr after a ptr. */
				*owner_ptr = htons(PTR_CREATE(p->offset));
			if(!write_compressed_dname(pkt, key->rk.dname,
				owner_labs, p))
				return RETVAL_TRUNC;
			/* check if typeclass+4 ttl + rdatalen is available */
			if(sldns_buffer_remaining(pkt) < 4+4+2)
				return RETVAL_TRUNC;
		} else {
			/* no compress */
			if(sldns_buffer_remaining(pkt) <
				key->rk.dname_len+4+4+2)
				return RETVAL_TRUNC;
			sldns_buffer_write(pkt, key->rk.dname,
				key->rk.dname_len);
			if(owner_pos <= PTR_MAX_OFFSET)
				*owner_ptr = htons(PTR_CREATE(owner_pos));
		}
		if(!compress_tree_store(key->rk.dname, owner_labs,
			owner_pos, region, p, insertpt))
			return RETVAL_OUTMEM;
	} else {
		/* always compress 2nd-further RRs in RRset */
		if(owner_labs == 1) {
			/* root owner: single zero byte, never compressed */
			if(sldns_buffer_remaining(pkt) < 1+4+4+2)
				return RETVAL_TRUNC;
			sldns_buffer_write_u8(pkt, 0);
		} else {
			if(sldns_buffer_remaining(pkt) < 2+4+4+2)
				return RETVAL_TRUNC;
			sldns_buffer_write(pkt, owner_ptr, 2);
		}
	}
	return RETVAL_OK;
}
/**
 * Compress any domain name to the packet and record it in the tree.
 * @param dname: uncompressed wireformat dname to write.
 * @param pkt: packet to write to, at current position.
 * @param labs: number of labels in dname.
 * @param region: for allocating compression tree nodes.
 * @param tree: compression tree root.
 * @return RETVAL_OK, RETVAL_OUTMEM or RETVAL_TRUNC.
 */
static int
compress_any_dname(uint8_t* dname, sldns_buffer* pkt, int labs,
	struct regional* region, struct compress_tree_node** tree)
{
	struct compress_tree_node* p;
	/* NULL so compress_tree_store is safe if lookup did not set it */
	struct compress_tree_node** insertpt = NULL;
	size_t pos = sldns_buffer_position(pkt);
	if((p = compress_tree_lookup(tree, dname, labs, &insertpt))) {
		/* found a target: emit labels + compression pointer */
		if(!write_compressed_dname(pkt, dname, labs, p))
			return RETVAL_TRUNC;
	} else {
		/* no target: emit the full uncompressed dname */
		if(!dname_buffer_write(pkt, dname))
			return RETVAL_TRUNC;
	}
	/* make this dname a compression target for later names */
	if(!compress_tree_store(dname, labs, pos, region, p, insertpt))
		return RETVAL_OUTMEM;
	return RETVAL_OK;
}
/** return true if type needs domain name compression in rdata */
static const sldns_rr_descriptor*
type_rdata_compressable(struct ub_packed_rrset_key* key)
{
uint16_t t = ntohs(key->rk.type);
if(sldns_rr_descript(t) &&
sldns_rr_descript(t)->_compress == LDNS_RR_COMPRESS)
return sldns_rr_descript(t);
return 0;
}
/**
 * Write rdata to the packet, compressing the dnames inside it.
 * Walks the rdata fields per the type descriptor, compressing DNAME
 * fields and copying everything else; fills in the rdata length last.
 * @param pkt: packet to write to, at current position.
 * @param rdata: rdata wireformat, starting with 2-byte rdatalen.
 * @param todolen: total length of rdata including the 2-byte length.
 * @param region: for allocating compression tree nodes.
 * @param tree: compression tree root.
 * @param desc: type descriptor listing the wireformat fields.
 * @return RETVAL_OK, RETVAL_OUTMEM or RETVAL_TRUNC.
 */
static int
compress_rdata(sldns_buffer* pkt, uint8_t* rdata, size_t todolen,
	struct regional* region, struct compress_tree_node** tree,
	const sldns_rr_descriptor* desc)
{
	int labs, r, rdf = 0;
	size_t dname_len, len, pos = sldns_buffer_position(pkt);
	/* number of dname fields still to compress */
	uint8_t count = desc->_dname_count;
	sldns_buffer_skip(pkt, 2); /* rdata len fill in later */
	/* space for rdatalen checked for already */
	rdata += 2;
	todolen -= 2;
	while(todolen > 0 && count) {
		switch(desc->_wireformat[rdf]) {
		case LDNS_RDF_TYPE_DNAME:
			/* compress this dname; len stays 0, already
			 * written by compress_any_dname */
			labs = dname_count_size_labels(rdata, &dname_len);
			if((r=compress_any_dname(rdata, pkt, labs, region,
				tree)) != RETVAL_OK)
				return r;
			rdata += dname_len;
			todolen -= dname_len;
			count--;
			len = 0;
			break;
		case LDNS_RDF_TYPE_STR:
			/* character-string: length byte + data */
			len = *rdata + 1;
			break;
		default:
			len = get_rdf_size(desc->_wireformat[rdf]);
		}
		if(len) {
			/* copy over */
			if(sldns_buffer_remaining(pkt) < len)
				return RETVAL_TRUNC;
			sldns_buffer_write(pkt, rdata, len);
			todolen -= len;
			rdata += len;
		}
		rdf++;
	}
	/* copy remainder (no more dnames to compress) */
	if(todolen > 0) {
		if(sldns_buffer_remaining(pkt) < todolen)
			return RETVAL_TRUNC;
		sldns_buffer_write(pkt, rdata, todolen);
	}
	/* set rdata len */
	sldns_buffer_write_u16_at(pkt, pos, sldns_buffer_position(pkt)-pos-2);
	return RETVAL_OK;
}
/**
 * Decide whether an RR type should be included in the reply.
 * @param s: packet section the rrset would go into.
 * @param rrtype: type of the rrset (host order).
 * @param qtype: query type (host order).
 * @param dnssec: if true, DNSSEC types are wanted and all types pass.
 * @return true if the rrset belongs in the reply.
 */
static int
rrset_belongs_in_reply(sldns_pkt_section s, uint16_t rrtype, uint16_t qtype,
	int dnssec)
{
	if(dnssec)
		return 1;
	/* in the answer section, a directly queried type always belongs,
	 * as does everything for an ANY query */
	if(s == LDNS_SECTION_ANSWER &&
		(qtype == LDNS_RR_TYPE_ANY || qtype == rrtype))
		return 1;
	/* otherwise, filter out the DNSSEC-only record types */
	switch(rrtype) {
	case LDNS_RR_TYPE_SIG:
	case LDNS_RR_TYPE_KEY:
	case LDNS_RR_TYPE_NXT:
	case LDNS_RR_TYPE_DS:
	case LDNS_RR_TYPE_RRSIG:
	case LDNS_RR_TYPE_NSEC:
	case LDNS_RR_TYPE_DNSKEY:
	case LDNS_RR_TYPE_NSEC3:
	case LDNS_RR_TYPE_NSEC3PARAMS:
		return 0;
	default:
		return 1;
	}
}
/**
 * Store one rrset (data RRs and/or RRSIGs) in the packet, wireformat.
 * On truncation the caller rewinds the buffer; RR counts are only
 * updated after the whole rrset fits.
 * @param key: the rrset key (owner, type, class).
 * @param pkt: packet to write to, at current position.
 * @param num_rrs: incremented with the number of RRs written.
 * @param timenow: TTLs are stored relative to this time; expired
 *	TTLs become 0.
 * @param region: for compression tree allocations.
 * @param do_data: write the data RRs.
 * @param do_sig: write the RRSIGs (only if dnssec is also set).
 * @param tree: compression tree root.
 * @param s: packet section, for rrset_belongs_in_reply.
 * @param qtype: query type.
 * @param dnssec: whether DNSSEC records are wanted.
 * @param rr_offset: round-robin rotation offset for the data RRs.
 * @return RETVAL_OK, RETVAL_OUTMEM or RETVAL_TRUNC.
 */
static int
packed_rrset_encode(struct ub_packed_rrset_key* key, sldns_buffer* pkt,
	uint16_t* num_rrs, time_t timenow, struct regional* region,
	int do_data, int do_sig, struct compress_tree_node** tree,
	sldns_pkt_section s, uint16_t qtype, int dnssec, size_t rr_offset)
{
	size_t i, j, owner_pos;
	int r, owner_labs;
	uint16_t owner_ptr = 0;
	struct packed_rrset_data* data = (struct packed_rrset_data*)
		key->entry.data;
	/* does this RR type belong in the answer? */
	if(!rrset_belongs_in_reply(s, ntohs(key->rk.type), qtype, dnssec))
		return RETVAL_OK;
	owner_labs = dname_count_labels(key->rk.dname);
	owner_pos = sldns_buffer_position(pkt);
	if(do_data) {
		/* c is nonzero if rdata contains compressable dnames */
		const sldns_rr_descriptor* c = type_rdata_compressable(key);
		for(i=0; i<data->count; i++) {
			/* rrset roundrobin */
			j = (i + rr_offset) % data->count;
			if((r=compress_owner(key, pkt, region, tree,
				owner_pos, &owner_ptr, owner_labs))
				!= RETVAL_OK)
				return r;
			/* type, class already in network order in key */
			sldns_buffer_write(pkt, &key->rk.type, 2);
			sldns_buffer_write(pkt, &key->rk.rrset_class, 2);
			/* TTL relative to now; expired becomes 0 */
			if(data->rr_ttl[j] < timenow)
				sldns_buffer_write_u32(pkt, 0);
			else sldns_buffer_write_u32(pkt,
				data->rr_ttl[j]-timenow);
			if(c) {
				if((r=compress_rdata(pkt, data->rr_data[j],
					data->rr_len[j], region, tree, c))
					!= RETVAL_OK)
					return r;
			} else {
				if(sldns_buffer_remaining(pkt) < data->rr_len[j])
					return RETVAL_TRUNC;
				sldns_buffer_write(pkt, data->rr_data[j],
					data->rr_len[j]);
			}
		}
	}
	/* insert rrsigs (stored after the data RRs in the arrays) */
	if(do_sig && dnssec) {
		size_t total = data->count+data->rrsig_count;
		for(i=data->count; i<total; i++) {
			if(owner_ptr && owner_labs != 1) {
				/* reuse the owner compression pointer */
				if(sldns_buffer_remaining(pkt) <
					2+4+4+data->rr_len[i])
					return RETVAL_TRUNC;
				sldns_buffer_write(pkt, &owner_ptr, 2);
			} else {
				if((r=compress_any_dname(key->rk.dname,
					pkt, owner_labs, region, tree))
					!= RETVAL_OK)
					return r;
				if(sldns_buffer_remaining(pkt) <
					4+4+data->rr_len[i])
					return RETVAL_TRUNC;
			}
			sldns_buffer_write_u16(pkt, LDNS_RR_TYPE_RRSIG);
			sldns_buffer_write(pkt, &key->rk.rrset_class, 2);
			if(data->rr_ttl[i] < timenow)
				sldns_buffer_write_u32(pkt, 0);
			else sldns_buffer_write_u32(pkt,
				data->rr_ttl[i]-timenow);
			/* rrsig rdata cannot be compressed, perform 100+ byte
			 * memcopy. */
			sldns_buffer_write(pkt, data->rr_data[i],
				data->rr_len[i]);
		}
	}
	/* change rrnum only after we are sure it fits */
	if(do_data)
		*num_rrs += data->count;
	if(do_sig && dnssec)
		*num_rrs += data->rrsig_count;
	return RETVAL_OK;
}
/**
 * Store one message section in the wireformat buffer.
 * On error the partially written rrset is trimmed off neatly.
 * In the additional section, data RRs are written before their RRSIGs
 * so that under truncation the sigs are dropped first.
 * @param rep: reply with the rrset array.
 * @param num_rrsets: number of rrsets in this section.
 * @param num_rrs: out; number of RRs written in this section.
 * @param pkt: packet to write to.
 * @param rrsets_before: index of the section's first rrset in rep.
 * @param timenow: for relative TTLs.
 * @param region: for compression tree allocations.
 * @param tree: compression tree root.
 * @param s: which packet section this is.
 * @param qtype: query type.
 * @param dnssec: whether DNSSEC records are wanted.
 * @param rr_offset: round-robin rotation offset.
 * @return RETVAL_OK, RETVAL_OUTMEM or RETVAL_TRUNC.
 */
static int
insert_section(struct reply_info* rep, size_t num_rrsets, uint16_t* num_rrs,
	sldns_buffer* pkt, size_t rrsets_before, time_t timenow,
	struct regional* region, struct compress_tree_node** tree,
	sldns_pkt_section s, uint16_t qtype, int dnssec, size_t rr_offset)
{
	int r;
	size_t i, setstart;
	*num_rrs = 0;
	if(s != LDNS_SECTION_ADDITIONAL) {
		if(s == LDNS_SECTION_ANSWER && qtype == LDNS_RR_TYPE_ANY)
			dnssec = 1; /* include all types in ANY answer */
		for(i=0; i<num_rrsets; i++) {
			setstart = sldns_buffer_position(pkt);
			if((r=packed_rrset_encode(rep->rrsets[rrsets_before+i],
				pkt, num_rrs, timenow, region, 1, 1, tree,
				s, qtype, dnssec, rr_offset))
				!= RETVAL_OK) {
				/* Bad, but if due to size must set TC bit */
				/* trim off the rrset neatly. */
				sldns_buffer_set_position(pkt, setstart);
				return r;
			}
		}
	} else {
		/* additional: first pass writes data only (do_sig=0) */
		for(i=0; i<num_rrsets; i++) {
			setstart = sldns_buffer_position(pkt);
			if((r=packed_rrset_encode(rep->rrsets[rrsets_before+i],
				pkt, num_rrs, timenow, region, 1, 0, tree,
				s, qtype, dnssec, rr_offset))
				!= RETVAL_OK) {
				sldns_buffer_set_position(pkt, setstart);
				return r;
			}
		}
		/* second pass writes the signatures (do_data=0) */
		if(dnssec)
		  for(i=0; i<num_rrsets; i++) {
			setstart = sldns_buffer_position(pkt);
			if((r=packed_rrset_encode(rep->rrsets[rrsets_before+i],
				pkt, num_rrs, timenow, region, 0, 1, tree,
				s, qtype, dnssec, rr_offset))
				!= RETVAL_OK) {
				sldns_buffer_set_position(pkt, setstart);
				return r;
			}
		  }
	}
	return RETVAL_OK;
}
/**
 * Store the query section in the wireformat buffer, and seed the
 * compression tree with the query name.
 * @param qinfo: query name, type and class.
 * @param tree: compression tree root (empty; qname is first entry).
 * @param buffer: packet buffer, positioned after the header.
 * @param region: for compression tree allocations.
 * @return RETVAL_OK, RETVAL_OUTMEM or RETVAL_TRUNC.
 */
static int
insert_query(struct query_info* qinfo, struct compress_tree_node** tree,
	sldns_buffer* buffer, struct regional* region)
{
	if(sldns_buffer_remaining(buffer) <
		qinfo->qname_len+sizeof(uint16_t)*2)
		return RETVAL_TRUNC; /* buffer too small */
	/* the query is the first name inserted into the tree */
	if(!compress_tree_store(qinfo->qname,
		dname_count_labels(qinfo->qname),
		sldns_buffer_position(buffer), region, NULL, tree))
		return RETVAL_OUTMEM;
	/* if qname already lives at the write position (answer built in
	 * the query buffer), skip instead of overlapping-copy */
	if(sldns_buffer_current(buffer) == qinfo->qname)
		sldns_buffer_skip(buffer, (ssize_t)qinfo->qname_len);
	else sldns_buffer_write(buffer, qinfo->qname, qinfo->qname_len);
	sldns_buffer_write_u16(buffer, qinfo->qtype);
	sldns_buffer_write_u16(buffer, qinfo->qclass);
	return RETVAL_OK;
}
/**
 * See if the reply is a plain positive answer for qtype, so that the
 * authority and additional sections may be omitted (minimal responses).
 * A wildcard proof (NSEC/NSEC3 in the authority section) disqualifies
 * the reply, since those records must be kept.
 * @param rep: the reply to inspect.
 * @param qtype: the query type.
 * @return true if auth/additional sections can be left out.
 */
static int
positive_answer(struct reply_info* rep, uint16_t qtype) {
	size_t i, j;
	if (FLAGS_GET_RCODE(rep->flags) != LDNS_RCODE_NOERROR)
		return 0;
	for(i=0; i<rep->an_numrrsets; i++) {
		if(ntohs(rep->rrsets[i]->rk.type) != qtype)
			continue;
		/* in case it is a wildcard with DNSSEC, there will
		 * be NSEC/NSEC3 records in the authority section
		 * that we cannot remove */
		for(j=rep->an_numrrsets;
			j<rep->an_numrrsets+rep->ns_numrrsets; j++) {
			uint16_t t = ntohs(rep->rrsets[j]->rk.type);
			if(t == LDNS_RR_TYPE_NSEC ||
				t == LDNS_RR_TYPE_NSEC3)
				return 0;
		}
		return 1;
	}
	return 0;
}
/**
 * Encode a full reply into the buffer: header, query, answer,
 * authority and additional sections, with dname compression.
 * On truncation the TC bit is set and the packet is still returned
 * as a valid (truncated) message.
 * @param qinfo: query section content.
 * @param rep: the reply to encode.
 * @param id: query id (network order, written as-is).
 * @param flags: header flags (host order).
 * @param buffer: output buffer.
 * @param timenow: for relative TTLs.
 * @param region: scratch allocations for the compression tree.
 * @param udpsize: limit the packet to this size.
 * @param dnssec: whether DNSSEC records are wanted.
 * @return 0 on out-of-memory failure, 1 on success (incl. truncated).
 */
int
reply_info_encode(struct query_info* qinfo, struct reply_info* rep,
	uint16_t id, uint16_t flags, sldns_buffer* buffer, time_t timenow,
	struct regional* region, uint16_t udpsize, int dnssec)
{
	uint16_t ancount=0, nscount=0, arcount=0;
	struct compress_tree_node* tree = 0;
	int r;
	size_t rr_offset;
	sldns_buffer_clear(buffer);
	/* never write beyond the advertised udp size */
	if(udpsize < sldns_buffer_limit(buffer))
		sldns_buffer_set_limit(buffer, udpsize);
	if(sldns_buffer_remaining(buffer) < LDNS_HEADER_SIZE)
		return 0;
	sldns_buffer_write(buffer, &id, sizeof(uint16_t));
	sldns_buffer_write_u16(buffer, flags);
	sldns_buffer_write_u16(buffer, rep->qdcount);
	/* set an, ns, ar counts to zero in case of small packets */
	sldns_buffer_write(buffer, "\000\000\000\000\000\000", 6);
	/* insert query section */
	if(rep->qdcount) {
		if((r=insert_query(qinfo, &tree, buffer, region)) !=
			RETVAL_OK) {
			if(r == RETVAL_TRUNC) {
				/* create truncated message */
				sldns_buffer_write_u16_at(buffer, 4, 0);
				LDNS_TC_SET(sldns_buffer_begin(buffer));
				sldns_buffer_flip(buffer);
				return 1;
			}
			return 0;
		}
	}
	/* roundrobin offset. using query id for random number. With ntohs
	 * for different roundrobins for sequential id client senders. */
	rr_offset = RRSET_ROUNDROBIN?ntohs(id):0;
	/* insert answer section */
	if((r=insert_section(rep, rep->an_numrrsets, &ancount, buffer,
		0, timenow, region, &tree, LDNS_SECTION_ANSWER, qinfo->qtype,
		dnssec, rr_offset)) != RETVAL_OK) {
		if(r == RETVAL_TRUNC) {
			/* create truncated message */
			sldns_buffer_write_u16_at(buffer, 6, ancount);
			LDNS_TC_SET(sldns_buffer_begin(buffer));
			sldns_buffer_flip(buffer);
			return 1;
		}
		return 0;
	}
	sldns_buffer_write_u16_at(buffer, 6, ancount);
	/* if response is positive answer, auth/add sections are not required */
	if( ! (MINIMAL_RESPONSES && positive_answer(rep, qinfo->qtype)) ) {
		/* insert auth section */
		if((r=insert_section(rep, rep->ns_numrrsets, &nscount, buffer,
			rep->an_numrrsets, timenow, region, &tree,
			LDNS_SECTION_AUTHORITY, qinfo->qtype,
			dnssec, rr_offset)) != RETVAL_OK) {
			if(r == RETVAL_TRUNC) {
				/* create truncated message */
				sldns_buffer_write_u16_at(buffer, 8, nscount);
				LDNS_TC_SET(sldns_buffer_begin(buffer));
				sldns_buffer_flip(buffer);
				return 1;
			}
			return 0;
		}
		sldns_buffer_write_u16_at(buffer, 8, nscount);
		/* insert add section */
		if((r=insert_section(rep, rep->ar_numrrsets, &arcount, buffer,
			rep->an_numrrsets + rep->ns_numrrsets, timenow, region,
			&tree, LDNS_SECTION_ADDITIONAL, qinfo->qtype,
			dnssec, rr_offset)) != RETVAL_OK) {
			if(r == RETVAL_TRUNC) {
				/* no need to set TC bit, this is the additional */
				sldns_buffer_write_u16_at(buffer, 10, arcount);
				sldns_buffer_flip(buffer);
				return 1;
			}
			return 0;
		}
		sldns_buffer_write_u16_at(buffer, 10, arcount);
	}
	sldns_buffer_flip(buffer);
	return 1;
}
uint16_t
calc_edns_field_size(struct edns_data* edns)
{
if(!edns || !edns->edns_present)
return 0;
/* domain root '.' + type + class + ttl + rdatalen(=0) */
return 1 + 2 + 2 + 4 + 2;
}
/**
 * Append the EDNS OPT pseudo-RR to an already-flipped packet and bump
 * the additional count. Space for the record must have been reserved
 * (see calc_edns_field_size).
 * NOTE(review): assumes pkt is flipped, so its limit is the current
 * message length — confirmed by callers here flipping before calling.
 * @param pkt: packet to append to.
 * @param edns: edns data to write; no-op if absent.
 */
void
attach_edns_record(sldns_buffer* pkt, struct edns_data* edns)
{
	size_t len;
	if(!edns || !edns->edns_present)
		return;
	/* inc additional count */
	sldns_buffer_write_u16_at(pkt, 10,
		sldns_buffer_read_u16_at(pkt, 10) + 1);
	/* reopen the buffer for writing at the end of the message */
	len = sldns_buffer_limit(pkt);
	sldns_buffer_clear(pkt);
	sldns_buffer_set_position(pkt, len);
	/* write EDNS record; per EDNS the class carries the udp size and
	 * the ttl field carries ext-rcode, version and flag bits */
	sldns_buffer_write_u8(pkt, 0); /* '.' label */
	sldns_buffer_write_u16(pkt, LDNS_RR_TYPE_OPT); /* type */
	sldns_buffer_write_u16(pkt, edns->udp_size); /* class */
	sldns_buffer_write_u8(pkt, edns->ext_rcode); /* ttl */
	sldns_buffer_write_u8(pkt, edns->edns_version);
	sldns_buffer_write_u16(pkt, edns->bits);
	sldns_buffer_write_u16(pkt, 0); /* rdatalen */
	sldns_buffer_flip(pkt);
}
/**
 * Encode a reply for a client: fix up the header flags, reserve room
 * for the EDNS record, encode the message, then attach the OPT RR.
 * @param qinf: query section content.
 * @param rep: the reply to encode.
 * @param id: query id (network order).
 * @param qflags: flags from the query; RD and CD are copied over.
 * @param pkt: output buffer.
 * @param timenow: for relative TTLs.
 * @param cached: true if served from cache; strips AA unless the
 *	cached message is authoritative.
 * @param region: scratch allocations.
 * @param udpsize: maximum reply size.
 * @param edns: edns data to attach; omitted if it does not fit.
 * @param dnssec: whether DNSSEC records are wanted.
 * @param secure: if validated secure, may set the AD bit.
 * @return 0 on failure (too small buffer or out of memory), 1 on success.
 */
int
reply_info_answer_encode(struct query_info* qinf, struct reply_info* rep,
	uint16_t id, uint16_t qflags, sldns_buffer* pkt, time_t timenow,
	int cached, struct regional* region, uint16_t udpsize,
	struct edns_data* edns, int dnssec, int secure)
{
	uint16_t flags;
	int attach_edns = 1;
	if(!cached || rep->authoritative) {
		/* original flags, copy RD and CD bits from query. */
		flags = rep->flags | (qflags & (BIT_RD|BIT_CD));
	} else {
		/* remove AA bit, copy RD and CD bits from query. */
		flags = (rep->flags & ~BIT_AA) | (qflags & (BIT_RD|BIT_CD));
	}
	/* AD bit only if validated secure and the client wants it */
	if(secure && (dnssec || (qflags&BIT_AD)))
		flags |= BIT_AD;
	log_assert(flags & BIT_QR); /* QR bit must be on in our replies */
	if(udpsize < LDNS_HEADER_SIZE)
		return 0;
	if(udpsize < LDNS_HEADER_SIZE + calc_edns_field_size(edns)) {
		/* packet too small to contain edns, omit it. */
		attach_edns = 0;
	} else {
		/* reserve space for edns record */
		udpsize -= calc_edns_field_size(edns);
	}
	if(!reply_info_encode(qinf, rep, id, flags, pkt, timenow, region,
		udpsize, dnssec)) {
		log_err("reply encode: out of memory");
		return 0;
	}
	if(attach_edns)
		attach_edns_record(pkt, edns);
	return 1;
}
/**
 * Encode a plain question-only query packet (no answer records).
 * The 2-byte id field is skipped; the caller writes the id later.
 * @param pkt: output buffer, must be large enough (asserted).
 * @param qinfo: query name, type and class.
 */
void
qinfo_query_encode(sldns_buffer* pkt, struct query_info* qinfo)
{
	uint16_t flags = 0; /* QUERY, NOERROR */
	sldns_buffer_clear(pkt);
	log_assert(sldns_buffer_remaining(pkt) >= 12+255+4/*max query*/);
	sldns_buffer_skip(pkt, 2); /* id done later */
	sldns_buffer_write_u16(pkt, flags);
	sldns_buffer_write_u16(pkt, 1); /* query count */
	sldns_buffer_write(pkt, "\000\000\000\000\000\000", 6); /* counts */
	sldns_buffer_write(pkt, qinfo->qname, qinfo->qname_len);
	sldns_buffer_write_u16(pkt, qinfo->qtype);
	sldns_buffer_write_u16(pkt, qinfo->qclass);
	sldns_buffer_flip(pkt);
}
/**
 * Encode an error reply: header with the error rcode, optionally the
 * question section echoed back, and an EDNS record if it fits.
 * @param buf: output buffer.
 * @param r: the DNS rcode to return.
 * @param qinfo: query to echo in the question section; may be NULL.
 * @param qid: query id (network order, written as-is).
 * @param qflags: flags from the query; RD and CD are copied over.
 * @param edns: edns data from the query; attached sanitized, or NULL.
 */
void
error_encode(sldns_buffer* buf, int r, struct query_info* qinfo,
	uint16_t qid, uint16_t qflags, struct edns_data* edns)
{
	uint16_t flags;
	sldns_buffer_clear(buf);
	sldns_buffer_write(buf, &qid, sizeof(uint16_t));
	flags = (uint16_t)(BIT_QR | BIT_RA | r); /* QR and retcode*/
	flags |= (qflags & (BIT_RD|BIT_CD)); /* copy RD and CD bit */
	sldns_buffer_write_u16(buf, flags);
	/* qdcount: 1 if we echo the question, else 0 */
	if(qinfo) flags = 1;
	else flags = 0;
	sldns_buffer_write_u16(buf, flags);
	/* an, ns, ar counts are all zero */
	flags = 0;
	sldns_buffer_write(buf, &flags, sizeof(uint16_t));
	sldns_buffer_write(buf, &flags, sizeof(uint16_t));
	sldns_buffer_write(buf, &flags, sizeof(uint16_t));
	if(qinfo) {
		/* qname may already be at the write position; then skip
		 * instead of an overlapping copy */
		if(sldns_buffer_current(buf) == qinfo->qname)
			sldns_buffer_skip(buf, (ssize_t)qinfo->qname_len);
		else sldns_buffer_write(buf, qinfo->qname, qinfo->qname_len);
		sldns_buffer_write_u16(buf, qinfo->qtype);
		sldns_buffer_write_u16(buf, qinfo->qclass);
	}
	sldns_buffer_flip(buf);
	if(edns) {
		/* attach a sanitized copy of the client's edns record */
		struct edns_data es = *edns;
		es.edns_version = EDNS_ADVERTISED_VERSION;
		es.udp_size = EDNS_ADVERTISED_SIZE;
		es.ext_rcode = 0;
		es.bits &= EDNS_DO;
		if(sldns_buffer_limit(buf) + calc_edns_field_size(&es) >
			edns->udp_size)
			return;
		attach_edns_record(buf, &es);
	}
}

131
external/unbound/util/data/msgencode.h vendored Normal file
View File

@@ -0,0 +1,131 @@
/*
* util/data/msgencode.h - encode compressed DNS messages.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains temporary data structures and routines to create
* compressed DNS messages.
*/
#ifndef UTIL_DATA_MSGENCODE_H
#define UTIL_DATA_MSGENCODE_H
struct sldns_buffer;
struct query_info;
struct reply_info;
struct regional;
struct edns_data;
/**
* Generate answer from reply_info.
* @param qinf: query information that provides query section in packet.
* @param rep: reply to fill in.
* @param id: id word from the query.
* @param qflags: flags word from the query.
* @param dest: buffer to put message into; will truncate if it does not fit.
* @param timenow: time to subtract.
* @param cached: set true if a cached reply (so no AA bit).
* set false for the first reply.
* @param region: where to allocate temp variables (for compression).
* @param udpsize: size of the answer, 512, from EDNS, or 64k for TCP.
* @param edns: EDNS data included in the answer, NULL for none.
* or if edns_present = 0, it is not included.
* @param dnssec: if 0 DNSSEC records are omitted from the answer.
* @param secure: if 1, the AD bit is set in the reply.
* @return: 0 on error (server failure).
*/
int reply_info_answer_encode(struct query_info* qinf, struct reply_info* rep,
uint16_t id, uint16_t qflags, struct sldns_buffer* dest, time_t timenow,
int cached, struct regional* region, uint16_t udpsize,
struct edns_data* edns, int dnssec, int secure);
/**
* Regenerate the wireformat from the stored msg reply.
* If the buffer is too small then the message is truncated at a whole
* rrset and the TC bit set, or whole rrsets are left out of the additional
* and the TC bit is not set.
* @param qinfo: query info to store.
* @param rep: reply to store.
* @param id: id value to store, network order.
* @param flags: flags value to store, host order.
* @param buffer: buffer to store the packet into.
* @param timenow: time now, to adjust ttl values.
* @param region: to store temporary data in.
* @param udpsize: size of the answer, 512, from EDNS, or 64k for TCP.
* @param dnssec: if 0 DNSSEC records are omitted from the answer.
* @return: nonzero is success, or
* 0 on error: malloc failure (no log_err has been done).
*/
int reply_info_encode(struct query_info* qinfo, struct reply_info* rep,
uint16_t id, uint16_t flags, struct sldns_buffer* buffer, time_t timenow,
struct regional* region, uint16_t udpsize, int dnssec);
/**
* Encode query packet. Assumes the buffer is large enough.
* @param pkt: where to store the packet.
* @param qinfo: query info.
*/
void qinfo_query_encode(struct sldns_buffer* pkt, struct query_info* qinfo);
/**
* Estimate size of EDNS record in packet. EDNS record will be no larger.
* @param edns: edns data or NULL.
* @return octets to reserve for EDNS.
*/
uint16_t calc_edns_field_size(struct edns_data* edns);
/**
* Attach EDNS record to buffer. Buffer has complete packet. There must
* be enough room left for the EDNS record.
* @param pkt: packet added to.
* @param edns: if NULL or present=0, nothing is added to the packet.
*/
void attach_edns_record(struct sldns_buffer* pkt, struct edns_data* edns);
/**
* Encode an error. With QR and RA set.
*
* @param pkt: where to store the packet.
* @param r: RCODE value to encode.
* @param qinfo: if not NULL, the query is included.
* @param qid: query ID to set in packet. network order.
* @param qflags: original query flags (to copy RD and CD bits). host order.
* @param edns: if not NULL, this is the query edns info,
* and an edns reply is attached. Only attached if EDNS record fits reply.
*/
void error_encode(struct sldns_buffer* pkt, int r, struct query_info* qinfo,
uint16_t qid, uint16_t qflags, struct edns_data* edns);
#endif /* UTIL_DATA_MSGENCODE_H */

1022
external/unbound/util/data/msgparse.c vendored Normal file

File diff suppressed because it is too large Load Diff

301
external/unbound/util/data/msgparse.h vendored Normal file
View File

@@ -0,0 +1,301 @@
/*
* util/data/msgparse.h - parse wireformat DNS messages.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
* Contains message parsing data structures.
* These point back into the packet buffer.
*
* During parsing RRSIGS are put together with the rrsets they (claim to) sign.
* This process works as follows:
* o if RRSIG follows the data rrset, it is added to the rrset rrsig list.
* o if no matching data rrset is found, the RRSIG becomes a new rrset.
* o If the data rrset later follows the RRSIG
* o See if the RRSIG rrset contains multiple types, and needs to
* have the rrsig(s) for that data type split off.
* o Put the data rr as data type in the rrset and rrsig in list.
* o RRSIGs are allowed to move to a different section. The section of
* the data item is used for the final rrset.
* o multiple signatures over an RRset are possible.
*
* For queries of qtype=RRSIG, some special handling is needed, to avoid
* splitting the RRSIG in the answer section.
* o duplicate, not split, RRSIGs from the answer section, if qtype=RRSIG.
* o check for doubles in the rrsig list when adding an RRSIG to data,
* so that a data rrset is signed by RRSIGs with different rdata.
* when qtype=RRSIG.
* This will move the RRSIG from the answer section to sign the data further
* in the packet (if possible). If then after that, more RRSIGs are found
* that sign the data as well, doubles are removed.
*/
#ifndef UTIL_DATA_MSGPARSE_H
#define UTIL_DATA_MSGPARSE_H
#include "util/storage/lruhash.h"
#include "ldns/pkthdr.h"
#include "ldns/rrdef.h"
struct sldns_buffer;
struct rrset_parse;
struct rr_parse;
struct regional;
/** number of buckets in parse rrset hash table. Must be power of 2. */
#define PARSE_TABLE_SIZE 32
/** Maximum TTL that is allowed, in seconds (defined in msgreply.c). */
extern time_t MAX_TTL;
/** Minimum TTL that is allowed, in seconds (defined in msgreply.c). */
extern time_t MIN_TTL;
/** Negative cache time (for entries without any RRs.) */
#define NORR_TTL 5 /* seconds */
/**
 * Data stored in scratch pad memory during parsing.
 * Stores the data that will enter into the msgreply and packet result.
 */
struct msg_parse {
	/** id from message, network format. */
	uint16_t id;
	/** flags from message, host format. */
	uint16_t flags;
	/** count of question section RRs, host format */
	uint16_t qdcount;
	/** count of answer section RRs, host format */
	uint16_t ancount;
	/** count of authority section RRs, host format */
	uint16_t nscount;
	/** count of additional section RRs, host format */
	uint16_t arcount;
	/** count of RRsets in the answer section. */
	size_t an_rrsets;
	/** count of RRsets in the authority section. */
	size_t ns_rrsets;
	/** count of RRsets in the additional section. */
	size_t ar_rrsets;
	/** total number of rrsets found. */
	size_t rrset_count;
	/** query dname (pointer to start location in packet, NULL if none) */
	uint8_t* qname;
	/** length of query dname in octets, 0 if none */
	size_t qname_len;
	/** query type, host order. 0 if qdcount=0 */
	uint16_t qtype;
	/** query class, host order. 0 if qdcount=0 */
	uint16_t qclass;
	/**
	 * Hash table array used during parsing to lookup rrset types.
	 * Based on name, type, class.  Same hash value as in rrset cache.
	 */
	struct rrset_parse* hashtable[PARSE_TABLE_SIZE];
	/** linked list of rrsets that have been found (in packet order). */
	struct rrset_parse* rrset_first;
	/** last element of rrset list. */
	struct rrset_parse* rrset_last;
};
/**
 * Data stored for an rrset during parsing.
 * Dname and rdata pointers point back into the packet buffer.
 */
struct rrset_parse {
	/** next in hash bucket */
	struct rrset_parse* rrset_bucket_next;
	/** next in list of all rrsets */
	struct rrset_parse* rrset_all_next;
	/** hash value of rrset (same hash value as in the rrset cache) */
	hashvalue_t hash;
	/** which section was it found in: one of
	 * LDNS_SECTION_ANSWER, LDNS_SECTION_AUTHORITY, LDNS_SECTION_ADDITIONAL
	 */
	sldns_pkt_section section;
	/** start of (possibly compressed) dname in packet */
	uint8_t* dname;
	/** length of the dname in uncompressed wireformat */
	size_t dname_len;
	/** type, host order. */
	uint16_t type;
	/** class, network order. var name so that it is not a c++ keyword. */
	uint16_t rrset_class;
	/** the flags for the rrset, like for packedrrset */
	uint32_t flags;
	/** number of RRs in the rr list */
	size_t rr_count;
	/** sum of RR rdata sizes */
	size_t size;
	/** linked list of RRs in this rrset. */
	struct rr_parse* rr_first;
	/** last in list of RRs in this rrset. */
	struct rr_parse* rr_last;
	/** number of RRSIGs over this rrset. */
	size_t rrsig_count;
	/** linked list of RRsig RRs over this rrset. */
	struct rr_parse* rrsig_first;
	/** last in list of RRSIG RRs over this rrset. */
	struct rr_parse* rrsig_last;
};
/**
 * Data stored for a single RR during parsing.
 * The owner dname, type and class are shared with (stored in) the rrset.
 */
struct rr_parse {
	/**
	 * Pointer to the RR. Points to start of TTL value in the packet.
	 * Rdata length and rdata follow it.
	 * its dname, type and class are the same and stored for the rrset.
	 */
	uint8_t* ttl_data;
	/** true if ttl_data is not part of the packet, but elsewhere in mem.
	 * Set for generated CNAMEs for DNAMEs. */
	int outside_packet;
	/** the length of the rdata if allocated (with no dname compression)*/
	size_t size;
	/** next in list of RRs. */
	struct rr_parse* next;
};
/** Check if label length is first octet of a compression pointer, pass u8. */
#define LABEL_IS_PTR(x) ( ((x)&0xc0) == 0xc0 )
/** Calculate destination offset of a compression pointer. pass first and
 * second octets of the compression pointer. */
#define PTR_OFFSET(x, y) ( ((x)&0x3f)<<8 | (y) )
/** create a compression pointer to the given offset (offset must fit in
 * 14 bits, see PTR_MAX_OFFSET). */
#define PTR_CREATE(offset) ((uint16_t)(0xc000 | (offset)))

/** error codes, extended with EDNS, so > 15. */
#define EDNS_RCODE_BADVERS 16 /**< bad EDNS version */
/** largest valid compression offset (14 bits) */
#define PTR_MAX_OFFSET 0x3fff
/**
 * EDNS data storage, extracted from the OPT record.
 * EDNS rdata (options) is ignored.
 */
struct edns_data {
	/** if EDNS OPT record was present */
	int edns_present;
	/** Extended RCODE */
	uint8_t ext_rcode;
	/** The EDNS version number */
	uint8_t edns_version;
	/** the EDNS bits field from ttl (host order): DO bit and Z bits */
	uint16_t bits;
	/** UDP reassembly size. */
	uint16_t udp_size;
};
/**
* Obtain size in the packet of an rr type, that is before dname type.
* Do TYPE_DNAME, and type STR, yourself. Gives size for most regular types.
* @param rdf: the rdf type from the descriptor.
* @return: size in octets. 0 on failure.
*/
size_t get_rdf_size(sldns_rdf_type rdf);
/**
* Parse the packet.
* @param pkt: packet, position at call must be at start of packet.
* at end position is after packet.
* @param msg: where to store results.
* @param region: how to alloc results.
* @return: 0 if OK, or rcode on error.
*/
int parse_packet(struct sldns_buffer* pkt, struct msg_parse* msg,
struct regional* region);
/**
* After parsing the packet, extract EDNS data from packet.
* If not present this is noted in the data structure.
* If a parse error happens, an error code is returned.
*
* Quirks:
* o ignores OPT rdata.
* o ignores OPT owner name.
* o ignores extra OPT records, except the last one in the packet.
*
* @param msg: parsed message structure. Modified on exit, if EDNS was present
* it is removed from the additional section.
* @param edns: the edns data is stored here. Does not have to be initialised.
* @return: 0 on success. or an RCODE on an error.
* RCODE formerr if OPT in wrong section, and so on.
*/
int parse_extract_edns(struct msg_parse* msg, struct edns_data* edns);
/**
* If EDNS data follows a query section, extract it and initialize edns struct.
* @param pkt: the packet. position at start must be right after the query
* section. At end, right after EDNS data or no movement if failed.
* @param edns: the edns data allocated by the caller. Does not have to be
* initialised.
* @return: 0 on success, or an RCODE on error.
* RCODE formerr if OPT is badly formatted and so on.
*/
int parse_edns_from_pkt(struct sldns_buffer* pkt, struct edns_data* edns);
/**
* Calculate hash value for rrset in packet.
* @param pkt: the packet.
* @param dname: pointer to uncompressed dname, or compressed dname in packet.
* @param type: rrset type in host order.
* @param dclass: rrset class in network order.
* @param rrset_flags: rrset flags (same as packed_rrset flags).
* @return hash value
*/
hashvalue_t pkt_hash_rrset(struct sldns_buffer* pkt, uint8_t* dname, uint16_t type,
uint16_t dclass, uint32_t rrset_flags);
/**
* Lookup in msg hashtable to find a rrset.
* @param msg: with the hashtable.
* @param pkt: packet for compressed names.
* @param h: hash value
* @param rrset_flags: flags of rrset sought for.
* @param dname: name of rrset sought for.
* @param dnamelen: len of dname.
* @param type: rrset type, host order.
* @param dclass: rrset class, network order.
* @return NULL or the rrset_parse if found.
*/
struct rrset_parse* msgparse_hashtable_lookup(struct msg_parse* msg,
struct sldns_buffer* pkt, hashvalue_t h, uint32_t rrset_flags,
uint8_t* dname, size_t dnamelen, uint16_t type, uint16_t dclass);
/**
* Remove rrset from hash table.
* @param msg: with hashtable.
* @param rrset: with hash value and id info.
*/
void msgparse_bucket_remove(struct msg_parse* msg, struct rrset_parse* rrset);
#endif /* UTIL_DATA_MSGPARSE_H */

830
external/unbound/util/data/msgreply.c vendored Normal file
View File

@@ -0,0 +1,830 @@
/*
* util/data/msgreply.c - store message and reply data.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains a data structure to store a message and its reply.
*/
#include "config.h"
#include "util/data/msgreply.h"
#include "util/storage/lookup3.h"
#include "util/log.h"
#include "util/alloc.h"
#include "util/netevent.h"
#include "util/net_help.h"
#include "util/data/dname.h"
#include "util/regional.h"
#include "util/data/msgparse.h"
#include "util/data/msgencode.h"
#include "ldns/sbuffer.h"
#include "ldns/wire2str.h"
/** MAX TTL default for messages and rrsets, in seconds */
time_t MAX_TTL = 3600 * 24 * 10; /* ten days */
/** MIN TTL default for messages and rrsets, in seconds (0: no minimum) */
time_t MIN_TTL = 0;
/**
 * Fill a query_info from parsed message data, copying (and decompressing)
 * the query name out of the packet.
 * @param pkt: packet, used to resolve compression in the qname.
 * @param msg: parsed message with qname pointer and query fields.
 * @param qinf: result structure that is filled.
 * @param region: if non-NULL the qname is region-allocated, else malloced.
 * @return 0 on allocation failure, 1 on success.
 */
static int
parse_create_qinfo(sldns_buffer* pkt, struct msg_parse* msg,
	struct query_info* qinf, struct regional* region)
{
	qinf->qname_len = msg->qname_len;
	qinf->qtype = msg->qtype;
	qinf->qclass = msg->qclass;
	if(!msg->qname) {
		qinf->qname = 0;
		return 1;
	}
	qinf->qname = region?
		(uint8_t*)regional_alloc(region, msg->qname_len):
		(uint8_t*)malloc(msg->qname_len);
	if(!qinf->qname)
		return 0;
	/* uncompress the name into the freshly allocated buffer */
	dname_pkt_copy(pkt, qinf->qname, msg->qname);
	return 1;
}
/**
 * Constructor for replyinfo.
 * Allocates the struct together with trailing storage.  Layout: for a
 * region allocation the ref[] array is not kept and the rrset pointer
 * array starts at ref[0]; for malloc, ref[0..total-1] is kept and the
 * pointer array follows at ref[total].  msgreply_sizefunc mirrors this.
 * @param region: if non-NULL allocate in region (no refs), else malloc.
 * @param flags: reply flags word, host order.
 * @param qd: qdcount for the reply.
 * @param ttl: overall ttl value.
 * @param prettl: prefetch ttl value.
 * @param an, ns, ar: rrset counts per section.
 * @param total: total number of rrsets (an+ns+ar).
 * @param sec: security status of the reply.
 * @return new reply_info, or NULL on allocation failure.
 */
struct reply_info*
construct_reply_info_base(struct regional* region, uint16_t flags, size_t qd,
	time_t ttl, time_t prettl, size_t an, size_t ns, size_t ar,
	size_t total, enum sec_status sec)
{
	struct reply_info* rep;
	/* rrset_count-1 because the first ref is part of the struct. */
	size_t s = sizeof(struct reply_info) - sizeof(struct rrset_ref) +
		sizeof(struct ub_packed_rrset_key*) * total;
	if(region)
		rep = (struct reply_info*)regional_alloc(region, s);
	else	rep = (struct reply_info*)malloc(s +
		sizeof(struct rrset_ref) * (total));
	if(!rep)
		return NULL;
	rep->flags = flags;
	rep->qdcount = qd;
	rep->ttl = ttl;
	rep->prefetch_ttl = prettl;
	rep->an_numrrsets = an;
	rep->ns_numrrsets = ns;
	rep->ar_numrrsets = ar;
	rep->rrset_count = total;
	rep->security = sec;
	rep->authoritative = 0;
	/* array starts after the refs */
	if(region)
		rep->rrsets = (struct ub_packed_rrset_key**)&(rep->ref[0]);
	else	rep->rrsets = (struct ub_packed_rrset_key**)&(rep->ref[total]);
	/* zero the arrays to assist cleanup in case of malloc failure */
	memset( rep->rrsets, 0, sizeof(struct ub_packed_rrset_key*) * total);
	if(!region)
		memset( &rep->ref[0], 0, sizeof(struct rrset_ref) * total);
	return rep;
}
/** allocate replyinfo, return 0 on error */
static int
parse_create_repinfo(struct msg_parse* msg, struct reply_info** rep,
struct regional* region)
{
*rep = construct_reply_info_base(region, msg->flags, msg->qdcount, 0,
0, msg->an_rrsets, msg->ns_rrsets, msg->ar_rrsets,
msg->rrset_count, sec_status_unchecked);
if(!*rep)
return 0;
return 1;
}
/** allocate (special) rrset keys, return 0 on error */
static int
repinfo_alloc_rrset_keys(struct reply_info* rep, struct alloc_cache* alloc,
struct regional* region)
{
size_t i;
for(i=0; i<rep->rrset_count; i++) {
if(region) {
rep->rrsets[i] = (struct ub_packed_rrset_key*)
regional_alloc(region,
sizeof(struct ub_packed_rrset_key));
if(rep->rrsets[i]) {
memset(rep->rrsets[i], 0,
sizeof(struct ub_packed_rrset_key));
rep->rrsets[i]->entry.key = rep->rrsets[i];
}
}
else rep->rrsets[i] = alloc_special_obtain(alloc);
if(!rep->rrsets[i])
return 0;
rep->rrsets[i]->entry.data = NULL;
}
return 1;
}
/**
 * Copy the rdata of one RR into destination memory, decompressing any
 * dnames in the rdata according to the type's wireformat descriptor,
 * and clamp/aggregate the TTL.
 * @param pkt: the packet; its position is moved to the RR's rdata.
 * @param data: packed rrset data; its minimum ttl is updated.
 * @param to: destination; receives u16 rdata length then rdata octets.
 * @param rr: parsed RR; ttl_data points at the TTL field.
 * @param rr_ttl: receives the (clamped) TTL of this RR.
 * @param type: RR type, host order, selects the descriptor.
 * @return 0 on packet-format failure, 1 on success.
 */
static int
rdata_copy(sldns_buffer* pkt, struct packed_rrset_data* data, uint8_t* to,
	struct rr_parse* rr, time_t* rr_ttl, uint16_t type)
{
	uint16_t pkt_len;
	const sldns_rr_descriptor* desc;
	*rr_ttl = sldns_read_uint32(rr->ttl_data);
	/* RFC 2181 Section 8. if msb of ttl is set treat as if zero. */
	if(*rr_ttl & 0x80000000U)
		*rr_ttl = 0;
	if(*rr_ttl < MIN_TTL)
		*rr_ttl = MIN_TTL;
	/* keep the smallest RR ttl as the rrset's ttl */
	if(*rr_ttl < data->ttl)
		data->ttl = *rr_ttl;
	if(rr->outside_packet) {
		/* uncompressed already, only needs copy */
		memmove(to, rr->ttl_data+sizeof(uint32_t), rr->size);
		return 1;
	}
	/* position the buffer right after this RR's TTL field */
	sldns_buffer_set_position(pkt, (size_t)
		(rr->ttl_data - sldns_buffer_begin(pkt) + sizeof(uint32_t)));
	/* insert decompressed size into rdata len stored in memory */
	/* -2 because rdatalen bytes are not included. */
	pkt_len = htons(rr->size - 2);
	memmove(to, &pkt_len, sizeof(uint16_t));
	to += 2;
	/* read packet rdata len */
	pkt_len = sldns_buffer_read_u16(pkt);
	if(sldns_buffer_remaining(pkt) < pkt_len)
		return 0;
	desc = sldns_rr_descript(type);
	if(pkt_len > 0 && desc && desc->_dname_count > 0) {
		int count = (int)desc->_dname_count;
		int rdf = 0;
		size_t len;
		size_t oldpos;
		/* walk the rdata fields until all dnames are decompressed */
		while(pkt_len > 0 && count) {
			switch(desc->_wireformat[rdf]) {
			case LDNS_RDF_TYPE_DNAME:
				/* decompress; len stays 0, pointers are
				 * advanced here directly */
				oldpos = sldns_buffer_position(pkt);
				dname_pkt_copy(pkt, to,
					sldns_buffer_current(pkt));
				to += pkt_dname_len(pkt);
				pkt_len -= sldns_buffer_position(pkt)-oldpos;
				count--;
				len = 0;
				break;
			case LDNS_RDF_TYPE_STR:
				/* character-string: length octet + data */
				len = sldns_buffer_current(pkt)[0] + 1;
				break;
			default:
				len = get_rdf_size(desc->_wireformat[rdf]);
				break;
			}
			if(len) {
				/* fixed-size field: verbatim copy */
				memmove(to, sldns_buffer_current(pkt), len);
				to += len;
				sldns_buffer_skip(pkt, (ssize_t)len);
				log_assert(len <= pkt_len);
				pkt_len -= len;
			}
			rdf++;
		}
	}
	/* copy remaining rdata */
	if(pkt_len >  0)
		memmove(to, sldns_buffer_current(pkt), pkt_len);
	return 1;
}
/**
 * Copy all RRs (and RRSIGs) of a parsed rrset into the packed rrset data.
 * The data block must already be large enough (see parse_create_rrset);
 * the internal rr_len/rr_data/rr_ttl array pointers are set up here.
 * @param pkt: packet, for rdata decompression.
 * @param pset: the parsed rrset to copy from.
 * @param data: destination packed rrset data block.
 * @return 0 on packet-format failure in an RR, 1 on success.
 */
static int
parse_rr_copy(sldns_buffer* pkt, struct rrset_parse* pset,
	struct packed_rrset_data* data)
{
	size_t i;
	struct rr_parse* rr = pset->rr_first;
	uint8_t* nextrdata;
	size_t total = pset->rr_count + pset->rrsig_count;
	data->ttl = MAX_TTL; /* rdata_copy lowers this to the minimum seen */
	data->count = pset->rr_count;
	data->rrsig_count = pset->rrsig_count;
	data->trust = rrset_trust_none;
	data->security = sec_status_unchecked;
	/* layout: struct - rr_len - rr_data - rr_ttl - rdata - rrsig */
	data->rr_len = (size_t*)((uint8_t*)data +
		sizeof(struct packed_rrset_data));
	data->rr_data = (uint8_t**)&(data->rr_len[total]);
	data->rr_ttl = (time_t*)&(data->rr_data[total]);
	nextrdata = (uint8_t*)&(data->rr_ttl[total]);
	for(i=0; i<data->count; i++) {
		data->rr_len[i] = rr->size;
		data->rr_data[i] = nextrdata;
		nextrdata += rr->size;
		if(!rdata_copy(pkt, data, data->rr_data[i], rr,
			&data->rr_ttl[i], pset->type))
			return 0;
		rr = rr->next;
	}
	/* if rrsig, its rdata is at nextrdata */
	rr = pset->rrsig_first;
	for(i=data->count; i<total; i++) {
		data->rr_len[i] = rr->size;
		data->rr_data[i] = nextrdata;
		nextrdata += rr->size;
		if(!rdata_copy(pkt, data, data->rr_data[i], rr,
			&data->rr_ttl[i], LDNS_RR_TYPE_RRSIG))
			return 0;
		rr = rr->next;
	}
	return 1;
}
/**
 * Allocate and fill a packed_rrset_data for one parsed rrset.
 * A single allocation holds the struct, the per-RR len/data/ttl arrays
 * and all rdata octets (layout set up by parse_rr_copy).
 * @param pkt: packet, for decompression of rdata dnames.
 * @param pset: parsed rrset to convert.
 * @param data: receives the new packed data block.
 * @param region: allocation region, or NULL for malloc.
 * @return 0 on failure (malloced memory is freed again), 1 on success.
 */
static int
parse_create_rrset(sldns_buffer* pkt, struct rrset_parse* pset,
	struct packed_rrset_data** data, struct regional* region)
{
	size_t total = pset->rr_count + pset->rrsig_count;
	size_t s = sizeof(struct packed_rrset_data) + total *
		(sizeof(size_t)+sizeof(uint8_t*)+sizeof(time_t)) +
		pset->size;
	*data = region? regional_alloc(region, s): malloc(s);
	if(!*data)
		return 0;
	/* copy & decompress */
	if(parse_rr_copy(pkt, pset, *data))
		return 1;
	/* copy failed; only malloced memory needs explicit cleanup */
	if(!region)
		free(*data);
	return 0;
}
/**
 * Determine the trust level for an rrset from the message it arrived in.
 * The section it was found in and the AA flag decide the value; per
 * RFC2181 the remainder of a CNAME (or DNAME) chain in an authoritative
 * answer is treated as non-authoritative.
 * @param msg: parsed message, for flags and the first answer rrset.
 * @param rrset: the rrset to classify.
 * @return the trust level for this rrset.
 */
static enum rrset_trust
get_rrset_trust(struct msg_parse* msg, struct rrset_parse* rrset)
{
	int aa = (msg->flags & BIT_AA) != 0;
	struct rrset_parse* first = msg->rrset_first;
	if(rrset->section == LDNS_SECTION_ANSWER) {
		if(!aa)
			return rrset_trust_ans_noAA;
		/* RFC2181: remainder of a CNAME chain is nonauth; only
		 * the first answer rrset keeps AA trust */
		if(first && first->section == LDNS_SECTION_ANSWER
			&& first->type == LDNS_RR_TYPE_CNAME)
			return (rrset == first)?
				rrset_trust_ans_AA: rrset_trust_ans_noAA;
		/* for a DNAME, the synthesized CNAME right after it is
		 * also authoritative */
		if(first && first->section == LDNS_SECTION_ANSWER
			&& first->type == LDNS_RR_TYPE_DNAME)
			return (rrset == first ||
				rrset == first->rrset_all_next)?
				rrset_trust_ans_AA: rrset_trust_ans_noAA;
		return rrset_trust_ans_AA;
	}
	if(rrset->section == LDNS_SECTION_AUTHORITY)
		return aa? rrset_trust_auth_AA: rrset_trust_auth_noAA;
	/* additional section */
	return aa? rrset_trust_add_AA: rrset_trust_add_noAA;
}
/**
 * Copy one parsed rrset into a packed rrset key: copy and decompress the
 * owner dname, set type/class, and build the packed data (with trust).
 * @param pkt: packet, for dname and rdata decompression.
 * @param msg: parsed message, used to derive the trust level.
 * @param pset: the parsed rrset to copy.
 * @param region: allocation region, or NULL for malloc.
 * @param pk: destination rrset key; its entry.data/key/hash are set.
 * @return 0 on allocation failure, 1 on success.
 */
int
parse_copy_decompress_rrset(sldns_buffer* pkt, struct msg_parse* msg,
	struct rrset_parse *pset, struct regional* region,
	struct ub_packed_rrset_key* pk)
{
	struct packed_rrset_data* data;
	pk->rk.flags = pset->flags;
	pk->rk.dname_len = pset->dname_len;
	if(region)
		pk->rk.dname = (uint8_t*)regional_alloc(
			region, pset->dname_len);
	else	pk->rk.dname =
		(uint8_t*)malloc(pset->dname_len);
	if(!pk->rk.dname)
		return 0;
	/* copy & decompress dname */
	dname_pkt_copy(pkt, pk->rk.dname, pset->dname);
	/* copy over type and class (type to network order) */
	pk->rk.type = htons(pset->type);
	pk->rk.rrset_class = pset->rrset_class;
	/* read data part. */
	if(!parse_create_rrset(pkt, pset, &data, region))
		return 0;
	pk->entry.data = (void*)data;
	pk->entry.key = (void*)pk;
	pk->entry.hash = pset->hash;
	data->trust = get_rrset_trust(msg, pset);
	return 1;
}
/**
 * Copy and decompress all rrsets of the parsed message into the reply.
 * Also computes the overall reply ttl as the minimum rrset ttl, and the
 * prefetch ttl derived from it.
 * @param pkt: the packet for compression pointer resolution.
 * @param msg: the parsed message.
 * @param rep: reply info to put rrs into (keys already allocated).
 * @param region: if not NULL, used for allocation.
 * @return 0 on failure.
 */
static int
parse_copy_decompress(sldns_buffer* pkt, struct msg_parse* msg,
	struct reply_info* rep, struct regional* region)
{
	size_t i;
	struct rrset_parse *pset = msg->rrset_first;
	struct packed_rrset_data* data;
	log_assert(rep);
	rep->ttl = MAX_TTL;
	rep->security = sec_status_unchecked;
	/* an answer without RRs gets the (short) negative-cache ttl */
	if(rep->rrset_count == 0)
		rep->ttl = NORR_TTL;
	for(i=0; i<rep->rrset_count; i++) {
		if(!parse_copy_decompress_rrset(pkt, msg, pset, region,
			rep->rrsets[i]))
			return 0;
		/* overall reply ttl is the minimum of the rrset ttls */
		data = (struct packed_rrset_data*)rep->rrsets[i]->entry.data;
		if(data->ttl < rep->ttl)
			rep->ttl = data->ttl;
		pset = pset->rrset_all_next;
	}
	rep->prefetch_ttl = PREFETCH_TTL_CALC(rep->ttl);
	return 1;
}
/**
 * Create a query_info and reply_info from a fully parsed message:
 * allocates the qinfo, the reply structure and its rrset keys, then
 * copies and decompresses all rrsets.  On failure the caller cleans up
 * the partial result (see reply_info_parse).
 * @param pkt: packet, for dname/rdata decompression.
 * @param msg: the parsed message.
 * @param alloc: alloc cache for special rrset keys (malloc mode).
 * @param qinf: receives the query info.
 * @param rep: receives the reply info.
 * @param region: allocation region, or NULL for malloc.
 * @return 0 on allocation failure, 1 on success.
 */
int
parse_create_msg(sldns_buffer* pkt, struct msg_parse* msg,
	struct alloc_cache* alloc, struct query_info* qinf,
	struct reply_info** rep, struct regional* region)
{
	log_assert(pkt && msg);
	/* short-circuits: stop at the first failing step */
	return parse_create_qinfo(pkt, msg, qinf, region)
		&& parse_create_repinfo(msg, rep, region)
		&& repinfo_alloc_rrset_keys(*rep, alloc, region)
		&& parse_copy_decompress(pkt, msg, *rep, region);
}
/**
 * Parse a wireformat reply into a malloced query_info and reply_info.
 * Scratch allocations during parsing come from the region; the results
 * themselves are malloced (region argument NULL in parse_create_msg).
 * On failure *rep is NULL and qinf holds no allocation.
 * @param pkt: the packet buffer, parsed from position 0.
 * @param alloc: alloc cache for special rrset keys.
 * @param qinf: receives the query info.
 * @param rep: receives the malloced reply info.
 * @param region: scratch region for the parse structures.
 * @param edns: receives the EDNS data extracted from the message.
 * @return 0 on success, or an LDNS_RCODE_* error value.
 */
int reply_info_parse(sldns_buffer* pkt, struct alloc_cache* alloc,
	struct query_info* qinf, struct reply_info** rep,
	struct regional* region, struct edns_data* edns)
{
	/* use scratch pad region-allocator during parsing. */
	struct msg_parse* msg;
	int ret;
	qinf->qname = NULL;
	*rep = NULL;
	if(!(msg = regional_alloc(region, sizeof(*msg)))) {
		return LDNS_RCODE_SERVFAIL;
	}
	memset(msg, 0, sizeof(*msg));
	sldns_buffer_set_position(pkt, 0);
	if((ret = parse_packet(pkt, msg, region)) != 0) {
		return ret;
	}
	if((ret = parse_extract_edns(msg, edns)) != 0)
		return ret;
	/* parse OK, allocate return structures */
	/* this also performs dname decompression */
	if(!parse_create_msg(pkt, msg, alloc, qinf, rep, NULL)) {
		/* clean up whatever partial result was built */
		query_info_clear(qinf);
		reply_info_parsedelete(*rep, alloc);
		*rep = NULL;
		return LDNS_RCODE_SERVFAIL;
	}
	return 0;
}
/** qsort comparator: orders rrset refs by key pointer value, which is
 * the lock acquisition order used elsewhere. */
static int
reply_info_sortref_cmp(const void* a, const void* b)
{
	struct rrset_ref* x = (struct rrset_ref*)a;
	struct rrset_ref* y = (struct rrset_ref*)b;
	if(x->key == y->key)
		return 0;
	return (x->key < y->key)? -1: 1;
}
/**
 * Sort the rrset references of a reply by key pointer, so that locks on
 * them can be taken in a consistent (deadlock-free) order.
 * @param rep: reply whose ref array is sorted in place.
 */
void
reply_info_sortref(struct reply_info* rep)
{
	qsort(&rep->ref[0], rep->rrset_count, sizeof(struct rrset_ref),
		reply_info_sortref_cmp);
}
/**
 * Convert the reply's relative TTL values to absolute expiry times by
 * adding the current time, for the reply itself and every rrset.
 * @param rep: the reply to adjust.
 * @param timenow: current time to add.
 */
void
reply_info_set_ttls(struct reply_info* rep, time_t timenow)
{
	size_t i, j;
	rep->ttl += timenow;
	rep->prefetch_ttl += timenow;
	for(i=0; i<rep->rrset_count; i++) {
		struct packed_rrset_data* data = (struct packed_rrset_data*)
			rep->ref[i].key->entry.data;
		/* skip duplicate refs to the same rrset so its ttls are
		 * adjusted only once; NOTE(review): presumably the ref
		 * array is sorted (reply_info_sortref) so duplicates are
		 * adjacent — confirm at the callers */
		if(i>0 && rep->ref[i].key == rep->ref[i-1].key)
			continue;
		data->ttl += timenow;
		for(j=0; j<data->count + data->rrsig_count; j++) {
			data->rr_ttl[j] += timenow;
		}
	}
}
/**
 * Delete a freshly parsed (not yet cached) reply_info and its rrsets.
 * Safe to call with NULL.
 * @param rep: reply to delete.
 * @param alloc: alloc cache that takes back the special rrset keys.
 */
void
reply_info_parsedelete(struct reply_info* rep, struct alloc_cache* alloc)
{
	size_t i = 0;
	if(!rep)
		return;
	/* not shared in hashtables yet, so no locking is needed */
	while(i < rep->rrset_count) {
		ub_packed_rrset_parsedelete(rep->rrsets[i], alloc);
		i++;
	}
	free(rep);
}
/**
 * Parse the question section of a query packet into a query_info.
 * The qname is NOT copied: m->qname points into the query buffer.
 * The buffer position advances past the question section on success.
 * @param m: query_info to fill.
 * @param query: query packet, position must be 0.
 * @return 0 on malformed query (not QUERY opcode, qdcount!=1, too
 *	short, or bad qname), 1 on success.
 */
int
query_info_parse(struct query_info* m, sldns_buffer* query)
{
	uint8_t* q = sldns_buffer_begin(query);
	/* minimum size: header + \0 + qtype + qclass */
	if(sldns_buffer_limit(query) < LDNS_HEADER_SIZE + 5)
		return 0;
	if(LDNS_OPCODE_WIRE(q) != LDNS_PACKET_QUERY ||
		LDNS_QDCOUNT(q) != 1 || sldns_buffer_position(query) != 0)
		return 0;
	sldns_buffer_skip(query, LDNS_HEADER_SIZE);
	m->qname = sldns_buffer_current(query); /* points into the buffer */
	if((m->qname_len = query_dname_len(query)) == 0)
		return 0; /* parse error */
	if(sldns_buffer_remaining(query) < 4)
		return 0; /* need qtype, qclass */
	m->qtype = sldns_buffer_read_u16(query);
	m->qclass = sldns_buffer_read_u16(query);
	return 1;
}
/** tiny subroutine for msgreply_compare: returns from the enclosing
 * function on inequality.  Arguments are evaluated more than once, so
 * only pass simple field accesses. */
#define COMPARE_IT(x, y) \
	if( (x) < (y) ) return -1; \
	else if( (x) > (y) ) return +1; \
	log_assert( (x) == (y) );
/**
 * Hashtable comparison callback for query_info keys.
 * @param m1: first query_info.
 * @param m2: second query_info.
 * @return <0, 0 or >0 like memcmp; 0 means equal keys.
 */
int
query_info_compare(void* m1, void* m2)
{
	struct query_info* msg1 = (struct query_info*)m1;
	struct query_info* msg2 = (struct query_info*)m2;
	int mc;
	/* from most different to least different for speed */
	COMPARE_IT(msg1->qtype, msg2->qtype);
	if((mc = query_dname_compare(msg1->qname, msg2->qname)) != 0)
		return mc;
	log_assert(msg1->qname_len == msg2->qname_len);
	COMPARE_IT(msg1->qclass, msg2->qclass);
	return 0;
/* the macro is only meant for this function; limit its scope */
#undef COMPARE_IT
}
void
query_info_clear(struct query_info* m)
{
free(m->qname);
m->qname = NULL;
}
/**
 * Hashtable size callback for a message cache entry: memory used by the
 * key, lock, reply struct, refs and rrset pointer array.  The layout
 * mirrors construct_reply_info_base (one rrset_ref is inside the
 * reply_info struct, hence the subtraction before adding the full
 * per-rrset arrays).  The rrset data itself is accounted elsewhere.
 * @param k: the msgreply_entry key.
 * @param d: the reply_info data.
 * @return estimated memory use in octets.
 */
size_t
msgreply_sizefunc(void* k, void* d)
{
	struct msgreply_entry* q = (struct msgreply_entry*)k;
	struct reply_info* r = (struct reply_info*)d;
	size_t s = sizeof(struct msgreply_entry) + sizeof(struct reply_info)
		+ q->key.qname_len + lock_get_mem(&q->entry.lock)
		- sizeof(struct rrset_ref);
	s += r->rrset_count * sizeof(struct rrset_ref);
	s += r->rrset_count * sizeof(struct ub_packed_rrset_key*);
	return s;
}
/**
 * Hashtable key-deletion callback for a message cache entry.
 * Destroys the entry lock, frees the stored qname, then the entry.
 * @param k: the msgreply_entry to delete.
 * @param arg: unused callback argument.
 */
void
query_entry_delete(void *k, void* ATTR_UNUSED(arg))
{
	struct msgreply_entry* e = (struct msgreply_entry*)k;
	lock_rw_destroy(&e->entry.lock);
	query_info_clear(&e->key);
	free(e);
}
void
reply_info_delete(void* d, void* ATTR_UNUSED(arg))
{
	/* Hash-table data destructor: frees only the reply_info block
	 * itself (see reply_info_parsedelete for the variant that also
	 * releases the rrsets). */
	free(d);
}
/** Hash a query_info: qtype and qclass (host byte order, as stored)
 * then the qname; dname_query_hash handles name case (the header
 * documents this as "lowercases the qname"). */
hashvalue_t
query_info_hash(struct query_info *q)
{
	hashvalue_t h = 0xab;	/* fixed seed value */
	h = hashlittle(&q->qtype, sizeof(q->qtype), h);
	h = hashlittle(&q->qclass, sizeof(q->qclass), h);
	h = dname_query_hash(q->qname, h);
	return h;
}
/** Wrap query q and reply r into a fresh hashtable entry.
 * Ownership of q->qname moves into the entry: q->qname is set to NULL,
 * as if query_info_clear() had been called on q. */
struct msgreply_entry*
query_info_entrysetup(struct query_info* q, struct reply_info* r,
	hashvalue_t h)
{
	struct msgreply_entry* e = (struct msgreply_entry*)malloc(
		sizeof(struct msgreply_entry));
	if(!e) return NULL;
	memcpy(&e->key, q, sizeof(*q));
	e->entry.hash = h;
	e->entry.key = e;	/* entry points back at its container */
	e->entry.data = r;
	lock_rw_init(&e->entry.lock);
	/* associate the data areas guarded by this lock with it */
	lock_protect(&e->entry.lock, &e->key, sizeof(e->key));
	lock_protect(&e->entry.lock, &e->entry.hash, sizeof(e->entry.hash) +
		sizeof(e->entry.key) + sizeof(e->entry.data));
	lock_protect(&e->entry.lock, e->key.qname, e->key.qname_len);
	q->qname = NULL;	/* ownership transferred to e->key */
	return e;
}
/** copy rrsets from replyinfo to dest replyinfo.
 * dest must already have rrset_count key structures allocated (see
 * repinfo_alloc_rrset_keys called by reply_info_copy). On failure a
 * partially-filled dest is left for the caller to clean up.
 * @return 0 on allocation failure. */
static int
repinfo_copy_rrsets(struct reply_info* dest, struct reply_info* from,
	struct regional* region)
{
	size_t i, s;
	struct packed_rrset_data* fd, *dd;
	struct ub_packed_rrset_key* fk, *dk;
	for(i=0; i<dest->rrset_count; i++) {
		fk = from->rrsets[i];
		dk = dest->rrsets[i];
		fd = (struct packed_rrset_data*)fk->entry.data;
		dk->entry.hash = fk->entry.hash;
		dk->rk = fk->rk;	/* struct copy; dname replaced below */
		if(region) {
			/* NOTE(review): only the region path copies the id;
			 * presumably the malloc-path keys keep the id given
			 * by the alloc cache — verify against alloc.c */
			dk->id = fk->id;
			dk->rk.dname = (uint8_t*)regional_alloc_init(region,
				fk->rk.dname, fk->rk.dname_len);
		} else
			dk->rk.dname = (uint8_t*)memdup(fk->rk.dname,
				fk->rk.dname_len);
		if(!dk->rk.dname)
			return 0;
		s = packed_rrset_sizeof(fd);
		if(region)
			dd = (struct packed_rrset_data*)regional_alloc_init(
				region, fd, s);
		else dd = (struct packed_rrset_data*)memdup(fd, s);
		if(!dd)
			return 0;
		/* the blob was copied byte-for-byte; repoint its internal
		 * rr_len/rr_data/rr_ttl pointers into the new block */
		packed_rrset_ptr_fixup(dd);
		dk->entry.data = (void*)dd;
	}
	return 1;
}
/** Deep-copy a reply_info: base struct, rrset key structures and the
 * packed rrset data. With region!=NULL everything is region-allocated;
 * otherwise malloc and the alloc cache are used.
 * @return NULL on allocation failure. */
struct reply_info*
reply_info_copy(struct reply_info* rep, struct alloc_cache* alloc,
	struct regional* region)
{
	struct reply_info* cp;
	cp = construct_reply_info_base(region, rep->flags, rep->qdcount,
		rep->ttl, rep->prefetch_ttl, rep->an_numrrsets,
		rep->ns_numrrsets, rep->ar_numrrsets, rep->rrset_count,
		rep->security);
	if(!cp)
		return NULL;
	/* allocate ub_key structures special or not */
	if(!repinfo_alloc_rrset_keys(cp, alloc, region)) {
		/* region allocations are reclaimed with the region itself,
		 * so only the malloc path needs explicit cleanup */
		if(!region)
			reply_info_parsedelete(cp, alloc);
		return NULL;
	}
	if(!repinfo_copy_rrsets(cp, rep, region)) {
		if(!region)
			reply_info_parsedelete(cp, alloc);
		return NULL;
	}
	return cp;
}
uint8_t*
reply_find_final_cname_target(struct query_info* qinfo, struct reply_info* rep)
{
	/* Walk the answer section once, following the CNAME chain that
	 * starts at the query name; rrsets are in message order. */
	uint8_t* target = qinfo->qname;
	size_t target_len = qinfo->qname_len;
	size_t idx;
	for(idx = 0; idx < rep->an_numrrsets; idx++) {
		struct ub_packed_rrset_key* rrset = rep->rrsets[idx];
		if(ntohs(rrset->rk.type) != LDNS_RR_TYPE_CNAME)
			continue;
		if(ntohs(rrset->rk.rrset_class) != qinfo->qclass)
			continue;
		if(target_len != rrset->rk.dname_len ||
			query_dname_compare(target, rrset->rk.dname) != 0)
			continue;
		/* owner matches the name we are tracking: step to target */
		get_cname_target(rrset, &target, &target_len);
	}
	/* NULL means no CNAME was followed at all */
	return (target == qinfo->qname) ? NULL : target;
}
struct ub_packed_rrset_key*
reply_find_answer_rrset(struct query_info* qinfo, struct reply_info* rep)
{
	/* Scan the answer section for an rrset of the query type while
	 * following CNAMEs, so the match may sit under another owner. */
	uint8_t* owner = qinfo->qname;
	size_t owner_len = qinfo->qname_len;
	size_t idx;
	for(idx = 0; idx < rep->an_numrrsets; idx++) {
		struct ub_packed_rrset_key* rrset = rep->rrsets[idx];
		uint16_t t = ntohs(rrset->rk.type);
		/* does this rrset sit at the name we are tracking? */
		int at_owner =
			(ntohs(rrset->rk.rrset_class) == qinfo->qclass
			&& owner_len == rrset->rk.dname_len
			&& query_dname_compare(owner, rrset->rk.dname) == 0);
		if(!at_owner)
			continue;
		/* type match first, so a query for qtype CNAME returns the
		 * CNAME rrset itself instead of following it */
		if(t == qinfo->qtype)
			return rrset;
		if(t == LDNS_RR_TYPE_CNAME)
			get_cname_target(rrset, &owner, &owner_len);
	}
	return NULL;
}
struct ub_packed_rrset_key* reply_find_rrset_section_an(struct reply_info* rep,
	uint8_t* name, size_t namelen, uint16_t type, uint16_t dclass)
{
	/* Exact-owner lookup in the answer section; no CNAME chasing. */
	size_t idx;
	for(idx = 0; idx < rep->an_numrrsets; idx++) {
		struct ub_packed_rrset_key* rrset = rep->rrsets[idx];
		if(ntohs(rrset->rk.type) != type)
			continue;
		if(ntohs(rrset->rk.rrset_class) != dclass)
			continue;
		if(namelen == rrset->rk.dname_len &&
			query_dname_compare(name, rrset->rk.dname) == 0)
			return rrset;
	}
	return NULL;
}
struct ub_packed_rrset_key* reply_find_rrset_section_ns(struct reply_info* rep,
	uint8_t* name, size_t namelen, uint16_t type, uint16_t dclass)
{
	/* Exact-owner lookup restricted to the authority section, which
	 * occupies rrsets[an_numrrsets .. an_numrrsets+ns_numrrsets-1]. */
	size_t idx = rep->an_numrrsets;
	size_t end = rep->an_numrrsets + rep->ns_numrrsets;
	for(; idx < end; idx++) {
		struct ub_packed_rrset_key* rrset = rep->rrsets[idx];
		if(ntohs(rrset->rk.type) != type)
			continue;
		if(ntohs(rrset->rk.rrset_class) != dclass)
			continue;
		if(namelen == rrset->rk.dname_len &&
			query_dname_compare(name, rrset->rk.dname) == 0)
			return rrset;
	}
	return NULL;
}
struct ub_packed_rrset_key* reply_find_rrset(struct reply_info* rep,
	uint8_t* name, size_t namelen, uint16_t type, uint16_t dclass)
{
	/* Exact-owner lookup across all sections; no CNAME chasing. */
	size_t idx;
	for(idx = 0; idx < rep->rrset_count; idx++) {
		struct ub_packed_rrset_key* rrset = rep->rrsets[idx];
		if(ntohs(rrset->rk.type) != type)
			continue;
		if(ntohs(rrset->rk.rrset_class) != dclass)
			continue;
		if(namelen == rrset->rk.dname_len &&
			query_dname_compare(name, rrset->rk.dname) == 0)
			return rrset;
	}
	return NULL;
}
/** Debug-print a query and its reply: encode to wireformat, convert to
 * presentation format and log it, prefixed with the caller's tag str. */
void
log_dns_msg(const char* str, struct query_info* qinfo, struct reply_info* rep)
{
	/* not particularly fast but flexible, make wireformat and print */
	sldns_buffer* buf = sldns_buffer_new(65535);
	struct regional* region = regional_create();
	if(!buf || !region) {
		/* could not allocate the scratch space */
		log_info("%s: log_dns_msg: out of memory", str);
		if(buf) sldns_buffer_free(buf);
		if(region) regional_destroy(region);
		return;
	}
	if(!reply_info_encode(qinfo, rep, 0, rep->flags, buf, 0,
		region, 65535, 1)) {
		log_info("%s: log_dns_msg: out of memory", str);
	} else {
		/* bugfix: the previous local was also named 'str' and
		 * shadowed the parameter, so the failure message printed
		 * the NULL conversion result and the success message
		 * printed raw wireformat bytes instead of this string */
		char* s = sldns_wire2str_pkt(sldns_buffer_begin(buf),
			sldns_buffer_limit(buf));
		if(!s) {
			log_info("%s: log_dns_msg: ldns tostr failed", str);
		} else {
			log_info("%s %s", str, s);
		}
		free(s);
	}
	sldns_buffer_free(buf);
	regional_destroy(region);
}
/** Convenience wrapper: log str plus the query name, type and class
 * at verbosity level v. */
void
log_query_info(enum verbosity_value v, const char* str,
	struct query_info* qinf)
{
	log_nametypeclass(v, str, qinf->qname, qinf->qtype, qinf->qclass);
}
/** Verify that the CNAME chain in the answer section is intact: every
 * rr's owner must equal the current chain name; CNAMEs advance the
 * chain. Returns 1 for an intact (possibly empty) chain, 0 if broken. */
int
reply_check_cname_chain(struct reply_info* rep)
{
	/* check only answer section rrs for matching cname chain.
	 * the cache may return changed rdata, but owner names are untouched.*/
	size_t i;
	uint8_t* sname;
	size_t snamelen;
	/* robustness fix: do not read rrsets[0] of a reply without any
	 * answer rrsets; an empty answer trivially has an intact chain */
	if(rep->an_numrrsets == 0 || rep->rrset_count == 0)
		return 1;
	sname = rep->rrsets[0]->rk.dname;
	snamelen = rep->rrsets[0]->rk.dname_len;
	for(i=0; i<rep->an_numrrsets; i++) {
		uint16_t t = ntohs(rep->rrsets[i]->rk.type);
		if(t == LDNS_RR_TYPE_DNAME)
			continue; /* skip dnames; note TTL 0 not cached */
		/* verify that owner matches current sname */
		if(query_dname_compare(sname, rep->rrsets[i]->rk.dname) != 0){
			/* cname chain broken */
			return 0;
		}
		/* if this is a cname; move on */
		if(t == LDNS_RR_TYPE_CNAME) {
			get_cname_target(rep->rrsets[i], &sname, &snamelen);
		}
	}
	return 1;
}
int
reply_all_rrsets_secure(struct reply_info* rep)
{
size_t i;
for(i=0; i<rep->rrset_count; i++) {
if( ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
->security != sec_status_secure )
return 0;
}
return 1;
}

438
external/unbound/util/data/msgreply.h vendored Normal file
View File

@@ -0,0 +1,438 @@
/*
* util/data/msgreply.h - store message and reply data.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains a data structure to store a message and its reply.
*/
#ifndef UTIL_DATA_MSGREPLY_H
#define UTIL_DATA_MSGREPLY_H
#include "util/storage/lruhash.h"
#include "util/data/packed_rrset.h"
struct sldns_buffer;
struct comm_reply;
struct alloc_cache;
struct iovec;
struct regional;
struct edns_data;
struct msg_parse;
struct rrset_parse;
/** calculate the prefetch TTL as 90% of original. Calculation
 * without numerical overflow (uint32_t): subtracts a tenth instead of
 * multiplying by a fraction */
#define PREFETCH_TTL_CALC(ttl) ((ttl) - (ttl)/10)
/**
 * Structure to store query information that makes answers to queries
 * different.
 */
struct query_info {
	/**
	 * Salient data on the query: qname, in wireformat.
	 * can be allocated or a pointer to outside buffer.
	 * User has to keep track on the status of this.
	 * (query_info_clear() frees it; query_info_parse() sets it to
	 * point into the packet buffer without allocating.)
	 */
	uint8_t* qname;
	/** length of qname (including last 0 octet) */
	size_t qname_len;
	/** qtype, host byte order */
	uint16_t qtype;
	/** qclass, host byte order */
	uint16_t qclass;
};
/**
 * Information to reference an rrset
 */
struct rrset_ref {
	/** the key with lock, and ptr to packed data. */
	struct ub_packed_rrset_key* key;
	/** the rrset id at the time the reference was made; checked
	 * (together with the ttl) against the rrset's current id to see
	 * if the referenced data is still available and recent enough —
	 * see the description at reply_info.ref. */
	rrset_id_t id;
};
/**
* Structure to store DNS query and the reply packet.
* To use it, copy over the flags from reply and modify using flags from
* the query (RD,CD if not AA). prepend ID.
*
* Memory layout is:
* o struct
* o rrset_ref array
* o packed_rrset_key* array.
*
 * Memory layout is sometimes not packed, when the message is synthesized,
 * for ease of generation. It is allocated packed when it is copied
* from the region allocation to the malloc allocation.
*/
struct reply_info {
	/** the flags for the answer, host byte order. */
	uint16_t flags;
	/**
	 * This flag informs unbound the answer is authoritative and
	 * the AA flag should be preserved.
	 */
	uint8_t authoritative;
	/**
	 * Number of RRs in the query section.
	 * If qdcount is not 0, then it is 1, and the data that appears
	 * in the reply is the same as the query_info.
	 * Host byte order.
	 */
	uint8_t qdcount;
	/** 32 bit padding to pad struct member alignment to 64 bits. */
	uint32_t padding;
	/**
	 * TTL of the entire reply (for negative caching).
	 * only for use when there are 0 RRsets in this message.
	 * if there are RRsets, check those instead.
	 */
	time_t ttl;
	/**
	 * TTL for prefetch. After it has expired, a prefetch is suitable.
	 * Smaller than the TTL, otherwise the prefetch would not happen.
	 */
	time_t prefetch_ttl;
	/**
	 * The security status from DNSSEC validation of this message.
	 */
	enum sec_status security;
	/**
	 * Number of RRsets in each section.
	 * The answer section. Add up the RRs in every RRset to calculate
	 * the number of RRs, and the count for the dns packet.
	 * The number of RRs in RRsets can change due to RRset updates.
	 */
	size_t an_numrrsets;
	/** Count of authority section RRsets */
	size_t ns_numrrsets;
	/** Count of additional section RRsets */
	size_t ar_numrrsets;
	/** number of RRsets: an_numrrsets + ns_numrrsets + ar_numrrsets */
	size_t rrset_count;
	/**
	 * List of pointers (only) to the rrsets in the order in which
	 * they appear in the reply message.
	 * Number of elements is ancount+nscount+arcount RRsets.
	 * This is a pointer to that array.
	 * Use the accessor function for access.
	 */
	struct ub_packed_rrset_key** rrsets;
	/**
	 * Packed array of ids (see counts) and pointers to packed_rrset_key.
	 * The number equals ancount+nscount+arcount RRsets.
	 * These are sorted in ascending pointer, the locking order. So
	 * this list can be locked (and id, ttl checked), to see if
	 * all the data is available and recent enough.
	 * (reply_info_sortref() puts the array into this sorted order.)
	 *
	 * This is defined as an array of size 1, so that the compiler
	 * associates the identifier with this position in the structure.
	 * Array bound overflow on this array then gives access to the further
	 * elements of the array, which are allocated after the main structure.
	 *
	 * It could be more pure to define as array of size 0, ref[0].
	 * But ref[1] may be less confusing for compilers.
	 * Use the accessor function for access.
	 */
	struct rrset_ref ref[1];
};
/**
 * Structure to keep hash table entry for message replies.
 */
struct msgreply_entry {
	/** the hash table key */
	struct query_info key;
	/** the hash table entry, data is struct reply_info*.
	 * entry.key points back at this struct (set up by
	 * query_info_entrysetup). */
	struct lruhash_entry entry;
};
/**
* Constructor for replyinfo.
* @param region: where to allocate the results, pass NULL to use malloc.
* @param flags: flags for the replyinfo.
* @param qd: qd count
* @param ttl: TTL of replyinfo
* @param prettl: prefetch ttl
* @param an: an count
* @param ns: ns count
* @param ar: ar count
* @param total: total rrset count (presumably an+ns+ar).
* @param sec: security status of the reply info.
* @return the reply_info base struct with the array for putting the rrsets
* in. The array has been zeroed. Returns NULL on malloc failure.
*/
struct reply_info*
construct_reply_info_base(struct regional* region, uint16_t flags, size_t qd,
time_t ttl, time_t prettl, size_t an, size_t ns, size_t ar,
size_t total, enum sec_status sec);
/**
* Parse wire query into a queryinfo structure, return 0 on parse error.
* initialises the (prealloced) queryinfo structure as well.
* This query structure contains a pointer back info the buffer!
* This pointer avoids memory allocation. allocqname does memory allocation.
* @param m: the prealloced queryinfo structure to put query into.
* must be unused, or _clear()ed.
* @param query: the wireformat packet query. starts with ID.
* @return: 0 on format error.
*/
int query_info_parse(struct query_info* m, struct sldns_buffer* query);
/**
* Parse query reply.
* Fills in preallocated query_info structure (with ptr into buffer).
* Allocates reply_info and packed_rrsets. These are not yet added to any
* caches or anything, this is only parsing. Returns formerror on qdcount > 1.
* @param pkt: the packet buffer. Must be positioned after the query section.
* @param alloc: creates packed rrset key structures.
* @param rep: allocated reply_info is returned (only on no error).
* @param qinf: query_info is returned (only on no error).
* @param region: where to store temporary data (for parsing).
* @param edns: where to store edns information, does not need to be inited.
* @return: zero is OK, or DNS error code in case of error
* o FORMERR for parse errors.
* o SERVFAIL for memory allocation errors.
*/
int reply_info_parse(struct sldns_buffer* pkt, struct alloc_cache* alloc,
struct query_info* qinf, struct reply_info** rep,
struct regional* region, struct edns_data* edns);
/**
* Allocate and decompress parsed message and rrsets.
* @param pkt: for name decompression.
* @param msg: parsed message in scratch region.
* @param alloc: alloc cache for special rrset key structures.
* Not used if region!=NULL, it can be NULL in that case.
* @param qinf: where to store query info.
* qinf itself is allocated by the caller.
* @param rep: reply info is allocated and returned.
* @param region: if this parameter is NULL then malloc and the alloc is used.
* otherwise, everything is allocated in this region.
* In a region, no special rrset key structures are needed (not shared),
* and no rrset_ref array in the reply is built up.
* @return 0 if allocation failed.
*/
int parse_create_msg(struct sldns_buffer* pkt, struct msg_parse* msg,
struct alloc_cache* alloc, struct query_info* qinf,
struct reply_info** rep, struct regional* region);
/**
* Sorts the ref array.
* @param rep: reply info. rrsets must be filled in.
*/
void reply_info_sortref(struct reply_info* rep);
/**
* Set TTLs inside the replyinfo to absolute values.
* @param rep: reply info. rrsets must be filled in.
* Also refs must be filled in.
* @param timenow: the current time.
*/
void reply_info_set_ttls(struct reply_info* rep, time_t timenow);
/**
* Delete reply_info and packed_rrsets (while they are not yet added to the
* hashtables.). Returns rrsets to the alloc cache.
* @param rep: reply_info to delete.
* @param alloc: where to return rrset structures to.
*/
void reply_info_parsedelete(struct reply_info* rep, struct alloc_cache* alloc);
/**
* Compare two queryinfo structures, on query and type, class.
* It is _not_ sorted in canonical ordering.
* @param m1: struct query_info* , void* here to ease use as function pointer.
* @param m2: struct query_info* , void* here to ease use as function pointer.
* @return: 0 = same, -1 m1 is smaller, +1 m1 is larger.
*/
int query_info_compare(void* m1, void* m2);
/** clear out query info structure */
void query_info_clear(struct query_info* m);
/** calculate size of struct query_info + reply_info */
size_t msgreply_sizefunc(void* k, void* d);
/** delete msgreply_entry key structure */
void query_entry_delete(void *q, void* arg);
/** delete reply_info data structure */
void reply_info_delete(void* d, void* arg);
/** calculate hash value of query_info, lowercases the qname */
hashvalue_t query_info_hash(struct query_info *q);
/**
* Setup query info entry
* @param q: query info to copy. Emptied as if clear is called.
* @param r: reply to init data.
* @param h: hash value.
* @return: newly allocated message reply cache item.
*/
struct msgreply_entry* query_info_entrysetup(struct query_info* q,
struct reply_info* r, hashvalue_t h);
/**
* Copy reply_info and all rrsets in it and allocate.
* @param rep: what to copy, probably inside region, no ref[] array in it.
* @param alloc: how to allocate rrset keys.
* Not used if region!=NULL, it can be NULL in that case.
* @param region: if this parameter is NULL then malloc and the alloc is used.
* otherwise, everything is allocated in this region.
* In a region, no special rrset key structures are needed (not shared),
* and no rrset_ref array in the reply is built up.
* @return new reply info or NULL on memory error.
*/
struct reply_info* reply_info_copy(struct reply_info* rep,
struct alloc_cache* alloc, struct regional* region);
/**
* Copy a parsed rrset into given key, decompressing and allocating rdata.
* @param pkt: packet for decompression
* @param msg: the parser message (for flags for trust).
* @param pset: the parsed rrset to copy.
* @param region: if NULL - malloc, else data is allocated in this region.
* @param pk: a freshly obtained rrsetkey structure. No dname is set yet,
* will be set on return.
* Note that TTL will still be relative on return.
* @return false on alloc failure.
*/
int parse_copy_decompress_rrset(struct sldns_buffer* pkt, struct msg_parse* msg,
struct rrset_parse *pset, struct regional* region,
struct ub_packed_rrset_key* pk);
/**
* Find final cname target in reply, the one matching qinfo. Follows CNAMEs.
* @param qinfo: what to start with.
* @param rep: looks in answer section of this message.
* @return: pointer dname, or NULL if not found.
*/
uint8_t* reply_find_final_cname_target(struct query_info* qinfo,
struct reply_info* rep);
/**
* Check if cname chain in cached reply is still valid.
* @param rep: reply to check.
* @return: true if valid, false if invalid.
*/
int reply_check_cname_chain(struct reply_info* rep);
/**
* Check security status of all RRs in the message.
* @param rep: reply to check
* @return: true if all RRs are secure. False if not.
* True if there are zero RRs.
*/
int reply_all_rrsets_secure(struct reply_info* rep);
/**
* Find answer rrset in reply, the one matching qinfo. Follows CNAMEs, so the
* result may have a different owner name.
* @param qinfo: what to look for.
* @param rep: looks in answer section of this message.
* @return: pointer to rrset, or NULL if not found.
*/
struct ub_packed_rrset_key* reply_find_answer_rrset(struct query_info* qinfo,
struct reply_info* rep);
/**
* Find rrset in reply, inside the answer section. Does not follow CNAMEs.
* @param rep: looks in answer section of this message.
* @param name: what to look for.
* @param namelen: length of name.
* @param type: looks for (host order).
* @param dclass: looks for (host order).
* @return: pointer to rrset, or NULL if not found.
*/
struct ub_packed_rrset_key* reply_find_rrset_section_an(struct reply_info* rep,
uint8_t* name, size_t namelen, uint16_t type, uint16_t dclass);
/**
* Find rrset in reply, inside the authority section. Does not follow CNAMEs.
* @param rep: looks in authority section of this message.
* @param name: what to look for.
* @param namelen: length of name.
* @param type: looks for (host order).
* @param dclass: looks for (host order).
* @return: pointer to rrset, or NULL if not found.
*/
struct ub_packed_rrset_key* reply_find_rrset_section_ns(struct reply_info* rep,
uint8_t* name, size_t namelen, uint16_t type, uint16_t dclass);
/**
* Find rrset in reply, inside any section. Does not follow CNAMEs.
* @param rep: looks in answer,authority and additional section of this message.
* @param name: what to look for.
* @param namelen: length of name.
* @param type: looks for (host order).
* @param dclass: looks for (host order).
* @return: pointer to rrset, or NULL if not found.
*/
struct ub_packed_rrset_key* reply_find_rrset(struct reply_info* rep,
uint8_t* name, size_t namelen, uint16_t type, uint16_t dclass);
/**
* Debug send the query info and reply info to the log in readable form.
* @param str: descriptive string printed with packet content.
* @param qinfo: query section.
* @param rep: rest of message.
*/
void log_dns_msg(const char* str, struct query_info* qinfo,
struct reply_info* rep);
/**
* Print string with neat domain name, type, class from query info.
* @param v: at what verbosity level to print this.
* @param str: string of message.
* @param qinf: query info structure with name, type and class.
*/
void log_query_info(enum verbosity_value v, const char* str,
struct query_info* qinf);
#endif /* UTIL_DATA_MSGREPLY_H */

View File

@@ -0,0 +1,389 @@
/*
* util/data/packed_rrset.c - data storage for a set of resource records.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains the data storage for RRsets.
*/
#include "config.h"
#include "util/data/packed_rrset.h"
#include "util/data/dname.h"
#include "util/storage/lookup3.h"
#include "util/log.h"
#include "util/alloc.h"
#include "util/regional.h"
#include "util/net_help.h"
#include "ldns/rrdef.h"
#include "ldns/sbuffer.h"
#include "ldns/wire2str.h"
/** Free the malloc'ed parts of an rrset key that was never inserted
 * into a hashtable, then return the key shell to the alloc cache. */
void
ub_packed_rrset_parsedelete(struct ub_packed_rrset_key* pkey,
	struct alloc_cache* alloc)
{
	if(!pkey)
		return;
	/* free(NULL) is a no-op, so the old 'if(ptr)' guards are gone */
	free(pkey->entry.data);
	pkey->entry.data = NULL;
	free(pkey->rk.dname);
	pkey->rk.dname = NULL;
	/* clear id before returning the key to the cache (same as
	 * ub_rrset_key_delete does) */
	pkey->id = 0;
	alloc_special_release(alloc, pkey);
}
size_t
ub_rrset_sizefunc(void* key, void* data)
{
	/* Memory accounting for one cached rrset: the key structure, its
	 * owner dname, the packed data blob and the lock overhead. */
	struct ub_packed_rrset_key* k = (struct ub_packed_rrset_key*)key;
	struct packed_rrset_data* d = (struct packed_rrset_data*)data;
	size_t total = sizeof(*k);
	total += k->rk.dname_len;
	total += packed_rrset_sizeof(d);
	total += lock_get_mem(&k->entry.lock);
	return total;
}
/** Total size in bytes of a packed rrset data blob.
 * The blob is one contiguous allocation whose rdata bytes come last
 * (see packed_rrset_ptr_fixup for the layout), so the size is the
 * offset of the last rdata from the block base plus that rdata's
 * length. RRSIG rdata is stored after the data rrs, so when rrsigs
 * are present the last rrsig ends the block. */
size_t
packed_rrset_sizeof(struct packed_rrset_data* d)
{
	size_t s;
	if(d->rrsig_count > 0) {
		s = ((uint8_t*)d->rr_data[d->count+d->rrsig_count-1] -
			(uint8_t*)d) + d->rr_len[d->count+d->rrsig_count-1];
	} else {
		/* an rrset with no rrs and no rrsigs is not stored */
		log_assert(d->count > 0);
		s = ((uint8_t*)d->rr_data[d->count-1] - (uint8_t*)d) +
			d->rr_len[d->count-1];
	}
	return s;
}
int
ub_rrset_compare(void* k1, void* k2)
{
	/* Total order on rrset keys; fields compared in the same order
	 * as always: type, dname_len, dname, class, flags. */
	struct ub_packed_rrset_key* key1 = (struct ub_packed_rrset_key*)k1;
	struct ub_packed_rrset_key* key2 = (struct ub_packed_rrset_key*)k2;
	int c;
	if(key1 == key2)
		return 0;
	if(key1->rk.type != key2->rk.type)
		return (key1->rk.type < key2->rk.type) ? -1 : 1;
	if(key1->rk.dname_len != key2->rk.dname_len)
		return (key1->rk.dname_len < key2->rk.dname_len) ? -1 : 1;
	if((c = query_dname_compare(key1->rk.dname, key2->rk.dname)) != 0)
		return c;
	if(key1->rk.rrset_class != key2->rk.rrset_class)
		return (key1->rk.rrset_class < key2->rk.rrset_class) ? -1 : 1;
	if(key1->rk.flags != key2->rk.flags)
		return (key1->rk.flags < key2->rk.flags) ? -1 : 1;
	return 0;
}
void
ub_rrset_key_delete(void* key, void* userdata)
{
	/* Hashtable key destructor: free the owner dname and hand the
	 * key structure back to the alloc cache passed as userdata. */
	struct ub_packed_rrset_key* k = (struct ub_packed_rrset_key*)key;
	k->id = 0;
	free(k->rk.dname);
	k->rk.dname = NULL;
	alloc_special_release((struct alloc_cache*)userdata, k);
}
void
rrset_data_delete(void* data, void* ATTR_UNUSED(userdata))
{
	/* the packed data is one contiguous allocation (see
	 * packed_rrset_ptr_fixup), so a single free suffices */
	free(data);
}
int
rrsetdata_equal(struct packed_rrset_data* d1, struct packed_rrset_data* d2)
{
	/* Two packed rrsets are equal when they have the same rr and
	 * rrsig counts and every rr, in stored order, has the same
	 * length and identical rdata bytes. */
	size_t idx, num;
	if(d1->count != d2->count)
		return 0;
	if(d1->rrsig_count != d2->rrsig_count)
		return 0;
	num = d1->count + d1->rrsig_count;
	for(idx = 0; idx < num; idx++) {
		if(d1->rr_len[idx] != d2->rr_len[idx])
			return 0;
		if(memcmp(d1->rr_data[idx], d2->rr_data[idx],
			d1->rr_len[idx]) != 0)
			return 0;
	}
	return 1;
}
/** Hash an rrset key: dname, type, class and flags. */
hashvalue_t
rrset_key_hash(struct packed_rrset_key* key)
{
	/* type is hashed in host order */
	uint16_t t = ntohs(key->type);
	/* Note this MUST be identical to pkt_hash_rrset in msgparse.c */
	/* this routine does not have a compressed name */
	hashvalue_t h = 0xab;	/* same seed as query_info_hash */
	h = dname_query_hash(key->dname, h);
	h = hashlittle(&t, sizeof(t), h);
	/* class and flags are hashed as stored */
	h = hashlittle(&key->rrset_class, sizeof(uint16_t), h);
	h = hashlittle(&key->flags, sizeof(uint32_t), h);
	return h;
}
/** Recompute the internal pointers of a packed rrset data blob.
 * Layout of the single allocation:
 *   struct packed_rrset_data
 *   size_t   rr_len[count+rrsig_count]
 *   uint8_t* rr_data[count+rrsig_count]
 *   time_t   rr_ttl[count+rrsig_count]
 *   rdata bytes, rr_len[i] each, in order
 * After a byte-for-byte copy (memdup/regional_alloc_init) the internal
 * pointers still reference the old block; this repoints them relative
 * to the new base address. */
void
packed_rrset_ptr_fixup(struct packed_rrset_data* data)
{
	size_t i;
	size_t total = data->count + data->rrsig_count;
	uint8_t* nextrdata;
	/* fixup pointers in packed rrset data */
	data->rr_len = (size_t*)((uint8_t*)data +
		sizeof(struct packed_rrset_data));
	data->rr_data = (uint8_t**)&(data->rr_len[total]);
	data->rr_ttl = (time_t*)&(data->rr_data[total]);
	nextrdata = (uint8_t*)&(data->rr_ttl[total]);
	for(i=0; i<total; i++) {
		data->rr_data[i] = nextrdata;
		nextrdata += data->rr_len[i];
	}
}
/** Extract the target name from the first RR of a CNAME or DNAME
 * rrset. On any validation failure *dname and *dname_len are left
 * untouched, so callers keep following the chain with the old name. */
void
get_cname_target(struct ub_packed_rrset_key* rrset, uint8_t** dname,
	size_t* dname_len)
{
	struct packed_rrset_data* d;
	size_t len;
	if(ntohs(rrset->rk.type) != LDNS_RR_TYPE_CNAME &&
		ntohs(rrset->rk.type) != LDNS_RR_TYPE_DNAME)
		return;
	d = (struct packed_rrset_data*)rrset->entry.data;
	if(d->count < 1)
		return;
	if(d->rr_len[0] < 3) /* at least rdatalen + 0byte root label */
		return;
	/* stored rdata format: 2-byte length prefix, then the dname */
	len = sldns_read_uint16(d->rr_data[0]);
	if(len != d->rr_len[0] - sizeof(uint16_t))
		return; /* length prefix disagrees with stored size */
	if(dname_valid(d->rr_data[0]+sizeof(uint16_t), len) != len)
		return; /* malformed dname in rdata */
	*dname = d->rr_data[0]+sizeof(uint16_t);
	*dname_len = len;
}
void
packed_rrset_ttl_add(struct packed_rrset_data* data, time_t add)
{
	/* Shift the rrset TTL and every per-rr TTL (data rrs and rrsigs
	 * alike) by 'add'. */
	size_t idx;
	size_t num = data->count + data->rrsig_count;
	data->ttl += add;
	for(idx = 0; idx < num; idx++)
		data->rr_ttl[idx] += add;
}
const char*
rrset_trust_to_string(enum rrset_trust s)
{
	/* Symbolic name of an rrset trust level, for logging. */
	const char* name = "unknown_rrset_trust_value";
	switch(s) {
	case rrset_trust_none:		name = "rrset_trust_none"; break;
	case rrset_trust_add_noAA:	name = "rrset_trust_add_noAA"; break;
	case rrset_trust_auth_noAA:	name = "rrset_trust_auth_noAA"; break;
	case rrset_trust_add_AA:	name = "rrset_trust_add_AA"; break;
	case rrset_trust_nonauth_ans_AA:
		name = "rrset_trust_nonauth_ans_AA"; break;
	case rrset_trust_ans_noAA:	name = "rrset_trust_ans_noAA"; break;
	case rrset_trust_glue:		name = "rrset_trust_glue"; break;
	case rrset_trust_auth_AA:	name = "rrset_trust_auth_AA"; break;
	case rrset_trust_ans_AA:	name = "rrset_trust_ans_AA"; break;
	case rrset_trust_sec_noglue:	name = "rrset_trust_sec_noglue"; break;
	case rrset_trust_prim_noglue:	name = "rrset_trust_prim_noglue"; break;
	case rrset_trust_validated:	name = "rrset_trust_validated"; break;
	case rrset_trust_ultimate:	name = "rrset_trust_ultimate"; break;
	}
	return name;
}
const char*
sec_status_to_string(enum sec_status s)
{
	/* Symbolic name of a DNSSEC security status, for logging. */
	const char* name = "unknown_sec_status_value";
	switch(s) {
	case sec_status_unchecked:	name = "sec_status_unchecked"; break;
	case sec_status_bogus:		name = "sec_status_bogus"; break;
	case sec_status_indeterminate:
		name = "sec_status_indeterminate"; break;
	case sec_status_insecure:	name = "sec_status_insecure"; break;
	case sec_status_secure:		name = "sec_status_secure"; break;
	}
	return name;
}
void log_rrset_key(enum verbosity_value v, const char* str,
	struct ub_packed_rrset_key* rrset)
{
	/* Print the rrset's owner name, type and class, but only when
	 * the global verbosity reaches level v. */
	if(verbosity < v)
		return;
	log_nametypeclass(v, str, rrset->rk.dname,
		ntohs(rrset->rk.type), ntohs(rrset->rk.rrset_class));
}
/** Render rr i of the rrset (rrsigs follow the data rrs) into
 * presentation format in dest: rebuild the rr in wireformat in a
 * scratch buffer, then convert it. TTL is written relative to 'now'.
 * Returns 0 (with dest[0]=0) if the rr does not fit or fails to
 * convert. */
int packed_rr_to_string(struct ub_packed_rrset_key* rrset, size_t i,
	time_t now, char* dest, size_t dest_len)
{
	struct packed_rrset_data* d = (struct packed_rrset_data*)rrset->
		entry.data;
	uint8_t rr[65535];
	size_t rlen = rrset->rk.dname_len + 2 + 2 + 4 + d->rr_len[i];
	log_assert(dest_len > 0 && dest);
	/* bugfix: also bound by the scratch buffer. dname_len+8+rr_len
	 * can exceed sizeof(rr); checking dest_len alone would let the
	 * memmoves below overflow the stack buffer for large dest_len */
	if(rlen > dest_len || rlen > sizeof(rr)) {
		dest[0] = 0;
		return 0;
	}
	/* wireformat: owner dname, type, class, ttl, rdata */
	memmove(rr, rrset->rk.dname, rrset->rk.dname_len);
	if(i < d->count)
		memmove(rr+rrset->rk.dname_len, &rrset->rk.type, 2);
	else sldns_write_uint16(rr+rrset->rk.dname_len, LDNS_RR_TYPE_RRSIG);
	memmove(rr+rrset->rk.dname_len+2, &rrset->rk.rrset_class, 2);
	sldns_write_uint32(rr+rrset->rk.dname_len+4,
		(uint32_t)(d->rr_ttl[i]-now));
	memmove(rr+rrset->rk.dname_len+8, d->rr_data[i], d->rr_len[i]);
	if(sldns_wire2str_rr_buf(rr, rlen, dest, dest_len) == -1) {
		log_info("rrbuf failure %d %s", (int)d->rr_len[i], dest);
		dest[0] = 0;
		return 0;
	}
	return 1;
}
void log_packed_rrset(enum verbosity_value v, const char* str,
	struct ub_packed_rrset_key* rrset)
{
	/* Log every rr of the rrset (data rrs then rrsigs), one line
	 * each, only when verbosity reaches v. TTLs print as stored
	 * (now passed as 0 to packed_rr_to_string). */
	struct packed_rrset_data* d;
	char buf[65535];
	size_t idx, num;
	if(verbosity < v)
		return;
	d = (struct packed_rrset_data*)rrset->entry.data;
	num = d->count + d->rrsig_count;
	for(idx = 0; idx < num; idx++) {
		if(packed_rr_to_string(rrset, idx, 0, buf, sizeof(buf)))
			log_info("%s: %s", str, buf);
		else log_info("%s: rr %d wire2str-error", str, (int)idx);
	}
}
time_t
ub_packed_rrset_ttl(struct ub_packed_rrset_key* key)
{
	/* Accessor: the rrset's TTL lives inside the packed data blob. */
	return ((struct packed_rrset_data*)key->entry.data)->ttl;
}
/** Copy an rrset into a region, converting the stored TTLs into values
 * relative to 'now', clamped at 0 (the inverse of the addition done by
 * packed_rrset_copy_alloc). Returns NULL on allocation failure; the
 * partial region allocations are reclaimed when the region is freed. */
struct ub_packed_rrset_key*
packed_rrset_copy_region(struct ub_packed_rrset_key* key,
	struct regional* region, time_t now)
{
	struct ub_packed_rrset_key* ck = regional_alloc(region,
		sizeof(struct ub_packed_rrset_key));
	struct packed_rrset_data* d;
	struct packed_rrset_data* data = (struct packed_rrset_data*)
		key->entry.data;
	size_t dsize, i;
	if(!ck)
		return NULL;
	ck->id = key->id;
	/* the region copy gets a zeroed entry (no lock init) */
	memset(&ck->entry, 0, sizeof(ck->entry));
	ck->entry.hash = key->entry.hash;
	ck->entry.key = ck;
	ck->rk = key->rk;	/* struct copy; dname replaced below */
	ck->rk.dname = regional_alloc_init(region, key->rk.dname,
		key->rk.dname_len);
	if(!ck->rk.dname)
		return NULL;
	dsize = packed_rrset_sizeof(data);
	d = (struct packed_rrset_data*)regional_alloc_init(region, data, dsize);
	if(!d)
		return NULL;
	ck->entry.data = d;
	/* repoint the copied blob's internal arrays at the new block */
	packed_rrset_ptr_fixup(d);
	/* make TTLs relative - once per rrset */
	for(i=0; i<d->count + d->rrsig_count; i++) {
		if(d->rr_ttl[i] < now)
			d->rr_ttl[i] = 0; /* already expired: clamp at 0 */
		else	d->rr_ttl[i] -= now;
	}
	if(d->ttl < now)
		d->ttl = 0;
	else	d->ttl -= now;
	return ck;
}
/**
 * malloc-copy an rrset (key from the alloc cache, data via memdup);
 * 'now' is added to all TTLs to make them absolute again.
 * On failure all partial allocations are released and NULL returned.
 */
struct ub_packed_rrset_key*
packed_rrset_copy_alloc(struct ub_packed_rrset_key* key,
	struct alloc_cache* alloc, time_t now)
{
	struct packed_rrset_data* srcdata, *copydata;
	struct ub_packed_rrset_key* copykey = alloc_special_obtain(alloc);
	if(!copykey)
		return NULL;
	srcdata = (struct packed_rrset_data*)key->entry.data;
	copykey->entry.hash = key->entry.hash;
	copykey->rk = key->rk;
	copykey->rk.dname = (uint8_t*)memdup(key->rk.dname,
		key->rk.dname_len);
	if(!copykey->rk.dname) {
		alloc_special_release(alloc, copykey);
		return NULL;
	}
	copydata = (struct packed_rrset_data*)memdup(srcdata,
		packed_rrset_sizeof(srcdata));
	if(!copydata) {
		free(copykey->rk.dname);
		alloc_special_release(alloc, copykey);
		return NULL;
	}
	/* the flat copy needs its internal array pointers repaired */
	packed_rrset_ptr_fixup(copydata);
	copykey->entry.data = (void*)copydata;
	packed_rrset_ttl_add(copydata, now);
	return copykey;
}

View File

@@ -0,0 +1,428 @@
/*
* util/data/packed_rrset.h - data storage for a set of resource records.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains the data storage for RRsets.
*/
#ifndef UTIL_DATA_PACKED_RRSET_H
#define UTIL_DATA_PACKED_RRSET_H
#include "util/storage/lruhash.h"
struct alloc_cache;
struct regional;
/** type used to uniquely identify rrsets. Cannot be reused without
* clearing the cache. */
typedef uint64_t rrset_id_t;
/** this rrset is NSEC and is at zone apex (at child side of zonecut) */
#define PACKED_RRSET_NSEC_AT_APEX 0x1
/** this rrset is A/AAAA and is in-zone-glue (from parent side of zonecut) */
#define PACKED_RRSET_PARENT_SIDE 0x2
/** this rrset is SOA and has the negative ttl (from nxdomain or nodata),
* this is set on SOA rrsets in the authority section, to keep its TTL separate
* from the SOA in the answer section from a direct SOA query or ANY query. */
#define PACKED_RRSET_SOA_NEG 0x4
/**
* The identifying information for an RRset.
*/
struct packed_rrset_key {
	/**
	 * The domain name. If not null (for id=0) it is allocated, and
	 * contains the wireformat domain name.
	 * This dname is not canonicalized.
	 */
	uint8_t* dname;
	/**
	 * Length of the domain name, including last 0 root octet.
	 */
	size_t dname_len;
	/**
	 * Flags. 32bit to be easy for hashing:
	 * o PACKED_RRSET_NSEC_AT_APEX
	 * o PACKED_RRSET_PARENT_SIDE
	 * o PACKED_RRSET_SOA_NEG
	 */
	uint32_t flags;
	/** the rrset type in network format (use ntohs to read it) */
	uint16_t type;
	/** the rrset class in network format (use ntohs to read it) */
	uint16_t rrset_class;
};
/**
* This structure contains an RRset. A set of resource records that
* share the same domain name, type and class.
*
* Due to memory management and threading, the key structure cannot be
* deleted, although the data can be. The id can be set to 0 to store and the
* structure can be recycled with a new id.
*/
struct ub_packed_rrset_key {
	/**
	 * entry into hashtable. Note the lock is never destroyed,
	 * even when this key is retired to the cache.
	 * the data pointer (if not null) points to a struct packed_rrset.
	 */
	struct lruhash_entry entry;
	/**
	 * the ID of this rrset. unique, based on threadid + sequenceno.
	 * ids are not reused, except after flushing the cache.
	 * zero is an unused entry, and never a valid id.
	 * Check this value after getting entry.lock.
	 * The other values in this struct may only be altered after changing
	 * the id (which needs a writelock on entry.lock).
	 */
	rrset_id_t id;
	/** key data: dname, type and class */
	struct packed_rrset_key rk;
};
/**
* RRset trustworthiness. Bigger value is more trust. RFC 2181.
* The rrset_trust_add_noAA, rrset_trust_auth_noAA, rrset_trust_add_AA,
* are mentioned as the same trustworthiness in 2181, but split up here
* for ease of processing.
*
* rrset_trust_nonauth_ans_AA, rrset_trust_ans_noAA
* are also mentioned as the same trustworthiness in 2181, but split up here
* for ease of processing.
*
* Added trust_none for a sane initial value, smaller than anything else.
* Added validated and ultimate trust for keys and rrsig validated content.
*/
enum rrset_trust {
	/* NOTE: numeric order is significant; keep in increasing trust. */
	/** initial value for trust */
	rrset_trust_none = 0,
	/** Additional information from non-authoritative answers */
	rrset_trust_add_noAA,
	/** Data from the authority section of a non-authoritative answer */
	rrset_trust_auth_noAA,
	/** Additional information from an authoritative answer */
	rrset_trust_add_AA,
	/** non-authoritative data from the answer section of authoritative
	 * answers */
	rrset_trust_nonauth_ans_AA,
	/** Data from the answer section of a non-authoritative answer */
	rrset_trust_ans_noAA,
	/** Glue from a primary zone, or glue from a zone transfer */
	rrset_trust_glue,
	/** Data from the authority section of an authoritative answer */
	rrset_trust_auth_AA,
	/** The authoritative data included in the answer section of an
	 * authoritative reply */
	rrset_trust_ans_AA,
	/** Data from a zone transfer, other than glue */
	rrset_trust_sec_noglue,
	/** Data from a primary zone file, other than glue data */
	rrset_trust_prim_noglue,
	/** DNSSEC(rfc4034) validated with trusted keys */
	rrset_trust_validated,
	/** ultimately trusted, no more trust is possible;
	 * trusted keys from the unbound configuration setup. */
	rrset_trust_ultimate
};
/**
* Security status from validation for data.
* The order is significant; more secure, more proven later.
*/
enum sec_status {
	/* NOTE: numeric order is significant; more proven states later. */
	/** UNCHECKED means that object has yet to be validated. */
	sec_status_unchecked = 0,
	/** BOGUS means that the object (RRset or message) failed to validate
	 * (according to local policy), but should have validated. */
	sec_status_bogus,
	/** INDETERMINATE means that the object is insecure, but not
	 * authoritatively so. Generally this means that the RRset is not
	 * below a configured trust anchor. */
	sec_status_indeterminate,
	/** INSECURE means that the object is authoritatively known to be
	 * insecure. Generally this means that this RRset is below a trust
	 * anchor, but also below a verified, insecure delegation. */
	sec_status_insecure,
	/** SECURE means that the object (RRset or message) validated
	 * according to local policy. */
	sec_status_secure
};
/**
* RRset data.
*
* The data is packed, stored contiguously in memory.
* memory layout:
* o base struct
* o rr_len size_t array
* o rr_data uint8_t* array
* o rr_ttl time_t array (after size_t and ptrs because those may be
* 64bit and this array before those would make them unaligned).
* Since the stuff before is 32/64bit, rr_ttl is 32 bit aligned.
* o rr_data rdata wireformats
* o rrsig_data rdata wireformat(s)
*
* Rdata is stored in wireformat. The dname is stored in wireformat.
* TTLs are stored as absolute values (and could be expired).
*
* RRSIGs are stored in the arrays after the regular rrs.
*
* You need the packed_rrset_key to know dname, type, class of the
* resource records in this RRset. (if signed the rrsig gives the type too).
*
* On the wire an RR is:
* name, type, class, ttl, rdlength, rdata.
* So we need to send the following per RR:
* key.dname, ttl, rr_data[i].
* since key.dname ends with type and class.
* and rr_data starts with the rdlength.
* the ttl value to send changes due to time.
*/
struct packed_rrset_data {
	/** TTL (in seconds like time()) of the rrset.
	 * Same for all RRs see rfc2181(5.2).
	 * Stored as an absolute value (and could be expired). */
	time_t ttl;
	/** number of rrs. */
	size_t count;
	/** number of rrsigs, if 0 no rrsigs */
	size_t rrsig_count;
	/** the trustworthiness of the rrset data */
	enum rrset_trust trust;
	/** security status of the rrset data */
	enum sec_status security;
	/** length of every rr's rdata, rr_len[i] is size of rr_data[i].
	 * The arrays have count+rrsig_count elements; rrsigs come last. */
	size_t* rr_len;
	/** ttl of every rr. rr_ttl[i] ttl of rr i. Absolute values. */
	time_t *rr_ttl;
	/**
	 * Array of pointers to every rr's rdata.
	 * The rr_data[i] rdata is stored in uncompressed wireformat.
	 * The first uint16_t of rr_data[i] is network format rdlength.
	 *
	 * rr_data[count] to rr_data[count+rrsig_count] contain the rrsig data.
	 * These pointers aim into the same contiguous blob; after a flat
	 * copy they must be repaired with packed_rrset_ptr_fixup().
	 */
	uint8_t** rr_data;
};
/**
* An RRset can be represented using both key and data together.
* Split into key and data structures to simplify implementation of
* caching schemes.
*/
struct packed_rrset {
	/** domain name, type and class */
	struct packed_rrset_key* k;
	/** ttl, count and rdatas (and rrsig) */
	struct packed_rrset_data* d;
	/* pair of pointers only; no ownership is implied by this struct */
};
/**
* list of packed rrsets
*/
struct packed_rrset_list {
	/** next in list (NULL terminates the singly linked list) */
	struct packed_rrset_list* next;
	/** rrset key and data */
	struct packed_rrset rrset;
};
/**
* Delete packed rrset key and data, not entered in hashtables yet.
* Used during parsing.
* @param pkey: rrset key structure with locks, key and data pointers.
* @param alloc: where to return the unfree-able key structure.
*/
void ub_packed_rrset_parsedelete(struct ub_packed_rrset_key* pkey,
struct alloc_cache* alloc);
/**
* Memory size of rrset data. RRset data must be filled in correctly.
* @param data: data to examine.
* @return size in bytes.
*/
size_t packed_rrset_sizeof(struct packed_rrset_data* data);
/**
* Get TTL of rrset. RRset data must be filled in correctly.
* @param key: rrset key, with data to examine.
* @return ttl value.
*/
time_t ub_packed_rrset_ttl(struct ub_packed_rrset_key* key);
/**
* Calculate memory size of rrset entry. For hash table usage.
* @param key: struct ub_packed_rrset_key*.
* @param data: struct packed_rrset_data*.
* @return size in bytes.
*/
size_t ub_rrset_sizefunc(void* key, void* data);
/**
* compares two rrset keys.
* @param k1: struct ub_packed_rrset_key*.
* @param k2: struct ub_packed_rrset_key*.
* @return 0 if equal.
*/
int ub_rrset_compare(void* k1, void* k2);
/**
* compare two rrset data structures.
* Compared rdata and rrsigdata, not the trust or ttl value.
* @param d1: data to compare.
* @param d2: data to compare.
* @return 1 if equal.
*/
int rrsetdata_equal(struct packed_rrset_data* d1, struct packed_rrset_data* d2);
/**
* Old key to be deleted. RRset keys are recycled via alloc.
* The id is set to 0. So that other threads, after acquiring a lock always
* get the correct value, in this case the 0 deleted-special value.
* @param key: struct ub_packed_rrset_key*.
* @param userdata: alloc structure to use for recycling.
*/
void ub_rrset_key_delete(void* key, void* userdata);
/**
* Old data to be deleted.
* @param data: what to delete.
* @param userdata: user data ptr.
*/
void rrset_data_delete(void* data, void* userdata);
/**
* Calculate hash value for a packed rrset key.
* @param key: the rrset key with name, type, class, flags.
* @return hash value.
*/
hashvalue_t rrset_key_hash(struct packed_rrset_key* key);
/**
* Fixup pointers in fixed data packed_rrset_data blob.
* After a memcpy of the data for example. Will set internal pointers right.
* @param data: rrset data structure. Otherwise correctly filled in.
*/
void packed_rrset_ptr_fixup(struct packed_rrset_data* data);
/**
* Fixup TTLs in fixed data packed_rrset_data blob.
* @param data: rrset data structure. Otherwise correctly filled in.
* @param add: how many seconds to add, pass time(0) for example.
*/
void packed_rrset_ttl_add(struct packed_rrset_data* data, time_t add);
/**
* Utility procedure to extract CNAME target name from its rdata.
* Failsafes; it will change passed dname to a valid dname or do nothing.
* @param rrset: the rrset structure. Must be a CNAME.
* Only first RR is used (multiple RRs are technically illegal anyway).
* Also works on type DNAME. Returns target name.
* @param dname: this pointer is updated to point into the cname rdata.
* If a failsafe fails, nothing happens to the pointer (such as the
* rdata was not a valid dname, not a CNAME, ...).
* @param dname_len: length of dname is returned.
*/
void get_cname_target(struct ub_packed_rrset_key* rrset, uint8_t** dname,
size_t* dname_len);
/**
* Get a printable string for a rrset trust value
* @param s: rrset trust value
* @return printable string.
*/
const char* rrset_trust_to_string(enum rrset_trust s);
/**
* Get a printable string for a security status value
* @param s: security status
* @return printable string.
*/
const char* sec_status_to_string(enum sec_status s);
/**
* Print string with neat domain name, type, class from rrset.
* @param v: at what verbosity level to print this.
* @param str: string of message.
* @param rrset: structure with name, type and class.
*/
void log_rrset_key(enum verbosity_value v, const char* str,
struct ub_packed_rrset_key* rrset);
/**
* Convert RR from RRset to string.
* @param rrset: structure with data.
* @param i: index of rr or RRSIG.
* @param now: time that is subtracted from ttl before printout. Can be 0.
* @param dest: destination string buffer. Must be nonNULL.
* @param dest_len: length of dest buffer (>0).
* @return false on failure.
*/
int packed_rr_to_string(struct ub_packed_rrset_key* rrset, size_t i,
time_t now, char* dest, size_t dest_len);
/**
* Print the string with prefix, one rr per line.
* @param v: at what verbosity level to print this.
* @param str: string of message.
* @param rrset: with name, and rdata, and rrsigs.
*/
void log_packed_rrset(enum verbosity_value v, const char* str,
struct ub_packed_rrset_key* rrset);
/**
* Allocate rrset in region - no more locks needed
* @param key: a (just from rrset cache looked up) rrset key + valid,
* packed data record.
* @param region: where to alloc the copy
* @param now: adjust the TTLs to be relative (subtract from all TTLs).
* @return new region-alloced rrset key or NULL on alloc failure.
*/
struct ub_packed_rrset_key* packed_rrset_copy_region(
struct ub_packed_rrset_key* key, struct regional* region,
time_t now);
/**
* Allocate rrset with malloc (from region or you are holding the lock).
* @param key: key with data entry.
* @param alloc: alloc_cache to create rrset_keys
* @param now: adjust the TTLs to be absolute (add to all TTLs).
* @return new region-alloced rrset key or NULL on alloc failure.
*/
struct ub_packed_rrset_key* packed_rrset_copy_alloc(
struct ub_packed_rrset_key* key, struct alloc_cache* alloc,
time_t now);
#endif /* UTIL_DATA_PACKED_RRSET_H */

409
external/unbound/util/fptr_wlist.c vendored Normal file
View File

@@ -0,0 +1,409 @@
/*
* util/fptr_wlist.c - function pointer whitelists.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains functions that check function pointers.
* The functions contain a whitelist of known good callback values.
* Any other values lead to an error.
*
* Due to the listing nature, this file violates all the modularization
* boundaries in the program.
*/
#include "config.h"
#include "util/fptr_wlist.h"
#include "util/mini_event.h"
#include "services/outside_network.h"
#include "services/mesh.h"
#include "services/localzone.h"
#include "services/cache/infra.h"
#include "services/cache/rrset.h"
#include "dns64/dns64.h"
#include "iterator/iterator.h"
#include "iterator/iter_fwd.h"
#include "validator/validator.h"
#include "validator/val_anchor.h"
#include "validator/val_nsec3.h"
#include "validator/val_sigcrypt.h"
#include "validator/val_kentry.h"
#include "validator/val_neg.h"
#include "validator/autotrust.h"
#include "util/data/msgreply.h"
#include "util/data/packed_rrset.h"
#include "util/storage/slabhash.h"
#include "util/storage/dnstree.h"
#include "util/locks.h"
#include "libunbound/libworker.h"
#include "libunbound/context.h"
#include "libunbound/worker.h"
#include "util/tube.h"
#include "util/config_file.h"
#ifdef UB_ON_WINDOWS
#include "winrc/win_svc.h"
#endif
#ifdef WITH_PYTHONMODULE
#include "pythonmod/pythonmod.h"
#endif
/** whitelist of comm point callbacks that handle incoming traffic */
int
fptr_whitelist_comm_point(comm_point_callback_t *fptr)
{
	/* each comparison is against a known-good callback address */
	if(fptr == &worker_handle_request) return 1;
	if(fptr == &outnet_udp_cb) return 1;
	if(fptr == &outnet_tcp_cb) return 1;
	if(fptr == &tube_handle_listen) return 1;
	return 0;
}
/** whitelist of raw comm point callbacks (tube and remote control) */
int
fptr_whitelist_comm_point_raw(comm_point_callback_t *fptr)
{
	if(fptr == &tube_handle_listen) return 1;
	else if(fptr == &tube_handle_write) return 1;
	else if(fptr == &remote_accept_callback) return 1;
	else if(fptr == &remote_control_callback) return 1;
	return 0;
}
/** whitelist of comm timer callbacks */
int
fptr_whitelist_comm_timer(void (*fptr)(void*))
{
	if(fptr == &pending_udp_timer_cb) return 1;
	else if(fptr == &outnet_tcptimer) return 1;
	else if(fptr == &pending_udp_timer_delay_cb) return 1;
	else if(fptr == &worker_stat_timer_cb) return 1;
	else if(fptr == &worker_probe_timer_cb) return 1;
#ifdef UB_ON_WINDOWS
	else if(fptr == &wsvc_cron_cb) return 1;
#endif
	return 0;
}
/** whitelist of comm signal callbacks */
int
fptr_whitelist_comm_signal(void (*fptr)(int, void*))
{
	if(fptr == &worker_sighandler) return 1;
	return 0;
}
/** whitelist of start-accept callbacks */
int fptr_whitelist_start_accept(void (*fptr)(void*))
{
	if(fptr == &worker_start_accept) return 1;
	return 0;
}
/** whitelist of stop-accept callbacks */
int fptr_whitelist_stop_accept(void (*fptr)(void*))
{
	if(fptr == &worker_stop_accept) return 1;
	return 0;
}
/** whitelist of event structure callbacks (checked by netevent) */
int
fptr_whitelist_event(void (*fptr)(int, short, void *))
{
	if(fptr == &comm_point_udp_callback) return 1;
	else if(fptr == &comm_point_udp_ancil_callback) return 1;
	else if(fptr == &comm_point_tcp_accept_callback) return 1;
	else if(fptr == &comm_point_tcp_handle_callback) return 1;
	else if(fptr == &comm_timer_callback) return 1;
	else if(fptr == &comm_signal_callback) return 1;
	else if(fptr == &comm_point_local_handle_callback) return 1;
	else if(fptr == &comm_point_raw_handle_callback) return 1;
	else if(fptr == &tube_handle_signal) return 1;
	else if(fptr == &comm_base_handle_slow_accept) return 1;
#ifdef UB_ON_WINDOWS
	else if(fptr == &worker_win_stop_cb) return 1;
#endif
	return 0;
}
/** whitelist of callbacks for udp replies to outgoing queries */
int
fptr_whitelist_pending_udp(comm_point_callback_t *fptr)
{
	if(fptr == &serviced_udp_callback) return 1;
	else if(fptr == &worker_handle_reply) return 1;
	else if(fptr == &libworker_handle_reply) return 1;
	return 0;
}
/** whitelist of callbacks for tcp replies to outgoing queries */
int
fptr_whitelist_pending_tcp(comm_point_callback_t *fptr)
{
	if(fptr == &serviced_tcp_callback) return 1;
	else if(fptr == &worker_handle_reply) return 1;
	else if(fptr == &libworker_handle_reply) return 1;
	return 0;
}
/** whitelist of serviced query callbacks */
int
fptr_whitelist_serviced_query(comm_point_callback_t *fptr)
{
	if(fptr == &worker_handle_service_reply) return 1;
	else if(fptr == &libworker_handle_service_reply) return 1;
	return 0;
}
/** whitelist of rbtree compare callbacks */
int
fptr_whitelist_rbtree_cmp(int (*fptr) (const void *, const void *))
{
	if(fptr == &mesh_state_compare) return 1;
	else if(fptr == &mesh_state_ref_compare) return 1;
	else if(fptr == &addr_tree_compare) return 1;
	else if(fptr == &local_zone_cmp) return 1;
	else if(fptr == &local_data_cmp) return 1;
	else if(fptr == &fwd_cmp) return 1;
	else if(fptr == &pending_cmp) return 1;
	else if(fptr == &serviced_cmp) return 1;
	else if(fptr == &name_tree_compare) return 1;
	else if(fptr == &order_lock_cmp) return 1;
	else if(fptr == &codeline_cmp) return 1;
	else if(fptr == &nsec3_hash_cmp) return 1;
	else if(fptr == &mini_ev_cmp) return 1;
	else if(fptr == &anchor_cmp) return 1;
	else if(fptr == &canonical_tree_compare) return 1;
	else if(fptr == &context_query_cmp) return 1;
	else if(fptr == &val_neg_data_compare) return 1;
	else if(fptr == &val_neg_zone_compare) return 1;
	else if(fptr == &probetree_cmp) return 1;
	else if(fptr == &replay_var_compare) return 1;
	return 0;
}
/** whitelist of lruhash size callbacks */
int
fptr_whitelist_hash_sizefunc(lruhash_sizefunc_t fptr)
{
	if(fptr == &msgreply_sizefunc) return 1;
	else if(fptr == &ub_rrset_sizefunc) return 1;
	else if(fptr == &infra_sizefunc) return 1;
	else if(fptr == &key_entry_sizefunc) return 1;
	else if(fptr == &test_slabhash_sizefunc) return 1;
	return 0;
}
/** whitelist of lruhash key-compare callbacks */
int
fptr_whitelist_hash_compfunc(lruhash_compfunc_t fptr)
{
	if(fptr == &query_info_compare) return 1;
	else if(fptr == &ub_rrset_compare) return 1;
	else if(fptr == &infra_compfunc) return 1;
	else if(fptr == &key_entry_compfunc) return 1;
	else if(fptr == &test_slabhash_compfunc) return 1;
	return 0;
}
/** whitelist of lruhash key-delete callbacks */
int
fptr_whitelist_hash_delkeyfunc(lruhash_delkeyfunc_t fptr)
{
	if(fptr == &query_entry_delete) return 1;
	else if(fptr == &ub_rrset_key_delete) return 1;
	else if(fptr == &infra_delkeyfunc) return 1;
	else if(fptr == &key_entry_delkeyfunc) return 1;
	else if(fptr == &test_slabhash_delkey) return 1;
	return 0;
}
/** whitelist of lruhash data-delete callbacks */
int
fptr_whitelist_hash_deldatafunc(lruhash_deldatafunc_t fptr)
{
	if(fptr == &reply_info_delete) return 1;
	else if(fptr == &rrset_data_delete) return 1;
	else if(fptr == &infra_deldatafunc) return 1;
	else if(fptr == &key_entry_deldatafunc) return 1;
	else if(fptr == &test_slabhash_deldata) return 1;
	return 0;
}
/** whitelist of lruhash markdel callbacks; NULL is an allowed value */
int
fptr_whitelist_hash_markdelfunc(lruhash_markdelfunc_t fptr)
{
	if(fptr == NULL) return 1;
	else if(fptr == &rrset_markdel) return 1;
	return 0;
}
/** whitelist env->send_query callbacks */
int
fptr_whitelist_modenv_send_query(struct outbound_entry* (*fptr)(
	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
	uint16_t flags, int dnssec, int want_dnssec, int nocaps,
	struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen,
	struct module_qstate* q))
{
	if(fptr == &worker_send_query) return 1;
	else if(fptr == &libworker_send_query) return 1;
	return 0;
}
/** whitelist env->detach_subs callbacks */
int
fptr_whitelist_modenv_detach_subs(void (*fptr)(
	struct module_qstate* qstate))
{
	if(fptr == &mesh_detach_subs) return 1;
	return 0;
}
/** whitelist env->attach_sub callbacks */
int
fptr_whitelist_modenv_attach_sub(int (*fptr)(
	struct module_qstate* qstate, struct query_info* qinfo,
	uint16_t qflags, int prime, struct module_qstate** newq))
{
	if(fptr == &mesh_attach_sub) return 1;
	return 0;
}
/** whitelist env->kill_sub callbacks */
int
fptr_whitelist_modenv_kill_sub(void (*fptr)(struct module_qstate* newq))
{
	if(fptr == &mesh_state_delete) return 1;
	return 0;
}
/** whitelist env->detect_cycle callbacks */
int
fptr_whitelist_modenv_detect_cycle(int (*fptr)(
	struct module_qstate* qstate, struct query_info* qinfo,
	uint16_t flags, int prime))
{
	if(fptr == &mesh_detect_cycle) return 1;
	return 0;
}
/** whitelist of module init functions */
int
fptr_whitelist_mod_init(int (*fptr)(struct module_env* env, int id))
{
	if(fptr == &iter_init) return 1;
	else if(fptr == &val_init) return 1;
	else if(fptr == &dns64_init) return 1;
#ifdef WITH_PYTHONMODULE
	else if(fptr == &pythonmod_init) return 1;
#endif
	return 0;
}
/** whitelist of module deinit functions */
int
fptr_whitelist_mod_deinit(void (*fptr)(struct module_env* env, int id))
{
	if(fptr == &iter_deinit) return 1;
	else if(fptr == &val_deinit) return 1;
	else if(fptr == &dns64_deinit) return 1;
#ifdef WITH_PYTHONMODULE
	else if(fptr == &pythonmod_deinit) return 1;
#endif
	return 0;
}
/** whitelist of module operate functions */
int
fptr_whitelist_mod_operate(void (*fptr)(struct module_qstate* qstate,
	enum module_ev event, int id, struct outbound_entry* outbound))
{
	if(fptr == &iter_operate) return 1;
	else if(fptr == &val_operate) return 1;
	else if(fptr == &dns64_operate) return 1;
#ifdef WITH_PYTHONMODULE
	else if(fptr == &pythonmod_operate) return 1;
#endif
	return 0;
}
/** whitelist of module inform_super functions */
int
fptr_whitelist_mod_inform_super(void (*fptr)(
	struct module_qstate* qstate, int id, struct module_qstate* super))
{
	if(fptr == &iter_inform_super) return 1;
	else if(fptr == &val_inform_super) return 1;
	else if(fptr == &dns64_inform_super) return 1;
#ifdef WITH_PYTHONMODULE
	else if(fptr == &pythonmod_inform_super) return 1;
#endif
	return 0;
}
/** whitelist of module clear functions */
int
fptr_whitelist_mod_clear(void (*fptr)(struct module_qstate* qstate,
	int id))
{
	if(fptr == &iter_clear) return 1;
	else if(fptr == &val_clear) return 1;
	else if(fptr == &dns64_clear) return 1;
#ifdef WITH_PYTHONMODULE
	else if(fptr == &pythonmod_clear) return 1;
#endif
	return 0;
}
/** whitelist of module get_mem functions */
int
fptr_whitelist_mod_get_mem(size_t (*fptr)(struct module_env* env, int id))
{
	if(fptr == &iter_get_mem) return 1;
	else if(fptr == &val_get_mem) return 1;
	else if(fptr == &dns64_get_mem) return 1;
#ifdef WITH_PYTHONMODULE
	else if(fptr == &pythonmod_get_mem) return 1;
#endif
	return 0;
}
/** whitelist of alloc_cache cleanup callbacks */
int
fptr_whitelist_alloc_cleanup(void (*fptr)(void*))
{
	if(fptr == &worker_alloc_cleanup) return 1;
	return 0;
}
/** whitelist of tube listen callbacks */
int fptr_whitelist_tube_listen(tube_callback_t* fptr)
{
	if(fptr == &worker_handle_control_cmd) return 1;
	else if(fptr == &libworker_handle_control_cmd) return 1;
	return 0;
}
/** whitelist of mesh callback functions */
int fptr_whitelist_mesh_cb(mesh_cb_func_t fptr)
{
	if(fptr == &libworker_fg_done_cb) return 1;
	else if(fptr == &libworker_bg_done_cb) return 1;
	else if(fptr == &libworker_event_done_cb) return 1;
	else if(fptr == &probe_answer_cb) return 1;
	return 0;
}
/** whitelist of config print callbacks */
int fptr_whitelist_print_func(void (*fptr)(char*,void*))
{
	if(fptr == &config_print_func) return 1;
	else if(fptr == &config_collate_func) return 1;
	else if(fptr == &remote_get_opt_ssl) return 1;
	return 0;
}

359
external/unbound/util/fptr_wlist.h vendored Normal file
View File

@@ -0,0 +1,359 @@
/*
* util/fptr_wlist.h - function pointer whitelists.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains functions that check function pointers.
* The functions contain a whitelist of known good callback values.
* Any other values lead to an error.
*
* This prevent heap overflow based exploits, where the callback pointer
* is overwritten by a buffer overflow (apart from this defense, buffer
* overflows should be fixed of course).
*
* Function pointers are used in
* o network code callbacks.
* o rbtree, lruhash, region data manipulation
* in lruhash, the assertions are before the critical regions.
* in other places, assertions are before the callback.
* o module operations.
*/
#ifndef UTIL_FPTR_WLIST_H
#define UTIL_FPTR_WLIST_H
#include "util/netevent.h"
#include "util/storage/lruhash.h"
#include "util/module.h"
#include "util/tube.h"
#include "services/mesh.h"
/**
 * Macro to perform an assertion check for fptr wlist checks.
 * Does not get disabled in optimize mode. Check adds security by layers.
 */
#if defined(EXPORT_ALL_SYMBOLS)
#define fptr_ok(x) /* nothing, dll-exe memory layout on win disables it */
#else
/* no trailing semicolon after while(0): the caller supplies it, so
 * fptr_ok(x); is exactly one statement and the macro stays safe inside
 * an unbraced if/else (a trailing ; here would add a stray empty
 * statement and break such uses). */
#define fptr_ok(x) \
	do { if(!(x)) \
		fatal_exit("%s:%d: %s: pointer whitelist %s failed", \
			__FILE__, __LINE__, __func__, #x); \
	} while(0)
#endif
/**
* Check function pointer whitelist for comm_point callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_comm_point(comm_point_callback_t *fptr);
/**
* Check function pointer whitelist for raw comm_point callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_comm_point_raw(comm_point_callback_t *fptr);
/**
* Check function pointer whitelist for comm_timer callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_comm_timer(void (*fptr)(void*));
/**
* Check function pointer whitelist for comm_signal callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_comm_signal(void (*fptr)(int, void*));
/**
* Check function pointer whitelist for start_accept callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_start_accept(void (*fptr)(void*));
/**
* Check function pointer whitelist for stop_accept callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_stop_accept(void (*fptr)(void*));
/**
* Check function pointer whitelist for event structure callback values.
* This is not called by libevent itself, but checked by netevent.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_event(void (*fptr)(int, short, void *));
/**
* Check function pointer whitelist for pending udp callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_pending_udp(comm_point_callback_t *fptr);
/**
* Check function pointer whitelist for pending tcp callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_pending_tcp(comm_point_callback_t *fptr);
/**
* Check function pointer whitelist for serviced query callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_serviced_query(comm_point_callback_t *fptr);
/**
* Check function pointer whitelist for rbtree cmp callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_rbtree_cmp(int (*fptr) (const void *, const void *));
/**
* Check function pointer whitelist for lruhash sizefunc callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_hash_sizefunc(lruhash_sizefunc_t fptr);
/**
* Check function pointer whitelist for lruhash compfunc callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_hash_compfunc(lruhash_compfunc_t fptr);
/**
* Check function pointer whitelist for lruhash delkeyfunc callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_hash_delkeyfunc(lruhash_delkeyfunc_t fptr);
/**
* Check function pointer whitelist for lruhash deldata callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_hash_deldatafunc(lruhash_deldatafunc_t fptr);
/**
* Check function pointer whitelist for lruhash markdel callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_hash_markdelfunc(lruhash_markdelfunc_t fptr);
/**
* Check function pointer whitelist for module_env send_query callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_modenv_send_query(struct outbound_entry* (*fptr)(
uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
uint16_t flags, int dnssec, int want_dnssec, int nocaps,
struct sockaddr_storage* addr, socklen_t addrlen,
uint8_t* zone, size_t zonelen,
struct module_qstate* q));
/**
* Check function pointer whitelist for module_env detach_subs callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_modenv_detach_subs(void (*fptr)(
struct module_qstate* qstate));
/**
* Check function pointer whitelist for module_env attach_sub callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_modenv_attach_sub(int (*fptr)(
struct module_qstate* qstate, struct query_info* qinfo,
uint16_t qflags, int prime, struct module_qstate** newq));
/**
* Check function pointer whitelist for module_env kill_sub callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_modenv_kill_sub(void (*fptr)(struct module_qstate* newq));
/**
* Check function pointer whitelist for module_env detect_cycle callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_modenv_detect_cycle(int (*fptr)(
struct module_qstate* qstate, struct query_info* qinfo,
uint16_t flags, int prime));
/**
* Check function pointer whitelist for module init call values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_mod_init(int (*fptr)(struct module_env* env, int id));
/**
* Check function pointer whitelist for module deinit call values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_mod_deinit(void (*fptr)(struct module_env* env, int id));
/**
* Check function pointer whitelist for module operate call values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_mod_operate(void (*fptr)(struct module_qstate* qstate,
enum module_ev event, int id, struct outbound_entry* outbound));
/**
* Check function pointer whitelist for module inform_super call values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_mod_inform_super(void (*fptr)(
struct module_qstate* qstate, int id, struct module_qstate* super));
/**
* Check function pointer whitelist for module clear call values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_mod_clear(void (*fptr)(struct module_qstate* qstate,
int id));
/**
* Check function pointer whitelist for module get_mem call values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_mod_get_mem(size_t (*fptr)(struct module_env* env, int id));
/**
* Check function pointer whitelist for alloc clear on id overflow call values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_alloc_cleanup(void (*fptr)(void*));
/**
* Check function pointer whitelist for tube listen handler values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_tube_listen(tube_callback_t* fptr);
/**
* Check function pointer whitelist for mesh state callback values.
*
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_mesh_cb(mesh_cb_func_t fptr);
/**
* Check function pointer whitelist for config_get_option func values.
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_print_func(void (*fptr)(char*,void*));
/** Due to module breakage by fptr wlist, these test app declarations
* are presented here */
/**
* compare two order_ids from lock-verify test app
* @param e1: first order_id
* @param e2: second order_id
* @return compare code -1, 0, +1 (like memcmp).
*/
int order_lock_cmp(const void* e1, const void* e2);
/**
* compare two codeline structs for rbtree from memstats test app
* @param a: codeline
* @param b: codeline
* @return compare code -1, 0, +1 (like memcmp).
*/
int codeline_cmp(const void* a, const void* b);
/** compare two replay_vars */
int replay_var_compare(const void* a, const void* b);
#endif /* UTIL_FPTR_WLIST_H */

5408
external/unbound/util/iana_ports.inc vendored Normal file

File diff suppressed because it is too large Load Diff

264
external/unbound/util/locks.c vendored Normal file
View File

@@ -0,0 +1,264 @@
/**
* util/locks.c - unbound locking primitives
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
* Implementation of locking and threading support.
* A place for locking debug code since most locking functions are macros.
*/
#include "config.h"
#include "util/locks.h"
#include <signal.h>
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
/** block all signals, masks them away.
 * Fills a sigset with every signal and installs it as the calling
 * thread's (or, without threads, the process's) blocked-signal mask.
 * Calls fatal_exit on failure. No-op when no signal API is available. */
void
ub_thread_blocksigs(void)
{
#if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS) || defined(HAVE_SIGPROCMASK)
# if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS)
	int err;	/* pthread/solaris report errors via the return value */
# endif
	sigset_t sigset;
	sigfillset(&sigset);
#ifdef HAVE_PTHREAD
	if((err=pthread_sigmask(SIG_SETMASK, &sigset, NULL)))
		fatal_exit("pthread_sigmask: %s", strerror(err));
#else
# ifdef HAVE_SOLARIS_THREADS
	if((err=thr_sigsetmask(SIG_SETMASK, &sigset, NULL)))
		fatal_exit("thr_sigsetmask: %s", strerror(err));
# else
	/* have nothing, do single process signal mask */
	if(sigprocmask(SIG_SETMASK, &sigset, NULL))
		fatal_exit("sigprocmask: %s", strerror(errno));
# endif /* HAVE_SOLARIS_THREADS */
#endif /* HAVE_PTHREAD */
#endif /* have signal stuff */
}
/** unblock one signal, so we can catch it.
 * @param sig: the signal number to remove from the blocked mask.
 * Calls fatal_exit on failure; no-op when no signal API is available. */
void ub_thread_sig_unblock(int sig)
{
#if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS) || defined(HAVE_SIGPROCMASK)
# if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS)
	int err;	/* pthread/solaris report errors via the return value */
# endif
	sigset_t sigset;
	/* build a set containing only the one signal to unblock */
	sigemptyset(&sigset);
	sigaddset(&sigset, sig);
#ifdef HAVE_PTHREAD
	if((err=pthread_sigmask(SIG_UNBLOCK, &sigset, NULL)))
		fatal_exit("pthread_sigmask: %s", strerror(err));
#else
# ifdef HAVE_SOLARIS_THREADS
	if((err=thr_sigsetmask(SIG_UNBLOCK, &sigset, NULL)))
		fatal_exit("thr_sigsetmask: %s", strerror(err));
# else
	/* have nothing, do single thread case */
	if(sigprocmask(SIG_UNBLOCK, &sigset, NULL))
		fatal_exit("sigprocmask: %s", strerror(errno));
# endif /* HAVE_SOLARIS_THREADS */
#endif /* HAVE_PTHREAD */
#else
	(void)sig;
#endif /* have signal stuff */
}
#if !defined(HAVE_PTHREAD) && !defined(HAVE_SOLARIS_THREADS) && !defined(HAVE_WINDOWS_THREADS)
/**
 * No threading available: fork a new process.
 * This means no shared data structure, and no locking.
 * Only the main thread ever returns. Exits on errors.
 * @param thr: the location where to store the thread-id.
 * @param func: function body of the thread. Return value of func is lost.
 * @param arg: user argument to func.
 */
void
ub_thr_fork_create(ub_thread_t* thr, void* (*func)(void*), void* arg)
{
	pid_t child = fork();
	if(child == -1) {
		/* fork failed: cannot continue */
		fatal_exit("could not fork: %s", strerror(errno));
	}
	if(child == 0) {
		/* child process: run the thread body, then terminate */
		*thr = (ub_thread_t)getpid();
		(void)(*func)(arg);
		exit(0);
	}
	/* parent: record the child's pid as the "thread" id */
	*thr = (ub_thread_t)child;
}
/**
 * There is no threading. Wait for a process to terminate.
 * Note that ub_thread_t is defined as pid_t.
 * @param thread: the process id to wait for.
 */
void ub_thr_fork_wait(ub_thread_t thread)
{
	pid_t child = (pid_t)thread;
	int exit_status = 0;
	/* reap the forked "thread"; log (do not abort) on failure */
	if(waitpid(child, &exit_status, 0) == -1)
		log_err("waitpid(%d): %s", (int)thread, strerror(errno));
	if(exit_status != 0)
		log_warn("process %d abnormal exit with status %d",
			(int)thread, exit_status);
}
#endif /* !defined(HAVE_PTHREAD) && !defined(HAVE_SOLARIS_THREADS) && !defined(HAVE_WINDOWS_THREADS) */
#ifdef HAVE_SOLARIS_THREADS
/** Solaris-threads wrapper: thr_getspecific returns the stored value
 * through an out-parameter rather than as the return value, so adapt
 * it to the pthread_getspecific-style interface used elsewhere.
 * LOCKRET logs a nonzero error code but does not abort. */
void* ub_thread_key_get(ub_thread_key_t key)
{
	void* ret=NULL;
	LOCKRET(thr_getspecific(key, &ret));
	return ret;
}
#endif
#ifdef HAVE_WINDOWS_THREADS
/** log a windows GetLastError message.
 * @param str: context string prepended to the message.
 * @param err: the windows error code to format and log. */
static void log_win_err(const char* str, DWORD err)
{
	LPTSTR buf;
	/* ask the system to allocate and format the message text for err;
	 * FormatMessage returns 0 on failure */
	if(FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
		FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_ALLOCATE_BUFFER,
		NULL, err, 0, (LPTSTR)&buf, 0, NULL) == 0) {
		/* could not format error message */
		log_err("%s, GetLastError=%d", str, (int)err);
		return;
	}
	log_err("%s, (err=%d): %s", str, (int)err, buf);
	/* buf was allocated by FormatMessage (ALLOCATE_BUFFER flag) */
	LocalFree(buf);
}
/** initialize the lock word to the unlocked state (0). */
void lock_basic_init(lock_basic_t* lock)
{
	/* implement own lock, because windows HANDLE as Mutex usage
	 * uses too many handles and would bog down the whole system. */
	/* the interlocked exchange also acts as a memory barrier so
	 * other processors observe the initialized value */
	(void)InterlockedExchange(lock, 0);
}
/** destroy the lock; nothing to free, just reset the word to unlocked. */
void lock_basic_destroy(lock_basic_t* lock)
{
	(void)InterlockedExchange(lock, 0);
}
/** acquire the lock: spin with a sleep-backoff until we atomically
 * flip the lock word from 0 to 1.  InterlockedExchange returns the
 * previous value; nonzero means another thread holds the lock. */
void lock_basic_lock(lock_basic_t* lock)
{
	LONG wait = 1; /* wait 1 msec at first */
	while(InterlockedExchange(lock, 1)) {
		/* if the old value was 1 then it was already locked */
		Sleep(wait); /* wait with sleep */
		/* exponential backoff for waiting, capped: the previous
		 * unbounded doubling could sleep for many seconds under
		 * long contention and eventually overflow the signed
		 * LONG into undefined behavior */
		if(wait < 1024)
			wait *= 2;
	}
	/* the old value was 0, but we inserted 1, we locked it! */
}
/** release the lock by storing 0 into the lock word. */
void lock_basic_unlock(lock_basic_t* lock)
{
	/* unlock it by inserting the value of 0. xchg for cache coherency. */
	(void)InterlockedExchange(lock, 0);
}
/** allocate a TLS slot; on failure, key 0 is used and an error logged. */
void ub_thread_key_create(ub_thread_key_t* key, void* f)
{
	*key = TlsAlloc();
	if(*key == TLS_OUT_OF_INDEXES) {
		*key = 0;
		log_win_err("TlsAlloc Failed(OUT_OF_INDEXES)", GetLastError());
	}
	/* NOTE(review): unlike pthread_key_create, f is stored as the
	 * initial value here, not used as a destructor — confirm callers
	 * rely on this (log.c passes NULL) */
	else ub_thread_key_set(*key, f);
}
/** store value v in TLS slot key; logs (does not abort) on failure. */
void ub_thread_key_set(ub_thread_key_t key, void* v)
{
	if(!TlsSetValue(key, v)) {
		log_win_err("TlsSetValue failed", GetLastError());
	}
}
/** fetch the value from TLS slot key. */
void* ub_thread_key_get(ub_thread_key_t key)
{
	void* ret = (void*)TlsGetValue(key);
	/* NULL is also a valid stored value; it is only an error when
	 * GetLastError reports one */
	if(ret == NULL && GetLastError() != ERROR_SUCCESS) {
		log_win_err("TlsGetValue failed", GetLastError());
	}
	return ret;
}
/** create a windows thread running func(arg); fatal_exit on failure. */
void ub_thread_create(ub_thread_t* thr, void* (*func)(void*), void* arg)
{
#ifndef HAVE__BEGINTHREADEX
	*thr = CreateThread(NULL, /* default security (no inherit handle) */
		0, /* default stack size */
		(LPTHREAD_START_ROUTINE)func, arg,
		0, /* default flags, run immediately */
		NULL); /* do not store thread identifier anywhere */
#else
	/* the _beginthreadex routine sets up for the C lib; aligns stack */
	*thr=(ub_thread_t)_beginthreadex(NULL, 0, (void*)func, arg, 0, NULL);
#endif
	if(*thr == NULL) {
		log_win_err("CreateThread failed", GetLastError());
		fatal_exit("thread create failed");
	}
}
/** return a handle for the calling thread.
 * NOTE(review): GetCurrentThread() returns a pseudo-handle that is only
 * meaningful within the calling thread; it cannot be handed to another
 * thread for ub_thread_join — confirm callers never do that. */
ub_thread_t ub_thread_self(void)
{
	return GetCurrentThread();
}
/** wait for the thread to terminate, then release its handle. */
void ub_thread_join(ub_thread_t thr)
{
	DWORD ret = WaitForSingleObject(thr, INFINITE);
	if(ret == WAIT_FAILED) {
		log_win_err("WaitForSingleObject(Thread):WAIT_FAILED",
			GetLastError());
	} else if(ret == WAIT_TIMEOUT) {
		/* should not happen with an INFINITE wait; log anyway */
		log_win_err("WaitForSingleObject(Thread):WAIT_TIMEOUT",
			GetLastError());
	}
	/* and close the handle to the thread */
	if(!CloseHandle(thr)) {
		log_win_err("CloseHandle(Thread) failed", GetLastError());
	}
}
#endif /* HAVE_WINDOWS_THREADS */

296
external/unbound/util/locks.h vendored Normal file
View File

@@ -0,0 +1,296 @@
/**
* util/locks.h - unbound locking primitives
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef UTIL_LOCKS_H
#define UTIL_LOCKS_H
/**
* \file
* Locking primitives.
* If pthreads is available, these are used.
* If no locking exists, they do nothing.
*
* The idea is to have different sorts of locks for different tasks.
* This allows the locking code to be ported more easily.
*
* Types of locks that are supported.
* o lock_rw: lock that has many readers and one writer (to a data entry).
* o lock_basic: simple mutex. Blocking, one person has access only.
* This lock is meant for non performance sensitive uses.
* o lock_quick: speed lock. For performance sensitive locking of critical
* sections. Could be implemented by a mutex or a spinlock.
*
* Also thread creation and deletion functions are defined here.
*/
/* if you define your own LOCKRET before including locks.h, you can get most
* locking functions without the dependency on log_err. */
#ifndef LOCKRET
#include "util/log.h"
/**
* The following macro is used to check the return value of the
* pthread calls. They return 0 on success and an errno on error.
* The errno is logged to the logfile with a descriptive comment.
*/
#define LOCKRET(func) do {\
int lockret_err; \
if( (lockret_err=(func)) != 0) \
log_err("%s at %d could not " #func ": %s", \
__FILE__, __LINE__, strerror(lockret_err)); \
} while(0)
#endif
/** DEBUG: use thread debug whenever possible */
#if defined(HAVE_PTHREAD) && defined(HAVE_PTHREAD_SPINLOCK_T) && defined(ENABLE_LOCK_CHECKS)
# define USE_THREAD_DEBUG
#endif
#ifdef USE_THREAD_DEBUG
/******************* THREAD DEBUG ************************/
/* (some) checking; to detect races and deadlocks. */
#include "testcode/checklocks.h"
#else /* USE_THREAD_DEBUG */
#define lock_protect(lock, area, size) /* nop */
#define lock_unprotect(lock, area) /* nop */
#define lock_get_mem(lock) (0) /* nothing */
#define checklock_start() /* nop */
#define checklock_stop() /* nop */
#ifdef HAVE_PTHREAD
#include <pthread.h>
/******************* PTHREAD ************************/
/** use pthread mutex for basic lock */
typedef pthread_mutex_t lock_basic_t;
/** small front for pthread init func, NULL is default attrs. */
#define lock_basic_init(lock) LOCKRET(pthread_mutex_init(lock, NULL))
#define lock_basic_destroy(lock) LOCKRET(pthread_mutex_destroy(lock))
#define lock_basic_lock(lock) LOCKRET(pthread_mutex_lock(lock))
#define lock_basic_unlock(lock) LOCKRET(pthread_mutex_unlock(lock))
#ifndef HAVE_PTHREAD_RWLOCK_T
/** in case rwlocks are not supported, use a mutex. */
typedef pthread_mutex_t lock_rw_t;
#define lock_rw_init(lock) LOCKRET(pthread_mutex_init(lock, NULL))
#define lock_rw_destroy(lock) LOCKRET(pthread_mutex_destroy(lock))
#define lock_rw_rdlock(lock) LOCKRET(pthread_mutex_lock(lock))
#define lock_rw_wrlock(lock) LOCKRET(pthread_mutex_lock(lock))
#define lock_rw_unlock(lock) LOCKRET(pthread_mutex_unlock(lock))
#else /* HAVE_PTHREAD_RWLOCK_T */
/** we use the pthread rwlock */
typedef pthread_rwlock_t lock_rw_t;
/** small front for pthread init func, NULL is default attrs. */
#define lock_rw_init(lock) LOCKRET(pthread_rwlock_init(lock, NULL))
#define lock_rw_destroy(lock) LOCKRET(pthread_rwlock_destroy(lock))
#define lock_rw_rdlock(lock) LOCKRET(pthread_rwlock_rdlock(lock))
#define lock_rw_wrlock(lock) LOCKRET(pthread_rwlock_wrlock(lock))
#define lock_rw_unlock(lock) LOCKRET(pthread_rwlock_unlock(lock))
#endif /* HAVE_PTHREAD_RWLOCK_T */
#ifndef HAVE_PTHREAD_SPINLOCK_T
/** in case spinlocks are not supported, use a mutex. */
typedef pthread_mutex_t lock_quick_t;
/** small front for pthread init func, NULL is default attrs. */
#define lock_quick_init(lock) LOCKRET(pthread_mutex_init(lock, NULL))
#define lock_quick_destroy(lock) LOCKRET(pthread_mutex_destroy(lock))
#define lock_quick_lock(lock) LOCKRET(pthread_mutex_lock(lock))
#define lock_quick_unlock(lock) LOCKRET(pthread_mutex_unlock(lock))
#else /* HAVE_PTHREAD_SPINLOCK_T */
/** use pthread spinlock for the quick lock */
typedef pthread_spinlock_t lock_quick_t;
/**
* allocate process private since this is available whether
* Thread Process-Shared Synchronization is supported or not.
* This means only threads inside this process may access the lock.
* (not threads from another process that shares memory).
* spinlocks are not supported on all pthread platforms.
*/
#define lock_quick_init(lock) LOCKRET(pthread_spin_init(lock, PTHREAD_PROCESS_PRIVATE))
#define lock_quick_destroy(lock) LOCKRET(pthread_spin_destroy(lock))
#define lock_quick_lock(lock) LOCKRET(pthread_spin_lock(lock))
#define lock_quick_unlock(lock) LOCKRET(pthread_spin_unlock(lock))
#endif /* HAVE SPINLOCK */
/** Thread creation */
typedef pthread_t ub_thread_t;
/** Pass where to store the thread_t in thr. Use default NULL attributes. */
#define ub_thread_create(thr, func, arg) LOCKRET(pthread_create(thr, NULL, func, arg))
/** get self id. */
#define ub_thread_self() pthread_self()
/** wait for another thread to terminate */
#define ub_thread_join(thread) LOCKRET(pthread_join(thread, NULL))
typedef pthread_key_t ub_thread_key_t;
#define ub_thread_key_create(key, f) LOCKRET(pthread_key_create(key, f))
#define ub_thread_key_set(key, v) LOCKRET(pthread_setspecific(key, v))
#define ub_thread_key_get(key) pthread_getspecific(key)
#else /* we do not HAVE_PTHREAD */
#ifdef HAVE_SOLARIS_THREADS
/******************* SOLARIS THREADS ************************/
#include <synch.h>
#include <thread.h>
typedef rwlock_t lock_rw_t;
#define lock_rw_init(lock) LOCKRET(rwlock_init(lock, USYNC_THREAD, NULL))
#define lock_rw_destroy(lock) LOCKRET(rwlock_destroy(lock))
#define lock_rw_rdlock(lock) LOCKRET(rw_rdlock(lock))
#define lock_rw_wrlock(lock) LOCKRET(rw_wrlock(lock))
#define lock_rw_unlock(lock) LOCKRET(rw_unlock(lock))
/** use basic mutex */
typedef mutex_t lock_basic_t;
#define lock_basic_init(lock) LOCKRET(mutex_init(lock, USYNC_THREAD, NULL))
#define lock_basic_destroy(lock) LOCKRET(mutex_destroy(lock))
#define lock_basic_lock(lock) LOCKRET(mutex_lock(lock))
#define lock_basic_unlock(lock) LOCKRET(mutex_unlock(lock))
/** No spinlocks in solaris threads API. Use a mutex. */
typedef mutex_t lock_quick_t;
#define lock_quick_init(lock) LOCKRET(mutex_init(lock, USYNC_THREAD, NULL))
#define lock_quick_destroy(lock) LOCKRET(mutex_destroy(lock))
#define lock_quick_lock(lock) LOCKRET(mutex_lock(lock))
#define lock_quick_unlock(lock) LOCKRET(mutex_unlock(lock))
/** Thread creation, create a default thread. */
typedef thread_t ub_thread_t;
#define ub_thread_create(thr, func, arg) LOCKRET(thr_create(NULL, NULL, func, arg, NULL, thr))
#define ub_thread_self() thr_self()
#define ub_thread_join(thread) LOCKRET(thr_join(thread, NULL, NULL))
typedef thread_key_t ub_thread_key_t;
#define ub_thread_key_create(key, f) LOCKRET(thr_keycreate(key, f))
#define ub_thread_key_set(key, v) LOCKRET(thr_setspecific(key, v))
void* ub_thread_key_get(ub_thread_key_t key);
#else /* we do not HAVE_SOLARIS_THREADS and no PTHREADS */
/******************* WINDOWS THREADS ************************/
#ifdef HAVE_WINDOWS_THREADS
#include <windows.h>
/* Use a mutex */
typedef LONG lock_rw_t;
#define lock_rw_init(lock) lock_basic_init(lock)
#define lock_rw_destroy(lock) lock_basic_destroy(lock)
#define lock_rw_rdlock(lock) lock_basic_lock(lock)
#define lock_rw_wrlock(lock) lock_basic_lock(lock)
#define lock_rw_unlock(lock) lock_basic_unlock(lock)
/** the basic lock is a mutex, implemented opaquely, for error handling. */
typedef LONG lock_basic_t;
void lock_basic_init(lock_basic_t* lock);
void lock_basic_destroy(lock_basic_t* lock);
void lock_basic_lock(lock_basic_t* lock);
void lock_basic_unlock(lock_basic_t* lock);
/** on windows no spinlock, use mutex too. */
typedef LONG lock_quick_t;
#define lock_quick_init(lock) lock_basic_init(lock)
#define lock_quick_destroy(lock) lock_basic_destroy(lock)
#define lock_quick_lock(lock) lock_basic_lock(lock)
#define lock_quick_unlock(lock) lock_basic_unlock(lock)
/** Thread creation, create a default thread. */
typedef HANDLE ub_thread_t;
void ub_thread_create(ub_thread_t* thr, void* (*func)(void*), void* arg);
ub_thread_t ub_thread_self(void);
void ub_thread_join(ub_thread_t thr);
typedef DWORD ub_thread_key_t;
void ub_thread_key_create(ub_thread_key_t* key, void* f);
void ub_thread_key_set(ub_thread_key_t key, void* v);
void* ub_thread_key_get(ub_thread_key_t key);
#else /* we do not HAVE_SOLARIS_THREADS, PTHREADS or WINDOWS_THREADS */
/******************* NO THREADS ************************/
#define THREADS_DISABLED 1
/** In case there is no thread support, define locks to do nothing */
typedef int lock_rw_t;
#define lock_rw_init(lock) /* nop */
#define lock_rw_destroy(lock) /* nop */
#define lock_rw_rdlock(lock) /* nop */
#define lock_rw_wrlock(lock) /* nop */
#define lock_rw_unlock(lock) /* nop */
/** define locks to do nothing */
typedef int lock_basic_t;
#define lock_basic_init(lock) /* nop */
#define lock_basic_destroy(lock) /* nop */
#define lock_basic_lock(lock) /* nop */
#define lock_basic_unlock(lock) /* nop */
/** define locks to do nothing */
typedef int lock_quick_t;
#define lock_quick_init(lock) /* nop */
#define lock_quick_destroy(lock) /* nop */
#define lock_quick_lock(lock) /* nop */
#define lock_quick_unlock(lock) /* nop */
/** Thread creation, threads do not exist */
typedef pid_t ub_thread_t;
/** ub_thread_create is simulated with fork (extremely heavy threads,
* with no shared memory). */
#define ub_thread_create(thr, func, arg) \
ub_thr_fork_create(thr, func, arg)
#define ub_thread_self() getpid()
#define ub_thread_join(thread) ub_thr_fork_wait(thread)
void ub_thr_fork_wait(ub_thread_t thread);
void ub_thr_fork_create(ub_thread_t* thr, void* (*func)(void*), void* arg);
typedef void* ub_thread_key_t;
#define ub_thread_key_create(key, f) (*(key)) = NULL
#define ub_thread_key_set(key, v) (key) = (v)
#define ub_thread_key_get(key) (key)
#endif /* HAVE_WINDOWS_THREADS */
#endif /* HAVE_SOLARIS_THREADS */
#endif /* HAVE_PTHREAD */
#endif /* USE_THREAD_DEBUG */
/**
* Block all signals for this thread.
* fatal exit on error.
*/
void ub_thread_blocksigs(void);
/**
* unblock one signal for this thread.
*/
void ub_thread_sig_unblock(int sig);
#endif /* UTIL_LOCKS_H */

485
external/unbound/util/log.c vendored Normal file
View File

@@ -0,0 +1,485 @@
/*
* util/log.c - implementation of the log code
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
* Implementation of log.h.
*/
#include "config.h"
#include "util/log.h"
#include "util/locks.h"
#include "ldns/sbuffer.h"
#include <stdarg.h>
#ifdef HAVE_TIME_H
#include <time.h>
#endif
#ifdef HAVE_SYSLOG_H
# include <syslog.h>
#else
/** define LOG_ constants */
# define LOG_CRIT 2
# define LOG_ERR 3
# define LOG_WARNING 4
# define LOG_NOTICE 5
# define LOG_INFO 6
# define LOG_DEBUG 7
#endif
#ifdef UB_ON_WINDOWS
# include "winrc/win_svc.h"
#endif
/* default verbosity */
enum verbosity_value verbosity = 0;
/** the file logged to. */
static FILE* logfile = 0;
/** if key has been created */
static int key_created = 0;
/** pthread key for thread ids in logfile */
static ub_thread_key_t logkey;
#ifndef THREADS_DISABLED
/** pthread mutex to protect FILE* */
static lock_quick_t log_lock;
#endif
/** the identity of this executable/process */
static const char* ident="unbound";
#if defined(HAVE_SYSLOG_H) || defined(UB_ON_WINDOWS)
/** are we using syslog(3) to log to */
static int logging_to_syslog = 0;
#endif /* HAVE_SYSLOG_H */
/** time to print in log, if NULL, use time(2) */
static time_t* log_now = NULL;
/** print time as ascii (UTC) or as seconds-since-1970 */
static int log_time_asc = 0;
/** Initialize (or switch) the logging destination.
 * @param filename: log file path; NULL or "" means stderr.
 * @param use_syslog: nonzero to log via syslog(3) / windows event log.
 * @param chrootdir: if set and a prefix of filename, it is stripped,
 *	because the file will be opened after chroot(2). */
void
log_init(const char* filename, int use_syslog, const char* chrootdir)
{
	FILE *f;
	if(!key_created) {
		/* first call ever: create the thread-id TLS key and the
		 * lock protecting the logfile pointer */
		key_created = 1;
		ub_thread_key_create(&logkey, NULL);
		lock_quick_init(&log_lock);
	}
	lock_quick_lock(&log_lock);
	if(logfile
#if defined(HAVE_SYSLOG_H) || defined(UB_ON_WINDOWS)
		|| logging_to_syslog
#endif
	) {
		/* already logging somewhere: announce the switch on the
		 * old channel before changing it */
		lock_quick_unlock(&log_lock); /* verbose() needs the lock */
		verbose(VERB_QUERY, "switching log to %s",
			use_syslog?"syslog":(filename&&filename[0]?filename:"stderr"));
		lock_quick_lock(&log_lock);
	}
	if(logfile && logfile != stderr)
		fclose(logfile);
#ifdef HAVE_SYSLOG_H
	if(logging_to_syslog) {
		closelog();
		logging_to_syslog = 0;
	}
	if(use_syslog) {
		/* do not delay opening until first write, because we may
		 * chroot and no longer be able to access dev/log and so on */
		openlog(ident, LOG_NDELAY, LOG_DAEMON);
		logging_to_syslog = 1;
		lock_quick_unlock(&log_lock);
		return;
	}
#elif defined(UB_ON_WINDOWS)
	if(logging_to_syslog) {
		logging_to_syslog = 0;
	}
	if(use_syslog) {
		/* on windows "syslog" means the event log */
		logging_to_syslog = 1;
		lock_quick_unlock(&log_lock);
		return;
	}
#endif /* HAVE_SYSLOG_H */
	if(!filename || !filename[0]) {
		/* no filename given: fall back to stderr */
		logfile = stderr;
		lock_quick_unlock(&log_lock);
		return;
	}
	/* open the file for logging */
	if(chrootdir && chrootdir[0] && strncmp(filename, chrootdir,
		strlen(chrootdir)) == 0)
		filename += strlen(chrootdir);
	f = fopen(filename, "a");
	if(!f) {
		/* unlock before log_err: log_vmsg takes the lock itself */
		lock_quick_unlock(&log_lock);
		log_err("Could not open logfile %s: %s", filename,
			strerror(errno));
		return;
	}
#ifndef UB_ON_WINDOWS
	/* line buffering does not work on windows */
	setvbuf(f, NULL, (int)_IOLBF, 0);
#endif
	logfile = f;
	lock_quick_unlock(&log_lock);
}
/** Redirect logging to the given stream; caller owns the FILE*.
 * Takes the log lock so concurrent writers see a consistent value. */
void log_file(FILE *f)
{
	lock_quick_lock(&log_lock);
	logfile = f;
	lock_quick_unlock(&log_lock);
}
/** Store this thread's id pointer in TLS; log_vmsg prints it per line. */
void log_thread_set(int* num)
{
	ub_thread_key_set(logkey, num);
}
/** Set the identity string printed in log lines and used by openlog.
 * The string is not copied; caller must keep it alive. */
void log_ident_set(const char* id)
{
	ident = id;
}
/** Use *t as the log timestamp instead of time(2); NULL restores
 * time(2). The pointer is kept, not copied; caller keeps it alive. */
void log_set_time(time_t* t)
{
	log_now = t;
}
/** Toggle ascii timestamps (nonzero) vs seconds-since-1970 (zero). */
void log_set_time_asc(int use_asc)
{
	log_time_asc = use_asc;
}
/** Central log formatter: renders format+args and writes the line to
 * syslog / windows event log / the logfile stream, whichever is active.
 * @param pri: syslog priority (only used for syslog output).
 * @param type: severity label printed in the line ("info", "error", ...).
 * @param format: printf-style format string.
 * @param args: arguments for format. */
void
log_vmsg(int pri, const char* type,
	const char *format, va_list args)
{
	char message[MAXSYSLOGMSGLEN];
	/* per-thread id set by log_thread_set; may be NULL */
	unsigned int* tid = (unsigned int*)ub_thread_key_get(logkey);
	time_t now;
#if defined(HAVE_STRFTIME) && defined(HAVE_LOCALTIME_R)
	char tmbuf[32];
	struct tm tm;
#elif defined(UB_ON_WINDOWS)
	char tmbuf[128], dtbuf[128];
#endif
	(void)pri;
	vsnprintf(message, sizeof(message), format, args);
#ifdef HAVE_SYSLOG_H
	if(logging_to_syslog) {
		syslog(pri, "[%d:%x] %s: %s",
			(int)getpid(), tid?*tid:0, type, message);
		return;
	}
#elif defined(UB_ON_WINDOWS)
	if(logging_to_syslog) {
		char m[32768];
		/* NOTE(review): RegisterEventSource returns a HANDLE, not
		 * HANDLE*; this works only because HANDLE is a pointer
		 * typedef — confirm/cleanup */
		HANDLE* s;
		LPCTSTR str = m;
		/* map the textual severity to event-log type codes */
		DWORD tp = MSG_GENERIC_ERR;
		WORD wt = EVENTLOG_ERROR_TYPE;
		if(strcmp(type, "info") == 0) {
			tp=MSG_GENERIC_INFO;
			wt=EVENTLOG_INFORMATION_TYPE;
		} else if(strcmp(type, "warning") == 0) {
			tp=MSG_GENERIC_WARN;
			wt=EVENTLOG_WARNING_TYPE;
		} else if(strcmp(type, "notice") == 0
			|| strcmp(type, "debug") == 0) {
			tp=MSG_GENERIC_SUCCESS;
			wt=EVENTLOG_SUCCESS;
		}
		snprintf(m, sizeof(m), "[%s:%x] %s: %s",
			ident, tid?*tid:0, type, message);
		s = RegisterEventSource(NULL, SERVICE_NAME);
		if(!s) return;
		ReportEvent(s, wt, 0, tp, NULL, 1, 0, &str, NULL);
		DeregisterEventSource(s);
		return;
	}
#endif /* HAVE_SYSLOG_H */
	lock_quick_lock(&log_lock);
	if(!logfile) {
		lock_quick_unlock(&log_lock);
		return;
	}
	/* use the override time if set (see log_set_time) */
	if(log_now)
		now = (time_t)*log_now;
	else now = (time_t)time(NULL);
#if defined(HAVE_STRFTIME) && defined(HAVE_LOCALTIME_R)
	if(log_time_asc && strftime(tmbuf, sizeof(tmbuf), "%b %d %H:%M:%S",
		localtime_r(&now, &tm))%(sizeof(tmbuf)) != 0) {
		/* %sizeof buf!=0 because old strftime returned max on error */
		fprintf(logfile, "%s %s[%d:%x] %s: %s\n", tmbuf,
			ident, (int)getpid(), tid?*tid:0, type, message);
	} else
#elif defined(UB_ON_WINDOWS)
	if(log_time_asc && GetTimeFormat(LOCALE_USER_DEFAULT, 0, NULL, NULL,
		tmbuf, sizeof(tmbuf)) && GetDateFormat(LOCALE_USER_DEFAULT, 0,
		NULL, NULL, dtbuf, sizeof(dtbuf))) {
		fprintf(logfile, "%s %s %s[%d:%x] %s: %s\n", dtbuf, tmbuf,
			ident, (int)getpid(), tid?*tid:0, type, message);
	} else
#endif
	/* fallback: numeric timestamp */
	fprintf(logfile, "[" ARG_LL "d] %s[%d:%x] %s: %s\n", (long long)now,
		ident, (int)getpid(), tid?*tid:0, type, message);
#ifdef UB_ON_WINDOWS
	/* line buffering does not work on windows */
	fflush(logfile);
#endif
	lock_quick_unlock(&log_lock);
}
/**
 * implementation of log_info.
 * Forwards to log_vmsg at LOG_INFO priority with the "info" tag.
 * @param format: format string printf-style. Arguments follow.
 */
void
log_info(const char *format, ...)
{
	va_list args;
	va_start(args, format);
	log_vmsg(LOG_INFO, "info", format, args);
	va_end(args);
}
/**
 * implementation of log_err.
 * Forwards to log_vmsg at LOG_ERR priority with the "error" tag.
 * @param format: format string printf-style. Arguments follow.
 */
void
log_err(const char *format, ...)
{
	va_list args;
	va_start(args, format);
	log_vmsg(LOG_ERR, "error", format, args);
	va_end(args);
}
/**
 * implementation of log_warn.
 * Forwards to log_vmsg at LOG_WARNING priority with the "warning" tag.
 * @param format: format string printf-style. Arguments follow.
 */
void
log_warn(const char *format, ...)
{
	va_list args;
	va_start(args, format);
	log_vmsg(LOG_WARNING, "warning", format, args);
	va_end(args);
}
/**
 * implementation of fatal_exit.
 * Logs the message at LOG_CRIT priority and then terminates the process
 * with exit(1); this function does not return.
 * @param format: format string printf-style. Arguments follow.
 */
void
fatal_exit(const char *format, ...)
{
	va_list args;
	va_start(args, format);
	log_vmsg(LOG_CRIT, "fatal error", format, args);
	va_end(args);
	exit(1);
}
/**
 * implementation of verbose.
 * Emits the message only when the global verbosity is at least the
 * requested level; the level selects the syslog priority and tag.
 * @param level: verbose level for this message.
 * @param format: format string printf-style. Arguments follow.
 */
void
verbose(enum verbosity_value level, const char* format, ...)
{
	va_list ap;
	va_start(ap, format);
	if(verbosity >= level) {
		switch(level) {
		case VERB_OPS:
			log_vmsg(LOG_NOTICE, "notice", format, ap);
			break;
		case VERB_DETAIL:
			log_vmsg(LOG_INFO, "info", format, ap);
			break;
		default:
			log_vmsg(LOG_DEBUG, "debug", format, ap);
			break;
		}
	}
	va_end(ap);
}
/** log hex data: print a data region as uppercase hex via verbose(),
 * one chunk of at most 256 bytes (512 hex chars) per log line */
static void
log_hex_f(enum verbosity_value v, const char* msg, void* data, size_t length)
{
	const char* hexchar = "0123456789ABCDEF";
	uint8_t* bytes = (uint8_t*)data;
	char out[1024+1]; /* room for one chunk of hex chars + \0 */
	const size_t chunk = 512/2; /* bytes rendered per line */
	size_t off;
	if(length == 0) {
		/* nothing to dump, print length-zero marker only */
		verbose(v, "%s[%u]", msg, (unsigned)length);
		return;
	}
	for(off = 0; off < length; off += chunk) {
		size_t count = length - off;
		size_t k;
		if(count > chunk)
			count = chunk;
		for(k = 0; k < count; k++) {
			out[2*k] = hexchar[ bytes[off+k] >> 4 ];
			out[2*k + 1] = hexchar[ bytes[off+k] & 0xF ];
		}
		out[2*count] = 0;
		verbose(v, "%s[%u:%u] %.*s", msg, (unsigned)length,
			(unsigned)off, (int)(2*count), out);
	}
}
/**
 * Hex-dump data to the log at the current global verbosity level.
 * @param msg: string desc to accompany the hexdump.
 * @param data: data to dump in hex format.
 * @param length: length of data.
 */
void
log_hex(const char* msg, void* data, size_t length)
{
	log_hex_f(verbosity, msg, data, length);
}
/**
 * Hex-dump the contents of a buffer (begin..limit) to the log.
 * @param level: verbosity level; nothing is printed when the global
 *	verbosity is below it.
 * @param msg: string desc to print with the dump.
 * @param buf: the buffer to dump.
 */
void log_buf(enum verbosity_value level, const char* msg, sldns_buffer* buf)
{
	if(verbosity < level)
		return;
	log_hex_f(level, msg, sldns_buffer_begin(buf), sldns_buffer_limit(buf));
}
#ifdef USE_WINSOCK
/**
 * Convert a winsock error code into a human readable string.
 * @param err: error code from WSAGetLastError().
 * @return static string. Unknown codes are formatted into a shared
 *	static buffer, so that result is overwritten by a later call
 *	with another unknown code (not thread-safe for that case).
 */
char* wsa_strerror(DWORD err)
{
	/* scratch space for codes without a fixed message */
	static char unknown[32];

	switch(err) {
	case WSA_INVALID_HANDLE: return "Specified event object handle is invalid.";
	case WSA_NOT_ENOUGH_MEMORY: return "Insufficient memory available.";
	case WSA_INVALID_PARAMETER: return "One or more parameters are invalid.";
	case WSA_OPERATION_ABORTED: return "Overlapped operation aborted.";
	case WSA_IO_INCOMPLETE: return "Overlapped I/O event object not in signaled state.";
	case WSA_IO_PENDING: return "Overlapped operations will complete later.";
	case WSAEINTR: return "Interrupted function call.";
	case WSAEBADF: return "File handle is not valid.";
	case WSAEACCES: return "Permission denied.";
	case WSAEFAULT: return "Bad address.";
	case WSAEINVAL: return "Invalid argument.";
	case WSAEMFILE: return "Too many open files.";
	case WSAEWOULDBLOCK: return "Resource temporarily unavailable.";
	case WSAEINPROGRESS: return "Operation now in progress.";
	case WSAEALREADY: return "Operation already in progress.";
	case WSAENOTSOCK: return "Socket operation on nonsocket.";
	case WSAEDESTADDRREQ: return "Destination address required.";
	case WSAEMSGSIZE: return "Message too long.";
	case WSAEPROTOTYPE: return "Protocol wrong type for socket.";
	case WSAENOPROTOOPT: return "Bad protocol option.";
	case WSAEPROTONOSUPPORT: return "Protocol not supported.";
	case WSAESOCKTNOSUPPORT: return "Socket type not supported.";
	case WSAEOPNOTSUPP: return "Operation not supported.";
	case WSAEPFNOSUPPORT: return "Protocol family not supported.";
	case WSAEAFNOSUPPORT: return "Address family not supported by protocol family.";
	case WSAEADDRINUSE: return "Address already in use.";
	case WSAEADDRNOTAVAIL: return "Cannot assign requested address.";
	case WSAENETDOWN: return "Network is down.";
	case WSAENETUNREACH: return "Network is unreachable.";
	case WSAENETRESET: return "Network dropped connection on reset.";
	case WSAECONNABORTED: return "Software caused connection abort.";
	case WSAECONNRESET: return "Connection reset by peer.";
	case WSAENOBUFS: return "No buffer space available.";
	case WSAEISCONN: return "Socket is already connected.";
	case WSAENOTCONN: return "Socket is not connected.";
	case WSAESHUTDOWN: return "Cannot send after socket shutdown.";
	case WSAETOOMANYREFS: return "Too many references.";
	case WSAETIMEDOUT: return "Connection timed out.";
	case WSAECONNREFUSED: return "Connection refused.";
	case WSAELOOP: return "Cannot translate name.";
	case WSAENAMETOOLONG: return "Name too long.";
	case WSAEHOSTDOWN: return "Host is down.";
	case WSAEHOSTUNREACH: return "No route to host.";
	case WSAENOTEMPTY: return "Directory not empty.";
	case WSAEPROCLIM: return "Too many processes.";
	case WSAEUSERS: return "User quota exceeded.";
	case WSAEDQUOT: return "Disk quota exceeded.";
	case WSAESTALE: return "Stale file handle reference.";
	case WSAEREMOTE: return "Item is remote.";
	case WSASYSNOTREADY: return "Network subsystem is unavailable.";
	case WSAVERNOTSUPPORTED: return "Winsock.dll version out of range.";
	case WSANOTINITIALISED: return "Successful WSAStartup not yet performed.";
	case WSAEDISCON: return "Graceful shutdown in progress.";
	case WSAENOMORE: return "No more results.";
	case WSAECANCELLED: return "Call has been canceled.";
	case WSAEINVALIDPROCTABLE: return "Procedure call table is invalid.";
	case WSAEINVALIDPROVIDER: return "Service provider is invalid.";
	case WSAEPROVIDERFAILEDINIT: return "Service provider failed to initialize.";
	case WSASYSCALLFAILURE: return "System call failure.";
	case WSASERVICE_NOT_FOUND: return "Service not found.";
	case WSATYPE_NOT_FOUND: return "Class type not found.";
	case WSA_E_NO_MORE: return "No more results.";
	case WSA_E_CANCELLED: return "Call was canceled.";
	case WSAEREFUSED: return "Database query was refused.";
	case WSAHOST_NOT_FOUND: return "Host not found.";
	case WSATRY_AGAIN: return "Nonauthoritative host not found.";
	case WSANO_RECOVERY: return "This is a nonrecoverable error.";
	case WSANO_DATA: return "Valid name, no data record of requested type.";
	case WSA_QOS_RECEIVERS: return "QOS receivers.";
	case WSA_QOS_SENDERS: return "QOS senders.";
	case WSA_QOS_NO_SENDERS: return "No QOS senders.";
	case WSA_QOS_NO_RECEIVERS: return "QOS no receivers.";
	case WSA_QOS_REQUEST_CONFIRMED: return "QOS request confirmed.";
	case WSA_QOS_ADMISSION_FAILURE: return "QOS admission error.";
	case WSA_QOS_POLICY_FAILURE: return "QOS policy failure.";
	case WSA_QOS_BAD_STYLE: return "QOS bad style.";
	case WSA_QOS_BAD_OBJECT: return "QOS bad object.";
	case WSA_QOS_TRAFFIC_CTRL_ERROR: return "QOS traffic control error.";
	case WSA_QOS_GENERIC_ERROR: return "QOS generic error.";
	case WSA_QOS_ESERVICETYPE: return "QOS service type error.";
	case WSA_QOS_EFLOWSPEC: return "QOS flowspec error.";
	case WSA_QOS_EPROVSPECBUF: return "Invalid QOS provider buffer.";
	case WSA_QOS_EFILTERSTYLE: return "Invalid QOS filter style.";
	case WSA_QOS_EFILTERTYPE: return "Invalid QOS filter type.";
	case WSA_QOS_EFILTERCOUNT: return "Incorrect QOS filter count.";
	case WSA_QOS_EOBJLENGTH: return "Invalid QOS object length.";
	case WSA_QOS_EFLOWCOUNT: return "Incorrect QOS flow count.";
	/*case WSA_QOS_EUNKOWNPSOBJ: return "Unrecognized QOS object.";*/
	case WSA_QOS_EPOLICYOBJ: return "Invalid QOS policy object.";
	case WSA_QOS_EFLOWDESC: return "Invalid QOS flow descriptor.";
	case WSA_QOS_EPSFLOWSPEC: return "Invalid QOS provider-specific flowspec.";
	case WSA_QOS_EPSFILTERSPEC: return "Invalid QOS provider-specific filterspec.";
	case WSA_QOS_ESDMODEOBJ: return "Invalid QOS shape discard mode object.";
	case WSA_QOS_ESHAPERATEOBJ: return "Invalid QOS shaping rate object.";
	case WSA_QOS_RESERVED_PETYPE: return "Reserved policy QOS element type.";
	default:
		snprintf(unknown, sizeof(unknown),
			"unknown WSA error code %d", (int)err);
		return unknown;
	}
}
#endif /* USE_WINSOCK */

198
external/unbound/util/log.h vendored Normal file
View File

@@ -0,0 +1,198 @@
/*
* util/log.h - logging service
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains logging functions.
*/
#ifndef UTIL_LOG_H
#define UTIL_LOG_H
struct sldns_buffer;
/**
* verbosity value:
*/
enum verbosity_value {
/** 0 - no verbose messages */
NO_VERBOSE = 0,
/** 1 - operational information */
VERB_OPS,
/** 2 - detailed information */
VERB_DETAIL,
/** 3 - query level information */
VERB_QUERY,
/** 4 - algorithm level information */
VERB_ALGO,
/** 5 - querier client information */
VERB_CLIENT
};
/** The global verbosity setting */
extern enum verbosity_value verbosity;
/**
* log a verbose message, pass the level for this message.
* It has printf formatted arguments. No trailing newline is needed.
* @param level: verbosity level for this message, compared to global
* verbosity setting.
* @param format: printf-style format string. Arguments follow.
*/
void verbose(enum verbosity_value level,
const char* format, ...) ATTR_FORMAT(printf, 2, 3);
/**
* call this to initialize logging services.
* @param filename: if NULL stderr is used.
* @param use_syslog: set to true to ignore filename and use syslog(3).
* @param chrootdir: to which directory we have been chrooted, if any.
*/
void log_init(const char* filename, int use_syslog, const char* chrootdir);
/**
* Set logging to go to the specified file *.
* This setting does not affect the use_syslog setting.
* @param f: to that file, or pass NULL to disable logging.
*/
void log_file(FILE *f);
/**
* Init a thread (will print this number for the thread log entries).
* Must be called from the thread itself. If not called 0 is printed.
* @param num: number to print for this thread. Owned by caller, must
* continue to exist.
*/
void log_thread_set(int* num);
/**
* Set identity to print, default is 'unbound'.
* @param id: string to print. Name of executable.
*/
void log_ident_set(const char* id);
/**
* Set the time value to print in log entries.
* @param t: the point is copied and used to find the time.
* if NULL, time(2) is used.
*/
void log_set_time(time_t* t);
/**
* Set if the time value is printed ascii or decimal in log entries.
* @param use_asc: if true, ascii is printed, otherwise decimal.
* If the conversion fails or you have no time functions,
* decimal is printed.
*/
void log_set_time_asc(int use_asc);
/**
* Log informational message.
* Pass printf formatted arguments. No trailing newline is needed.
* @param format: printf-style format string. Arguments follow.
*/
void log_info(const char* format, ...) ATTR_FORMAT(printf, 1, 2);
/**
* Log error message.
* Pass printf formatted arguments. No trailing newline is needed.
* @param format: printf-style format string. Arguments follow.
*/
void log_err(const char* format, ...) ATTR_FORMAT(printf, 1, 2);
/**
* Log warning message.
* Pass printf formatted arguments. No trailing newline is needed.
* @param format: printf-style format string. Arguments follow.
*/
void log_warn(const char* format, ...) ATTR_FORMAT(printf, 1, 2);
/**
* Log a hex-string to the log. Can be any length.
* performs mallocs to do so, slow. But debug useful.
* @param msg: string desc to accompany the hexdump.
* @param data: data to dump in hex format.
* @param length: length of data.
*/
void log_hex(const char* msg, void* data, size_t length);
/**
* Easy alternative for log_hex, takes a sldns_buffer.
* @param level: verbosity level for this message, compared to global
* verbosity setting.
* @param msg: string desc to print
* @param buf: the buffer.
*/
void log_buf(enum verbosity_value level, const char* msg, struct sldns_buffer* buf);
/**
* Log fatal error message, and exit the current process.
* Pass printf formatted arguments. No trailing newline is needed.
* @param format: printf-style format string. Arguments follow.
*/
void fatal_exit(const char* format, ...) ATTR_FORMAT(printf, 1, 2);
/**
* va_list argument version of log_info.
* @param pri: priority type, for example 5 (INFO).
* @param type: string to designate type of message (info, error).
* @param format: the printf style format to print. no newline.
* @param args: arguments for format string.
*/
void log_vmsg(int pri, const char* type, const char* format, va_list args);
/**
* an assertion that is thrown to the logfile.
*/
#ifdef UNBOUND_DEBUG
/* no trailing ';' after while(0): the caller supplies the semicolon, and
 * a stray empty statement here would break `if(c) log_assert(x); else ...`
 * (the very pattern do{...}while(0) exists to support) */
# define log_assert(x) \
	do { if(!(x)) \
		fatal_exit("%s:%d: %s: assertion %s failed", \
			__FILE__, __LINE__, __func__, #x); \
	} while(0)
#else
# define log_assert(x) /*nothing*/
#endif
#ifdef USE_WINSOCK
/**
* Convert WSA error into string.
* @param err: from WSAGetLastError()
* @return: string.
*/
char* wsa_strerror(DWORD err);
#endif /* USE_WINSOCK */
#endif /* UTIL_LOG_H */

394
external/unbound/util/mini_event.c vendored Normal file
View File

@@ -0,0 +1,394 @@
/*
* mini_event.c - implementation of part of libevent api, portably.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**
* \file
* fake libevent implementation. Less broad in functionality, and only
* supports select(2).
*/
#include "config.h"
#ifdef HAVE_TIME_H
#include <time.h>
#endif
#include <sys/time.h>
#if defined(USE_MINI_EVENT) && !defined(USE_WINSOCK)
#include <signal.h>
#include "util/mini_event.h"
#include "util/fptr_wlist.h"
/** compare two events for the timeout tree: ordered by absolute timeout
 * value, with the pointer value as a tiebreaker for uniqueness */
int mini_ev_cmp(const void* a, const void* b)
{
	const struct event* x = (const struct event*)a;
	const struct event* y = (const struct event*)b;
	if(x->ev_timeout.tv_sec != y->ev_timeout.tv_sec)
		return (x->ev_timeout.tv_sec < y->ev_timeout.tv_sec)? -1: 1;
	if(x->ev_timeout.tv_usec != y->ev_timeout.tv_usec)
		return (x->ev_timeout.tv_usec < y->ev_timeout.tv_usec)? -1: 1;
	/* same timeout: distinguish distinct events by address */
	if(x != y)
		return (x < y)? -1: 1;
	return 0;
}
/** refresh the cached current time in the base's caller-owned variables.
 * @param base: event base whose time_tv and time_secs are updated.
 * @return 0 on success, -1 if gettimeofday fails. */
static int
settime(struct event_base* base)
{
	if(gettimeofday(base->time_tv, NULL) < 0) {
		return -1;
	}
#ifndef S_SPLINT_S
	/* keep the whole-seconds copy in sync with the timeval */
	*base->time_secs = (time_t)base->time_tv->tv_sec;
#endif
	return 0;
}
/** create event base.
 * @param time_secs: caller variable that will track the time (seconds);
 *	updated by settime() on every dispatch iteration.
 * @param time_tv: caller variable that will track the time (timeval).
 * @return the new base, or NULL on failure (partially built state is
 *	released with event_base_free). */
void *event_init(time_t* time_secs, struct timeval* time_tv)
{
	struct event_base* base = (struct event_base*)malloc(
		sizeof(struct event_base));
	if(!base)
		return NULL;
	memset(base, 0, sizeof(*base));
	base->time_secs = time_secs;
	base->time_tv = time_tv;
	if(settime(base) < 0) {
		event_base_free(base);
		return NULL;
	}
	/* timeouts sorted by absolute time (ptr as tiebreaker) */
	base->times = rbtree_create(mini_ev_cmp);
	if(!base->times) {
		event_base_free(base);
		return NULL;
	}
	/* cap the fd table at what select()'s fd_set can hold */
	base->capfd = MAX_FDS;
#ifdef FD_SETSIZE
	if((int)FD_SETSIZE < base->capfd)
		base->capfd = (int)FD_SETSIZE;
#endif
	base->fds = (struct event**)calloc((size_t)base->capfd,
		sizeof(struct event*));
	if(!base->fds) {
		event_base_free(base);
		return NULL;
	}
	base->signals = (struct event**)calloc(MAX_SIG, sizeof(struct event*));
	if(!base->signals) {
		event_base_free(base);
		return NULL;
	}
#ifndef S_SPLINT_S
	FD_ZERO(&base->reads);
	FD_ZERO(&base->writes);
#endif
	return base;
}
/** get version
 * @return static version string of this mini implementation. */
const char *event_get_version(void)
{
	return "mini-event-"PACKAGE_VERSION;
}
/** get polling method
 * @return "select", the only backend this implementation supports. */
const char *event_get_method(void)
{
	return "select";
}
/** call the handlers of expired timeouts, and compute how long to wait
 * for the next one.
 * @param base: event base with the timeout tree.
 * @param now: current time.
 * @param wait: output; time until the next pending timeout, or
 *	tv_sec == -1 when no timeout is pending (handle_select then
 *	blocks without a timeout). */
static void handle_timeouts(struct event_base* base, struct timeval* now,
	struct timeval* wait)
{
	struct event* p;
#ifndef S_SPLINT_S
	wait->tv_sec = (time_t)-1;
#endif
	/* tree is sorted by absolute time; first entry is the earliest */
	while((rbnode_t*)(p = (struct event*)rbtree_first(base->times))
		!=RBTREE_NULL) {
#ifndef S_SPLINT_S
		if(p->ev_timeout.tv_sec > now->tv_sec ||
			(p->ev_timeout.tv_sec==now->tv_sec &&
			p->ev_timeout.tv_usec > now->tv_usec)) {
			/* there is a next larger timeout. wait for it */
			wait->tv_sec = p->ev_timeout.tv_sec - now->tv_sec;
			if(now->tv_usec > p->ev_timeout.tv_usec) {
				/* borrow one second for the usec part */
				wait->tv_sec--;
				wait->tv_usec = 1000000 - (now->tv_usec -
					p->ev_timeout.tv_usec);
			} else {
				wait->tv_usec = p->ev_timeout.tv_usec
					- now->tv_usec;
			}
			return;
		}
#endif
		/* event times out, remove it before calling the handler */
		(void)rbtree_delete(base->times, p);
		p->ev_events &= ~EV_TIMEOUT;
		fptr_ok(fptr_whitelist_event(p->ev_callback));
		(*p->ev_callback)(p->ev_fd, EV_TIMEOUT, p->ev_arg);
	}
}
/** call select and run the callbacks of ready fds.
 * @param base: event base.
 * @param wait: how long to block, or tv_sec == -1 to block indefinitely.
 * @return 0 on success (EINTR/EAGAIN count as success), -1 on error. */
static int handle_select(struct event_base* base, struct timeval* wait)
{
	fd_set r, w;
	int ret, i;
#ifndef S_SPLINT_S
	/* tv_sec of -1 was set by handle_timeouts: no timeout pending */
	if(wait->tv_sec==(time_t)-1)
		wait = NULL;
#endif
	/* operate on copies because select() modifies the sets passed in */
	memmove(&r, &base->reads, sizeof(fd_set));
	memmove(&w, &base->writes, sizeof(fd_set));
	memmove(&base->ready, &base->content, sizeof(fd_set));
	if((ret = select(base->maxfd+1, &r, &w, NULL, wait)) == -1) {
		/* preserve select's errno across the settime() call */
		ret = errno;
		if(settime(base) < 0)
			return -1;
		errno = ret;
		if(ret == EAGAIN || ret == EINTR)
			return 0;
		return -1;
	}
	if(settime(base) < 0)
		return -1;
	/* ret counts ready fds; decrement it to stop scanning early */
	for(i=0; i<base->maxfd+1; i++) {
		short bits = 0;
		if(!base->fds[i] || !(FD_ISSET(i, &base->ready))) {
			continue;
		}
		if(FD_ISSET(i, &r)) {
			bits |= EV_READ;
			ret--;
		}
		if(FD_ISSET(i, &w)) {
			bits |= EV_WRITE;
			ret--;
		}
		/* only deliver events the callback registered interest in */
		bits &= base->fds[i]->ev_events;
		if(bits) {
			fptr_ok(fptr_whitelist_event(
				base->fds[i]->ev_callback));
			(*base->fds[i]->ev_callback)(base->fds[i]->ev_fd,
				bits, base->fds[i]->ev_arg);
			if(ret==0)
				break;
		}
	}
	return 0;
}
/** run select in a loop, handling timeouts and fd events, until
 * event_base_loopexit sets the exit flag.
 * @param base: event base.
 * @return 0 on normal exit, -1 on error. */
int event_base_dispatch(struct event_base* base)
{
	struct timeval wait;
	if(settime(base) < 0)
		return -1;
	while(!base->need_to_exit)
	{
		/* see if timeouts need handling */
		handle_timeouts(base, base->time_tv, &wait);
		if(base->need_to_exit)
			return 0;
		/* do select */
		if(handle_select(base, &wait) < 0) {
			/* an exit requested from a callback is not an error */
			if(base->need_to_exit)
				return 0;
			return -1;
		}
	}
	return 0;
}
/** exit the dispatch loop; takes effect the next time the loop checks
 * the flag.
 * @param base: event base.
 * @param tv: unused; exit is not delayed.
 * @return 0. */
int event_base_loopexit(struct event_base* base,
	struct timeval* ATTR_UNUSED(tv))
{
	base->need_to_exit = 1;
	return 0;
}
/** free the event base; the events themselves are owned and freed by
 * the caller.
 * @param base: base to free; NULL is allowed and is a no-op.
 */
void event_base_free(struct event_base* base)
{
	if(!base)
		return;
	/* free(NULL) is a no-op, so the members need no null checks;
	 * this also handles a partially constructed base from event_init */
	free(base->times);
	free(base->fds);
	free(base->signals);
	free(base);
}
/** set content of event; use before event_base_set/event_add.
 * @param ev: the event to fill in.
 * @param fd: file descriptor to poll, -1 for pure timeouts; for signal
 *	events this holds the signal number.
 * @param bits: interest bits, EV_READ|EV_WRITE|EV_TIMEOUT|EV_SIGNAL...
 * @param cb: callback, called as cb(fd, eventbits, arg).
 * @param arg: user argument passed to the callback. */
void event_set(struct event* ev, int fd, short bits,
	void (*cb)(int, short, void *), void* arg)
{
	/* the event itself is the key in the timeout rbtree */
	ev->node.key = ev;
	ev->ev_fd = fd;
	ev->ev_events = bits;
	ev->ev_callback = cb;
	/* callbacks must be on the function pointer whitelist */
	fptr_ok(fptr_whitelist_event(ev->ev_callback));
	ev->ev_arg = arg;
	ev->added = 0;
}
/** attach event to a base; the event becomes active only after
 * event_add().
 * @param base: base to attach to.
 * @param ev: the event.
 * @return 0 (always succeeds). */
int event_base_set(struct event_base* base, struct event* ev)
{
	ev->ev_base = base;
	ev->added = 0;
	return 0;
}
/** add event to make it active; you may not change it with event_set
 * anymore until it is removed with event_del.
 * @param ev: the event (event_set and event_base_set done).
 * @param tv: timeout relative to now, used when EV_TIMEOUT is set.
 * @return 0 on success, -1 if the fd exceeds the base's select capacity.
 */
int event_add(struct event* ev, struct timeval* tv)
{
	if(ev->added)
		event_del(ev);
	/* refuse fds beyond the select() capacity of this base */
	if(ev->ev_fd != -1 && ev->ev_fd >= ev->ev_base->capfd)
		return -1;
	if( (ev->ev_events&(EV_READ|EV_WRITE)) && ev->ev_fd != -1) {
		ev->ev_base->fds[ev->ev_fd] = ev;
		if(ev->ev_events&EV_READ) {
			FD_SET(FD_SET_T ev->ev_fd, &ev->ev_base->reads);
		}
		if(ev->ev_events&EV_WRITE) {
			FD_SET(FD_SET_T ev->ev_fd, &ev->ev_base->writes);
		}
		FD_SET(FD_SET_T ev->ev_fd, &ev->ev_base->content);
		FD_CLR(FD_SET_T ev->ev_fd, &ev->ev_base->ready);
		if(ev->ev_fd > ev->ev_base->maxfd)
			ev->ev_base->maxfd = ev->ev_fd;
	}
	if(tv && (ev->ev_events&EV_TIMEOUT)) {
#ifndef S_SPLINT_S
		/* store the absolute expiry time in the event */
		struct timeval *now = ev->ev_base->time_tv;
		ev->ev_timeout.tv_sec = tv->tv_sec + now->tv_sec;
		ev->ev_timeout.tv_usec = tv->tv_usec + now->tv_usec;
		/* normalize so tv_usec ends up in [0, 999999]; use >= so an
		 * exact carry of 1000000 usec is folded into tv_sec too
		 * (with '>' a tv_usec of exactly 1000000 would remain, and
		 * equal deadlines would compare unequal in mini_ev_cmp) */
		while(ev->ev_timeout.tv_usec >= 1000000) {
			ev->ev_timeout.tv_usec -= 1000000;
			ev->ev_timeout.tv_sec++;
		}
#endif
		(void)rbtree_insert(ev->ev_base->times, &ev->node);
	}
	ev->added = 1;
	return 0;
}
/** remove event from its base; afterwards you may change it again.
 * @param ev: the event to deactivate.
 * @return 0 on success, -1 if the fd exceeds the base's capacity. */
int event_del(struct event* ev)
{
	if(ev->ev_fd != -1 && ev->ev_fd >= ev->ev_base->capfd)
		return -1;
	if((ev->ev_events&EV_TIMEOUT))
		(void)rbtree_delete(ev->ev_base->times, &ev->node);
	if((ev->ev_events&(EV_READ|EV_WRITE)) && ev->ev_fd != -1) {
		ev->ev_base->fds[ev->ev_fd] = NULL;
		FD_CLR(FD_SET_T ev->ev_fd, &ev->ev_base->reads);
		FD_CLR(FD_SET_T ev->ev_fd, &ev->ev_base->writes);
		FD_CLR(FD_SET_T ev->ev_fd, &ev->ev_base->ready);
		FD_CLR(FD_SET_T ev->ev_fd, &ev->ev_base->content);
	}
	/* note: base->maxfd is not lowered here; the select scan simply
	 * skips the now-empty fds[] slots */
	ev->added = 0;
	return 0;
}
/** which base gets to handle signals; a single global, since OS signal
 * handlers carry no user argument to find the base with */
static struct event_base* signal_base = NULL;
/** signal handler: look up the event registered for this signal in the
 * signal base and dispatch its callback; ignore unregistered signals */
static RETSIGTYPE sigh(int sig)
{
	struct event* ev;
	if(!signal_base || sig < 0 || sig >= MAX_SIG)
		return;
	ev = signal_base->signals[sig];
	if(!ev)
		return;
	fptr_ok(fptr_whitelist_event(ev->ev_callback));
	(*ev->ev_callback)(sig, EV_SIGNAL, ev->ev_arg);
}
/** install signal handler */
int signal_add(struct event* ev, struct timeval* ATTR_UNUSED(tv))
{
if(ev->ev_fd == -1 || ev->ev_fd >= MAX_SIG)
return -1;
signal_base = ev->ev_base;
ev->ev_base->signals[ev->ev_fd] = ev;
ev->added = 1;
if(signal(ev->ev_fd, sigh) == SIG_ERR) {
return -1;
}
return 0;
}
/** remove signal handler registration. The OS signal disposition is not
 * restored, but sigh() finds no event for the signal and ignores it.
 * @param ev: signal event to deregister.
 * @return 0 on success, -1 on bad signal number. */
int signal_del(struct event* ev)
{
	if(ev->ev_fd == -1 || ev->ev_fd >= MAX_SIG)
		return -1;
	ev->ev_base->signals[ev->ev_fd] = NULL;
	ev->added = 0;
	return 0;
}
#else /* USE_MINI_EVENT */
#ifndef USE_WINSOCK
/** stub so the symbol exists when a real event library provides the
 * event api and this mini implementation is compiled out */
int mini_ev_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	return 0;
}
#endif /* not USE_WINSOCK */
#endif /* USE_MINI_EVENT */

177
external/unbound/util/mini_event.h vendored Normal file
View File

@@ -0,0 +1,177 @@
/*
* mini-event.h - micro implementation of libevent api, using select() only.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
* This file implements part of the event(3) libevent api.
* The back end is only select. Max number of fds is limited.
* Max number of signals is limited, one handler per signal only.
* And one handler per fd.
*
* Although limited to select() and a max (1024) open fds, it
* is efficient:
* o dispatch call caches fd_sets to use.
* o handler calling takes time ~ to the number of fds.
* o timeouts are stored in a redblack tree, sorted, so take log(n).
* Timeouts are only accurate to the second (no subsecond accuracy).
* To avoid cpu hogging, fractional timeouts are rounded up to a whole second.
*/
#ifndef MINI_EVENT_H
#define MINI_EVENT_H
#if defined(USE_MINI_EVENT) && !defined(USE_WINSOCK)
#ifndef HAVE_EVENT_BASE_FREE
#define HAVE_EVENT_BASE_FREE
#endif
/** event timeout */
#define EV_TIMEOUT 0x01
/** event fd readable */
#define EV_READ 0x02
/** event fd writable */
#define EV_WRITE 0x04
/** event signal */
#define EV_SIGNAL 0x08
/** event must persist */
#define EV_PERSIST 0x10
/* needs our redblack tree */
#include "rbtree.h"
/** max number of file descriptors to support */
#define MAX_FDS 1024
/** max number of signals to support */
#define MAX_SIG 32
/** event base */
struct event_base
{
/** sorted by timeout (absolute), ptr */
rbtree_t* times;
/** array of 0 - maxfd of ptr to event for it */
struct event** fds;
/** max fd in use */
int maxfd;
/** capacity - size of the fds array */
int capfd;
/* fdset for read write, for fds ready, and added */
fd_set
/** fds for reading */
reads,
/** fds for writing */
writes,
/** fds determined ready for use */
ready,
/** ready plus newly added events. */
content;
/** array of 0 - maxsig of ptr to event for it */
struct event** signals;
/** if we need to exit */
int need_to_exit;
/** where to store time in seconds */
time_t* time_secs;
/** where to store time in microseconds */
struct timeval* time_tv;
};
/**
* Event structure. Has some of the event elements.
*/
struct event {
/** node in timeout rbtree */
rbnode_t node;
/** is event already added */
int added;
/** event base it belongs to */
struct event_base *ev_base;
/** fd to poll or -1 for timeouts. signal number for sigs. */
int ev_fd;
/** what events this event is interested in, see EV_.. above. */
short ev_events;
/** timeout value */
struct timeval ev_timeout;
/** callback to call: fd, eventbits, userarg */
void (*ev_callback)(int, short, void *arg);
/** callback user arg */
void *ev_arg;
};
/* function prototypes (some are as they appear in event.h) */
/** create event base */
void *event_init(time_t* time_secs, struct timeval* time_tv);
/** get version */
const char *event_get_version(void);
/** get polling method, select */
const char *event_get_method(void);
/** run select in a loop */
int event_base_dispatch(struct event_base *);
/** exit that loop */
int event_base_loopexit(struct event_base *, struct timeval *);
/** free event base. Free events yourself */
void event_base_free(struct event_base *);
/** set content of event */
void event_set(struct event *, int, short, void (*)(int, short, void *), void *);
/** add event to a base. You *must* call this for every event. */
int event_base_set(struct event_base *, struct event *);
/** add event to make it active. You may not change it with event_set anymore */
int event_add(struct event *, struct timeval *);
/** remove event. You may change it again */
int event_del(struct event *);
/** add a timer */
#define evtimer_add(ev, tv) event_add(ev, tv)
/** remove a timer */
#define evtimer_del(ev) event_del(ev)
/* uses different implementation. Cannot mix fd/timeouts and signals inside
* the same struct event. create several event structs for that. */
/** install signal handler */
int signal_add(struct event *, struct timeval *);
/** set signal event contents */
#define signal_set(ev, x, cb, arg) \
event_set(ev, x, EV_SIGNAL|EV_PERSIST, cb, arg)
/** remove signal handler */
int signal_del(struct event *);
#endif /* USE_MINI_EVENT and not USE_WINSOCK */
/** compare events in tree, based on timevalue, ptr for uniqueness */
int mini_ev_cmp(const void* a, const void* b);
#endif /* MINI_EVENT_H */

71
external/unbound/util/module.c vendored Normal file
View File

@@ -0,0 +1,71 @@
/*
* util/module.c - module interface
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
* Implementation of module.h.
*/
#include "config.h"
#include "util/module.h"
/** Map a module external state value to its symbolic name, for logging.
 * Returns a fixed placeholder string for out-of-range values. */
const char*
strextstate(enum module_ext_state s)
{
	/* one guard per known state; unknown values fall through */
	if(s == module_state_initial) return "module_state_initial";
	if(s == module_wait_reply) return "module_wait_reply";
	if(s == module_wait_module) return "module_wait_module";
	if(s == module_restart_next) return "module_restart_next";
	if(s == module_wait_subquery) return "module_wait_subquery";
	if(s == module_error) return "module_error";
	if(s == module_finished) return "module_finished";
	return "bad_extstate_value";
}
/** Map a module event value to its symbolic name, for logging.
 * Returns a fixed placeholder string for out-of-range values. */
const char*
strmodulevent(enum module_ev e)
{
	/* one guard per known event; unknown values fall through */
	if(e == module_event_new) return "module_event_new";
	if(e == module_event_pass) return "module_event_pass";
	if(e == module_event_reply) return "module_event_reply";
	if(e == module_event_noreply) return "module_event_noreply";
	if(e == module_event_capsfail) return "module_event_capsfail";
	if(e == module_event_moddone) return "module_event_moddone";
	if(e == module_event_error) return "module_event_error";
	return "bad_event_value";
}

517
external/unbound/util/module.h vendored Normal file
View File

@@ -0,0 +1,517 @@
/*
* util/module.h - DNS handling module interface
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains the interface for DNS handling modules.
*
* The module interface uses the DNS modules as state machines. The
* state machines are activated in sequence to operate on queries. Once
* they are done, the reply is passed back. In the usual setup the mesh
* is the caller of the state machines and once things are done sends replies
* and invokes result callbacks.
*
* The module provides a number of functions, listed in the module_func_block.
* The module is inited and destroyed and memory usage queries, for the
* module as a whole, for entire-module state (such as a cache). And per-query
* functions are called, operate to move the state machine and cleanup of
* the per-query state.
*
* Most per-query state should simply be allocated in the query region.
* This is destroyed at the end of the query.
*
* The module environment contains services and information and caches
* shared by the modules and the rest of the system. It also contains
* function pointers for module-specific tasks (like sending queries).
*
* *** Example module calls for a normal query
*
* In this example, the query does not need recursion, all the other data
* can be found in the cache. This makes the example shorter.
*
* At the start of the program the iterator module is initialised.
* The iterator module sets up its global state, such as donotquery lists
* and private address trees.
*
* A query comes in, and a mesh entry is created for it. The mesh
* starts the resolution process. The validator module is the first
* in the list of modules, and it is started on this new query. The
* operate() function is called. The validator decides it needs not do
* anything yet until there is a result and returns wait_module, that
* causes the next module in the list to be started.
*
* The next module is the iterator. It is started on the passed query and
* decides to perform a lookup. For this simple example, the delegation
* point information is available, and all the iterator wants to do is
* send a UDP query. The iterator uses env.send_query() to send the
* query. Then the iterator suspends (returns from the operate call).
*
* When the UDP reply comes back (and on errors and timeouts), the
* operate function is called for the query, on the iterator module,
* with the event that there is a reply. The iterator decides that this
* is enough, the work is done. It returns the value finished from the
* operate call, which causes the previous module to be started.
*
* The previous module, the validator module, is started with the event
* that the iterator module is done. The validator decides to validate
* the query. Once it is done (which could take recursive lookups, but
* in this example no recursive lookups are needed), it returns from the
* operate function with finished.
*
* There is no previous module from the validator module, and the mesh
* takes this to mean that the query is finally done. The mesh invokes
* callbacks and sends packets to queriers.
*
* If other modules had been waiting (recursively) on the answer to this
* query, then the mesh will tell them about it. It calls the inform_super
* routine on all the waiting modules, and once that is done it calls all of
* them with the operate() call. During inform_super the query that is done
* still exists and information can be copied from it (but the module should
* not really re-entry codepoints and services). During the operate call
* the modules can use stored state to continue operation with the results.
* (network buffers are used to contain the answer packet during the
* inform_super phase, but after that the network buffers will be cleared
* of their contents so that other tasks can be performed).
*
* *** Example module calls for recursion
*
* A module is called in operate, and it decides that it wants to perform
* recursion. That is, it wants the full state-machine-list to operate on
* a different query. It calls env.attach_sub() to create a new query state.
* The routine returns the newly created state, and potentially the module
* can edit the module-states for the newly created query (i.e. pass along
* some information, like delegation points). The module then suspends,
* returns from the operate routine.
*
* The mesh meanwhile will have the newly created query (or queries) on
* a waiting list, and will call operate() on this query (or queries).
* It starts again at the start of the module list for them. The query
* (or queries) continue to operate their state machines, until they are
* done. When they are done the mesh calls inform_super on the module that
* wanted the recursion. After that the mesh calls operate() on the module
* that wanted to do the recursion, and during this phase the module could,
* for example, decide to create more recursions.
*
* If the module decides it no longer wants the recursive information
* it can call detach_subs. Those queries will still run to completion,
* potentially filling the cache with information. Inform_super is not
* called any more.
*
* The iterator module will fetch items from the cache, so a recursion
* attempt may complete very quickly if the item is in cache. The calling
* module has to wait for completion or eventual timeout. A recursive query
* that times out returns a servfail rcode (servfail is also returned for
* other errors during the lookup).
*
* Results are passed in the qstate, the rcode member is used to pass
* errors without requiring memory allocation, so that the code can continue
* in out-of-memory conditions. If the rcode member is 0 (NOERROR) then
* the dns_msg entry contains a filled out message. This message may
* also contain an rcode that is nonzero, but in this case additional
* information (query, additional) can be passed along.
*
* The rcode and dns_msg are used to pass the result from the the rightmost
* module towards the leftmost modules and then towards the user.
*
* If you want to avoid recursion-cycles where queries need other queries
* that need the first one, use detect_cycle() to see if that will happen.
*
*/
#ifndef UTIL_MODULE_H
#define UTIL_MODULE_H
#include "util/storage/lruhash.h"
#include "util/data/msgreply.h"
#include "util/data/msgparse.h"
struct sldns_buffer;
struct alloc_cache;
struct rrset_cache;
struct key_cache;
struct config_file;
struct slabhash;
struct query_info;
struct edns_data;
struct regional;
struct worker;
struct module_qstate;
struct ub_randstate;
struct mesh_area;
struct mesh_state;
struct val_anchors;
struct val_neg_cache;
struct iter_forwards;
struct iter_hints;
/** Maximum number of modules in operation */
#define MAX_MODULE 5
/**
* Module environment.
* Services and data provided to the module.
*/
struct module_env {
/* --- data --- */
/** config file with config options */
struct config_file* cfg;
/** shared message cache */
struct slabhash* msg_cache;
/** shared rrset cache */
struct rrset_cache* rrset_cache;
/** shared infrastructure cache (edns, lameness) */
struct infra_cache* infra_cache;
/** shared key cache */
struct key_cache* key_cache;
/* --- services --- */
/**
* Send serviced DNS query to server. UDP/TCP and EDNS is handled.
* operate() should return with wait_reply. Later on a callback
* will cause operate() to be called with event timeout or reply.
* The time until a timeout is calculated from roundtrip timing,
* several UDP retries are attempted.
* @param qname: query name. (host order)
* @param qnamelen: length in bytes of qname, including trailing 0.
* @param qtype: query type. (host order)
* @param qclass: query class. (host order)
* @param flags: host order flags word, with opcode and CD bit.
* @param dnssec: if set, EDNS record will have bits set.
* If EDNS_DO bit is set, DO bit is set in EDNS records.
* If BIT_CD is set, CD bit is set in queries with EDNS records.
* @param want_dnssec: if set, the validator wants DNSSEC. Without
* EDNS, the answer is likely to be useless for this domain.
* @param nocaps: do not use caps_for_id, use the qname as given.
* (ignored if caps_for_id is disabled).
* @param addr: where to.
* @param addrlen: length of addr.
* @param zone: delegation point name.
* @param zonelen: length of zone name.
* @param q: which query state to reactivate upon return.
* @return: false on failure (memory or socket related). no query was
* sent. Or returns an outbound entry with qsent and qstate set.
* This outbound_entry will be used on later module invocations
* that involve this query (timeout, error or reply).
*/
struct outbound_entry* (*send_query)(uint8_t* qname, size_t qnamelen,
uint16_t qtype, uint16_t qclass, uint16_t flags, int dnssec,
int want_dnssec, int nocaps, struct sockaddr_storage* addr,
socklen_t addrlen, uint8_t* zone, size_t zonelen,
struct module_qstate* q);
/**
* Detach-subqueries.
* Remove all sub-query references from this query state.
* Keeps super-references of those sub-queries correct.
* Updates stat items in mesh_area structure.
* @param qstate: used to find mesh state.
*/
void (*detach_subs)(struct module_qstate* qstate);
/**
* Attach subquery.
* Creates it if it does not exist already.
* Keeps sub and super references correct.
* Updates stat items in mesh_area structure.
* Pass if it is priming query or not.
* return:
* o if error (malloc) happened.
* o need to initialise the new state (module init; it is a new state).
* so that the next run of the query with this module is successful.
* o no init needed, attachment successful.
*
* @param qstate: the state to find mesh state, and that wants to
* receive the results from the new subquery.
* @param qinfo: what to query for (copied).
* @param qflags: what flags to use (RD, CD flag or not).
* @param prime: if it is a (stub) priming query.
* @param newq: If the new subquery needs initialisation, it is
* returned, otherwise NULL is returned.
* @return: false on error, true if success (and init may be needed).
*/
int (*attach_sub)(struct module_qstate* qstate,
struct query_info* qinfo, uint16_t qflags, int prime,
struct module_qstate** newq);
/**
* Kill newly attached sub. If attach_sub returns newq for
* initialisation, but that fails, then this routine will cleanup and
* delete the freshly created sub.
* @param newq: the new subquery that is no longer needed.
* It is removed.
*/
void (*kill_sub)(struct module_qstate* newq);
/**
* Detect if adding a dependency for qstate on name,type,class will
* create a dependency cycle.
* @param qstate: given mesh querystate.
* @param qinfo: query info for dependency.
* @param flags: query flags of dependency, RD/CD flags.
* @param prime: if dependency is a priming query or not.
* @return true if the name,type,class exists and the given
* qstate mesh exists as a dependency of that name. Thus
* if qstate becomes dependent on name,type,class then a
* cycle is created.
*/
int (*detect_cycle)(struct module_qstate* qstate,
struct query_info* qinfo, uint16_t flags, int prime);
/** region for temporary usage. May be cleared after operate() call. */
struct regional* scratch;
/** buffer for temporary usage. May be cleared after operate() call. */
struct sldns_buffer* scratch_buffer;
/** internal data for daemon - worker thread. */
struct worker* worker;
/** mesh area with query state dependencies */
struct mesh_area* mesh;
/** allocation service */
struct alloc_cache* alloc;
/** random table to generate random numbers */
struct ub_randstate* rnd;
/** time in seconds, converted to integer */
time_t* now;
/** time in microseconds. Relatively recent. */
struct timeval* now_tv;
/** is validation required for messages, controls client-facing
* validation status (AD bits) and servfails */
int need_to_validate;
/** trusted key storage; these are the configured keys, if not NULL,
* otherwise configured by validator. These are the trust anchors,
* and are not primed and ready for validation, but on the bright
* side, they are read only memory, thus no locks and fast. */
struct val_anchors* anchors;
/** negative cache, configured by the validator. if not NULL,
* contains NSEC record lookup trees. */
struct val_neg_cache* neg_cache;
/** the 5011-probe timer (if any) */
struct comm_timer* probe_timer;
/** Mapping of forwarding zones to targets.
* iterator forwarder information. per-thread, created by worker */
struct iter_forwards* fwds;
/**
* iterator forwarder information. per-thread, created by worker.
* The hints -- these aren't stored in the cache because they don't
* expire. The hints are always used to "prime" the cache. Note
* that both root hints and stub zone "hints" are stored in this
* data structure.
*/
struct iter_hints* hints;
/** module specific data. indexed by module id. */
void* modinfo[MAX_MODULE];
};
/**
* External visible states of the module state machine
* Modules may also have an internal state.
* Modules are supposed to run to completion or until blocked.
*/
enum module_ext_state {
/* NOTE(review): one such state is kept per module in
* module_qstate.ext_state[] below */
/** initial state - new query */
module_state_initial = 0,
/** waiting for reply to outgoing network query */
module_wait_reply,
/** module is waiting for another module */
module_wait_module,
/** module is waiting for another module; that other is restarted */
module_restart_next,
/** module is waiting for sub-query */
module_wait_subquery,
/** module could not finish the query */
module_error,
/** module is finished with query */
module_finished
};
/**
* Events that happen to modules, that start or wakeup modules.
*/
enum module_ev {
/* NOTE(review): passed as the 'event' argument to a module's
* operate() callback (see module_func_block below) */
/** new query */
module_event_new = 0,
/** query passed by other module */
module_event_pass,
/** reply inbound from server */
module_event_reply,
/** no reply, timeout or other error */
module_event_noreply,
/** reply is there, but capitalisation check failed */
module_event_capsfail,
/** next module is done, and its reply is awaiting you */
module_event_moddone,
/** error */
module_event_error
};
/**
* Linked list of sockaddrs
* May be allocated such that only 'len' bytes of addr exist for the structure.
*/
struct sock_list {
/** next in list */
struct sock_list* next;
/** length of addr */
socklen_t len;
/** sockaddr; when the node is truncated-allocated, only the first
* 'len' bytes of this field exist and are valid */
struct sockaddr_storage addr;
};
/**
* Module state, per query.
*/
struct module_qstate {
/** which query is being answered: name, type, class */
struct query_info qinfo;
/** flags uint16 from query */
uint16_t query_flags;
/** if this is a (stub or root) priming query (with hints) */
int is_priming;
/** comm_reply contains server replies */
struct comm_reply* reply;
/** the reply message, with message for client and calling module */
struct dns_msg* return_msg;
/** the rcode, in case of error, instead of a reply message */
int return_rcode;
/** origin of the reply (can be NULL from cache, list for cnames) */
struct sock_list* reply_origin;
/** IP blacklist for queries */
struct sock_list* blacklist;
/** region for this query. Cleared when query process finishes. */
struct regional* region;
/** failure reason information if val-log-level is high */
struct config_strlist* errinf;
/** which module is executing
* (presumably an index into ext_state[]/minfo[] below -- confirm) */
int curmod;
/** module states */
enum module_ext_state ext_state[MAX_MODULE];
/** module specific data for query. indexed by module id. */
void* minfo[MAX_MODULE];
/** environment for this query */
struct module_env* env;
/** mesh related information for this query */
struct mesh_state* mesh_info;
/** how many seconds before expiry is this prefetched (0 if not) */
time_t prefetch_leeway;
};
/**
* Module functionality block
*/
struct module_func_block {
/** text string name of module */
const char* name;
/**
* init the module. Called once for the global state.
* This is the place to apply settings from the config file.
* @param env: module environment.
* @param id: module id number.
* @return: 0 on error
*/
int (*init)(struct module_env* env, int id);
/**
* de-init, delete, the module. Called once for the global state.
* @param env: module environment.
* @param id: module id number.
*/
void (*deinit)(struct module_env* env, int id);
/**
* accept a new query, or work further on existing query.
* Changes the qstate->ext_state to be correct on exit.
* @param ev: event that causes the module state machine to
* (re-)activate.
* @param qstate: the query state.
* Note that this method is not allowed to change the
* query state 'identity', that is query info, qflags,
* and priming status.
* Attach a subquery to get results to a different query.
* @param id: module id number that operate() is called on.
* @param outbound: if not NULL this event is due to the reply/timeout
* or error on this outbound query.
* @return: if at exit the ext_state is:
* o wait_module: next module is started. (with pass event).
* o error or finished: previous module is resumed.
* o otherwise it waits until that event happens (assumes
* the service routine to make subrequest or send message
* have been called.)
*/
void (*operate)(struct module_qstate* qstate, enum module_ev event,
int id, struct outbound_entry* outbound);
/**
* inform super querystate about the results from this subquerystate.
* Is called when the querystate is finished. The method invoked is
* the one from the current module active in the super querystate.
* @param qstate: the query state that is finished.
* Examine return_rcode and return_msg in the qstate.
* @param id: module id for this module.
* This coincides with the current module for the super qstate.
* @param super: the super querystate that needs to be informed.
*/
void (*inform_super)(struct module_qstate* qstate, int id,
struct module_qstate* super);
/**
* clear module specific data
*/
void (*clear)(struct module_qstate* qstate, int id);
/**
* How much memory is the module specific data using.
* @param env: module environment.
* @param id: the module id.
* @return the number of bytes that are alloced.
*/
size_t (*get_mem)(struct module_env* env, int id);
};
/**
* Debug utility: module external qstate to string
* @param s: the state value.
* @return descriptive string.
*/
const char* strextstate(enum module_ext_state s);
/**
* Debug utility: module event to string
* @param e: the module event value.
* @return descriptive string.
*/
const char* strmodulevent(enum module_ev e);
#endif /* UTIL_MODULE_H */

804
external/unbound/util/net_help.c vendored Normal file
View File

@@ -0,0 +1,804 @@
/*
* util/net_help.c - implementation of the network helper code
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
* Implementation of net_help.h.
*/
#include "config.h"
#include "util/net_help.h"
#include "util/log.h"
#include "util/data/dname.h"
#include "util/module.h"
#include "util/regional.h"
#include "ldns/parseutil.h"
#include "ldns/wire2str.h"
#include <fcntl.h>
#ifdef HAVE_OPENSSL_SSL_H
#include <openssl/ssl.h>
#endif
#ifdef HAVE_OPENSSL_ERR_H
#include <openssl/err.h>
#endif
/** max length of an IP address (the address portion) that we allow */
#define MAX_ADDR_STRLEN 128 /* characters */
/** default value for EDNS ADVERTISED size */
uint16_t EDNS_ADVERTISED_SIZE = 4096;
/** minimal responses when positive answer: default is no */
int MINIMAL_RESPONSES = 0;
/** rrset order roundrobin: default is no */
int RRSET_ROUNDROBIN = 0;
/** Returns true if the string is an ip6 specced address; a string
 * counts as ip6 when it contains a ':' character. */
int
str_is_ip6(const char* str)
{
	return strchr(str, ':') != NULL;
}
/** Put file descriptor s into non-blocking mode.
 * Uses fcntl() where available, ioctlsocket() on winsock builds;
 * a no-op on platforms with neither.
 * @return 0 when the mode could not be set, 1 otherwise. */
int
fd_set_nonblock(int s)
{
#ifdef HAVE_FCNTL
	int flags = fcntl(s, F_GETFL);
	if(flags == -1) {
		log_err("can't fcntl F_GETFL: %s", strerror(errno));
		flags = 0;
	}
	if(fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1) {
		log_err("can't fcntl F_SETFL: %s", strerror(errno));
		return 0;
	}
#elif defined(HAVE_IOCTLSOCKET)
	unsigned long on = 1;
	if(ioctlsocket(s, FIONBIO, &on) != 0) {
		log_err("can't ioctlsocket FIONBIO on: %s",
			wsa_strerror(WSAGetLastError()));
	}
#endif
	return 1;
}
/** Put file descriptor s back into blocking mode.
 * Uses fcntl() where available, ioctlsocket() on winsock builds;
 * a no-op on platforms with neither.
 * @return 0 when the mode could not be set, 1 otherwise. */
int
fd_set_block(int s)
{
#ifdef HAVE_FCNTL
	int flags = fcntl(s, F_GETFL);
	if(flags == -1) {
		log_err("cannot fcntl F_GETFL: %s", strerror(errno));
		flags = 0;
	}
	if(fcntl(s, F_SETFL, flags & ~O_NONBLOCK) == -1) {
		log_err("cannot fcntl F_SETFL: %s", strerror(errno));
		return 0;
	}
#elif defined(HAVE_IOCTLSOCKET)
	unsigned long off = 0;
	if(ioctlsocket(s, FIONBIO, &off) != 0) {
		log_err("can't ioctlsocket FIONBIO off: %s",
			wsa_strerror(WSAGetLastError()));
	}
#endif
	return 1;
}
/** Test whether num is zero or an exact power of two.
 * A power of two has exactly one bit set; zero counts as true here. */
int
is_pow2(size_t num)
{
	return num == 0 || (num & (num - 1)) == 0;
}
/** Allocate a fresh copy of the given memory area.
 * @param data: start of the area to copy (may be NULL).
 * @param len: number of bytes to copy.
 * @return newly malloced copy, or NULL when data is NULL, len is 0,
 *	or allocation fails. Caller frees. */
void*
memdup(void* data, size_t len)
{
	void* copy;
	if(!data || len == 0)
		return NULL;
	copy = malloc(len);
	if(copy)
		memcpy(copy, data, len);
	return copy;
}
/** Log an internet address at the given verbosity level.
 * Prints "str family dest port"; at verbosity >= 4 the address
 * structure length is printed as well. */
void
log_addr(enum verbosity_value v, const char* str,
	struct sockaddr_storage* addr, socklen_t addrlen)
{
	char dest[100];
	const char* family;
	void* sinaddr;
	uint16_t port;
	int af;
	if(verbosity < v)
		return;
	af = (int)((struct sockaddr_in*)addr)->sin_family;
	sinaddr = &((struct sockaddr_in*)addr)->sin_addr;
	switch(af) {
	case AF_INET:
		family = "ip4";
		break;
	case AF_INET6:
		family = "ip6";
		sinaddr = &((struct sockaddr_in6*)addr)->sin6_addr;
		break;
	case AF_UNIX:
		family = "unix";
		break;
	default:
		family = "unknown";
		break;
	}
	if(inet_ntop(af, sinaddr, dest, (socklen_t)sizeof(dest)) == 0) {
		(void)strlcpy(dest, "(inet_ntop error)", sizeof(dest));
	}
	dest[sizeof(dest)-1] = 0;
	port = ntohs(((struct sockaddr_in*)addr)->sin_port);
	if(verbosity >= 4)
		verbose(v, "%s %s %s port %d (len %d)", str, family, dest,
			(int)port, (int)addrlen);
	else verbose(v, "%s %s port %d", str, dest, (int)port);
}
/** Convert "ip[@port]" notation into a socket address.
 * Without an @port suffix the default DNS port is used.
 * @return 0 on parse failure, 1 on success. */
int
extstrtoaddr(const char* str, struct sockaddr_storage* addr,
	socklen_t* addrlen)
{
	char* at = strchr(str, '@');
	if(!at)
		return ipstrtoaddr(str, UNBOUND_DNS_PORT, addr, addrlen);
	else {
		char buf[MAX_ADDR_STRLEN];
		int port;
		if(at - str >= MAX_ADDR_STRLEN) {
			return 0; /* address part too long to copy */
		}
		(void)strlcpy(buf, str, sizeof(buf));
		buf[at - str] = 0;
		port = atoi(at + 1);
		/* atoi returns 0 on garbage; only accept 0 for literal "0" */
		if(port == 0 && strcmp(at + 1, "0") != 0) {
			return 0;
		}
		return ipstrtoaddr(buf, port, addr, addrlen);
	}
}
/** Convert a numeric IP string plus port number to a socket address.
 * Handles both IPv4 and IPv6, including the ip6%interface scope
 * notation of RFC 4007.
 * @return 0 on failure (NULL or unparseable ip), 1 on success. */
int
ipstrtoaddr(const char* ip, int port, struct sockaddr_storage* addr,
	socklen_t* addrlen)
{
	uint16_t p;
	if(!ip)
		return 0;
	p = (uint16_t)port;
	if(!str_is_ip6(ip)) {
		/* plain IPv4 address */
		struct sockaddr_in* sa4 = (struct sockaddr_in*)addr;
		*addrlen = (socklen_t)sizeof(struct sockaddr_in);
		memset(sa4, 0, *addrlen);
		sa4->sin_family = AF_INET;
		sa4->sin_port = (in_port_t)htons(p);
		if(inet_pton((int)sa4->sin_family, ip, &sa4->sin_addr) <= 0)
			return 0;
	} else {
		char buf[MAX_ADDR_STRLEN];
		char* pct;
		struct sockaddr_in6* sa6 = (struct sockaddr_in6*)addr;
		*addrlen = (socklen_t)sizeof(struct sockaddr_in6);
		memset(sa6, 0, *addrlen);
		sa6->sin6_family = AF_INET6;
		sa6->sin6_port = (in_port_t)htons(p);
		if((pct=strchr(ip, '%'))) { /* ip6%interface, rfc 4007 */
			if(pct - ip >= MAX_ADDR_STRLEN)
				return 0;
			(void)strlcpy(buf, ip, sizeof(buf));
			buf[pct - ip] = 0;
			sa6->sin6_scope_id = (uint32_t)atoi(pct + 1);
			ip = buf;
		}
		if(inet_pton((int)sa6->sin6_family, ip, &sa6->sin6_addr) <= 0)
			return 0;
	}
	return 1;
}
/** Parse "addr[/prefix]" into an address plus netblock length.
 * The prefix defaults to the full address length (32 for ip4,
 * 128 for ip6) and the address is masked down to the netblock.
 * Fix: a negative prefix (e.g. "1.2.3.4/-1") previously slipped past
 * both the too-large check and the zero check and was handed to
 * addr_mask; it is now rejected. atoi is also called only once.
 * @return 0 on parse failure, 1 on success.
 */
int netblockstrtoaddr(const char* str, int port, struct sockaddr_storage* addr,
	socklen_t* addrlen, int* net)
{
	char* s = NULL;
	*net = (str_is_ip6(str)?128:32);
	if((s=strchr(str, '/'))) {
		int plen = atoi(s+1);
		if(plen > *net) {
			log_err("netblock too large: %s", str);
			return 0;
		}
		/* reject negative prefixes and non-numeric suffixes;
		 * atoi returns 0 on garbage, only literal "0" is valid */
		if(plen < 0 || (plen == 0 && strcmp(s+1, "0") != 0)) {
			log_err("cannot parse netblock: '%s'", str);
			return 0;
		}
		*net = plen;
		if(!(s = strdup(str))) {
			log_err("out of memory");
			return 0;
		}
		*strchr(s, '/') = '\0';
	}
	if(!ipstrtoaddr(s?s:str, port, addr, addrlen)) {
		free(s);
		log_err("cannot parse ip address: '%s'", str);
		return 0;
	}
	if(s) {
		free(s);
		addr_mask(addr, *addrlen, *net);
	}
	return 1;
}
/** Log a domain name together with its query type and class.
 * Well-known meta types get fixed names; other types use the sldns
 * descriptor table; unknowns print as TYPEnnn / CLASSnnn. */
void
log_nametypeclass(enum verbosity_value v, const char* str, uint8_t* name,
	uint16_t type, uint16_t dclass)
{
	char buf[LDNS_MAX_DOMAINLEN+1];
	char t[12], c[12];
	const char *ts, *cs;
	if(verbosity < v)
		return;
	dname_str(name, buf);
	switch(type) {
	case LDNS_RR_TYPE_TSIG: ts = "TSIG"; break;
	case LDNS_RR_TYPE_IXFR: ts = "IXFR"; break;
	case LDNS_RR_TYPE_AXFR: ts = "AXFR"; break;
	case LDNS_RR_TYPE_MAILB: ts = "MAILB"; break;
	case LDNS_RR_TYPE_MAILA: ts = "MAILA"; break;
	case LDNS_RR_TYPE_ANY: ts = "ANY"; break;
	default:
		if(sldns_rr_descript(type) && sldns_rr_descript(type)->_name)
			ts = sldns_rr_descript(type)->_name;
		else {
			snprintf(t, sizeof(t), "TYPE%d", (int)type);
			ts = t;
		}
		break;
	}
	if(sldns_lookup_by_id(sldns_rr_classes, (int)dclass) &&
		sldns_lookup_by_id(sldns_rr_classes, (int)dclass)->name)
		cs = sldns_lookup_by_id(sldns_rr_classes, (int)dclass)->name;
	else {
		snprintf(c, sizeof(c), "CLASS%d", (int)dclass);
		cs = c;
	}
	log_info("%s %s %s %s", str, buf, ts, cs);
}
/** Log a zone name together with a destination address.
 * Prints "str <zone> dest#port"; for an unknown address family the
 * family tag and addrlen are included as well. */
void log_name_addr(enum verbosity_value v, const char* str, uint8_t* zone,
	struct sockaddr_storage* addr, socklen_t addrlen)
{
	char namebuf[LDNS_MAX_DOMAINLEN+1];
	char dest[100];
	const char* family;
	void* sinaddr;
	uint16_t port;
	int af;
	if(verbosity < v)
		return;
	af = (int)((struct sockaddr_in*)addr)->sin_family;
	sinaddr = &((struct sockaddr_in*)addr)->sin_addr;
	switch(af) {
	case AF_INET:
		family = "";
		break;
	case AF_INET6:
		family = "";
		sinaddr = &((struct sockaddr_in6*)addr)->sin6_addr;
		break;
	case AF_UNIX:
		family = "unix_family ";
		break;
	default:
		family = "unknown_family ";
		break;
	}
	if(inet_ntop(af, sinaddr, dest, (socklen_t)sizeof(dest)) == 0) {
		(void)strlcpy(dest, "(inet_ntop error)", sizeof(dest));
	}
	dest[sizeof(dest)-1] = 0;
	port = ntohs(((struct sockaddr_in*)addr)->sin_port);
	dname_str(zone, namebuf);
	if(af != AF_INET && af != AF_INET6)
		verbose(v, "%s <%s> %s%s#%d (addrlen %d)",
			str, namebuf, family, dest, (int)port, (int)addrlen);
	else verbose(v, "%s <%s> %s%s#%d",
		str, namebuf, family, dest, (int)port);
}
/** Log an error message together with the offending address.
 * At verbosity >= 4 the port and address length are printed too. */
void log_err_addr(const char* str, const char* err,
	struct sockaddr_storage* addr, socklen_t addrlen)
{
	char dest[100];
	uint16_t port;
	int af = (int)((struct sockaddr_in*)addr)->sin_family;
	void* sinaddr = (af == AF_INET6)
		? (void*)&((struct sockaddr_in6*)addr)->sin6_addr
		: (void*)&((struct sockaddr_in*)addr)->sin_addr;
	if(inet_ntop(af, sinaddr, dest, (socklen_t)sizeof(dest)) == 0) {
		(void)strlcpy(dest, "(inet_ntop error)", sizeof(dest));
	}
	dest[sizeof(dest)-1] = 0;
	port = ntohs(((struct sockaddr_in*)addr)->sin_port);
	if(verbosity >= 4)
		log_err("%s: %s for %s port %d (len %d)", str, err, dest,
			(int)port, (int)addrlen);
	else log_err("%s: %s for %s", str, err, dest);
}
/**
 * Compare two socket addresses as a total order (usable as tree key).
 * Orders first on structure length, then address family, then port,
 * then raw address bytes. The order is consistent but not numerically
 * meaningful (ports and lengths compare in stored byte order).
 * @return <0, 0 or >0, like memcmp.
 */
int
sockaddr_cmp(struct sockaddr_storage* addr1, socklen_t len1,
struct sockaddr_storage* addr2, socklen_t len2)
{
struct sockaddr_in* p1_in = (struct sockaddr_in*)addr1;
struct sockaddr_in* p2_in = (struct sockaddr_in*)addr2;
struct sockaddr_in6* p1_in6 = (struct sockaddr_in6*)addr1;
struct sockaddr_in6* p2_in6 = (struct sockaddr_in6*)addr2;
/* shorter addresses sort first; only equal lengths compare further */
if(len1 < len2)
return -1;
if(len1 > len2)
return 1;
log_assert(len1 == len2);
if( p1_in->sin_family < p2_in->sin_family)
return -1;
if( p1_in->sin_family > p2_in->sin_family)
return 1;
log_assert( p1_in->sin_family == p2_in->sin_family );
/* compare ip4 */
if( p1_in->sin_family == AF_INET ) {
/* just order it, ntohs not required */
if(p1_in->sin_port < p2_in->sin_port)
return -1;
if(p1_in->sin_port > p2_in->sin_port)
return 1;
log_assert(p1_in->sin_port == p2_in->sin_port);
return memcmp(&p1_in->sin_addr, &p2_in->sin_addr, INET_SIZE);
} else if (p1_in6->sin6_family == AF_INET6) {
/* just order it, ntohs not required */
if(p1_in6->sin6_port < p2_in6->sin6_port)
return -1;
if(p1_in6->sin6_port > p2_in6->sin6_port)
return 1;
log_assert(p1_in6->sin6_port == p2_in6->sin6_port);
return memcmp(&p1_in6->sin6_addr, &p2_in6->sin6_addr,
INET6_SIZE);
} else {
/* eek unknown type, perform this comparison for sanity. */
return memcmp(addr1, addr2, len1);
}
}
/** Ordering comparison of two sockaddrs on address only (port ignored).
 * Length and family are compared first, like sockaddr_cmp. */
int
sockaddr_cmp_addr(struct sockaddr_storage* addr1, socklen_t len1,
	struct sockaddr_storage* addr2, socklen_t len2)
{
	struct sockaddr_in* a = (struct sockaddr_in*)addr1;
	struct sockaddr_in* b = (struct sockaddr_in*)addr2;
	if(len1 != len2)
		return (len1 < len2) ? -1 : 1;
	log_assert(len1 == len2);
	if(a->sin_family != b->sin_family)
		return (a->sin_family < b->sin_family) ? -1 : 1;
	log_assert(a->sin_family == b->sin_family);
	if(a->sin_family == AF_INET)
		return memcmp(&a->sin_addr, &b->sin_addr, INET_SIZE);
	if(((struct sockaddr_in6*)addr1)->sin6_family == AF_INET6)
		return memcmp(&((struct sockaddr_in6*)addr1)->sin6_addr,
			&((struct sockaddr_in6*)addr2)->sin6_addr,
			INET6_SIZE);
	/* unknown family: raw byte compare keeps the order total */
	return memcmp(addr1, addr2, len1);
}
/** True iff addr is an ip6 sockaddr: correct length and AF_INET6 family. */
int
addr_is_ip6(struct sockaddr_storage* addr, socklen_t len)
{
	return (len == (socklen_t)sizeof(struct sockaddr_in6)
		&& ((struct sockaddr_in6*)addr)->sin6_family == AF_INET6)
		? 1 : 0;
}
/** Zero the host part of addr so only the first 'net' bits remain.
 * Used before tree insertion so netblock compares work byte-wise. */
void
addr_mask(struct sockaddr_storage* addr, socklen_t len, int net)
{
	/* partial[k] keeps the top k bits of an octet */
	static const uint8_t partial[8] =
		{0x0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe};
	uint8_t* octets;
	int bits, idx;
	if(addr_is_ip6(addr, len)) {
		octets = (uint8_t*)&((struct sockaddr_in6*)addr)->sin6_addr;
		bits = 128;
	} else {
		octets = (uint8_t*)&((struct sockaddr_in*)addr)->sin_addr;
		bits = 32;
	}
	if(net >= bits)
		return;	/* whole address kept, nothing to zero */
	/* zero every octet wholly past the netblock boundary */
	for(idx = net/8+1; idx < bits/8; idx++)
		octets[idx] = 0;
	/* and mask the octet the boundary falls in */
	octets[net/8] &= partial[net & 0x7];
}
/** Count leading bits that addr1 and addr2 share, capped at the smaller
 * of the two netblock sizes. Both addrs must be the same family/length. */
int
addr_in_common(struct sockaddr_storage* addr1, int net1,
	struct sockaddr_storage* addr2, int net2, socklen_t addrlen)
{
	int shortest = (net1 < net2) ? net1 : net2;
	int nbytes, i, same = 0;
	uint8_t *a, *b;
	if(addr_is_ip6(addr1, addrlen)) {
		a = (uint8_t*)&((struct sockaddr_in6*)addr1)->sin6_addr;
		b = (uint8_t*)&((struct sockaddr_in6*)addr2)->sin6_addr;
		nbytes = 16;
	} else {
		a = (uint8_t*)&((struct sockaddr_in*)addr1)->sin_addr;
		b = (uint8_t*)&((struct sockaddr_in*)addr2)->sin_addr;
		nbytes = 4;
	}
	for(i = 0; i < nbytes; i++) {
		uint8_t d;
		if(a[i] == b[i]) {
			same += 8;	/* whole octet agrees */
			continue;
		}
		/* count shared leading bits of the first differing octet */
		d = a[i] ^ b[i];
		log_assert(d);
		while(!(d & 0x80)) {
			same++;
			d <<= 1;
		}
		break;
	}
	return (same > shortest) ? shortest : same;
}
/** Render addr (ip4 or ip6) into buf; on inet_ntop failure buf gets
 * the text "(inet_ntop_error)". */
void
addr_to_str(struct sockaddr_storage* addr, socklen_t addrlen,
	char* buf, size_t len)
{
	int family = (int)((struct sockaddr_in*)addr)->sin_family;
	void* src;
	if(addr_is_ip6(addr, addrlen))
		src = &((struct sockaddr_in6*)addr)->sin6_addr;
	else	src = &((struct sockaddr_in*)addr)->sin_addr;
	if(inet_ntop(family, src, buf, (socklen_t)len) == 0)
		snprintf(buf, len, "(inet_ntop_error)");
}
/** True iff addr is an ip6 sockaddr carrying an ipv4-mapped address,
 * i.e. it starts with the ::ffff:0:0/96 prefix. */
int
addr_is_ip4mapped(struct sockaddr_storage* addr, socklen_t addrlen)
{
	/* prefix for ipv4 into ipv6 mapping is ::ffff:x.x.x.x */
	static const uint8_t prefix[16] =
		{0,0,0,0, 0,0,0,0, 0,0,0xff,0xff, 0,0,0,0};
	uint8_t* b;
	if(!addr_is_ip6(addr, addrlen))
		return 0;
	/* b points at the 16 octet ipv6 address */
	b = (uint8_t*)&((struct sockaddr_in6*)addr)->sin6_addr;
	/* only the first 12 octets are the fixed prefix */
	return memcmp(b, prefix, 12) == 0;
}
/** True iff addr is the ip4 limited-broadcast address 255.255.255.255. */
int addr_is_broadcast(struct sockaddr_storage* addr, socklen_t addrlen)
{
	struct sockaddr_in* sa = (struct sockaddr_in*)addr;
	if((int)sa->sin_family != AF_INET)
		return 0;
	if(addrlen < (socklen_t)sizeof(struct sockaddr_in))
		return 0;
	return memcmp(&sa->sin_addr, "\377\377\377\377", 4) == 0;
}
/** True iff addr is the unspecified address: ip4 0.0.0.0 or ip6 ::0. */
int addr_is_any(struct sockaddr_storage* addr, socklen_t addrlen)
{
	int family = (int)((struct sockaddr_in*)addr)->sin_family;
	if(family == AF_INET
		&& addrlen >= (socklen_t)sizeof(struct sockaddr_in))
		return memcmp(&((struct sockaddr_in*)addr)->sin_addr,
			"\000\000\000\000", 4) == 0;
	if(family == AF_INET6
		&& addrlen >= (socklen_t)sizeof(struct sockaddr_in6))
		return memcmp(&((struct sockaddr_in6*)addr)->sin6_addr,
			"\000\000\000\000\000\000\000\000"
			"\000\000\000\000\000\000\000\000", 16) == 0;
	return 0;
}
/** Push a new entry onto the front of the socket list. The entry is
 * region-allocated with just enough room for len address bytes (len 0
 * marks a 'cache' entry with no address). Failure is logged, not fatal. */
void sock_list_insert(struct sock_list** list, struct sockaddr_storage* addr,
	socklen_t len, struct regional* region)
{
	struct sock_list* item = (struct sock_list*)regional_alloc(region,
		sizeof(*item) - sizeof(item->addr) + (size_t)len);
	if(!item) {
		log_err("out of memory in socketlist insert");
		return;
	}
	log_assert(list);
	if(len)
		memmove(&item->addr, addr, len);
	item->len = len;
	item->next = *list;
	*list = item;
}
/** Splice list 'add' in front of *list; 'add' keeps its order and its
 * last element is linked to the old head. No-op when add is NULL. */
void sock_list_prepend(struct sock_list** list, struct sock_list* add)
{
	struct sock_list* tail;
	if(!add)
		return;
	/* walk to the final element of the chain being prepended */
	for(tail = add; tail->next; tail = tail->next)
		;
	tail->next = *list;
	*list = add;
}
/** Search list for an entry matching addr/len. A len of 0 matches the
 * address-less 'cache' entry. Returns 1 when found, 0 otherwise. */
int sock_list_find(struct sock_list* list, struct sockaddr_storage* addr,
	socklen_t len)
{
	struct sock_list* p;
	for(p = list; p; p = p->next) {
		if(p->len != len)
			continue;
		if(len == 0)
			return 1;	/* the zero-length cache marker */
		if(sockaddr_cmp_addr(addr, len, &p->addr, p->len) == 0)
			return 1;
	}
	return 0;
}
/** Copy entries of 'add' into *list, skipping duplicates. Entries are
 * re-allocated in 'region' so the result does not share storage with add. */
void sock_list_merge(struct sock_list** list, struct regional* region,
	struct sock_list* add)
{
	struct sock_list* it;
	for(it = add; it; it = it->next) {
		if(sock_list_find(*list, &it->addr, it->len))
			continue;	/* already present */
		sock_list_insert(list, &it->addr, it->len, region);
	}
}
/** Log a libcrypto failure: report the first queued OpenSSL error with the
 * caller's description, then drain and log any further queued errors so
 * none are left behind in the error queue. Without SSL it does nothing. */
void
log_crypto_err(const char* str)
{
#ifdef HAVE_SSL
	/* error:[error code]:[library name]:[function name]:[reason string] */
	char buf[128];
	unsigned long e;
	/* take the oldest error off the queue and log it with context */
	ERR_error_string_n(ERR_get_error(), buf, sizeof(buf));
	log_err("%s crypto %s", str, buf);
	/* several errors may be queued; log every remaining one */
	while( (e=ERR_get_error()) ) {
		ERR_error_string_n(e, buf, sizeof(buf));
		log_err("and additionally crypto %s", buf);
	}
#else
	(void)str;
#endif /* HAVE_SSL */
}
/**
 * Create an SSL_CTX for a listening (server) socket.
 * @param key: server private key file (PEM).
 * @param pem: server certificate file (PEM).
 * @param verifypem: if nonNULL and nonempty, CA file; client certs are
 *	then requested and verified.
 * @return SSL_CTX* or NULL on failure (error is logged).
 */
void* listen_sslctx_create(char* key, char* pem, char* verifypem)
{
#ifdef HAVE_SSL
	SSL_CTX* ctx = SSL_CTX_new(SSLv23_server_method());
	if(!ctx) {
		log_crypto_err("could not SSL_CTX_new");
		return NULL;
	}
	/* no SSLv2 because has defects */
	if(!(SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2) & SSL_OP_NO_SSLv2)){
		log_crypto_err("could not set SSL_OP_NO_SSLv2");
		SSL_CTX_free(ctx);
		return NULL;
	}
#ifdef SSL_OP_NO_SSLv3
	/* no SSLv3 either: vulnerable to POODLE (CVE-2014-3566) */
	if(!(SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv3) & SSL_OP_NO_SSLv3)){
		log_crypto_err("could not set SSL_OP_NO_SSLv3");
		SSL_CTX_free(ctx);
		return NULL;
	}
#endif
	if(!SSL_CTX_use_certificate_file(ctx, pem, SSL_FILETYPE_PEM)) {
		log_err("error for cert file: %s", pem);
		log_crypto_err("error in SSL_CTX use_certificate_file");
		SSL_CTX_free(ctx);
		return NULL;
	}
	if(!SSL_CTX_use_PrivateKey_file(ctx, key, SSL_FILETYPE_PEM)) {
		log_err("error for private key file: %s", key);
		log_crypto_err("Error in SSL_CTX use_PrivateKey_file");
		SSL_CTX_free(ctx);
		return NULL;
	}
	/* make sure key and certificate belong together */
	if(!SSL_CTX_check_private_key(ctx)) {
		log_err("error for key file: %s", key);
		log_crypto_err("Error in SSL_CTX check_private_key");
		SSL_CTX_free(ctx);
		return NULL;
	}
	if(verifypem && verifypem[0]) {
		/* require and verify client certificates against the CA */
		if(!SSL_CTX_load_verify_locations(ctx, verifypem, NULL)) {
			log_crypto_err("Error in SSL_CTX verify locations");
			SSL_CTX_free(ctx);
			return NULL;
		}
		SSL_CTX_set_client_CA_list(ctx, SSL_load_client_CA_file(
			verifypem));
		SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER, NULL);
	}
	return ctx;
#else
	(void)key; (void)pem; (void)verifypem;
	return NULL;
#endif
}
/**
 * Create an SSL_CTX for outgoing (client) connections.
 * @param key: if nonNULL and nonempty (with pem), client private key file.
 * @param pem: client certificate file (used when key is given).
 * @param verifypem: if nonNULL and nonempty, CA file; the server cert is
 *	then verified.
 * @return SSL_CTX* or NULL on failure (error is logged).
 */
void* connect_sslctx_create(char* key, char* pem, char* verifypem)
{
#ifdef HAVE_SSL
	SSL_CTX* ctx = SSL_CTX_new(SSLv23_client_method());
	if(!ctx) {
		log_crypto_err("could not allocate SSL_CTX pointer");
		return NULL;
	}
	/* no SSLv2 because has defects */
	if(!(SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2) & SSL_OP_NO_SSLv2)) {
		log_crypto_err("could not set SSL_OP_NO_SSLv2");
		SSL_CTX_free(ctx);
		return NULL;
	}
#ifdef SSL_OP_NO_SSLv3
	/* no SSLv3 either: vulnerable to POODLE (CVE-2014-3566) */
	if(!(SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv3) & SSL_OP_NO_SSLv3)) {
		log_crypto_err("could not set SSL_OP_NO_SSLv3");
		SSL_CTX_free(ctx);
		return NULL;
	}
#endif
	if(key && key[0]) {
		if(!SSL_CTX_use_certificate_file(ctx, pem, SSL_FILETYPE_PEM)) {
			log_err("error in client certificate %s", pem);
			log_crypto_err("error in certificate file");
			SSL_CTX_free(ctx);
			return NULL;
		}
		if(!SSL_CTX_use_PrivateKey_file(ctx, key, SSL_FILETYPE_PEM)) {
			log_err("error in client private key %s", key);
			log_crypto_err("error in key file");
			SSL_CTX_free(ctx);
			return NULL;
		}
		if(!SSL_CTX_check_private_key(ctx)) {
			log_err("error in client key %s", key);
			log_crypto_err("error in SSL_CTX_check_private_key");
			SSL_CTX_free(ctx);
			return NULL;
		}
	}
	if(verifypem && verifypem[0]) {
		/* BUGFIX: this used to read
		 *   if(!SSL_CTX_load_verify_locations(...) != 1)
		 * which parses as (!result) != 1: it failed on success and
		 * silently continued without CA verification on failure. */
		if(!SSL_CTX_load_verify_locations(ctx, verifypem, NULL)) {
			log_crypto_err("error in SSL_CTX verify");
			SSL_CTX_free(ctx);
			return NULL;
		}
		SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER, NULL);
	}
	return ctx;
#else
	(void)key; (void)pem; (void)verifypem;
	return NULL;
#endif
}
/** Wrap an accepted fd in an SSL object in server (accept) state.
 * Returns the SSL* (as void*) or NULL on failure; errors are logged. */
void* incoming_ssl_fd(void* sslctx, int fd)
{
#ifdef HAVE_SSL
	SSL* s = SSL_new((SSL_CTX*)sslctx);
	if(!s) {
		log_crypto_err("could not SSL_new");
		return NULL;
	}
	SSL_set_accept_state(s);
	/* auto-retry hides transparent renegotiations from the caller */
	(void)SSL_set_mode(s, SSL_MODE_AUTO_RETRY);
	if(SSL_set_fd(s, fd) == 0) {
		log_crypto_err("could not SSL_set_fd");
		SSL_free(s);
		return NULL;
	}
	return s;
#else
	(void)sslctx; (void)fd;
	return NULL;
#endif
}
/** Wrap a connected fd in an SSL object in client (connect) state.
 * Returns the SSL* (as void*) or NULL on failure; errors are logged. */
void* outgoing_ssl_fd(void* sslctx, int fd)
{
#ifdef HAVE_SSL
	SSL* s = SSL_new((SSL_CTX*)sslctx);
	if(!s) {
		log_crypto_err("could not SSL_new");
		return NULL;
	}
	SSL_set_connect_state(s);
	/* auto-retry hides transparent renegotiations from the caller */
	(void)SSL_set_mode(s, SSL_MODE_AUTO_RETRY);
	if(SSL_set_fd(s, fd) == 0) {
		log_crypto_err("could not SSL_set_fd");
		SSL_free(s);
		return NULL;
	}
	return s;
#else
	(void)sslctx; (void)fd;
	return NULL;
#endif
}
#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED)
/** global lock list for openssl locks; allocated and sized by
 * ub_openssl_lock_init(), freed by ub_openssl_lock_delete() below */
static lock_basic_t *ub_openssl_locks = NULL;
/** callback that gets thread id for openssl */
static unsigned long
ub_crypto_id_cb(void)
{
	return (unsigned long)ub_thread_self();
}
/** locking callback for openssl: takes or releases lock number 'type'
 * from the global list, depending on the CRYPTO_LOCK bit in mode */
static void
ub_crypto_lock_cb(int mode, int type, const char *ATTR_UNUSED(file),
	int ATTR_UNUSED(line))
{
	if((mode&CRYPTO_LOCK)) {
		lock_basic_lock(&ub_openssl_locks[type]);
	} else {
		lock_basic_unlock(&ub_openssl_locks[type]);
	}
}
#endif /* OPENSSL_THREADS */
/** Set up the lock array and register the id and locking callbacks that
 * openssl needs for thread safety. Returns 0 on allocation failure. */
int ub_openssl_lock_init(void)
{
#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED)
	int idx;
	ub_openssl_locks = (lock_basic_t*)malloc(
		sizeof(lock_basic_t) * CRYPTO_num_locks());
	if(ub_openssl_locks == NULL)
		return 0;
	for(idx = 0; idx < CRYPTO_num_locks(); idx++)
		lock_basic_init(&ub_openssl_locks[idx]);
	CRYPTO_set_id_callback(&ub_crypto_id_cb);
	CRYPTO_set_locking_callback(&ub_crypto_lock_cb);
#endif /* OPENSSL_THREADS */
	return 1;
}
/** Unregister the openssl callbacks and free the lock array.
 * Safe to call when ub_openssl_lock_init() never ran (or failed). */
void ub_openssl_lock_delete(void)
{
#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED)
	int idx;
	if(ub_openssl_locks == NULL)
		return;	/* never initialized */
	CRYPTO_set_id_callback(NULL);
	CRYPTO_set_locking_callback(NULL);
	for(idx = 0; idx < CRYPTO_num_locks(); idx++)
		lock_basic_destroy(&ub_openssl_locks[idx]);
	free(ub_openssl_locks);
#endif /* OPENSSL_THREADS */
}

393
external/unbound/util/net_help.h vendored Normal file
View File

@@ -0,0 +1,393 @@
/*
* util/net_help.h - network help functions
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains functions to perform network related tasks.
*/
#ifndef NET_HELP_H
#define NET_HELP_H
#include "util/log.h"
struct sock_list;
struct regional;
/** DNS constants for uint16_t style flag manipulation. host byteorder.
* 1 1 1 1 1 1
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
* +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
* |QR| Opcode |AA|TC|RD|RA| Z|AD|CD| RCODE |
* +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
*/
/** CD flag */
#define BIT_CD 0x0010
/** AD flag */
#define BIT_AD 0x0020
/** Z flag */
#define BIT_Z 0x0040
/** RA flag */
#define BIT_RA 0x0080
/** RD flag */
#define BIT_RD 0x0100
/** TC flag */
#define BIT_TC 0x0200
/** AA flag */
#define BIT_AA 0x0400
/** QR flag */
#define BIT_QR 0x8000
/** get RCODE bits from uint16 flags */
#define FLAGS_GET_RCODE(f) ((f) & 0xf)
/** set RCODE bits in uint16 flags */
#define FLAGS_SET_RCODE(f, r) (f = (((f) & 0xfff0) | (r)))
/** timeout in seconds for UDP queries to auth servers. */
#define UDP_AUTH_QUERY_TIMEOUT 4
/** timeout in seconds for TCP queries to auth servers. */
#define TCP_AUTH_QUERY_TIMEOUT 30
/** Advertised version of EDNS capabilities */
#define EDNS_ADVERTISED_VERSION 0
/** Advertised size of EDNS capabilities */
extern uint16_t EDNS_ADVERTISED_SIZE;
/** bits for EDNS bitfield */
#define EDNS_DO 0x8000 /* Dnssec Ok */
/** byte size of ip4 address */
#define INET_SIZE 4
/** byte size of ip6 address */
#define INET6_SIZE 16
/** DNSKEY zone sign key flag */
#define DNSKEY_BIT_ZSK 0x0100
/** DNSKEY secure entry point, KSK flag */
#define DNSKEY_BIT_SEP 0x0001
/** minimal responses when positive answer */
extern int MINIMAL_RESPONSES;
/** rrset order roundrobin */
extern int RRSET_ROUNDROBIN;
/**
* See if string is ip4 or ip6.
* @param str: IP specification.
* @return: true if string addr is an ip6 specced address.
*/
int str_is_ip6(const char* str);
/**
* Set fd nonblocking.
* @param s: file descriptor.
* @return: 0 on error (error is printed to log).
*/
int fd_set_nonblock(int s);
/**
* Set fd (back to) blocking.
* @param s: file descriptor.
* @return: 0 on error (error is printed to log).
*/
int fd_set_block(int s);
/**
* See if number is a power of 2.
* @param num: the value.
* @return: true if the number is a power of 2.
*/
int is_pow2(size_t num);
/**
* Allocate memory and copy over contents.
* @param data: what to copy over.
* @param len: length of data.
* @return: NULL on malloc failure, or newly malloced data.
*/
void* memdup(void* data, size_t len);
/**
* Prints the sockaddr in readable format with log_info. Debug helper.
* @param v: at what verbosity level to print this.
* @param str: descriptive string printed with it.
* @param addr: the sockaddr to print. Can be ip4 or ip6.
* @param addrlen: length of addr.
*/
void log_addr(enum verbosity_value v, const char* str,
struct sockaddr_storage* addr, socklen_t addrlen);
/**
* Prints zone name and sockaddr in readable format with log_info. Debug.
* @param v: at what verbosity level to print this.
* @param str: descriptive string printed with it.
* @param zone: DNS domain name, uncompressed wireformat.
* @param addr: the sockaddr to print. Can be ip4 or ip6.
* @param addrlen: length of addr.
*/
void log_name_addr(enum verbosity_value v, const char* str, uint8_t* zone,
struct sockaddr_storage* addr, socklen_t addrlen);
/**
* Log errno and addr.
* @param str: descriptive string printed with it.
* @param err: errno string to print, i.e. strerror(errno).
* @param addr: the sockaddr to print. Can be ip4 or ip6.
* @param addrlen: length of addr.
*/
void log_err_addr(const char* str, const char* err,
struct sockaddr_storage* addr, socklen_t addrlen);
/**
* Convert address string, with "@port" appendix, to sockaddr.
* Uses DNS port by default.
* @param str: the string
* @param addr: where to store sockaddr.
* @param addrlen: length of stored sockaddr is returned.
* @return 0 on error.
*/
int extstrtoaddr(const char* str, struct sockaddr_storage* addr,
socklen_t* addrlen);
/**
* Convert ip address string and port to sockaddr.
* @param ip: ip4 or ip6 address string.
* @param port: port number, host format.
* @param addr: where to store sockaddr.
* @param addrlen: length of stored sockaddr is returned.
* @return 0 on error.
*/
int ipstrtoaddr(const char* ip, int port, struct sockaddr_storage* addr,
socklen_t* addrlen);
/**
* Convert ip netblock (ip/netsize) string and port to sockaddr.
* *SLOW*, does a malloc internally to avoid writing over 'ip' string.
* @param ip: ip4 or ip6 address string.
* @param port: port number, host format.
* @param addr: where to store sockaddr.
* @param addrlen: length of stored sockaddr is returned.
* @param net: netblock size is returned.
* @return 0 on error.
*/
int netblockstrtoaddr(const char* ip, int port, struct sockaddr_storage* addr,
socklen_t* addrlen, int* net);
/**
* Print string with neat domain name, type and class.
* @param v: at what verbosity level to print this.
* @param str: string of message.
* @param name: domain name uncompressed wireformat.
* @param type: host format RR type.
* @param dclass: host format RR class.
*/
void log_nametypeclass(enum verbosity_value v, const char* str,
uint8_t* name, uint16_t type, uint16_t dclass);
/**
* Compare two sockaddrs. Imposes an ordering on the addresses.
* Compares address and port.
* @param addr1: address 1.
* @param len1: lengths of addr1.
* @param addr2: address 2.
* @param len2: lengths of addr2.
* @return: 0 if addr1 == addr2. -1 if addr1 is smaller, +1 if larger.
*/
int sockaddr_cmp(struct sockaddr_storage* addr1, socklen_t len1,
struct sockaddr_storage* addr2, socklen_t len2);
/**
* Compare two sockaddrs. Compares address, not the port.
* @param addr1: address 1.
* @param len1: lengths of addr1.
* @param addr2: address 2.
* @param len2: lengths of addr2.
* @return: 0 if addr1 == addr2. -1 if addr1 is smaller, +1 if larger.
*/
int sockaddr_cmp_addr(struct sockaddr_storage* addr1, socklen_t len1,
struct sockaddr_storage* addr2, socklen_t len2);
/**
* Checkout address family.
* @param addr: the sockaddr to examine.
* @param len: the length of addr.
* @return: true if sockaddr is ip6.
*/
int addr_is_ip6(struct sockaddr_storage* addr, socklen_t len);
/**
* Make sure the sockaddr ends in zeroes. For tree insertion and subsequent
* comparison.
* @param addr: the ip4 or ip6 addr.
* @param len: length of addr.
* @param net: number of bits to leave untouched, the rest of the netblock
* address is zeroed.
*/
void addr_mask(struct sockaddr_storage* addr, socklen_t len, int net);
/**
* See how many bits are shared, equal, between two addrs.
* @param addr1: first addr.
* @param net1: netblock size of first addr.
* @param addr2: second addr.
* @param net2: netblock size of second addr.
* @param addrlen: length of first addr and of second addr.
* They must be of the same length (i.e. same type IP4, IP6).
* @return: number of bits the same.
*/
int addr_in_common(struct sockaddr_storage* addr1, int net1,
struct sockaddr_storage* addr2, int net2, socklen_t addrlen);
/**
* Put address into string, works for IPv4 and IPv6.
* @param addr: address
* @param addrlen: length of address
* @param buf: result string stored here
* @param len: length of buf.
* On failure a string with "error" is stored inside.
*/
void addr_to_str(struct sockaddr_storage* addr, socklen_t addrlen,
char* buf, size_t len);
/**
* See if sockaddr is an ipv6 mapped ipv4 address, "::ffff:0.0.0.0"
* @param addr: address
* @param addrlen: length of address
* @return true if so
*/
int addr_is_ip4mapped(struct sockaddr_storage* addr, socklen_t addrlen);
/**
* See if sockaddr is 255.255.255.255.
* @param addr: address
* @param addrlen: length of address
* @return true if so
*/
int addr_is_broadcast(struct sockaddr_storage* addr, socklen_t addrlen);
/**
* See if sockaddr is 0.0.0.0 or ::0.
* @param addr: address
* @param addrlen: length of address
* @return true if so
*/
int addr_is_any(struct sockaddr_storage* addr, socklen_t addrlen);
/**
* Insert new socket list item. If fails logs error.
* @param list: pointer to pointer to first item.
* @param addr: address or NULL if 'cache'.
* @param len: length of addr, or 0 if 'cache'.
* @param region: where to allocate
*/
void sock_list_insert(struct sock_list** list, struct sockaddr_storage* addr,
socklen_t len, struct regional* region);
/**
* Append one list to another. Must both be from same qstate(regional).
* @param list: pointer to result list that is modified.
* @param add: item(s) to add. They are prepended to list.
*/
void sock_list_prepend(struct sock_list** list, struct sock_list* add);
/**
* Find addr in list.
* @param list: to search in
* @param addr: address to look for.
* @param len: length. Can be 0, look for 'cache entry'.
* @return true if found.
*/
int sock_list_find(struct sock_list* list, struct sockaddr_storage* addr,
socklen_t len);
/**
* Merge socklist into another socket list. Allocates the new entries
* freshly and copies them over, so also performs a region switchover.
* Allocation failures are logged.
* @param list: the destination list (checked for duplicates)
* @param region: where to allocate
* @param add: the list of entries to add.
*/
void sock_list_merge(struct sock_list** list, struct regional* region,
struct sock_list* add);
/**
* Log libcrypto error with descriptive string. Calls log_err().
* @param str: what failed.
*/
void log_crypto_err(const char* str);
/**
* create SSL listen context
* @param key: private key file.
* @param pem: public key cert.
* @param verifypem: if nonNULL, verifylocation file.
* return SSL_CTX* or NULL on failure (logged).
*/
void* listen_sslctx_create(char* key, char* pem, char* verifypem);
/**
* create SSL connect context
* @param key: if nonNULL (also pem nonNULL), the client private key.
* @param pem: client public key (or NULL if key is NULL).
* @param verifypem: if nonNULL used for verifylocation file.
* @return SSL_CTX* or NULL on failure (logged).
*/
void* connect_sslctx_create(char* key, char* pem, char* verifypem);
/**
* accept a new fd and wrap it in a BIO in SSL
* @param sslctx: the SSL_CTX to use (from listen_sslctx_create()).
* @param fd: from accept, nonblocking.
* @return SSL or NULL on alloc failure.
*/
void* incoming_ssl_fd(void* sslctx, int fd);
/**
* connect a new fd and wrap it in a BIO in SSL
* @param sslctx: the SSL_CTX to use (from connect_sslctx_create())
* @param fd: from connect.
* @return SSL or NULL on alloc failure
*/
void* outgoing_ssl_fd(void* sslctx, int fd);
/**
* Initialize openssl locking for thread safety
* @return false on failure (alloc failure).
*/
int ub_openssl_lock_init(void);
/**
* De-init the allocated openssl locks
*/
void ub_openssl_lock_delete(void);
#endif /* NET_HELP_H */

2217
external/unbound/util/netevent.c vendored Normal file

File diff suppressed because it is too large Load Diff

703
external/unbound/util/netevent.h vendored Normal file
View File

@@ -0,0 +1,703 @@
/*
* util/netevent.h - event notification
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains event notification functions.
*
* There are three types of communication points
* o UDP socket - perthread buffer.
* o TCP-accept socket - array of TCP-sockets, socketcount.
* o TCP socket - own buffer, parent-TCPaccept, read/write state,
* number of bytes read/written, timeout.
*
* There are sockets aimed towards our clients and towards the internet.
* o frontside - aimed towards our clients, queries come in, answers back.
* o behind - aimed towards internet, to the authoritative DNS servers.
*
* Several event types are available:
* o comm_base - for thread safety of the comm points, one per thread.
* o comm_point - udp and tcp networking, with callbacks.
* o comm_timer - a timeout with callback.
* o comm_signal - callbacks when signal is caught.
* o comm_reply - holds reply info during networking callback.
*
*/
#ifndef NET_EVENT_H
#define NET_EVENT_H
struct sldns_buffer;
struct comm_point;
struct comm_reply;
struct event_base;
/* internal event notification data storage structure. */
struct internal_event;
struct internal_base;
struct internal_timer;
/** callback from communication point function type */
typedef int comm_point_callback_t(struct comm_point*, void*, int,
struct comm_reply*);
/** to pass no_error to callback function */
#define NETEVENT_NOERROR 0
/** to pass closed connection to callback function */
#define NETEVENT_CLOSED -1
/** to pass timeout happened to callback function */
#define NETEVENT_TIMEOUT -2
/** to pass fallback from capsforID to callback function; 0x20 failed */
#define NETEVENT_CAPSFAIL -3
/** timeout to slow accept calls when not possible, in msec. */
#define NETEVENT_SLOW_ACCEPT_TIME 2000
/**
* A communication point dispatcher. Thread specific.
*/
struct comm_base {
/** behind the scenes structure. with say libevent info. alloced */
struct internal_base* eb;
/** callback to stop listening on accept sockets,
* performed when accept() will not function properly */
void (*stop_accept)(void*);
/** callback to start listening on accept sockets, performed
* after stop_accept() then a timeout has passed. */
void (*start_accept)(void*);
/** user argument for stop_accept and start_accept functions */
void* cb_arg;
};
/**
* Reply information for a communication point.
*/
struct comm_reply {
/** the comm_point with fd to send reply on to. */
struct comm_point* c;
/** the address (for UDP based communication) */
struct sockaddr_storage addr;
/** length of address */
socklen_t addrlen;
/** return type 0 (none), 4(IP4), 6(IP6) */
int srctype;
/** the return source interface data */
union {
#ifdef IPV6_PKTINFO
struct in6_pktinfo v6info;
#endif
#ifdef IP_PKTINFO
struct in_pktinfo v4info;
#elif defined(IP_RECVDSTADDR)
struct in_addr v4addr;
#endif
}
/** variable with return source data */
pktinfo;
};
/**
* Communication point to the network
* These behaviours can be accomplished by setting the flags
* and passing return values from the callback.
* udp frontside: called after readdone. sendafter.
* tcp frontside: called readdone, sendafter. close.
* udp behind: called after readdone. No send after.
* tcp behind: write done, read done, then called. No send after.
*/
struct comm_point {
/** behind the scenes structure, with say libevent info. alloced. */
struct internal_event* ev;
/** file descriptor for communication point */
int fd;
/** timeout (NULL if it does not). Malloced. */
struct timeval* timeout;
/** buffer pointer. Either to perthread, or own buffer or NULL */
struct sldns_buffer* buffer;
/* -------- TCP Handler -------- */
/** Read/Write state for TCP */
int tcp_is_reading;
/** The current read/write count for TCP */
size_t tcp_byte_count;
/** parent communication point (for TCP sockets) */
struct comm_point* tcp_parent;
/** sockaddr from peer, for TCP handlers */
struct comm_reply repinfo;
/* -------- TCP Accept -------- */
/** the number of TCP handlers for this tcp-accept socket */
int max_tcp_count;
/** malloced array of tcp handlers for a tcp-accept,
of size max_tcp_count. */
struct comm_point** tcp_handlers;
/** linked list of free tcp_handlers to use for new queries.
For tcp_accept the first entry, for tcp_handlers the next one. */
struct comm_point* tcp_free;
/* -------- SSL TCP DNS ------- */
/** the SSL object with rw bio (owned) or for commaccept ctx ref */
void* ssl;
/** handshake state for init and renegotiate */
enum {
/** no handshake, it has been done */
comm_ssl_shake_none = 0,
/** ssl initial handshake wants to read */
comm_ssl_shake_read,
/** ssl initial handshake wants to write */
comm_ssl_shake_write,
/** ssl_write wants to read */
comm_ssl_shake_hs_read,
/** ssl_read wants to write */
comm_ssl_shake_hs_write
} ssl_shake_state;
/* -------- dnstap ------- */
/** the dnstap environment */
struct dt_env* dtenv;
/** is this a UDP, TCP-accept or TCP socket. */
enum comm_point_type {
/** UDP socket - handle datagrams. */
comm_udp,
/** TCP accept socket - only creates handlers if readable. */
comm_tcp_accept,
/** TCP handler socket - handle byteperbyte readwrite. */
comm_tcp,
/** AF_UNIX socket - for internal commands. */
comm_local,
/** raw - not DNS format - for pipe readers and writers */
comm_raw
}
/** variable with type of socket, UDP,TCP-accept,TCP,pipe */
type;
/* ---------- Behaviour ----------- */
/** if set the connection is NOT closed on delete. */
int do_not_close;
/** if set, the connection is closed on error, on timeout,
and after read/write completes. No callback is done. */
int tcp_do_close;
/** if set, read/write completes:
read/write state of tcp is toggled.
buffer reset/bytecount reset.
this flag cleared.
So that when that is done the callback is called. */
int tcp_do_toggle_rw;
/** if set, checks for pending error from nonblocking connect() call.*/
int tcp_check_nb_connect;
/** number of queries outstanding on this socket, used by
* outside network for udp ports */
int inuse;
/** callback when done.
tcp_accept does not get called back, is NULL then.
If a timeout happens, callback with timeout=1 is called.
If an error happens, callback is called with error set
nonzero. If not NETEVENT_NOERROR, it is an errno value.
If the connection is closed (by remote end) then the
callback is called with error set to NETEVENT_CLOSED=-1.
If a timeout happens on the connection, the error is set to
NETEVENT_TIMEOUT=-2.
The reply_info can be copied if the reply needs to happen at a
later time. It consists of a struct with commpoint and address.
It can be passed to a msg send routine some time later.
Note the reply information is temporary and must be copied.
NULL is passed for_reply info, in cases where error happened.
declare as:
int my_callback(struct comm_point* c, void* my_arg, int error,
struct comm_reply *reply_info);
if the routine returns 0, nothing is done.
Nonzero, the buffer will be sent back to the client.
For UDP this is done without changing the commpoint.
In TCP it sets write state.
*/
comm_point_callback_t* callback;
/** argument to pass to callback. */
void *cb_arg;
};
/**
 * Structure only for making timeout events.
 * Created with comm_timer_create() and armed with comm_timer_set();
 * see the declarations below.
 */
struct comm_timer {
	/** the internal event stuff (opaque; defined by the event backend) */
	struct internal_timer* ev_timer;
	/** callback function, takes user arg only */
	void (*callback)(void*);
	/** callback user argument, passed unchanged to callback */
	void* cb_arg;
};
/**
 * Structure only for signal events.
 * Created with comm_signal_create(); bind to one or more signals with
 * comm_signal_bind(), declared below.
 */
struct comm_signal {
	/** the communication base */
	struct comm_base* base;
	/** the internal event stuff (opaque; defined by the event backend) */
	struct internal_signal* ev_signal;
	/** callback function, takes signal number and user arg */
	void (*callback)(int, void*);
	/** callback user argument, passed unchanged to callback */
	void* cb_arg;
};
/**
* Create a new comm base.
* @param sigs: if true it attempts to create a default loop for
* signal handling.
* @return: the new comm base. NULL on error.
*/
struct comm_base* comm_base_create(int sigs);
/**
* Create comm base that uses the given event_base (underlying event
* mechanism pointer).
* @param base: underlying lib event base.
* @return: the new comm base. NULL on error.
*/
struct comm_base* comm_base_create_event(struct event_base* base);
/**
* Delete comm base structure but not the underlying lib event base.
* All comm points must have been deleted.
* @param b: the base to delete.
*/
void comm_base_delete_no_base(struct comm_base* b);
/**
* Destroy a comm base.
* All comm points must have been deleted.
* @param b: the base to delete.
*/
void comm_base_delete(struct comm_base* b);
/**
* Obtain two pointers. The pointers never change (until base_delete()).
* The pointers point to time values that are updated regularly.
* @param b: the communication base that will update the time values.
* @param tt: pointer to time in seconds is returned.
* @param tv: pointer to time in microseconds is returned.
*/
void comm_base_timept(struct comm_base* b, time_t** tt, struct timeval** tv);
/**
* Dispatch the comm base events.
* @param b: the communication to perform.
*/
void comm_base_dispatch(struct comm_base* b);
/**
* Exit from dispatch loop.
* @param b: the communication base that is in dispatch().
*/
void comm_base_exit(struct comm_base* b);
/**
* Set the slow_accept mode handlers. You can not provide these if you do
* not perform accept() calls.
* @param b: comm base
* @param stop_accept: function that stops listening to accept fds.
* @param start_accept: function that resumes listening to accept fds.
* @param arg: callback arg to pass to the functions.
*/
void comm_base_set_slow_accept_handlers(struct comm_base* b,
void (*stop_accept)(void*), void (*start_accept)(void*), void* arg);
/**
* Access internal data structure (for util/tube.c on windows)
* @param b: comm base
* @return event_base. Could be libevent, or internal event handler.
*/
struct event_base* comm_base_internal(struct comm_base* b);
/**
* Create an UDP comm point. Calls malloc.
* setups the structure with the parameters you provide.
* @param base: in which base to alloc the commpoint.
* @param fd : file descriptor of open UDP socket.
* @param buffer: shared buffer by UDP sockets from this thread.
* @param callback: callback function pointer.
* @param callback_arg: will be passed to your callback function.
* @return: returns the allocated communication point. NULL on error.
* Sets timeout to NULL. Turns off TCP options.
*/
struct comm_point* comm_point_create_udp(struct comm_base* base,
int fd, struct sldns_buffer* buffer,
comm_point_callback_t* callback, void* callback_arg);
/**
* Create an UDP with ancillary data comm point. Calls malloc.
* Uses recvmsg instead of recv to get udp message.
* setups the structure with the parameters you provide.
* @param base: in which base to alloc the commpoint.
* @param fd : file descriptor of open UDP socket.
* @param buffer: shared buffer by UDP sockets from this thread.
* @param callback: callback function pointer.
* @param callback_arg: will be passed to your callback function.
* @return: returns the allocated communication point. NULL on error.
* Sets timeout to NULL. Turns off TCP options.
*/
struct comm_point* comm_point_create_udp_ancil(struct comm_base* base,
int fd, struct sldns_buffer* buffer,
comm_point_callback_t* callback, void* callback_arg);
/**
* Create a TCP listener comm point. Calls malloc.
* Setups the structure with the parameters you provide.
* Also Creates TCP Handlers, pre allocated for you.
* Uses the parameters you provide.
* @param base: in which base to alloc the commpoint.
* @param fd: file descriptor of open TCP socket set to listen nonblocking.
* @param num: becomes max_tcp_count, the routine allocates that
* many tcp handler commpoints.
* @param bufsize: size of buffer to create for handlers.
* @param callback: callback function pointer for TCP handlers.
* @param callback_arg: will be passed to your callback function.
* @return: returns the TCP listener commpoint. You can find the
* TCP handlers in the array inside the listener commpoint.
* returns NULL on error.
* Inits timeout to NULL. All handlers are on the free list.
*/
struct comm_point* comm_point_create_tcp(struct comm_base* base,
int fd, int num, size_t bufsize,
comm_point_callback_t* callback, void* callback_arg);
/**
* Create an outgoing TCP commpoint. No file descriptor is opened, left at -1.
* @param base: in which base to alloc the commpoint.
* @param bufsize: size of buffer to create for handlers.
* @param callback: callback function pointer for the handler.
* @param callback_arg: will be passed to your callback function.
* @return: the commpoint or NULL on error.
*/
struct comm_point* comm_point_create_tcp_out(struct comm_base* base,
size_t bufsize, comm_point_callback_t* callback, void* callback_arg);
/**
* Create commpoint to listen to a local domain file descriptor.
* @param base: in which base to alloc the commpoint.
* @param fd: file descriptor of open AF_UNIX socket set to listen nonblocking.
* @param bufsize: size of buffer to create for handlers.
* @param callback: callback function pointer for the handler.
* @param callback_arg: will be passed to your callback function.
* @return: the commpoint or NULL on error.
*/
struct comm_point* comm_point_create_local(struct comm_base* base,
int fd, size_t bufsize,
comm_point_callback_t* callback, void* callback_arg);
/**
* Create commpoint to listen to a local domain pipe descriptor.
* @param base: in which base to alloc the commpoint.
* @param fd: file descriptor.
* @param writing: true if you want to listen to writes, false for reads.
* @param callback: callback function pointer for the handler.
* @param callback_arg: will be passed to your callback function.
* @return: the commpoint or NULL on error.
*/
struct comm_point* comm_point_create_raw(struct comm_base* base,
int fd, int writing,
comm_point_callback_t* callback, void* callback_arg);
/**
* Close a comm point fd.
* @param c: comm point to close.
*/
void comm_point_close(struct comm_point* c);
/**
* Close and deallocate (free) the comm point. If the comm point is
* a tcp-accept point, also its tcp-handler points are deleted.
* @param c: comm point to delete.
*/
void comm_point_delete(struct comm_point* c);
/**
* Send reply. Put message into commpoint buffer.
* @param repinfo: The reply info copied from a commpoint callback call.
*/
void comm_point_send_reply(struct comm_reply* repinfo);
/**
* Drop reply. Cleans up.
* @param repinfo: The reply info copied from a commpoint callback call.
*/
void comm_point_drop_reply(struct comm_reply* repinfo);
/**
* Send an udp message over a commpoint.
* @param c: commpoint to send it from.
* @param packet: what to send.
* @param addr: where to send it to.
* @param addrlen: length of addr.
* @return: false on a failure.
*/
int comm_point_send_udp_msg(struct comm_point* c, struct sldns_buffer* packet,
struct sockaddr* addr, socklen_t addrlen);
/**
* Stop listening for input on the commpoint. No callbacks will happen.
* @param c: commpoint to disable. The fd is not closed.
*/
void comm_point_stop_listening(struct comm_point* c);
/**
* Start listening again for input on the comm point.
* @param c: commpoint to enable again.
* @param newfd: new fd, or -1 to leave fd be.
* @param sec: timeout in seconds, or -1 for no (change to the) timeout.
*/
void comm_point_start_listening(struct comm_point* c, int newfd, int sec);
/**
* Stop listening and start listening again for reading or writing.
* @param c: commpoint
* @param rd: if true, listens for reading.
* @param wr: if true, listens for writing.
*/
void comm_point_listen_for_rw(struct comm_point* c, int rd, int wr);
/**
* Get size of memory used by comm point.
* For TCP handlers this includes subhandlers.
* For UDP handlers, this does not include the (shared) UDP buffer.
* @param c: commpoint.
* @return size in bytes.
*/
size_t comm_point_get_mem(struct comm_point* c);
/**
* create timer. Not active upon creation.
* @param base: event handling base.
* @param cb: callback function: void myfunc(void* myarg);
* @param cb_arg: user callback argument.
* @return: the new timer or NULL on error.
*/
struct comm_timer* comm_timer_create(struct comm_base* base,
void (*cb)(void*), void* cb_arg);
/**
* disable timer. Stops callbacks from happening.
* @param timer: to disable.
*/
void comm_timer_disable(struct comm_timer* timer);
/**
* reset timevalue for timer.
* @param timer: timer to (re)set.
* @param tv: when the timer should activate. if NULL timer is disabled.
*/
void comm_timer_set(struct comm_timer* timer, struct timeval* tv);
/**
* delete timer.
* @param timer: to delete.
*/
void comm_timer_delete(struct comm_timer* timer);
/**
* see if timeout has been set to a value.
* @param timer: the timer to examine.
* @return: false if disabled or not set.
*/
int comm_timer_is_set(struct comm_timer* timer);
/**
* Get size of memory used by comm timer.
* @param timer: the timer to examine.
* @return size in bytes.
*/
size_t comm_timer_get_mem(struct comm_timer* timer);
/**
* Create a signal handler. Call signal_bind() later to bind to a signal.
* @param base: communication base to use.
* @param callback: called when signal is caught.
* @param cb_arg: user argument to callback
* @return: the signal struct or NULL on error.
*/
struct comm_signal* comm_signal_create(struct comm_base* base,
void (*callback)(int, void*), void* cb_arg);
/**
 * Bind signal struct to catch a signal. A single comm_signal can be bound
* to multiple signals, calling comm_signal_bind multiple times.
* @param comsig: the communication point, with callback information.
* @param sig: signal number.
* @return: true on success. false on error.
*/
int comm_signal_bind(struct comm_signal* comsig, int sig);
/**
* Delete the signal communication point.
* @param comsig: to delete.
*/
void comm_signal_delete(struct comm_signal* comsig);
/**
* perform accept(2) with error checking.
* @param c: commpoint with accept fd.
* @param addr: remote end returned here.
* @param addrlen: length of remote end returned here.
* @return new fd, or -1 on error.
* if -1, error message has been printed if necessary, simply drop
* out of the reading handler.
*/
int comm_point_perform_accept(struct comm_point* c,
struct sockaddr_storage* addr, socklen_t* addrlen);
/**** internal routines ****/
/**
* This routine is published for checks and tests, and is only used internally.
* handle libevent callback for udp comm point.
* @param fd: file descriptor.
* @param event: event bits from libevent:
* EV_READ, EV_WRITE, EV_SIGNAL, EV_TIMEOUT.
* @param arg: the comm_point structure.
*/
void comm_point_udp_callback(int fd, short event, void* arg);
/**
* This routine is published for checks and tests, and is only used internally.
* handle libevent callback for udp ancillary data comm point.
* @param fd: file descriptor.
* @param event: event bits from libevent:
* EV_READ, EV_WRITE, EV_SIGNAL, EV_TIMEOUT.
* @param arg: the comm_point structure.
*/
void comm_point_udp_ancil_callback(int fd, short event, void* arg);
/**
* This routine is published for checks and tests, and is only used internally.
* handle libevent callback for tcp accept comm point
* @param fd: file descriptor.
* @param event: event bits from libevent:
* EV_READ, EV_WRITE, EV_SIGNAL, EV_TIMEOUT.
* @param arg: the comm_point structure.
*/
void comm_point_tcp_accept_callback(int fd, short event, void* arg);
/**
* This routine is published for checks and tests, and is only used internally.
* handle libevent callback for tcp data comm point
* @param fd: file descriptor.
* @param event: event bits from libevent:
* EV_READ, EV_WRITE, EV_SIGNAL, EV_TIMEOUT.
* @param arg: the comm_point structure.
*/
void comm_point_tcp_handle_callback(int fd, short event, void* arg);
/**
* This routine is published for checks and tests, and is only used internally.
* handle libevent callback for timer comm.
* @param fd: file descriptor (always -1).
* @param event: event bits from libevent:
* EV_READ, EV_WRITE, EV_SIGNAL, EV_TIMEOUT.
* @param arg: the comm_timer structure.
*/
void comm_timer_callback(int fd, short event, void* arg);
/**
* This routine is published for checks and tests, and is only used internally.
* handle libevent callback for signal comm.
* @param fd: file descriptor (used for the signal number).
* @param event: event bits from libevent:
* EV_READ, EV_WRITE, EV_SIGNAL, EV_TIMEOUT.
* @param arg: the internal commsignal structure.
*/
void comm_signal_callback(int fd, short event, void* arg);
/**
* This routine is published for checks and tests, and is only used internally.
* libevent callback for AF_UNIX fds
* @param fd: file descriptor.
* @param event: event bits from libevent:
* EV_READ, EV_WRITE, EV_SIGNAL, EV_TIMEOUT.
* @param arg: the comm_point structure.
*/
void comm_point_local_handle_callback(int fd, short event, void* arg);
/**
* This routine is published for checks and tests, and is only used internally.
* libevent callback for raw fd access.
* @param fd: file descriptor.
* @param event: event bits from libevent:
* EV_READ, EV_WRITE, EV_SIGNAL, EV_TIMEOUT.
* @param arg: the comm_point structure.
*/
void comm_point_raw_handle_callback(int fd, short event, void* arg);
/**
* This routine is published for checks and tests, and is only used internally.
* libevent callback for timeout on slow accept.
* @param fd: file descriptor.
* @param event: event bits from libevent:
* EV_READ, EV_WRITE, EV_SIGNAL, EV_TIMEOUT.
* @param arg: the comm_point structure.
*/
void comm_base_handle_slow_accept(int fd, short event, void* arg);
#ifdef USE_WINSOCK
/**
* Callback for openssl BIO to on windows detect WSAEWOULDBLOCK and notify
* the winsock_event of this for proper TCP nonblocking implementation.
* @param c: comm_point, fd must be set its struct event is registered.
* @param ssl: openssl SSL, fd must be set so it has a bio.
*/
void comm_point_tcp_win_bio_cb(struct comm_point* c, void* ssl);
#endif
/** see if errno for tcp connect has to be logged or not. This uses errno */
int tcp_connect_errno_needs_log(struct sockaddr* addr, socklen_t addrlen);
#endif /* NET_EVENT_H */

166
external/unbound/util/random.c vendored Normal file
View File

@@ -0,0 +1,166 @@
/*
* util/random.c - thread safe random generator, which is reasonably secure.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
* Thread safe random functions. Similar to arc4random() with an explicit
* initialisation routine.
*
* The code in this file is based on arc4random from
* openssh-4.0p1/openbsd-compat/bsd-arc4random.c
* That code is also BSD licensed. Here is their statement:
*
* Copyright (c) 1996, David Mazieres <dm@uun.org>
* Copyright (c) 2008, Damien Miller <djm@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "config.h"
#include "util/random.h"
#include "util/log.h"
#include <time.h>
#ifdef HAVE_NSS
/* nspr4 */
#include "prerror.h"
/* nss3 */
#include "secport.h"
#include "pk11pub.h"
#endif
/**
* Max random value. Similar to RAND_MAX, but more portable
* (mingw uses only 15 bits random).
*/
#define MAX_VALUE 0x7fffffff
#ifndef HAVE_NSS
/* no-op in the arc4random build: entropy comes from the kernel */
void
ub_systemseed(unsigned int ATTR_UNUSED(seed))
{
	/* arc4random_uniform does not need seeds, it gets kernel entropy */
}
struct ub_randstate*
ub_initstate(unsigned int ATTR_UNUSED(seed),
	struct ub_randstate* ATTR_UNUSED(from))
{
	/* The arc4random backend keeps no per-state data: the returned
	 * pointer is an opaque token that is never dereferenced (the struct
	 * is not even defined in this branch of the file), so allocating a
	 * single byte here is deliberate, not a bug. */
	struct ub_randstate* s = (struct ub_randstate*)malloc(1);
	if(!s) {
		log_err("malloc failure in random init");
		return NULL;
	}
	return s;
}
long int
ub_random(struct ub_randstate* ATTR_UNUSED(s))
{
	/* mask the kernel-entropy stream down to 31 bits;
	 * this relies on MAX_VALUE being 0x7fffffff. */
	long result = (long)arc4random();
	return result & MAX_VALUE;
}
long int
ub_random_max(struct ub_randstate* state, long int x)
{
	uint32_t limit = (uint32_t)x;
	(void)state; /* arc4random keeps its own hidden state */
	/* arc4random_uniform draws without modulo bias; on OpenBSD it
	 * needs no _seed() or _stir() calls */
	return (long)arc4random_uniform(limit);
}
#else
/* not much to remember for NSS since we use its pk11_random, placeholder */
struct ub_randstate {
	/* unused member; only present so the struct has a nonzero size */
	int ready;
};
/* no-op: NSS manages its own entropy; the seed argument is ignored */
void ub_systemseed(unsigned int ATTR_UNUSED(seed))
{
}
struct ub_randstate* ub_initstate(unsigned int ATTR_UNUSED(seed),
	struct ub_randstate* ATTR_UNUSED(from))
{
	/* zero-filled placeholder; NSS holds the real generator state */
	struct ub_randstate* s = (struct ub_randstate*)calloc(1, sizeof(*s));
	if(s)
		return s;
	log_err("malloc failure in random init");
	return NULL;
}
long int ub_random(struct ub_randstate* ATTR_UNUSED(state))
{
	/* initialized so that a PK11_GenerateRandom failure returns 0
	 * instead of reading an uninitialized variable (undefined
	 * behavior); the state argument is unused, NSS keeps its own. */
	long int x = 0;
	/* random 31 bit value. */
	SECStatus s = PK11_GenerateRandom((unsigned char*)&x, (int)sizeof(x));
	if(s != SECSuccess) {
		log_err("PK11_GenerateRandom error: %s",
			PORT_ErrorToString(PORT_GetError()));
	}
	return x & MAX_VALUE;
}
long int
ub_random_max(struct ub_randstate* state, long int x)
{
	/* Only accept draws below the largest multiple of x that fits in
	 * the generator range; redrawing above it removes modulo bias. */
	long int bound = MAX_VALUE - (MAX_VALUE % x);
	long int draw;
	do {
		draw = ub_random(state);
	} while(draw >= bound);
	return draw % x;
}
#endif /* HAVE_NSS */
/**
 * Delete the random state.
 * @param s: state to free; may be NULL.
 */
void
ub_randfree(struct ub_randstate* s)
{
	/* free(NULL) is a guaranteed no-op, so no guard is needed */
	free(s);
	/* user app must do RAND_cleanup(); */
}

93
external/unbound/util/random.h vendored Normal file
View File

@@ -0,0 +1,93 @@
/*
* util/random.h - thread safe random generator, which is reasonably secure.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef UTIL_RANDOM_H
#define UTIL_RANDOM_H
/**
* \file
* Thread safe random functions. Similar to arc4random() with an explicit
* initialisation routine.
*/
/**
* random state structure.
*/
struct ub_randstate;
/**
* Initialize the system randomness. Obtains entropy from the system
* before a chroot or privilege makes it unavailable.
* You do not have to call this, otherwise ub_initstate does so.
* @param seed: seed value to create state (if no good entropy is found).
*/
void ub_systemseed(unsigned int seed);
/**
* Initialize a random generator state for use
* @param seed: seed value to create state contents.
* (ignored for arc4random).
* @param from: if not NULL, the seed is taken from this random structure.
* can be used to seed random states via a parent-random-state that
* is itself seeded with entropy.
* @return new state or NULL alloc failure.
*/
struct ub_randstate* ub_initstate(unsigned int seed,
struct ub_randstate* from);
/**
* Generate next random number from the state passed along.
* Thread safe, so random numbers are repeatable.
* @param state: must have been initialised with ub_initstate.
* @return: random 31 bit value.
*/
long int ub_random(struct ub_randstate* state);
/**
* Generate random number between 0 and x-1. No modulo bias.
* @param state: must have been initialised with ub_initstate.
* @param x: an upper limit. not (negative or zero). must be smaller than 2**31.
* @return: random value between 0..x-1. Possibly more than one
* random number is picked from the random stream to satisfy this.
*/
long int ub_random_max(struct ub_randstate* state, long int x);
/**
* Delete the random state.
* @param state: to delete.
*/
void ub_randfree(struct ub_randstate* state);
#endif /* UTIL_RANDOM_H */

620
external/unbound/util/rbtree.c vendored Normal file
View File

@@ -0,0 +1,620 @@
/*
* rbtree.c -- generic red black tree
*
* Copyright (c) 2001-2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**
* \file
 * Implementation of a red-black tree.
*/
#include "config.h"
#include "log.h"
#include "fptr_wlist.h"
#include "util/rbtree.h"
/** Node colour black */
#define BLACK 0
/** Node colour red */
#define RED 1
/** the NULL node, global alloc.
 * Shared sentinel used as every tree's leaf; its colour is black, as
 * red-black leaves must be. NOTE(review): links presumably point back
 * at this node itself (RBTREE_NULL is defined in rbtree.h) — confirm. */
rbnode_t rbtree_null_node = {
	RBTREE_NULL, /* Parent. */
	RBTREE_NULL, /* Left. */
	RBTREE_NULL, /* Right. */
	NULL, /* Key. */
	BLACK /* Color. */
};
/** rotate subtree left (to preserve redblack property) */
static void rbtree_rotate_left(rbtree_t *rbtree, rbnode_t *node);
/** rotate subtree right (to preserve redblack property) */
static void rbtree_rotate_right(rbtree_t *rbtree, rbnode_t *node);
/** Fixup node colours when insert happened */
static void rbtree_insert_fixup(rbtree_t *rbtree, rbnode_t *node);
/** Fixup node colours when delete happened */
static void rbtree_delete_fixup(rbtree_t* rbtree, rbnode_t* child, rbnode_t* child_parent);
/*
 * Creates a new red black tree, initializes and returns a pointer to it.
 *
 * Return NULL on failure.
 *
 */
rbtree_t *
rbtree_create (int (*cmpf)(const void *, const void *))
{
	rbtree_t *rbtree;

	/* Allocate memory for it. No cast of malloc's result is needed in
	 * C, and sizeof *rbtree ties the size to the variable's type. */
	rbtree = malloc(sizeof *rbtree);
	if (!rbtree) {
		return NULL;
	}

	/* Initialize it */
	rbtree_init(rbtree, cmpf);

	return rbtree;
}
/* Set up an empty tree that orders its keys with the given comparator. */
void
rbtree_init(rbtree_t *rbtree, int (*cmpf)(const void *, const void *))
{
	rbtree->cmp = cmpf;
	rbtree->count = 0;
	rbtree->root = RBTREE_NULL;
}
/*
 * Rotate 'node' down to the left; its right child takes its place.
 * Preserves the binary-search-tree ordering of keys.
 */
static void
rbtree_rotate_left(rbtree_t *rbtree, rbnode_t *node)
{
	rbnode_t *pivot = node->right;

	/* pivot's left subtree becomes node's right subtree */
	node->right = pivot->left;
	if (pivot->left != RBTREE_NULL)
		pivot->left->parent = node;

	/* hang pivot where node used to be */
	pivot->parent = node->parent;
	if (node->parent == RBTREE_NULL) {
		rbtree->root = pivot;
	} else if (node == node->parent->left) {
		node->parent->left = pivot;
	} else {
		node->parent->right = pivot;
	}

	/* node becomes pivot's left child */
	pivot->left = node;
	node->parent = pivot;
}
/*
 * Rotate 'node' down to the right; its left child takes its place.
 * Mirror image of rbtree_rotate_left.
 */
static void
rbtree_rotate_right(rbtree_t *rbtree, rbnode_t *node)
{
	rbnode_t *pivot = node->left;

	/* pivot's right subtree becomes node's left subtree */
	node->left = pivot->right;
	if (pivot->right != RBTREE_NULL)
		pivot->right->parent = node;

	/* hang pivot where node used to be */
	pivot->parent = node->parent;
	if (node->parent == RBTREE_NULL) {
		rbtree->root = pivot;
	} else if (node == node->parent->right) {
		node->parent->right = pivot;
	} else {
		node->parent->left = pivot;
	}

	/* node becomes pivot's right child */
	pivot->right = node;
	node->parent = pivot;
}
/*
 * Restore the red-black invariants after inserting 'node' (coloured red).
 * Climbs the tree while a red node has a red parent, recolouring and
 * rotating; the two branches below are mirror images depending on
 * whether the parent is the left or right child of the grandparent.
 */
static void
rbtree_insert_fixup(rbtree_t *rbtree, rbnode_t *node)
{
	rbnode_t *uncle;

	/* While not at the root and need fixing... */
	while (node != rbtree->root && node->parent->color == RED) {
		/* If our parent is left child of our grandparent... */
		if (node->parent == node->parent->parent->left) {
			uncle = node->parent->parent->right;

			/* If our uncle is red... */
			if (uncle->color == RED) {
				/* Paint the parent and the uncle black... */
				node->parent->color = BLACK;
				uncle->color = BLACK;

				/* And the grandparent red... */
				node->parent->parent->color = RED;

				/* And continue fixing the grandparent */
				node = node->parent->parent;
			} else {	/* Our uncle is black... */
				/* Are we the right child? */
				if (node == node->parent->right) {
					node = node->parent;
					rbtree_rotate_left(rbtree, node);
				}
				/* Now we're the left child, repaint and rotate... */
				node->parent->color = BLACK;
				node->parent->parent->color = RED;
				rbtree_rotate_right(rbtree, node->parent->parent);
			}
		} else {
			/* mirror case: parent is the right child of grandparent */
			uncle = node->parent->parent->left;

			/* If our uncle is red... */
			if (uncle->color == RED) {
				/* Paint the parent and the uncle black... */
				node->parent->color = BLACK;
				uncle->color = BLACK;

				/* And the grandparent red... */
				node->parent->parent->color = RED;

				/* And continue fixing the grandparent */
				node = node->parent->parent;
			} else {	/* Our uncle is black... */
				/* Are we the right child? */
				if (node == node->parent->left) {
					node = node->parent;
					rbtree_rotate_right(rbtree, node);
				}
				/* Now we're the right child, repaint and rotate... */
				node->parent->color = BLACK;
				node->parent->parent->color = RED;
				rbtree_rotate_left(rbtree, node->parent->parent);
			}
		}
	}
	/* the root is always black (red-black invariant) */
	rbtree->root->color = BLACK;
}
/*
 * Inserts a node into a red black tree.
 *
 * Returns NULL on failure (duplicate key) or the pointer to the newly
 * added node otherwise. The caller owns the node's memory; the tree
 * stores it by reference.
 */
rbnode_t *
rbtree_insert (rbtree_t *rbtree, rbnode_t *data)
{
	/* Holds the result of the last comparison; after the search loop
	 * it decides whether data attaches as a left or right child.
	 * XXX Not necessary, but keeps compiler quiet... */
	int r = 0;

	/* We start at the root of the tree */
	rbnode_t *node = rbtree->root;
	rbnode_t *parent = RBTREE_NULL;

	/* verify the comparator is on the function-pointer whitelist
	 * (unbound hardening measure; see fptr_wlist) */
	fptr_ok(fptr_whitelist_rbtree_cmp(rbtree->cmp));

	/* Lets find the new parent... */
	while (node != RBTREE_NULL) {
		/* Compare two keys, do we have a duplicate? */
		if ((r = rbtree->cmp(data->key, node->key)) == 0) {
			return NULL;	/* duplicate keys are rejected */
		}
		parent = node;

		if (r < 0) {
			node = node->left;
		} else {
			node = node->right;
		}
	}

	/* Initialize the new node */
	data->parent = parent;
	data->left = data->right = RBTREE_NULL;
	data->color = RED;	/* new nodes start red; fixup rebalances */
	rbtree->count++;

	/* Insert it into the tree... note r still holds the comparison
	 * against the parent found by the loop above */
	if (parent != RBTREE_NULL) {
		if (r < 0) {
			parent->left = data;
		} else {
			parent->right = data;
		}
	} else {
		rbtree->root = data;
	}

	/* Fix up the red-black properties... */
	rbtree_insert_fixup(rbtree, data);

	return data;
}
/*
 * Searches the red black tree, returns the data if key is found or NULL otherwise.
 */
rbnode_t *
rbtree_search (rbtree_t *rbtree, const void *key)
{
	rbnode_t *node;

	/* find_less_equal reports true only on an exact key match,
	 * leaving the matching node in 'node' */
	return rbtree_find_less_equal(rbtree, key, &node) ? node : NULL;
}
/** helpers for delete: exchange the colour bytes of two nodes */
static void swap_int8(uint8_t* x, uint8_t* y)
{
	uint8_t tmp = *x;
	*x = *y;
	*y = tmp;
}
/** helpers for delete: exchange two node pointers */
static void swap_np(rbnode_t** x, rbnode_t** y)
{
	rbnode_t* tmp = *x;
	*x = *y;
	*y = tmp;
}
/** Redirect the child link of 'parent' that pointed at 'old' to 'new';
 * when parent is the sentinel, the tree root is redirected instead. */
static void change_parent_ptr(rbtree_t* rbtree, rbnode_t* parent, rbnode_t* old, rbnode_t* new)
{
	if(parent != RBTREE_NULL) {
		log_assert(parent->left == old || parent->right == old
			|| parent->left == new || parent->right == new);
		if(parent->left == old) parent->left = new;
		if(parent->right == old) parent->right = new;
		return;
	}
	/* no real parent: 'old' must have been the root */
	log_assert(rbtree->root == old);
	if(rbtree->root == old) rbtree->root = new;
}
/** Repoint 'child'->parent from 'old' to 'new' (no-op on the sentinel). */
static void change_child_ptr(rbnode_t* child, rbnode_t* old, rbnode_t* new)
{
	if(child == RBTREE_NULL)
		return;
	log_assert(child->parent == old || child->parent == new);
	if(child->parent == old)
		child->parent = new;
}
/**
 * Unlink the node with the given key from the tree and return it.
 * @param rbtree: the tree.
 * @param key: key of the element to remove.
 * @return: the unlinked node (caller owns and must free it), or 0 when
 *	the key is not present.
 */
rbnode_t*
rbtree_delete(rbtree_t *rbtree, const void *key)
{
	rbnode_t *to_delete;
	rbnode_t *child;
	if((to_delete = rbtree_search(rbtree, key)) == 0) return 0;
	rbtree->count--;

	/* make sure we have at most one non-leaf child */
	if(to_delete->left != RBTREE_NULL && to_delete->right != RBTREE_NULL)
	{
		/* swap with smallest from right subtree (or largest from left) */
		rbnode_t *smright = to_delete->right;
		while(smright->left != RBTREE_NULL)
			smright = smright->left;
		/* swap the smright and to_delete elements in the tree,
		 * but the rbnode_t is first part of user data struct
		 * so cannot just swap the keys and data pointers. Instead
		 * readjust the pointers left,right,parent */

		/* swap colors - colors are tied to the position in the tree */
		swap_int8(&to_delete->color, &smright->color);

		/* swap child pointers in parents of smright/to_delete */
		change_parent_ptr(rbtree, to_delete->parent, to_delete, smright);
		if(to_delete->right != smright)
			change_parent_ptr(rbtree, smright->parent, smright, to_delete);

		/* swap parent pointers in children of smright/to_delete.
		 * (the original code issued each of the first two calls twice;
		 * the repeat was always a no-op because the first call already
		 * rewrote child->parent, so the duplicates are removed here) */
		change_child_ptr(smright->left, smright, to_delete);
		change_child_ptr(smright->right, smright, to_delete);
		change_child_ptr(to_delete->left, to_delete, smright);
		if(to_delete->right != smright)
			change_child_ptr(to_delete->right, to_delete, smright);
		if(to_delete->right == smright)
		{
			/* adjacent nodes: set up so the pointer swap below works */
			to_delete->right = to_delete;
			smright->parent = smright;
		}

		/* swap pointers in to_delete/smright nodes */
		swap_np(&to_delete->parent, &smright->parent);
		swap_np(&to_delete->left, &smright->left);
		swap_np(&to_delete->right, &smright->right);

		/* now delete to_delete (which is at the location where the smright previously was) */
	}
	log_assert(to_delete->left == RBTREE_NULL || to_delete->right == RBTREE_NULL);

	/* splice out to_delete: at most one real child remains */
	if(to_delete->left != RBTREE_NULL) child = to_delete->left;
	else child = to_delete->right;

	/* unlink to_delete from the tree, replace to_delete with child */
	change_parent_ptr(rbtree, to_delete->parent, to_delete, child);
	change_child_ptr(child, to_delete, to_delete->parent);

	if(to_delete->color == RED)
	{
		/* if node is red then the child (black) can be swapped in */
	}
	else if(child->color == RED)
	{
		/* change child to BLACK, removing a RED node is no problem */
		if(child!=RBTREE_NULL) child->color = BLACK;
	}
	else rbtree_delete_fixup(rbtree, child, to_delete->parent);

	/* unlink completely */
	to_delete->parent = RBTREE_NULL;
	to_delete->left = RBTREE_NULL;
	to_delete->right = RBTREE_NULL;
	to_delete->color = BLACK;
	return to_delete;
}
/** Restore the red-black invariants after unlinking a black node.
 * @param rbtree: the tree.
 * @param child: the node that replaced the deleted one; every path through
 *	it is one black node short.  May be the RBTREE_NULL sentinel; the
 *	sentinel is a real, dereferenceable node so its fields can be read,
 *	and writes to it are guarded below.
 * @param child_parent: parent of child (passed separately because the
 *	sentinel's own parent pointer cannot be relied on).
 */
static void rbtree_delete_fixup(rbtree_t* rbtree, rbnode_t* child, rbnode_t* child_parent)
{
	rbnode_t* sibling;
	int go_up = 1;

	/* determine sibling to the node that is one-black short */
	if(child_parent->right == child) sibling = child_parent->left;
	else sibling = child_parent->right;

	while(go_up)
	{
		if(child_parent == RBTREE_NULL)
		{
			/* removed parent==black from root, every path, so ok */
			return;
		}

		if(sibling->color == RED)
		{	/* rotate to get a black sibling */
			child_parent->color = RED;
			sibling->color = BLACK;
			if(child_parent->right == child)
				rbtree_rotate_right(rbtree, child_parent);
			else rbtree_rotate_left(rbtree, child_parent);
			/* new sibling after rotation */
			if(child_parent->right == child) sibling = child_parent->left;
			else sibling = child_parent->right;
		}

		if(child_parent->color == BLACK
			&& sibling->color == BLACK
			&& sibling->left->color == BLACK
			&& sibling->right->color == BLACK)
		{	/* fixup local with recolor of sibling */
			/* guard: never recolor the shared sentinel node */
			if(sibling != RBTREE_NULL)
				sibling->color = RED;

			/* the black-deficit moves up one level; loop again */
			child = child_parent;
			child_parent = child_parent->parent;
			/* prepare to go up, new sibling */
			if(child_parent->right == child) sibling = child_parent->left;
			else sibling = child_parent->right;
		}
		else go_up = 0;
	}

	if(child_parent->color == RED
		&& sibling->color == BLACK
		&& sibling->left->color == BLACK
		&& sibling->right->color == BLACK)
	{
		/* move red to sibling to rebalance */
		if(sibling != RBTREE_NULL)
			sibling->color = RED;
		child_parent->color = BLACK;
		return;
	}
	log_assert(sibling != RBTREE_NULL);

	/* get a new sibling, by rotating at sibling. See which child
	   of sibling is red */
	if(child_parent->right == child
		&& sibling->color == BLACK
		&& sibling->right->color == RED
		&& sibling->left->color == BLACK)
	{
		sibling->color = RED;
		sibling->right->color = BLACK;
		rbtree_rotate_left(rbtree, sibling);
		/* new sibling after rotation */
		if(child_parent->right == child) sibling = child_parent->left;
		else sibling = child_parent->right;
	}
	else if(child_parent->left == child
		&& sibling->color == BLACK
		&& sibling->left->color == RED
		&& sibling->right->color == BLACK)
	{
		sibling->color = RED;
		sibling->left->color = BLACK;
		rbtree_rotate_right(rbtree, sibling);
		/* new sibling after rotation */
		if(child_parent->right == child) sibling = child_parent->left;
		else sibling = child_parent->right;
	}

	/* now we have a black sibling with a red child. rotate and exchange colors. */
	sibling->color = child_parent->color;
	child_parent->color = BLACK;
	if(child_parent->right == child)
	{
		log_assert(sibling->left->color == RED);
		sibling->left->color = BLACK;
		rbtree_rotate_right(rbtree, child_parent);
	}
	else
	{
		log_assert(sibling->right->color == RED);
		sibling->right->color = BLACK;
		rbtree_rotate_left(rbtree, child_parent);
	}
}
int
rbtree_find_less_equal(rbtree_t *rbtree, const void *key, rbnode_t **result)
{
	/* Find the key or its in-order predecessor.
	 * Returns 1 and sets *result on an exact hit; otherwise returns 0
	 * with *result pointing at the largest element smaller than key,
	 * or NULL when key is smaller than everything in the tree. */
	rbnode_t *cur;
	int c;
	log_assert(result);
	*result = NULL;
	fptr_ok(fptr_whitelist_rbtree_cmp(rbtree->cmp));
	for (cur = rbtree->root; cur != RBTREE_NULL; ) {
		c = rbtree->cmp(key, cur->key);
		if (c == 0) {
			/* exact match */
			*result = cur;
			return 1;
		}
		if (c < 0) {
			cur = cur->left;
		} else {
			/* best candidate so far; maybe a larger one exists right */
			*result = cur;
			cur = cur->right;
		}
	}
	return 0;
}
/*
 * Finds the first (smallest) element in the red black tree.
 * For an empty tree this walk ends on the RBTREE_NULL sentinel.
 */
rbnode_t *
rbtree_first (rbtree_t *rbtree)
{
	rbnode_t *n = rbtree->root;
	/* the leftmost node is the smallest */
	while (n->left != RBTREE_NULL)
		n = n->left;
	return n;
}
rbnode_t *
rbtree_last (rbtree_t *rbtree)
{
	/* the rightmost node is the largest; sentinel on empty tree */
	rbnode_t *n = rbtree->root;
	while (n->right != RBTREE_NULL)
		n = n->right;
	return n;
}
/*
 * Returns the next (larger) node in sorted order,
 * or the RBTREE_NULL sentinel when there is none.
 */
rbnode_t *
rbtree_next (rbnode_t *node)
{
	rbnode_t *up;
	if (node->right != RBTREE_NULL) {
		/* successor is the leftmost node of the right subtree */
		node = node->right;
		while (node->left != RBTREE_NULL)
			node = node->left;
		return node;
	}
	/* otherwise climb until we arrive from a left child */
	up = node->parent;
	while (up != RBTREE_NULL && node == up->right) {
		node = up;
		up = up->parent;
	}
	return up;
}
rbnode_t *
rbtree_previous(rbnode_t *node)
{
	/* mirror image of rbtree_next: predecessor in sorted order */
	rbnode_t *up;
	if (node->left != RBTREE_NULL) {
		/* predecessor is the rightmost node of the left subtree */
		node = node->left;
		while (node->right != RBTREE_NULL)
			node = node->right;
		return node;
	}
	/* otherwise climb until we arrive from a right child */
	up = node->parent;
	while (up != RBTREE_NULL && node == up->left) {
		node = up;
		up = up->parent;
	}
	return up;
}
/** recursive descent postorder traverse: children before their parent */
static void
traverse_post(void (*func)(rbnode_t*, void*), void* arg, rbnode_t* node)
{
	if(node == NULL || node == RBTREE_NULL)
		return;
	/* left subtree, right subtree, then the node itself */
	traverse_post(func, arg, node->left);
	traverse_post(func, arg, node->right);
	(*func)(node, arg);
}
/** Call func on every node, leaf elements before parent elements, so
 * that func may safely free() the nodes.  func must not alter the tree. */
void
traverse_postorder(rbtree_t* tree, void (*func)(rbnode_t*, void*), void* arg)
{
	traverse_post(func, arg, tree->root);
}

192
external/unbound/util/rbtree.h vendored Normal file
View File

@@ -0,0 +1,192 @@
/*
* rbtree.h -- generic red-black tree
*
* Copyright (c) 2001-2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**
* \file
* Red black tree. Implementation taken from NSD 3.0.5, adjusted for use
* in unbound (memory allocation, logging and so on).
*/
#ifndef UTIL_RBTREE_H_
#define UTIL_RBTREE_H_
/**
 * This structure must be the first member of the data structure in
 * the rbtree. This allows easy casting between an rbnode_t and the
 * user data (poor man's inheritance).
 */
typedef struct rbnode_t rbnode_t;
/**
 * The rbnode_t struct definition.
 */
struct rbnode_t {
	/** parent in rbtree, RBTREE_NULL for root */
	rbnode_t *parent;
	/** left node (smaller items) */
	rbnode_t *left;
	/** right node (larger items) */
	rbnode_t *right;
	/** pointer to sorting key; the caller keeps the key storage alive
	 * while the node is in the tree */
	const void *key;
	/** colour of this node (red/black, managed by the implementation) */
	uint8_t color;
};
/** The nullpointer, points to empty node */
#define RBTREE_NULL &rbtree_null_node
/** the global empty node */
extern rbnode_t rbtree_null_node;
/** An entire red black tree */
typedef struct rbtree_t rbtree_t;
/** definition for tree struct */
struct rbtree_t {
	/** The root of the red-black tree; the RBTREE_NULL sentinel when
	 * the tree is empty */
	rbnode_t *root;
	/** The number of the nodes in the tree */
	size_t count;
	/**
	 * Key compare function. <0,0,>0 like strcmp.
	 * Return 0 on two NULL ptrs.
	 */
	int (*cmp) (const void *, const void *);
};
/**
* Create new tree (malloced) with given key compare function.
* @param cmpf: compare function (like strcmp) takes pointers to two keys.
* @return: new tree, empty.
*/
rbtree_t *rbtree_create(int (*cmpf)(const void *, const void *));
/**
* Init a new tree (malloced by caller) with given key compare function.
* @param rbtree: uninitialised memory for new tree, returned empty.
* @param cmpf: compare function (like strcmp) takes pointers to two keys.
*/
void rbtree_init(rbtree_t *rbtree, int (*cmpf)(const void *, const void *));
/**
* Insert data into the tree.
* @param rbtree: tree to insert to.
* @param data: element to insert.
* @return: data ptr or NULL if key already present.
*/
rbnode_t *rbtree_insert(rbtree_t *rbtree, rbnode_t *data);
/**
* Delete element from tree.
* @param rbtree: tree to delete from.
* @param key: key of item to delete.
* @return: node that is now unlinked from the tree. User to delete it.
* returns 0 if node not present
*/
rbnode_t *rbtree_delete(rbtree_t *rbtree, const void *key);
/**
* Find key in tree. Returns NULL if not found.
* @param rbtree: tree to find in.
* @param key: key that must match.
* @return: node that fits or NULL.
*/
rbnode_t *rbtree_search(rbtree_t *rbtree, const void *key);
/**
* Find, but match does not have to be exact.
* @param rbtree: tree to find in.
* @param key: key to find position of.
* @param result: set to the exact node if present, otherwise to element that
* precedes the position of key in the tree. NULL if no smaller element.
* @return: true if exact match in result. Else result points to <= element,
* or NULL if key is smaller than the smallest key.
*/
int rbtree_find_less_equal(rbtree_t *rbtree, const void *key,
rbnode_t **result);
/**
* Returns first (smallest) node in the tree
* @param rbtree: tree
* @return: smallest element or NULL if tree empty.
*/
rbnode_t *rbtree_first(rbtree_t *rbtree);
/**
* Returns last (largest) node in the tree
* @param rbtree: tree
* @return: largest element or NULL if tree empty.
*/
rbnode_t *rbtree_last(rbtree_t *rbtree);
/**
 * Returns next larger node in the tree
 * @param node: the current node (not the tree).
 * @return: next larger element or NULL if no larger in tree.
 */
rbnode_t *rbtree_next(rbnode_t *node);
/**
 * Returns previous smaller node in the tree
 * @param node: the current node (not the tree).
 * @return: previous smaller element or NULL if no previous in tree.
 */
rbnode_t *rbtree_previous(rbnode_t *node);
/**
 * Iterate over the tree in ascending sorted order.
 * Call with node=variable of struct* with rbnode_t as first element.
 * with type is the type of a pointer to that struct.
 * NOTE(review): iteration uses rbtree_next, which follows parent links;
 * presumably the tree must not be modified during the loop — confirm.
 */
#define RBTREE_FOR(node, type, rbtree) \
	for(node=(type)rbtree_first(rbtree); \
		(rbnode_t*)node != RBTREE_NULL; \
		node = (type)rbtree_next((rbnode_t*)node))
/**
* Call function for all elements in the redblack tree, such that
* leaf elements are called before parent elements. So that all
* elements can be safely free()d.
* Note that your function must not remove the nodes from the tree.
* Since that may trigger rebalances of the rbtree.
* @param tree: the tree
* @param func: function called with element and user arg.
* The function must not alter the rbtree.
* @param arg: user argument.
*/
void traverse_postorder(rbtree_t* tree, void (*func)(rbnode_t*, void*),
void* arg);
#endif /* UTIL_RBTREE_H_ */

223
external/unbound/util/regional.c vendored Normal file
View File

@@ -0,0 +1,223 @@
/*
* regional.c -- region based memory allocator.
*
* Copyright (c) 2001-2006, NLnet Labs. All rights reserved.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
* Regional allocator. Allocates small portions of of larger chunks.
*/
#include "config.h"
#include "util/log.h"
#include "util/regional.h"
#ifdef ALIGNMENT
# undef ALIGNMENT
#endif
/** increase size until it fits alignment of s bytes */
#define ALIGN_UP(x, s) (((x) + s - 1) & (~(s - 1)))
/** what size to align on; make sure a char* fits in it. */
#define ALIGNMENT (sizeof(uint64_t))
/** Default reasonable size for chunks */
#define REGIONAL_CHUNK_SIZE 8192
#ifdef UNBOUND_ALLOC_NONREGIONAL
/** All objects allocated outside of chunks, for debug */
#define REGIONAL_LARGE_OBJECT_SIZE 0
#else
/** Default size for large objects - allocated outside of chunks. */
#define REGIONAL_LARGE_OBJECT_SIZE 2048
#endif
/** Create a regional allocator with the default chunk size. */
struct regional*
regional_create(void)
{
	return regional_create_custom(REGIONAL_CHUNK_SIZE);
}
/** init regional struct with first block */
static void
regional_init(struct regional* r)
{
	/* the regional struct itself occupies the start of the first chunk;
	 * usable data begins right after it, aligned up */
	size_t hdr = ALIGN_UP(sizeof(struct regional), ALIGNMENT);
	r->next = NULL;
	r->large_list = NULL;
	r->total_large = 0;
	r->data = (char*)r + hdr;
	r->available = r->first_size - hdr;
}
/** Create a regional whose first chunk (which also holds the regional
 * struct itself) is 'size' bytes; size must be at least
 * sizeof(struct regional), checked only via log_assert. */
struct regional*
regional_create_custom(size_t size)
{
	struct regional* r = (struct regional*)malloc(size);
	log_assert(sizeof(struct regional) <= size);
	if(!r) return NULL;
	r->first_size = size;
	regional_init(r);
	return r;
}
void
regional_free_all(struct regional *r)
{
	/* free every extra chunk and every large object; each list entry
	 * stores the pointer to the next one in its first bytes */
	char* walk;
	char* nxt;
	for(walk = r->next; walk; walk = nxt) {
		nxt = *(char**)walk;
		free(walk);
	}
	for(walk = r->large_list; walk; walk = nxt) {
		nxt = *(char**)walk;
		free(walk);
	}
	/* reset to the just-created state: only the first chunk remains */
	regional_init(r);
}
void
regional_destroy(struct regional *r)
{
	/* tolerate NULL, like free() */
	if(r) {
		regional_free_all(r);
		free(r);
	}
}
/** Allocate size bytes inside the regional; freed by regional_free_all.
 * Returns NULL on malloc failure or pathologically large size. */
void *
regional_alloc(struct regional *r, size_t size)
{
	size_t a;
	void *s;
	/* Guard against integer overflow: ALIGN_UP and the ALIGNMENT+size
	 * additions below would wrap for sizes near SIZE_MAX and hand out
	 * a buffer far smaller than the caller requested. */
	if(size > (size_t)-1 - 2*ALIGNMENT)
		return NULL;
	a = ALIGN_UP(size, ALIGNMENT);
	/* large objects are allocated on their own, outside the chunks */
	if(a > REGIONAL_LARGE_OBJECT_SIZE) {
		s = malloc(ALIGNMENT + size);
		if(!s) return NULL;
		r->total_large += ALIGNMENT+size;
		/* link into the large-object list; first bytes hold next ptr */
		*(char**)s = r->large_list;
		r->large_list = (char*)s;
		return (char*)s+ALIGNMENT;
	}
	/* create a new chunk if the current one cannot hold the request */
	if(a > r->available) {
		s = malloc(REGIONAL_CHUNK_SIZE);
		if(!s) return NULL;
		*(char**)s = r->next;
		r->next = (char*)s;
		r->data = (char*)s + ALIGNMENT;
		r->available = REGIONAL_CHUNK_SIZE - ALIGNMENT;
	}
	/* carve the aligned request out of the current chunk */
	r->available -= a;
	s = r->data;
	r->data += a;
	return s;
}
void *
regional_alloc_init(struct regional* r, const void *init, size_t size)
{
	/* allocate, then copy the initializer into the fresh space */
	void *p = regional_alloc(r, size);
	if(p)
		memcpy(p, init, size);
	return p;
}
void *
regional_alloc_zero(struct regional *r, size_t size)
{
	/* allocate, then clear the fresh space */
	void *p = regional_alloc(r, size);
	if(p)
		memset(p, 0, size);
	return p;
}
char *
regional_strdup(struct regional *r, const char *string)
{
	/* copy the string including its terminating NUL byte */
	size_t len = strlen(string) + 1;
	return (char*)regional_alloc_init(r, string, len);
}
/**
 * reasonably slow, but stats and get_mem are not supposed to be fast;
 * count the number of chunks in use (the first chunk counts as one)
 */
static size_t
count_chunks(struct regional* r)
{
	size_t total = 1; /* the regional struct is the first chunk */
	char* walk;
	for(walk = r->next; walk; walk = *(char**)walk)
		total++;
	return total;
}
/**
 * also reasonably slow, counts the number of large objects
 */
static size_t
count_large(struct regional* r)
{
	size_t total = 0;
	char* walk;
	for(walk = r->large_list; walk; walk = *(char**)walk)
		total++;
	return total;
}
/** Debug print of chunk and large-object counts; also sanity-checks the
 * compile-time size constants with log_assert (non time critical code). */
void
regional_log_stats(struct regional *r)
{
	/* some basic assertions put here (non time critical code) */
	log_assert(ALIGNMENT >= sizeof(char*));
	log_assert(REGIONAL_CHUNK_SIZE > ALIGNMENT);
	log_assert(REGIONAL_CHUNK_SIZE-ALIGNMENT > REGIONAL_LARGE_OBJECT_SIZE);
	log_assert(REGIONAL_CHUNK_SIZE >= sizeof(struct regional));
	/* debug print */
	log_info("regional %u chunks, %u large",
		(unsigned)count_chunks(r), (unsigned)count_large(r));
}
size_t
regional_get_mem(struct regional* r)
{
	/* first chunk (its own size) + extra chunks + large objects */
	size_t extra = (count_chunks(r) - 1) * REGIONAL_CHUNK_SIZE;
	return r->first_size + extra + r->total_large;
}

150
external/unbound/util/regional.h vendored Normal file
View File

@@ -0,0 +1,150 @@
/*
* regional.h -- region based memory allocator.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
* Regional allocator. Allocates small portions of of larger chunks.
* Based on region-allocator from NSD, but rewritten to be light.
*
* Different from (nsd) region-allocator.h
* o does not have recycle bin
* o does not collect stats; just enough to answer get_mem() in use.
* o does not keep cleanup list
* o does not have function pointers to setup
* o allocs the regional struct inside the first block.
* o can take a block to create regional from.
* o blocks and large allocations are kept on singly linked lists.
*/
#ifndef UTIL_REGIONAL_H_
#define UTIL_REGIONAL_H_
/**
 * the regional* is the first block*.
 * every block has a ptr to the next in first bytes.
 * and so does the regional struct, which is the first block.
 */
struct regional
{
	/**
	 * next chunk. NULL if first chunk is the only chunk.
	 * first inside that chunk is the char* next pointer.
	 * When regional_free_all() has been called this value is NULL.
	 */
	char* next;
	/** first large object, cast to char** to obtain next ptr */
	char* large_list;
	/** total large size (bytes allocated for large objects,
	 * including their per-object alignment overhead) */
	size_t total_large;
	/** initial chunk size (includes this struct itself) */
	size_t first_size;
	/** number of bytes available in the current chunk. */
	size_t available;
	/** current chunk data position (next allocation returned from here) */
	char* data;
};
/**
* Create a new regional.
* @return: newly allocated regional.
*/
struct regional* regional_create(void);
/**
* Create a new region, with custom settings.
* @param size: length of first block.
* @return: newly allocated regional.
*/
struct regional* regional_create_custom(size_t size);
/**
* Free all memory associated with regional. Only keeps the first block with
* the regional inside it.
* @param r: the region.
*/
void regional_free_all(struct regional *r);
/**
* Destroy regional. All memory associated with regional is freed as if
* regional_free_all was called, as well as destroying the regional struct.
* @param r: to delete.
*/
void regional_destroy(struct regional *r);
/**
* Allocate size bytes of memory inside regional. The memory is
* deallocated when region_free_all is called for this region.
* @param r: the region.
* @param size: number of bytes.
* @return: pointer to memory allocated.
*/
void *regional_alloc(struct regional *r, size_t size);
/**
* Allocate size bytes of memory inside regional and copy INIT into it.
* The memory is deallocated when region_free_all is called for this
* region.
* @param r: the region.
* @param init: to copy.
* @param size: number of bytes.
* @return: pointer to memory allocated.
*/
void *regional_alloc_init(struct regional* r, const void *init, size_t size);
/**
* Allocate size bytes of memory inside regional that are initialized to
* 0. The memory is deallocated when region_free_all is called for
* this region.
* @param r: the region.
* @param size: number of bytes.
* @return: pointer to memory allocated.
*/
void *regional_alloc_zero(struct regional *r, size_t size);
/**
* Duplicate string and allocate the result in regional.
* @param r: the region.
* @param string: null terminated string.
* @return: pointer to memory allocated.
*/
char *regional_strdup(struct regional *r, const char *string);
/** Debug print regional statistics to log */
void regional_log_stats(struct regional *r);
/** get total memory size in use by region */
size_t regional_get_mem(struct regional* r);
#endif /* UTIL_REGIONAL_H_ */

119
external/unbound/util/rtt.c vendored Normal file
View File

@@ -0,0 +1,119 @@
/*
* util/rtt.c - UDP round trip time estimator for resend timeouts.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains a data type and functions to help estimate good
* round trip times for UDP resend timeout values.
*/
#include "config.h"
#include "util/rtt.h"
/** calculate RTO from rtt information */
static int
calc_rto(const struct rtt_info* rtt)
{
	/* From Stevens, Unix Network Programming, Vol1, 3rd ed., p.598:
	 * RTO = srtt + 4*rttvar, clamped to [RTT_MIN_TIMEOUT, RTT_MAX_TIMEOUT] */
	int rto = rtt->srtt + 4*rtt->rttvar;
	if(rto < RTT_MIN_TIMEOUT)
		return RTT_MIN_TIMEOUT;
	if(rto > RTT_MAX_TIMEOUT)
		return RTT_MAX_TIMEOUT;
	return rto;
}
/** Initialize the estimators to the no-measurement-yet defaults. */
void
rtt_init(struct rtt_info* rtt)
{
	rtt->srtt = 0;
	rtt->rttvar = 94; /* msec */
	rtt->rto = calc_rto(rtt);
	/* default value from the book is 0 + 4*0.75 = 3 seconds */
	/* first RTO is 0 + 4*0.094 = 0.376 seconds */
}
/** Return the current retransmit timeout in msec (includes any backoff). */
int
rtt_timeout(const struct rtt_info* rtt)
{
	return rtt->rto;
}
int
rtt_unclamped(const struct rtt_info* rtt)
{
	/* if the stored rto differs from the freshly computed one, a
	 * timeout backoff (rtt_lost) happened; report the backed-off value */
	int computed = calc_rto(rtt);
	if(computed != rtt->rto)
		return rtt->rto;
	/* otherwise return the estimate without min/max clamping */
	return rtt->srtt + 4*rtt->rttvar;
}
void
rtt_update(struct rtt_info* rtt, int ms)
{
	/* Jacobson-style smoothing in integer arithmetic. */
	int err = ms - rtt->srtt;
	rtt->srtt += err / 8;	/* gain g = 1/8 */
	if(err < 0)
		err = -err;	/* |err| */
	rtt->rttvar += (err - rtt->rttvar) / 4;	/* h = 1/4 */
	rtt->rto = calc_rto(rtt);
}
/** Note a timeout for a query that was sent with rto 'orig':
 * exponential backoff of the stored rto, capped at RTT_MAX_TIMEOUT. */
void
rtt_lost(struct rtt_info* rtt, int orig)
{
	/* exponential backoff */

	/* if a query succeeded and put down the rto meanwhile, ignore this */
	if(rtt->rto < orig)
		return;

	/* the original rto is doubled, not the current one to make sure
	 * that the values in the cache are not increased by lots of
	 * queries simultaneously as they time out at the same time */
	orig *= 2;
	if(rtt->rto <= orig) {
		rtt->rto = orig;
		if(rtt->rto > RTT_MAX_TIMEOUT)
			rtt->rto = RTT_MAX_TIMEOUT;
	}
}
/** RTO computed from the smoothed estimators only, ignoring any backoff
 * stored in rtt->rto (i.e. as if no timeouts had happened). */
int rtt_notimeout(const struct rtt_info* rtt)
{
	return calc_rto(rtt);
}

107
external/unbound/util/rtt.h vendored Normal file
View File

@@ -0,0 +1,107 @@
/*
* util/rtt.h - UDP round trip time estimator for resend timeouts.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains a data type and functions to help estimate good
* round trip times for UDP resend timeout values.
*/
#ifndef UTIL_RTT_H
#define UTIL_RTT_H
/**
 * RTT information. Keeps packet Round Trip Time.
 * All fields are in milliseconds, integer arithmetic.
 */
struct rtt_info {
	/** smoothed rtt estimator, in milliseconds */
	int srtt;
	/** smoothed mean deviation, in milliseconds */
	int rttvar;
	/** current RTO in use, in milliseconds (may include backoff) */
	int rto;
};
/** min retransmit timeout value, in milliseconds */
#define RTT_MIN_TIMEOUT 50
/** max retransmit timeout value, in milliseconds */
#define RTT_MAX_TIMEOUT 120000
/**
* Initialize RTT estimators.
* @param rtt: The structure. Caller is responsible for allocation of it.
*/
void rtt_init(struct rtt_info* rtt);
/**
* Get timeout to use for sending a UDP packet.
* @param rtt: round trip statistics structure.
* @return: timeout to use in milliseconds. Relative time value.
*/
int rtt_timeout(const struct rtt_info* rtt);
/**
* Get unclamped timeout to use for server selection.
* Recent timeouts are reflected in the returned value.
* @param rtt: round trip statistics structure.
* @return: value to use in milliseconds.
*/
int rtt_unclamped(const struct rtt_info* rtt);
/**
* RTT for valid responses. Without timeouts.
* @param rtt: round trip statistics structure.
* @return: value in msec.
*/
int rtt_notimeout(const struct rtt_info* rtt);
/**
* Update the statistics with a new roundtrip estimate observation.
* @param rtt: round trip statistics structure.
* @param ms: estimate of roundtrip time in milliseconds.
*/
void rtt_update(struct rtt_info* rtt, int ms);
/**
* Update the statistics with a new timout expired observation.
* @param rtt: round trip statistics structure.
* @param orig: original rtt time given for the query that timed out.
* Used to calculate the maximum responsible backed off time that
* can reasonably be applied.
*/
void rtt_lost(struct rtt_info* rtt, int orig);
#endif /* UTIL_RTT_H */

282
external/unbound/util/storage/dnstree.c vendored Normal file
View File

@@ -0,0 +1,282 @@
/*
* util/storage/dnstree.c - support for rbtree types suitable for DNS code.
*
* Copyright (c) 2008, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains structures combining types and functions to
* manipulate those structures that help building DNS lookup trees.
*/
#include "config.h"
#include "util/storage/dnstree.h"
#include "util/data/dname.h"
#include "util/net_help.h"
/** rbtree comparator for name tree nodes: class first, then name. */
int name_tree_compare(const void* k1, const void* k2)
{
	struct name_tree_node* a = (struct name_tree_node*)k1;
	struct name_tree_node* b = (struct name_tree_node*)k2;
	int matched_labs;
	/* nodes of different classes order by class value */
	if(a->dclass < b->dclass)
		return -1;
	if(a->dclass > b->dclass)
		return 1;
	/* same class: label-wise name comparison decides */
	return dname_lab_cmp(a->name, a->labs, b->name, b->labs,
		&matched_labs);
}
/** rbtree comparator for addr tree nodes: address first, then netblock. */
int addr_tree_compare(const void* k1, const void* k2)
{
	struct addr_tree_node* a = (struct addr_tree_node*)k1;
	struct addr_tree_node* b = (struct addr_tree_node*)k2;
	int c = sockaddr_cmp_addr(&a->addr, a->addrlen,
		&b->addr, b->addrlen);
	if(c != 0)
		return c;
	/* identical addresses: the smaller netblock size sorts first */
	if(a->net == b->net)
		return 0;
	return (a->net < b->net) ? -1 : 1;
}
/** Initialize an empty name tree, sorted by name_tree_compare. */
void name_tree_init(rbtree_t* tree)
{
rbtree_init(tree, &name_tree_compare);
}
/** Initialize an empty addr tree, sorted by addr_tree_compare. */
void addr_tree_init(rbtree_t* tree)
{
rbtree_init(tree, &addr_tree_compare);
}
/**
 * Insert a node into the name tree. The name pointer is stored, not
 * copied; the caller keeps it allocated. Returns false on duplicate.
 */
int name_tree_insert(rbtree_t* tree, struct name_tree_node* node,
	uint8_t* name, size_t len, int labs, uint16_t dclass)
{
	/* fill in the key fields; the node structure is its own key */
	node->name = name;
	node->len = len;
	node->labs = labs;
	node->dclass = dclass;
	node->parent = NULL;
	node->node.key = node;
	/* rbtree_insert yields NULL when the key already exists */
	return rbtree_insert(tree, &node->node) != NULL;
}
/**
 * Insert a node into the addr tree. The address bytes are copied into
 * the node. Returns false on duplicate.
 */
int addr_tree_insert(rbtree_t* tree, struct addr_tree_node* node,
	struct sockaddr_storage* addr, socklen_t addrlen, int net)
{
	/* copy the address; the node structure is its own key */
	memcpy(&node->addr, addr, addrlen);
	node->addrlen = addrlen;
	node->net = net;
	node->parent = NULL;
	node->node.key = node;
	/* rbtree_insert yields NULL when the key already exists */
	return rbtree_insert(tree, &node->node) != NULL;
}
/**
 * Set each node's parent to its closest enclosing netblock.
 * Must run after all inserts and before lookups. Walks the sorted tree;
 * the previous node and its parent chain are the only parent candidates.
 * @param tree: addr tree, parent pointers are (re)written.
 */
void addr_tree_init_parents(rbtree_t* tree)
{
struct addr_tree_node* node, *prev = NULL, *p;
int m;
RBTREE_FOR(node, struct addr_tree_node*, tree) {
node->parent = NULL;
/* a node with a different address length cannot enclose this one */
if(!prev || prev->addrlen != node->addrlen) {
prev = node;
continue;
}
/* m = number of leading bits prev and node have in common */
m = addr_in_common(&prev->addr, prev->net, &node->addr,
node->net, node->addrlen);
/* sort order like: ::/0, 1::/2, 1::/4, ... 2::/2 */
/* find the previous, or parent-parent-parent */
for(p = prev; p; p = p->parent)
if(p->net <= m) {
/* ==: since prev matched m, this is closest*/
/* <: prev matches more, but is not a parent,
 * this one is a (grand)parent */
node->parent = p;
break;
}
prev = node;
}
}
/**
 * Set each node's parent to its closest enclosing domain name.
 * Must run after all inserts and before lookups. Walks the sorted tree;
 * the previous node and its parent chain are the only parent candidates.
 * @param tree: name tree, parent pointers are (re)written.
 */
void name_tree_init_parents(rbtree_t* tree)
{
struct name_tree_node* node, *prev = NULL, *p;
int m;
RBTREE_FOR(node, struct name_tree_node*, tree) {
node->parent = NULL;
/* a node of a different class cannot enclose this name */
if(!prev || prev->dclass != node->dclass) {
prev = node;
continue;
}
(void)dname_lab_cmp(prev->name, prev->labs, node->name,
node->labs, &m); /* we know prev is smaller */
/* sort order like: . com. bla.com. zwb.com. net. */
/* find the previous, or parent-parent-parent */
for(p = prev; p; p = p->parent)
if(p->labs <= m) {
/* ==: since prev matched m, this is closest*/
/* <: prev matches more, but is not a parent,
 * this one is a (grand)parent */
node->parent = p;
break;
}
prev = node;
}
}
/** Exact-match lookup in the name tree; NULL when not present. */
struct name_tree_node* name_tree_find(rbtree_t* tree, uint8_t* name,
	size_t len, int labs, uint16_t dclass)
{
	/* build a probe key on the stack; a node is its own key */
	struct name_tree_node probe;
	probe.node.key = &probe;
	probe.name = name;
	probe.len = len;
	probe.labs = labs;
	probe.dclass = dclass;
	return (struct name_tree_node*)rbtree_search(tree, &probe);
}
/**
 * Closest-encloser lookup: the node itself on an exact match, otherwise
 * the nearest ancestor domain present in the tree (same class).
 * Requires name_tree_init_parents() to have been run.
 * @param tree: name tree.
 * @param name: wireformat name.
 * @param len: length of name.
 * @param labs: label count of name.
 * @param dclass: class of name.
 * @return closest enclosing node (may equal name) or NULL if none.
 */
struct name_tree_node* name_tree_lookup(rbtree_t* tree, uint8_t* name,
size_t len, int labs, uint16_t dclass)
{
rbnode_t* res = NULL;
struct name_tree_node *result;
struct name_tree_node key;
/* stack probe key; the node structure is its own key */
key.node.key = &key;
key.name = name;
key.len = len;
key.labs = labs;
key.dclass = dclass;
if(rbtree_find_less_equal(tree, &key, &res)) {
/* exact */
result = (struct name_tree_node*)res;
} else {
/* smaller element (or no element) */
int m;
result = (struct name_tree_node*)res;
if(!result || result->dclass != dclass)
return NULL;
/* count number of labels matched */
(void)dname_lab_cmp(result->name, result->labs, key.name,
key.labs, &m);
while(result) { /* go up until qname is subdomain of stub */
if(result->labs <= m)
break;
result = result->parent;
}
}
return result;
}
/**
 * Find the closest enclosing netblock for a host address.
 * Requires addr_tree_init_parents() to have been run after inserts.
 * @param tree: addr tree.
 * @param addr: host address to look up.
 * @param addrlen: length of addr.
 * @return enclosing netblock node (may be an exact match) or NULL.
 */
struct addr_tree_node* addr_tree_lookup(rbtree_t* tree,
	struct sockaddr_storage* addr, socklen_t addrlen)
{
	rbnode_t* res = NULL;
	struct addr_tree_node* result;
	struct addr_tree_node key;
	int m;
	key.node.key = &key;
	memcpy(&key.addr, addr, addrlen);
	key.addrlen = addrlen;
	/* probe as a full host route: /128 for IPv6, /32 for IPv4 */
	key.net = (addr_is_ip6(addr, addrlen)?128:32);
	if(rbtree_find_less_equal(tree, &key, &res)) {
		/* exact match */
		return (struct addr_tree_node*)res;
	}
	/* smaller element (or no element at all) */
	result = (struct addr_tree_node*)res;
	if(!result || result->addrlen != addrlen)
		return NULL;	/* fix: NULL, not 0, for a pointer return */
	/* count number of prefix bits in common with that element */
	m = addr_in_common(&result->addr, result->net, addr,
		key.net, addrlen);
	/* go up the parent chain until addr lies inside the netblock */
	while(result && result->net > m)
		result = result->parent;
	return result;
}
/**
 * Find the root node (name ".") of the next class >= *dclass.
 * Used to iterate over the root nodes of every class in the tree.
 * @param tree: name tree.
 * @param dclass: in/out. Input: first class to consider (0 starts at
 *	the beginning of the tree). Output: class whose root was found.
 * @return false if no root node with class >= *dclass exists.
 */
int
name_tree_next_root(rbtree_t* tree, uint16_t* dclass)
{
struct name_tree_node key;
rbnode_t* n;
struct name_tree_node* p;
if(*dclass == 0) {
/* first root item is first item in tree */
n = rbtree_first(tree);
if(n == RBTREE_NULL)
return 0;
p = (struct name_tree_node*)n;
if(dname_is_root(p->name)) {
*dclass = p->dclass;
return 1;
}
/* root not first item? search for higher items */
*dclass = p->dclass + 1;
return name_tree_next_root(tree, dclass);
}
/* find class n in tree, we may get a direct hit, or if we don't
* this is the last item of the previous class so rbtree_next() takes
* us to the next root (if any) */
key.node.key = &key;
key.name = (uint8_t*)"\000";
key.len = 1;
/* labs 0 — NOTE(review): appears chosen to sort at/before every real
 * name of this class; relies on dname_lab_cmp ordering — confirm */
key.labs = 0;
key.dclass = *dclass;
n = NULL;
if(rbtree_find_less_equal(tree, &key, &n)) {
/* exact */
return 1;
} else {
/* smaller element */
if(!n || n == RBTREE_NULL)
return 0; /* nothing found */
n = rbtree_next(n);
if(n == RBTREE_NULL)
return 0; /* no higher */
p = (struct name_tree_node*)n;
if(dname_is_root(p->name)) {
*dclass = p->dclass;
return 1;
}
/* not a root node, return next higher item */
*dclass = p->dclass+1;
return name_tree_next_root(tree, dclass);
}
}

192
external/unbound/util/storage/dnstree.h vendored Normal file
View File

@@ -0,0 +1,192 @@
/*
* util/storage/dnstree.h - support for rbtree types suitable for DNS code.
*
* Copyright (c) 2008, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains structures combining types and functions to
* manipulate those structures that help building DNS lookup trees.
*/
#ifndef UTIL_STORAGE_DNSTREE_H
#define UTIL_STORAGE_DNSTREE_H
#include "util/rbtree.h"
/**
 * Tree of domain names. Sorted first by class then by name.
 * This is not sorted canonically, but fast.
 * This can be looked up to obtain a closest encloser parent name.
 *
 * The tree itself is a rbtree_t.
 * This is the element node put as first entry in the client structure.
 */
struct name_tree_node {
/** rbtree node, key is this struct : dclass and name */
rbnode_t node;
/** parent in tree; closest enclosing name. Set by
 * name_tree_init_parents(); NULL when nothing encloses this name. */
struct name_tree_node* parent;
/** name in uncompressed wireformat. Not copied by name_tree_insert;
 * the caller keeps it allocated while the node is in the tree. */
uint8_t* name;
/** length of name */
size_t len;
/** labels in name */
int labs;
/** the class of the name (host order) */
uint16_t dclass;
};
/**
 * Tree of IP addresses. Sorted first by protocol, then by bits.
 * This can be looked up to obtain the enclosing subnet.
 *
 * The tree itself is a rbtree_t.
 * This is the element node put as first entry in the client structure.
 */
struct addr_tree_node {
/** rbtree node, key is this struct : proto and subnet */
rbnode_t node;
/** parent in tree; closest enclosing netblock. Set by
 * addr_tree_init_parents(); NULL when nothing encloses this block. */
struct addr_tree_node* parent;
/** address; copied into the node by addr_tree_insert() */
struct sockaddr_storage addr;
/** length of addr */
socklen_t addrlen;
/** netblock size (prefix length in bits) */
int net;
};
/**
* Init a name tree to be empty
* @param tree: to init.
*/
void name_tree_init(rbtree_t* tree);
/**
* insert element into name tree.
* @param tree: name tree
* @param node: node element (at start of a structure that caller
* has allocated).
* @param name: name to insert (wireformat)
* this node has been allocated by the caller and it itself inserted.
* @param len: length of name
* @param labs: labels in name
* @param dclass: class of name
* @return false on error (duplicate element).
*/
int name_tree_insert(rbtree_t* tree, struct name_tree_node* node,
uint8_t* name, size_t len, int labs, uint16_t dclass);
/**
* Initialize parent pointers in name tree.
* Should be performed after insertions are done, before lookups
* @param tree: name tree
*/
void name_tree_init_parents(rbtree_t* tree);
/**
* Lookup exact match in name tree
* @param tree: name tree
* @param name: wireformat name
* @param len: length of name
* @param labs: labels in name
* @param dclass: class of name
* @return node or NULL if not found.
*/
struct name_tree_node* name_tree_find(rbtree_t* tree, uint8_t* name,
size_t len, int labs, uint16_t dclass);
/**
* Lookup closest encloser in name tree.
* @param tree: name tree
* @param name: wireformat name
* @param len: length of name
* @param labs: labels in name
* @param dclass: class of name
* @return closest enclosing node (could be equal) or NULL if not found.
*/
struct name_tree_node* name_tree_lookup(rbtree_t* tree, uint8_t* name,
size_t len, int labs, uint16_t dclass);
/**
* Find next root item in name tree.
* @param tree: the nametree.
* @param dclass: the class to look for next (or higher).
* @return false if no classes found, true means class put into c.
*/
int name_tree_next_root(rbtree_t* tree, uint16_t* dclass);
/**
* Init addr tree to be empty.
* @param tree: to init.
*/
void addr_tree_init(rbtree_t* tree);
/**
* insert element into addr tree.
* @param tree: addr tree
* @param node: node element (at start of a structure that caller
* has allocated).
* @param addr: to insert (copied).
* @param addrlen: length of addr
* @param net: size of subnet.
* @return false on error (duplicate element).
*/
int addr_tree_insert(rbtree_t* tree, struct addr_tree_node* node,
struct sockaddr_storage* addr, socklen_t addrlen, int net);
/**
* Initialize parent pointers in addr tree.
* Should be performed after insertions are done, before lookups
* @param tree: addr tree
*/
void addr_tree_init_parents(rbtree_t* tree);
/**
* Lookup closest encloser in addr tree.
* @param tree: addr tree
* @param addr: to lookup.
* @param addrlen: length of addr
* @return closest enclosing node (could be equal) or NULL if not found.
*/
struct addr_tree_node* addr_tree_lookup(rbtree_t* tree,
struct sockaddr_storage* addr, socklen_t addrlen);
/** compare name tree nodes */
int name_tree_compare(const void* k1, const void* k2);
/** compare addr tree nodes */
int addr_tree_compare(const void* k1, const void* k2);
#endif /* UTIL_STORAGE_DNSTREE_H */

1032
external/unbound/util/storage/lookup3.c vendored Normal file

File diff suppressed because it is too large Load Diff

71
external/unbound/util/storage/lookup3.h vendored Normal file
View File

@@ -0,0 +1,71 @@
/*
* util/storage/lookup3.h - header file for hashing functions.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains header definitions for the hash functions we use.
* The hash functions are public domain (see lookup3.c).
*/
#ifndef UTIL_STORAGE_LOOKUP3_H
#define UTIL_STORAGE_LOOKUP3_H
/**
* Hash key made of 4byte chunks.
* @param k: the key, an array of uint32_t values
* @param length: the length of the key, in uint32_ts
* @param initval: the previous hash, or an arbitrary value
* @return: hash value.
*/
uint32_t hashword(const uint32_t *k, size_t length, uint32_t initval);
/**
* Hash key data.
* @param k: the key, array of uint8_t
* @param length: the length of the key, in uint8_ts
* @param initval: the previous hash, or an arbitrary value
* @return: hash value.
*/
uint32_t hashlittle(const void *k, size_t length, uint32_t initval);
/**
* Set the randomisation initial value, set this before threads start,
* and before hashing stuff (because it changes subsequent results).
* @param v: value
*/
void hash_set_raninit(uint32_t v);
#endif /* UTIL_STORAGE_LOOKUP3_H */

544
external/unbound/util/storage/lruhash.c vendored Normal file
View File

@@ -0,0 +1,544 @@
/*
* util/storage/lruhash.c - hashtable, hash function, LRU keeping.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains a hashtable with LRU keeping of entries.
*
*/
#include "config.h"
#include "util/storage/lruhash.h"
#include "util/fptr_wlist.h"
/** Initialize the lock of every bin in the array. */
void
bin_init(struct lruhash_bin* array, size_t size)
{
	struct lruhash_bin* b;
#ifdef THREADS_DISABLED
	(void)array;
#endif
	/* each bin has its own lock, protecting that bin's contents */
	for(b = array; b < array + size; b++) {
		lock_quick_init(&b->lock);
		lock_protect(&b->lock, b, sizeof(struct lruhash_bin));
	}
}
/**
 * Create a new, empty LRU hash table.
 * @param start_size: initial number of bins. Used as a bitmask
 *	(start_size-1), so it should be a power of 2.
 * @param maxmem: maximum memory use in bytes before LRU eviction.
 * @param sizefunc: computes the memory use of a key,data pair.
 * @param compfunc: compares two keys, returns 0 on equality.
 * @param delkeyfunc: deletes a key.
 * @param deldatafunc: deletes data.
 * @param arg: user argument passed to the callbacks.
 * @return the new table, or NULL on allocation failure.
 */
struct lruhash*
lruhash_create(size_t start_size, size_t maxmem, lruhash_sizefunc_t sizefunc,
lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
lruhash_deldatafunc_t deldatafunc, void* arg)
{
struct lruhash* table = (struct lruhash*)calloc(1,
sizeof(struct lruhash));
if(!table)
return NULL;
lock_quick_init(&table->lock);
table->sizefunc = sizefunc;
table->compfunc = compfunc;
table->delkeyfunc = delkeyfunc;
table->deldatafunc = deldatafunc;
table->cb_arg = arg;
table->size = start_size;
/* size-1 acts as the bin index mask (hash & size_mask) */
table->size_mask = (int)(start_size-1);
table->lru_start = NULL;
table->lru_end = NULL;
table->num = 0;
table->space_used = 0;
table->space_max = maxmem;
table->array = calloc(table->size, sizeof(struct lruhash_bin));
if(!table->array) {
lock_quick_destroy(&table->lock);
free(table);
return NULL;
}
bin_init(table->array, table->size);
lock_protect(&table->lock, table, sizeof(*table));
lock_protect(&table->lock, table->array,
table->size*sizeof(struct lruhash_bin));
return table;
}
/**
 * Destroy one bin: destroy its lock and free the key and data of every
 * entry on its overflow chain (the entry structs themselves are freed
 * by the delkeyfunc, per table convention).
 */
void
bin_delete(struct lruhash* table, struct lruhash_bin* bin)
{
	struct lruhash_entry* walk;
	if(!bin)
		return;
	lock_quick_destroy(&bin->lock);
	/* detach the chain, then dispose of every entry on it */
	walk = bin->overflow_list;
	bin->overflow_list = NULL;
	while(walk) {
		struct lruhash_entry* next = walk->overflow_next;
		void* data = walk->data;
		(*table->delkeyfunc)(walk->key, table->cb_arg);
		(*table->deldatafunc)(data, table->cb_arg);
		walk = next;
	}
}
/**
 * Rehash all entries from the old bins into a new, larger bin array.
 * Caller (table_grow) holds the table lock. The LRU chain is untouched;
 * entries only move between overflow chains.
 * @param table: hash table holding the old bin array.
 * @param newa: new bin array, twice the old size, locks initialized.
 * @param newmask: mask for the new array (old mask plus one more bit).
 */
void
bin_split(struct lruhash* table, struct lruhash_bin* newa,
int newmask)
{
size_t i;
struct lruhash_entry *p, *np;
struct lruhash_bin* newbin;
/* move entries to new table. Notice that since hash x is mapped to
* bin x & mask, and new mask uses one more bit, so all entries in
* one bin will go into the old bin or bin | newbit */
#ifndef THREADS_DISABLED
int newbit = newmask - table->size_mask;
#endif
/* so, really, this task could also be threaded, per bin. */
/* LRU list is not changed */
for(i=0; i<table->size; i++)
{
lock_quick_lock(&table->array[i].lock);
p = table->array[i].overflow_list;
/* lock both destination bins */
lock_quick_lock(&newa[i].lock);
lock_quick_lock(&newa[newbit|i].lock);
while(p) {
np = p->overflow_next;
/* link into correct new bin */
newbin = &newa[p->hash & newmask];
p->overflow_next = newbin->overflow_list;
newbin->overflow_list = p;
p=np;
}
lock_quick_unlock(&newa[i].lock);
lock_quick_unlock(&newa[newbit|i].lock);
lock_quick_unlock(&table->array[i].lock);
}
}
/** Delete the whole table: every entry's key and data, all bins, and
 * the table structure itself. NULL is allowed and ignored. */
void
lruhash_delete(struct lruhash* table)
{
	size_t b;
	if(!table)
		return;
	/* destroying the table lock first checks that nobody holds it */
	lock_quick_destroy(&table->lock);
	for(b = 0; b < table->size; ++b)
		bin_delete(table, table->array + b);
	free(table->array);
	free(table);
}
/** Unlink an entry from a bin's overflow chain; no-op if absent. */
void
bin_overflow_remove(struct lruhash_bin* bin, struct lruhash_entry* entry)
{
	/* walk a pointer-to-link so unlinking needs no prev pointer */
	struct lruhash_entry** link = &bin->overflow_list;
	while(*link) {
		if(*link == entry) {
			*link = entry->overflow_next;
			return;
		}
		link = &(*link)->overflow_next;
	}
}
/**
 * Evict LRU entries until space_used fits under space_max again.
 * Caller holds the table lock. Evicted entries are unlinked from the
 * LRU list and their bin, marked deleted (markdelfunc), and pushed onto
 * *list via overflow_next so the caller can free them outside the locks.
 * The MRU entry is never evicted, so the table stays non-empty.
 * @param table: hash table, locked by caller.
 * @param list: out; chain of reclaimed entries for the caller to free.
 */
void
reclaim_space(struct lruhash* table, struct lruhash_entry** list)
{
struct lruhash_entry* d;
struct lruhash_bin* bin;
log_assert(table);
/* does not delete MRU entry, so table will not be empty. */
while(table->num > 1 && table->space_used > table->space_max) {
/* notice that since we hold the hashtable lock, nobody
can change the lru chain. So it cannot be deleted underneath
us. We still need the hashbin and entry write lock to make
sure we flush all users away from the entry.
which is unlikely, since it is LRU, if someone got a rdlock
it would be moved to front, but to be sure. */
d = table->lru_end;
/* specialised, delete from end of double linked list,
and we know num>1, so there is a previous lru entry. */
log_assert(d && d->lru_prev);
table->lru_end = d->lru_prev;
d->lru_prev->lru_next = NULL;
/* schedule entry for deletion */
bin = &table->array[d->hash & table->size_mask];
table->num --;
lock_quick_lock(&bin->lock);
bin_overflow_remove(bin, d);
d->overflow_next = *list;
*list = d;
lock_rw_wrlock(&d->lock);
table->space_used -= table->sizefunc(d->key, d->data);
if(table->markdelfunc)
(*table->markdelfunc)(d->key);
lock_rw_unlock(&d->lock);
lock_quick_unlock(&bin->lock);
}
}
/** Search a bin's overflow chain for a key; NULL when not found. */
struct lruhash_entry*
bin_find_entry(struct lruhash* table,
	struct lruhash_bin* bin, hashvalue_t hash, void* key)
{
	struct lruhash_entry* e;
	/* cheap hash compare first; full key compare only on a match */
	for(e = bin->overflow_list; e; e = e->overflow_next) {
		if(e->hash == hash && table->compfunc(e->key, key) == 0)
			return e;
	}
	return NULL;
}
/**
 * Double the number of bins. Caller holds the table lock.
 * On allocation failure (or when size_t would overflow) the table
 * simply keeps its current, smaller bin array.
 * @param table: hash table to grow.
 */
void
table_grow(struct lruhash* table)
{
struct lruhash_bin* newa;
int newmask;
size_t i;
if(table->size_mask == (int)(((size_t)-1)>>1)) {
log_err("hash array malloc: size_t too small");
return;
}
/* try to allocate new array, if not fail */
newa = calloc(table->size*2, sizeof(struct lruhash_bin));
if(!newa) {
log_err("hash grow: malloc failed");
/* continue with smaller array. Though its slower. */
return;
}
bin_init(newa, table->size*2);
newmask = (table->size_mask << 1) | 1;
bin_split(table, newa, newmask);
/* delete the old bins */
lock_unprotect(&table->lock, table->array);
for(i=0; i<table->size; i++) {
lock_quick_destroy(&table->array[i].lock);
}
free(table->array);
table->size *= 2;
table->size_mask = newmask;
table->array = newa;
lock_protect(&table->lock, table->array,
table->size*sizeof(struct lruhash_bin));
return;
}
/** Push an entry onto the front (MRU end) of the LRU list. */
void
lru_front(struct lruhash* table, struct lruhash_entry* entry)
{
	entry->lru_prev = NULL;
	entry->lru_next = table->lru_start;
	if(table->lru_start)
		table->lru_start->lru_prev = entry;
	else	table->lru_end = entry;	/* list was empty */
	table->lru_start = entry;
}
/** Unlink an entry from the LRU list, fixing head/tail at the ends. */
void
lru_remove(struct lruhash* table, struct lruhash_entry* entry)
{
	struct lruhash_entry* before = entry->lru_prev;
	struct lruhash_entry* after = entry->lru_next;
	if(before)
		before->lru_next = after;
	else	table->lru_start = after;
	if(after)
		after->lru_prev = before;
	else	table->lru_end = before;
}
/** Mark an entry most-recently-used by moving it to the LRU front. */
void
lru_touch(struct lruhash* table, struct lruhash_entry* entry)
{
	log_assert(table && entry);
	/* already at the front? then there is nothing to move */
	if(entry == table->lru_start)
		return;
	lru_remove(table, entry);	/* unlink from current spot */
	lru_front(table, entry);	/* relink at the front */
}
/**
 * Insert a new entry, or replace the data of an existing key.
 * On replace, the passed entry's key is deleted and the existing entry
 * is kept. May evict LRU entries to honor space_max (freed outside the
 * table lock) and may grow the bin array.
 * @param table: hash table.
 * @param hash: hash value of entry->key.
 * @param entry: entry to insert, key and hash filled in by caller.
 * @param data: data to store.
 * @param cb_arg: callback argument; NULL means use table->cb_arg.
 */
void
lruhash_insert(struct lruhash* table, hashvalue_t hash,
struct lruhash_entry* entry, void* data, void* cb_arg)
{
struct lruhash_bin* bin;
struct lruhash_entry* found, *reclaimlist=NULL;
size_t need_size;
fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
need_size = table->sizefunc(entry->key, data);
if(cb_arg == NULL) cb_arg = table->cb_arg;
/* find bin */
lock_quick_lock(&table->lock);
bin = &table->array[hash & table->size_mask];
lock_quick_lock(&bin->lock);
/* see if entry exists already */
if(!(found=bin_find_entry(table, bin, hash, entry->key))) {
/* if not: add to bin */
entry->overflow_next = bin->overflow_list;
bin->overflow_list = entry;
lru_front(table, entry);
table->num++;
table->space_used += need_size;
} else {
/* if so: update data - needs a writelock */
table->space_used += need_size -
(*table->sizefunc)(found->key, found->data);
(*table->delkeyfunc)(entry->key, cb_arg);
lru_touch(table, found);
lock_rw_wrlock(&found->lock);
(*table->deldatafunc)(found->data, cb_arg);
found->data = data;
lock_rw_unlock(&found->lock);
}
lock_quick_unlock(&bin->lock);
if(table->space_used > table->space_max)
reclaim_space(table, &reclaimlist);
if(table->num >= table->size)
table_grow(table);
lock_quick_unlock(&table->lock);
/* finish reclaim if any (outside of critical region) */
while(reclaimlist) {
struct lruhash_entry* n = reclaimlist->overflow_next;
void* d = reclaimlist->data;
(*table->delkeyfunc)(reclaimlist->key, cb_arg);
(*table->deldatafunc)(d, cb_arg);
reclaimlist = n;
}
}
/**
 * Look up an entry and return it locked (read or write lock).
 * The entry lock is taken while the bin lock is still held, so the
 * entry cannot be deleted between finding and locking it. The caller
 * must unlock entry->lock when done with the entry.
 * @param table: hash table.
 * @param hash: hash value of key.
 * @param key: lookup key.
 * @param wr: nonzero for a writelock on the entry, else a readlock.
 * @return the locked entry, or NULL if not found.
 */
struct lruhash_entry*
lruhash_lookup(struct lruhash* table, hashvalue_t hash, void* key, int wr)
{
struct lruhash_entry* entry;
struct lruhash_bin* bin;
fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
lock_quick_lock(&table->lock);
bin = &table->array[hash & table->size_mask];
lock_quick_lock(&bin->lock);
if((entry=bin_find_entry(table, bin, hash, key)))
lru_touch(table, entry);
/* LRU already updated: the table lock can be released now */
lock_quick_unlock(&table->lock);
if(entry) {
if(wr) { lock_rw_wrlock(&entry->lock); }
else { lock_rw_rdlock(&entry->lock); }
}
/* bin lock is held until the entry lock is acquired */
lock_quick_unlock(&bin->lock);
return entry;
}
/**
 * Remove an entry by key; deletes its key and data via the callbacks.
 * Takes the entry writelock briefly (after calling markdelfunc) to
 * flush out other users before the final delete. No effect if the key
 * is not present.
 * @param table: hash table.
 * @param hash: hash value of key.
 * @param key: key of the entry to remove.
 */
void
lruhash_remove(struct lruhash* table, hashvalue_t hash, void* key)
{
struct lruhash_entry* entry;
struct lruhash_bin* bin;
void *d;
fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
lock_quick_lock(&table->lock);
bin = &table->array[hash & table->size_mask];
lock_quick_lock(&bin->lock);
if((entry=bin_find_entry(table, bin, hash, key))) {
bin_overflow_remove(bin, entry);
lru_remove(table, entry);
} else {
lock_quick_unlock(&table->lock);
lock_quick_unlock(&bin->lock);
return;
}
table->num--;
table->space_used -= (*table->sizefunc)(entry->key, entry->data);
lock_quick_unlock(&table->lock);
/* writelock the entry to flush any remaining users away */
lock_rw_wrlock(&entry->lock);
if(table->markdelfunc)
(*table->markdelfunc)(entry->key);
lock_rw_unlock(&entry->lock);
lock_quick_unlock(&bin->lock);
/* finish removal */
d = entry->data;
(*table->delkeyfunc)(entry->key, table->cb_arg);
(*table->deldatafunc)(d, table->cb_arg);
}
/** clear bin, respecting locks, does not do space, LRU.
 * Deletes key and data of every entry on the bin's overflow chain;
 * the caller (lruhash_clear) resets the LRU list and space counters. */
static void
bin_clear(struct lruhash* table, struct lruhash_bin* bin)
{
struct lruhash_entry* p, *np;
void *d;
lock_quick_lock(&bin->lock);
p = bin->overflow_list;
while(p) {
/* writelock the entry to flush out users before deleting it */
lock_rw_wrlock(&p->lock);
np = p->overflow_next;
d = p->data;
if(table->markdelfunc)
(*table->markdelfunc)(p->key);
lock_rw_unlock(&p->lock);
(*table->delkeyfunc)(p->key, table->cb_arg);
(*table->deldatafunc)(d, table->cb_arg);
p = np;
}
bin->overflow_list = NULL;
lock_quick_unlock(&bin->lock);
}
/**
 * Remove all entries from the table, deleting their keys and data.
 * The bin array itself and its locks remain allocated for reuse.
 * @param table: hash table. NULL is allowed and ignored.
 */
void
lruhash_clear(struct lruhash* table)
{
size_t i;
if(!table)
return;
fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
lock_quick_lock(&table->lock);
for(i=0; i<table->size; i++) {
bin_clear(table, &table->array[i]);
}
/* reset LRU list and accounting to the empty state */
table->lru_start = NULL;
table->lru_end = NULL;
table->num = 0;
table->space_used = 0;
lock_quick_unlock(&table->lock);
}
/**
 * Log a summary of table occupancy and memory use.
 * @param table: hash table.
 * @param id: identifier string printed with the status lines.
 * @param extended: if nonzero, also scan all bins and log min/avg/max
 *	overflow-chain length; if >= 2, log every bin's chain length.
 */
void
lruhash_status(struct lruhash* table, const char* id, int extended)
{
lock_quick_lock(&table->lock);
log_info("%s: %u entries, memory %u / %u",
id, (unsigned)table->num, (unsigned)table->space_used,
(unsigned)table->space_max);
log_info("  itemsize %u, array %u, mask %d",
(unsigned)(table->num? table->space_used/table->num : 0),
(unsigned)table->size, table->size_mask);
if(extended) {
size_t i;
int min=(int)table->size*2, max=-2;
for(i=0; i<table->size; i++) {
int here = 0;
struct lruhash_entry *en;
lock_quick_lock(&table->array[i].lock);
en = table->array[i].overflow_list;
while(en) {
here ++;
en = en->overflow_next;
}
lock_quick_unlock(&table->array[i].lock);
if(extended >= 2)
log_info("bin[%d] %d", (int)i, here);
if(here > max) max = here;
if(here < min) min = here;
}
log_info("  bin min %d, avg %.2lf, max %d", min,
(double)table->num/(double)table->size, max);
}
lock_quick_unlock(&table->lock);
}
/**
 * Total memory in use by the table: the struct, the stored entries
 * (space_used) and the per-bin lock bookkeeping.
 * @param table: hash table.
 * @return memory use in bytes.
 */
size_t
lruhash_get_mem(struct lruhash* table)
{
size_t s;
lock_quick_lock(&table->lock);
s = sizeof(struct lruhash) + table->space_used;
#ifdef USE_THREAD_DEBUG
if(table->size != 0) {
size_t i;
for(i=0; i<table->size; i++)
s += sizeof(struct lruhash_bin) +
lock_get_mem(&table->array[i].lock);
}
#else /* no THREAD_DEBUG */
/* all bin locks are the same size; measure one and multiply */
if(table->size != 0)
s += (table->size)*(sizeof(struct lruhash_bin) +
lock_get_mem(&table->array[0].lock));
#endif
lock_quick_unlock(&table->lock);
s += lock_get_mem(&table->lock);
return s;
}
/**
 * Set the markdel callback. It is invoked on an entry's key, under the
 * entry writelock, just before that entry's key and data are deleted.
 * @param table: hash table.
 * @param md: markdel function, or NULL for none.
 */
void
lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_t md)
{
lock_quick_lock(&table->lock);
table->markdelfunc = md;
lock_quick_unlock(&table->lock);
}
/**
 * Visit every entry in the table with a callback. The table lock is
 * held for the whole walk; each bin is locked while its chain is
 * visited, and each entry is locked (read or write) around the call.
 */
void
lruhash_traverse(struct lruhash* h, int wr,
	void (*func)(struct lruhash_entry*, void*), void* arg)
{
	size_t bidx;
	struct lruhash_entry* cur;
	lock_quick_lock(&h->lock);
	for(bidx = 0; bidx < h->size; bidx++) {
		lock_quick_lock(&h->array[bidx].lock);
		for(cur = h->array[bidx].overflow_list; cur;
			cur = cur->overflow_next) {
			if(wr)
				lock_rw_wrlock(&cur->lock);
			else	lock_rw_rdlock(&cur->lock);
			(*func)(cur, arg);
			lock_rw_unlock(&cur->lock);
		}
		lock_quick_unlock(&h->array[bidx].lock);
	}
	lock_quick_unlock(&h->lock);
}

414
external/unbound/util/storage/lruhash.h vendored Normal file
View File

@@ -0,0 +1,414 @@
/*
* util/storage/lruhash.h - hashtable, hash function, LRU keeping.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains a hashtable with LRU keeping of entries.
*
* The hash table keeps a maximum memory size. Old entries are removed
* to make space for new entries.
*
* The locking strategy is as follows:
* o since (almost) every read also implies a LRU update, the
* hashtable lock is a spinlock, not rwlock.
* o the idea is to move every thread through the hash lock quickly,
* so that the next thread can access the lookup table.
* o User performs hash function.
*
* For read:
* o lock hashtable.
* o lookup hash bin.
* o lock hash bin.
* o find entry (if failed, unlock hash, unl bin, exit).
* o swizzle pointers for LRU update.
* o unlock hashtable.
* o lock entry (rwlock).
* o unlock hash bin.
* o work on entry.
* o unlock entry.
*
* To update an entry, gain writelock and change the entry.
* (the entry must keep the same hashvalue, so a data update.)
* (you cannot upgrade a readlock to a writelock, because the item may
* be deleted, it would cause race conditions. So instead, unlock and
* relookup it in the hashtable.)
*
* To delete an entry:
* o unlock the entry if you hold the lock already.
* o lock hashtable.
* o lookup hash bin.
* o lock hash bin.
* o find entry (if failed, unlock hash, unl bin, exit).
* o remove entry from hashtable bin overflow chain.
* o unlock hashtable.
* o lock entry (writelock).
* o unlock hash bin.
* o unlock entry (nobody else should be waiting for this lock,
* since you removed it from hashtable, and you got writelock while
* holding the hashbinlock so you are the only one.)
* Note you are only allowed to obtain a lock while holding hashbinlock.
* o delete entry.
*
* The above sequence is:
* o race free, works with read, write and delete.
* o but has a queue, imagine someone needing a writelock on an item.
* but there are still readlocks. The writelocker waits, but holds
* the hashbinlock. The next thread that comes in and needs the same
* hashbin will wait for the lock while holding the hashtable lock.
* thus halting the entire system on hashtable.
* This is because of the delete protection.
* Readlocks will be easier on the rwlock on entries.
* While the writer is holding writelock, similar problems happen with
* a reader or writer needing the same item.
* the scenario requires more than three threads.
* o so the queue length is 3 threads in a bad situation. The fourth is
* unable to use the hashtable.
*
* If you need to acquire locks on multiple items from the hashtable.
* o you MUST release all locks on items from the hashtable before
* doing the next lookup/insert/delete/whatever.
* o To acquire multiple items you should use a special routine that
* obtains the locks on those multiple items in one go.
*/
#ifndef UTIL_STORAGE_LRUHASH_H
#define UTIL_STORAGE_LRUHASH_H
#include "util/locks.h"
struct lruhash_bin;
struct lruhash_entry;
/** default start size for hash arrays */
#define HASH_DEFAULT_STARTARRAY 1024 /* entries in array */
/** default max memory for hash arrays */
#define HASH_DEFAULT_MAXMEM 4*1024*1024 /* bytes */
/** the type of a hash value */
typedef uint32_t hashvalue_t;
/**
* Type of function that calculates the size of an entry.
* Result must include the size of struct lruhash_entry.
* Keys that are identical must also calculate to the same size.
* size = func(key, data).
*/
typedef size_t (*lruhash_sizefunc_t)(void*, void*);
/** type of function that compares two keys. return 0 if equal. */
typedef int (*lruhash_compfunc_t)(void*, void*);
/** old keys are deleted.
* The RRset type has to revoke its ID number, markdel() is used first.
* This function is called: func(key, userarg) */
typedef void (*lruhash_delkeyfunc_t)(void*, void*);
/** old data is deleted. This function is called: func(data, userarg). */
typedef void (*lruhash_deldatafunc_t)(void*, void*);
/** mark a key as pending to be deleted (and not to be used by anyone).
* called: func(key) */
typedef void (*lruhash_markdelfunc_t)(void*);
/**
 * Hash table that keeps an LRU list of entries.
 * The lookup array is grown when needed, and least-recently-used
 * entries are removed to stay within the configured memory maximum.
 */
struct lruhash {
/** lock for exclusive access, to the lookup array and LRU list */
lock_quick_t lock;
/** the size function for entries in this table */
lruhash_sizefunc_t sizefunc;
/** the compare function for entries in this table. */
lruhash_compfunc_t compfunc;
/** how to delete keys. */
lruhash_delkeyfunc_t delkeyfunc;
/** how to delete data. */
lruhash_deldatafunc_t deldatafunc;
/** how to mark a key pending deletion (may be NULL) */
lruhash_markdelfunc_t markdelfunc;
/** user argument passed to the user functions above */
void* cb_arg;
/** the size of the lookup array (a power of 2) */
size_t size;
/** size bitmask - since size is a power of 2; selects the bin */
int size_mask;
/** lookup array of bins */
struct lruhash_bin* array;
/** the lru list start: the most recently used entry.
 * noncyclical double linked list. */
struct lruhash_entry* lru_start;
/** lru list end item (least recently used, first to be evicted) */
struct lruhash_entry* lru_end;
/** the number of entries in the hash table. */
size_t num;
/** the amount of space used, roughly the number of bytes in use. */
size_t space_used;
/** the amount of space the hash table is maximally allowed to use. */
size_t space_max;
};
/**
 * A single bin with a linked list of entries in it.
 * Entries whose hash maps to this bin are chained on overflow_list.
 */
struct lruhash_bin {
/**
 * Lock for exclusive access to the linked list
 * This lock makes deletion of items safe in this overflow list.
 */
lock_quick_t lock;
/** linked list of overflow entries (via entry->overflow_next) */
struct lruhash_entry* overflow_list;
};
/**
 * An entry into the hash table.
 * To change overflow_next you need to hold the bin lock.
 * To change the lru items you need to hold the hashtable lock.
 * This structure is designed as part of key struct. And key pointer helps
 * to get the surrounding structure. Data should be allocated on its own.
 */
struct lruhash_entry {
/**
 * rwlock for access to the contents of the entry
 * Note that it does _not_ cover the lru_ and overflow_ ptrs.
 * Even with a writelock, you cannot change hash and key.
 * You need to delete it to change hash or key.
 */
lock_rw_t lock;
/** next entry in overflow chain. Covered by hashlock and binlock. */
struct lruhash_entry* overflow_next;
/** next entry in lru chain. covered by hashlock. */
struct lruhash_entry* lru_next;
/** prev entry in lru chain. covered by hashlock. */
struct lruhash_entry* lru_prev;
/** hash value of the key. It may not change, until entry deleted. */
hashvalue_t hash;
/** key; the entry is embedded in the key's structure */
void* key;
/** data; separately allocated, replaced on update-insert */
void* data;
};
/**
* Create new hash table.
* @param start_size: size of hashtable array at start, must be power of 2.
* @param maxmem: maximum amount of memory this table is allowed to use.
* @param sizefunc: calculates memory usage of entries.
* @param compfunc: compares entries, 0 on equality.
* @param delkeyfunc: deletes key.
* Calling both delkey and deldata will also free the struct lruhash_entry.
* Make it part of the key structure and delete it in delkeyfunc.
* @param deldatafunc: deletes data.
* @param arg: user argument that is passed to user function calls.
* @return: new hash table or NULL on malloc failure.
*/
struct lruhash* lruhash_create(size_t start_size, size_t maxmem,
lruhash_sizefunc_t sizefunc, lruhash_compfunc_t compfunc,
lruhash_delkeyfunc_t delkeyfunc, lruhash_deldatafunc_t deldatafunc,
void* arg);
/**
* Delete hash table. Entries are all deleted.
* @param table: to delete.
*/
void lruhash_delete(struct lruhash* table);
/**
* Clear hash table. Entries are all deleted, while locking them before
* doing so. At end the table is empty.
* @param table: to make empty.
*/
void lruhash_clear(struct lruhash* table);
/**
* Insert a new element into the hashtable.
* If key is already present data pointer in that entry is updated.
* The space calculation function is called with the key, data.
* If necessary the least recently used entries are deleted to make space.
* If necessary the hash array is grown up.
*
* @param table: hash table.
* @param hash: hash value. User calculates the hash.
* @param entry: identifies the entry.
* If key already present, this entry->key is deleted immediately.
* But entry->data is set to NULL before deletion, and put into
* the existing entry. The data is then freed.
* @param data: the data.
* @param cb_override: if not null overrides the cb_arg for the deletefunc.
*/
void lruhash_insert(struct lruhash* table, hashvalue_t hash,
struct lruhash_entry* entry, void* data, void* cb_override);
/**
* Lookup an entry in the hashtable.
* At the end of the function you hold a (read/write)lock on the entry.
* The LRU is updated for the entry (if found).
* @param table: hash table.
* @param hash: hash of key.
* @param key: what to look for, compared against entries in overflow chain.
* the hash value must be set, and must work with compare function.
* @param wr: set to true if you desire a writelock on the entry.
* with a writelock you can update the data part.
* @return: pointer to the entry or NULL. The entry is locked.
* The user must unlock the entry when done.
*/
struct lruhash_entry* lruhash_lookup(struct lruhash* table, hashvalue_t hash,
void* key, int wr);
/**
* Touch entry, so it becomes the most recently used in the LRU list.
* Caller must hold hash table lock. The entry must be inserted already.
* @param table: hash table.
* @param entry: entry to make first in LRU.
*/
void lru_touch(struct lruhash* table, struct lruhash_entry* entry);
/**
* Set the markdelfunction (or NULL)
*/
void lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_t md);
/************************* Internal functions ************************/
/*** these are only exposed for unit tests. ***/
/**
* Remove entry from hashtable. Does nothing if not found in hashtable.
* Delfunc is called for the entry.
* @param table: hash table.
* @param hash: hash of key.
* @param key: what to look for.
*/
void lruhash_remove(struct lruhash* table, hashvalue_t hash, void* key);
/** init the hash bins for the table */
void bin_init(struct lruhash_bin* array, size_t size);
/** delete the hash bin and entries inside it */
void bin_delete(struct lruhash* table, struct lruhash_bin* bin);
/**
* Find entry in hash bin. You must have locked the bin.
* @param table: hash table with function pointers.
* @param bin: hash bin to look into.
* @param hash: hash value to look for.
* @param key: key to look for.
* @return: the entry or NULL if not found.
*/
struct lruhash_entry* bin_find_entry(struct lruhash* table,
struct lruhash_bin* bin, hashvalue_t hash, void* key);
/**
* Remove entry from bin overflow chain.
* You must have locked the bin.
* @param bin: hash bin to look into.
* @param entry: entry ptr that needs removal.
*/
void bin_overflow_remove(struct lruhash_bin* bin,
struct lruhash_entry* entry);
/**
* Split hash bin into two new ones. Based on increased size_mask.
* Caller must hold hash table lock.
* At the end the routine acquires all hashbin locks (in the old array).
* This makes it wait for other threads to finish with the bins.
* So the bins are ready to be deleted after this function.
* @param table: hash table with function pointers.
* @param newa: new increased array.
* @param newmask: new lookup mask.
*/
void bin_split(struct lruhash* table, struct lruhash_bin* newa,
int newmask);
/**
* Try to make space available by deleting old entries.
* Assumes that the lock on the hashtable is being held by caller.
* Caller must not hold bin locks.
* @param table: hash table.
* @param list: list of entries that are to be deleted later.
* Entries have been removed from the hash table and writelock is held.
*/
void reclaim_space(struct lruhash* table, struct lruhash_entry** list);
/**
* Grow the table lookup array. Becomes twice as large.
* Caller must hold the hash table lock. Must not hold any bin locks.
* Tries to grow, on malloc failure, nothing happened.
* @param table: hash table.
*/
void table_grow(struct lruhash* table);
/**
* Put entry at front of lru. entry must be unlinked from lru.
* Caller must hold hash table lock.
* @param table: hash table with lru head and tail.
* @param entry: entry to make most recently used.
*/
void lru_front(struct lruhash* table, struct lruhash_entry* entry);
/**
* Remove entry from lru list.
* Caller must hold hash table lock.
* @param table: hash table with lru head and tail.
* @param entry: entry to remove from lru.
*/
void lru_remove(struct lruhash* table, struct lruhash_entry* entry);
/**
* Output debug info to the log as to state of the hash table.
* @param table: hash table.
* @param id: string printed with table to identify the hash table.
* @param extended: set to true to print statistics on overflow bin lengths.
*/
void lruhash_status(struct lruhash* table, const char* id, int extended);
/**
* Get memory in use now by the lruhash table.
* @param table: hash table. Will be locked before use. And unlocked after.
* @return size in bytes.
*/
size_t lruhash_get_mem(struct lruhash* table);
/**
* Traverse a lruhash. Call back for every element in the table.
* @param h: hash table. Locked before use.
* @param wr: if true writelock is obtained on element, otherwise readlock.
* @param func: function for every element. Do not lock or unlock elements.
* @param arg: user argument to func.
*/
void lruhash_traverse(struct lruhash* h, int wr,
void (*func)(struct lruhash_entry*, void*), void* arg);
#endif /* UTIL_STORAGE_LRUHASH_H */

231
external/unbound/util/storage/slabhash.c vendored Normal file
View File

@@ -0,0 +1,231 @@
/*
* util/storage/slabhash.c - hashtable consisting of several smaller tables.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* Implementation of hash table that consists of smaller hash tables.
* This results in a partitioned lruhash table.
* It cannot grow, but that gives it the ability to have multiple
* locks. Also this means there are multiple LRU lists.
*/
#include "config.h"
#include "util/storage/slabhash.h"
struct slabhash* slabhash_create(size_t numtables, size_t start_size,
size_t maxmem, lruhash_sizefunc_t sizefunc,
lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
lruhash_deldatafunc_t deldatafunc, void* arg)
{
/* Create the slab container plus numtables sub-tables; each
 * sub-table gets an equal share (maxmem/numtables) of the budget. */
size_t i;
struct slabhash* sl = (struct slabhash*)calloc(1,
sizeof(struct slabhash));
if(!sl) return NULL;
sl->size = numtables;
log_assert(sl->size > 0);
sl->array = (struct lruhash**)calloc(sl->size, sizeof(struct lruhash*));
if(!sl->array) {
free(sl);
return NULL;
}
/* mask starts out as the low bits (size-1)... */
sl->mask = (uint32_t)(sl->size - 1);
if(sl->mask == 0) {
/* only one table: every hash maps to index 0 */
sl->shift = 0;
} else {
log_assert( (sl->size & sl->mask) == 0
/* size must be power of 2 */ );
/* ...and is slid up to the top bits of the 32-bit hash;
 * shift records how far the masked bits must be shifted
 * back down to form an array index (see slab_idx) */
sl->shift = 0;
while(!(sl->mask & 0x80000000)) {
sl->mask <<= 1;
sl->shift ++;
}
}
for(i=0; i<sl->size; i++) {
sl->array[i] = lruhash_create(start_size, maxmem / sl->size,
sizefunc, compfunc, delkeyfunc, deldatafunc, arg);
if(!sl->array[i]) {
/* partial failure: tear down what was created so far */
slabhash_delete(sl);
return NULL;
}
}
return sl;
}
void slabhash_delete(struct slabhash* sl)
{
	/* Destroy the slab table: delete every sub-table (entries and
	 * all), then the pointer array, then the container. NULL is a
	 * no-op. The array may be NULL after a failed create. */
	size_t tab;
	if(!sl)
		return;
	if(sl->array) {
		for(tab = 0; tab < sl->size; tab++)
			lruhash_delete(sl->array[tab]);
		free(sl->array);
	}
	free(sl);
}
void slabhash_clear(struct slabhash* sl)
{
	/* Empty every sub-table; all entries are deleted. NULL is a
	 * no-op. */
	size_t tab;
	if(!sl)
		return;
	for(tab = 0; tab < sl->size; tab++)
		lruhash_clear(sl->array[tab]);
}
/** helper routine to calculate the slabhash index */
static unsigned int
slab_idx(struct slabhash* sl, hashvalue_t hash)
{
/* mask keeps the high bits of the hash; shift moves them down so
 * the result is a valid index into sl->array (0 .. size-1). */
return ((hash & sl->mask) >> sl->shift);
}
void slabhash_insert(struct slabhash* sl, hashvalue_t hash,
struct lruhash_entry* entry, void* data, void* arg)
{
/* Delegate the insert to the sub-table selected by the hash. */
lruhash_insert(sl->array[slab_idx(sl, hash)], hash, entry, data, arg);
}
struct lruhash_entry* slabhash_lookup(struct slabhash* sl,
hashvalue_t hash, void* key, int wr)
{
/* Delegate the lookup to the sub-table selected by the hash;
 * the returned entry (if any) is locked, per lruhash_lookup. */
return lruhash_lookup(sl->array[slab_idx(sl, hash)], hash, key, wr);
}
void slabhash_remove(struct slabhash* sl, hashvalue_t hash, void* key)
{
/* Delegate the removal to the sub-table selected by the hash. */
lruhash_remove(sl->array[slab_idx(sl, hash)], hash, key);
}
void slabhash_status(struct slabhash* sl, const char* id, int extended)
{
	/* Log the slab layout (table count, mask, shift), then the
	 * per-table statistics of each sub-table. */
	char label[17];
	size_t tab;
	log_info("Slabhash %s: %u tables mask=%x shift=%d",
		id, (unsigned)sl->size, (unsigned)sl->mask, sl->shift);
	for(tab = 0; tab < sl->size; tab++) {
		snprintf(label, sizeof(label), "table %u", (unsigned)tab);
		lruhash_status(sl->array[tab], label, extended);
	}
}
size_t slabhash_get_size(struct slabhash* sl)
{
	/* Sum the configured maximum sizes (space_max) of all
	 * sub-tables, reading each one under its table lock. */
	size_t total = 0;
	size_t tab;
	for(tab = 0; tab < sl->size; tab++) {
		lock_quick_lock(&sl->array[tab]->lock);
		total += sl->array[tab]->space_max;
		lock_quick_unlock(&sl->array[tab]->lock);
	}
	return total;
}
size_t slabhash_get_mem(struct slabhash* sl)
{
	/* Memory in use: the container struct, the pointer array, and
	 * whatever each sub-table reports. */
	size_t tab;
	size_t total = sizeof(*sl) + sizeof(struct lruhash*)*sl->size;
	for(tab = 0; tab < sl->size; tab++)
		total += lruhash_get_mem(sl->array[tab]);
	return total;
}
struct lruhash* slabhash_gettable(struct slabhash* sl, hashvalue_t hash)
{
/* Return the sub-table that the given hash value maps to. */
return sl->array[slab_idx(sl, hash)];
}
/* test code, here to avoid linking problems with fptr_wlist */
/** delete a test key: destroy the entry's rwlock, then free it */
static void delkey(struct slabhash_testkey* k) {
lock_rw_destroy(&k->entry.lock); free(k);}
/** delete test data: a plain free, no embedded locks */
static void deldata(struct slabhash_testdata* d) {free(d);}
size_t test_slabhash_sizefunc(void* ATTR_UNUSED(key), void* ATTR_UNUSED(data))
{
/* Test entries have a fixed size: key struct plus data struct.
 * The arguments are ignored. */
return sizeof(struct slabhash_testkey) +
sizeof(struct slabhash_testdata);
}
int test_slabhash_compfunc(void* key1, void* key2)
{
	/* Order test keys by their integer id: 0 when equal, 1 when
	 * the first is greater, -1 when it is smaller. */
	struct slabhash_testkey* a = (struct slabhash_testkey*)key1;
	struct slabhash_testkey* b = (struct slabhash_testkey*)key2;
	if(a->id > b->id)
		return 1;
	if(a->id < b->id)
		return -1;
	return 0;
}
void test_slabhash_delkey(void* key, void* ATTR_UNUSED(arg))
{
/* Adapter matching lruhash_delkeyfunc_t; arg is unused. */
delkey((struct slabhash_testkey*)key);
}
void test_slabhash_deldata(void* data, void* ATTR_UNUSED(arg))
{
/* Adapter matching lruhash_deldatafunc_t; arg is unused. */
deldata((struct slabhash_testdata*)data);
}
void slabhash_setmarkdel(struct slabhash* sl, lruhash_markdelfunc_t md)
{
	/* Propagate the markdel callback to every sub-table. */
	size_t tab;
	for(tab = 0; tab < sl->size; tab++)
		lruhash_setmarkdel(sl->array[tab], md);
}
void slabhash_traverse(struct slabhash* sh, int wr,
	void (*func)(struct lruhash_entry*, void*), void* arg)
{
	/* Walk each sub-table in turn, applying func to every entry;
	 * wr selects write vs. read locking of the entries. */
	size_t tab;
	for(tab = 0; tab < sh->size; tab++) {
		lruhash_traverse(sh->array[tab], wr, func, arg);
	}
}
size_t count_slabhash_entries(struct slabhash* sh)
{
	/* Sum the entry counts of all sub-tables, reading each count
	 * under its table lock. */
	size_t total = 0;
	size_t tab;
	for(tab = 0; tab < sh->size; tab++) {
		lock_quick_lock(&sh->array[tab]->lock);
		total += sh->array[tab]->num;
		lock_quick_unlock(&sh->array[tab]->lock);
	}
	return total;
}

218
external/unbound/util/storage/slabhash.h vendored Normal file
View File

@@ -0,0 +1,218 @@
/*
* util/storage/slabhash.h - hashtable consisting of several smaller tables.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* Hash table that consists of smaller hash tables.
* It cannot grow, but that gives it the ability to have multiple
* locks. Also this means there are multiple LRU lists.
*/
#ifndef UTIL_STORAGE_SLABHASH_H
#define UTIL_STORAGE_SLABHASH_H
#include "util/storage/lruhash.h"
/** default number of slabs */
#define HASH_DEFAULT_SLABS 4
/**
 * Hash table formed from several smaller ones.
 * This results in a partitioned lruhash table, a 'slashtable'.
 * None of the data inside the slabhash struct may be altered after
 * creation. Therefore, no locks are needed to access the structure
 * itself; the sub-tables carry their own locks.
 */
struct slabhash {
/** the size of the array - must be power of 2 */
size_t size;
/** size bitmask - uses high bits of the 32-bit hash value. */
uint32_t mask;
/** shift right this many bits to get index into array. */
unsigned int shift;
/** lookup array of hash tables, one lruhash per slab */
struct lruhash** array;
};
/**
* Create new slabbed hash table.
* @param numtables: number of hash tables to use, other parameters used to
* initialize these smaller hashtables.
* @param start_size: size of hashtable array at start, must be power of 2.
* @param maxmem: maximum amount of memory this table is allowed to use.
* so every table gets maxmem/numtables to use for itself.
* @param sizefunc: calculates memory usage of entries.
* @param compfunc: compares entries, 0 on equality.
* @param delkeyfunc: deletes key.
* @param deldatafunc: deletes data.
* @param arg: user argument that is passed to user function calls.
* @return: new hash table or NULL on malloc failure.
*/
struct slabhash* slabhash_create(size_t numtables, size_t start_size,
size_t maxmem, lruhash_sizefunc_t sizefunc,
lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
lruhash_deldatafunc_t deldatafunc, void* arg);
/**
* Delete hash table. Entries are all deleted.
* @param table: to delete.
*/
void slabhash_delete(struct slabhash* table);
/**
* Clear hash table. Entries are all deleted.
* @param table: to make empty.
*/
void slabhash_clear(struct slabhash* table);
/**
* Insert a new element into the hashtable, uses lruhash_insert.
* If key is already present data pointer in that entry is updated.
*
* @param table: hash table.
* @param hash: hash value. User calculates the hash.
* @param entry: identifies the entry.
* If key already present, this entry->key is deleted immediately.
* But entry->data is set to NULL before deletion, and put into
* the existing entry. The data is then freed.
* @param data: the data.
* @param cb_override: if not NULL overrides the cb_arg for deletfunc.
*/
void slabhash_insert(struct slabhash* table, hashvalue_t hash,
struct lruhash_entry* entry, void* data, void* cb_override);
/**
* Lookup an entry in the hashtable. Uses lruhash_lookup.
* At the end of the function you hold a (read/write)lock on the entry.
* The LRU is updated for the entry (if found).
* @param table: hash table.
* @param hash: hash of key.
* @param key: what to look for, compared against entries in overflow chain.
* the hash value must be set, and must work with compare function.
* @param wr: set to true if you desire a writelock on the entry.
* with a writelock you can update the data part.
* @return: pointer to the entry or NULL. The entry is locked.
* The user must unlock the entry when done.
*/
struct lruhash_entry* slabhash_lookup(struct slabhash* table,
hashvalue_t hash, void* key, int wr);
/**
* Remove entry from hashtable. Does nothing if not found in hashtable.
* Delfunc is called for the entry. Uses lruhash_remove.
* @param table: hash table.
* @param hash: hash of key.
* @param key: what to look for.
*/
void slabhash_remove(struct slabhash* table, hashvalue_t hash, void* key);
/**
* Output debug info to the log as to state of the hash table.
* @param table: hash table.
* @param id: string printed with table to identify the hash table.
* @param extended: set to true to print statistics on overflow bin lengths.
*/
void slabhash_status(struct slabhash* table, const char* id, int extended);
/**
* Retrieve slab hash total size.
* @param table: hash table.
* @return size configured as max.
*/
size_t slabhash_get_size(struct slabhash* table);
/**
* Retrieve slab hash current memory use.
* @param table: hash table.
* @return memory in use.
*/
size_t slabhash_get_mem(struct slabhash* table);
/**
* Get lruhash table for a given hash value
* @param table: slabbed hash table.
* @param hash: hash value.
* @return the lru hash table.
*/
struct lruhash* slabhash_gettable(struct slabhash* table, hashvalue_t hash);
/**
* Set markdel function
* @param table: slabbed hash table.
* @param md: markdel function ptr.
*/
void slabhash_setmarkdel(struct slabhash* table, lruhash_markdelfunc_t md);
/**
* Traverse a slabhash.
* @param table: slabbed hash table.
* @param wr: if true, writelock is obtained, otherwise readlock.
* @param func: function to call for every element.
* @param arg: user argument to function.
*/
void slabhash_traverse(struct slabhash* table, int wr,
void (*func)(struct lruhash_entry*, void*), void* arg);
/*
* Count entries in slabhash.
* @param table: slabbed hash table;
* @return the number of items
*/
size_t count_slabhash_entries(struct slabhash* table);
/* --- test representation --- */
/** test structure contains test key */
struct slabhash_testkey {
/** the key id; keys compare by this value */
int id;
/** the entry; embedded so the key struct owns its hash entry */
struct lruhash_entry entry;
};
/** test structure contains test data */
struct slabhash_testdata {
/** data value; separately allocated from the key */
int data;
};
/** test sizefunc for lruhash */
size_t test_slabhash_sizefunc(void*, void*);
/** test comparefunc for lruhash */
int test_slabhash_compfunc(void*, void*);
/** test delkey for lruhash */
void test_slabhash_delkey(void*, void*);
/** test deldata for lruhash */
void test_slabhash_deldata(void*, void*);
/* --- end test representation --- */
#endif /* UTIL_STORAGE_SLABHASH_H */

247
external/unbound/util/timehist.c vendored Normal file
View File

@@ -0,0 +1,247 @@
/*
* util/timehist.c - make histogram of time values.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains functions to make a histogram of time values.
*/
#include "config.h"
#ifdef HAVE_TIME_H
#include <time.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#include "util/timehist.h"
#include "util/log.h"
/** special timestwo operation for time values in histogram setup */
static void
timestwo(struct timeval* v)
{
	/* Double a time value in place for the exponential bucket
	 * ladder.  Zero becomes 1 usec to start the ladder; when the
	 * doubled microseconds hit 1024*1024 (the power-of-two
	 * "second" used here) the value rolls over to exactly
	 * 1 second, which keeps the bounds nice and easy to compute. */
#ifndef S_SPLINT_S
	if(v->tv_usec == 0 && v->tv_sec == 0) {
		v->tv_usec = 1;
	} else {
		v->tv_sec *= 2;
		v->tv_usec *= 2;
		if(v->tv_usec == 1024*1024) {
			v->tv_sec = 1;
			v->tv_usec = 0;
		}
	}
#endif
}
/** do setup exponentially */
static void
dosetup(struct timehist* hist)
{
	/* Initialize the bucket boundaries as an exponential ladder
	 * starting from zero: each bucket's upper bound is the
	 * doubled lower bound (via timestwo), and every counter is
	 * reset to zero. */
	struct timeval bound;
	size_t i;
	memset(&bound, 0, sizeof(bound));
	for(i = 0; i < hist->num; i++) {
		hist->buckets[i].lower = bound;
		timestwo(&bound);
		hist->buckets[i].upper = bound;
		hist->buckets[i].count = 0;
	}
}
struct timehist* timehist_setup(void)
{
	/* Allocate a histogram with NUM_BUCKETS_HIST zeroed buckets
	 * and fill in the exponential bucket boundaries.
	 * Returns NULL on allocation failure. */
	struct timehist* hist;
	hist = (struct timehist*)calloc(1, sizeof(struct timehist));
	if(!hist)
		return NULL;
	hist->num = NUM_BUCKETS_HIST;
	hist->buckets = (struct th_buck*)calloc(hist->num,
		sizeof(struct th_buck));
	if(!hist->buckets) {
		free(hist);
		return NULL;
	}
	dosetup(hist);
	return hist;
}
void timehist_delete(struct timehist* hist)
{
/* Release the bucket array and the histogram itself.
 * NULL is a no-op. */
if(!hist)
return;
free(hist->buckets);
free(hist);
}
void timehist_clear(struct timehist* hist)
{
	/* Zero every bucket counter; the bucket boundaries are kept. */
	size_t i = 0;
	while(i < hist->num) {
		hist->buckets[i].count = 0;
		i++;
	}
}
/** histogram compare of time values */
static int
timeval_smaller(const struct timeval* x, const struct timeval* y)
{
	/* Return 1 when x is at or before y (note: equal values count
	 * as "smaller" here), 0 otherwise.  Seconds are compared
	 * first; microseconds break the tie. */
#ifndef S_SPLINT_S
	if(x->tv_sec != y->tv_sec)
		return x->tv_sec < y->tv_sec ? 1 : 0;
	return x->tv_usec <= y->tv_usec ? 1 : 0;
#endif
}
/** count tv in the first bucket whose upper bound is not exceeded;
 * values beyond all bounds go into the last bucket */
void timehist_insert(struct timehist* hist, struct timeval* tv)
{
    size_t i = 0;
    while(i < hist->num) {
        if(timeval_smaller(tv, &hist->buckets[i].upper)) {
            hist->buckets[i].count++;
            return;
        }
        i++;
    }
    /* dump in last bucket */
    hist->buckets[hist->num-1].count++;
}
void timehist_print(struct timehist* hist)
{
#ifndef S_SPLINT_S
size_t i;
for(i=0; i<hist->num; i++) {
if(hist->buckets[i].count != 0) {
printf("%4d.%6.6d %4d.%6.6d %u\n",
(int)hist->buckets[i].lower.tv_sec,
(int)hist->buckets[i].lower.tv_usec,
(int)hist->buckets[i].upper.tv_sec,
(int)hist->buckets[i].upper.tv_usec,
(unsigned)hist->buckets[i].count);
}
}
#endif
}
/** Log the histogram: interpolated quartiles first, then one log line
 * per nonempty bucket (lower bound, upper bound, count). */
void timehist_log(struct timehist* hist, const char* name)
{
#ifndef S_SPLINT_S
    size_t i;
    log_info("[25%%]=%g median[50%%]=%g [75%%]=%g",
        timehist_quartile(hist, 0.25),
        timehist_quartile(hist, 0.50),
        timehist_quartile(hist, 0.75));
    /* 0000.000000 0000.000000 0 */
    log_info("lower(secs) upper(secs) %s", name);
    for(i=0; i<hist->num; i++) {
        if(hist->buckets[i].count != 0) {
            log_info("%4d.%6.6d %4d.%6.6d %u",
                (int)hist->buckets[i].lower.tv_sec,
                (int)hist->buckets[i].lower.tv_usec,
                (int)hist->buckets[i].upper.tv_sec,
                (int)hist->buckets[i].upper.tv_usec,
                (unsigned)hist->buckets[i].count);
        }
    }
#endif
}
/** total number of samples recorded in the histogram */
static size_t
timehist_count(struct timehist* hist)
{
    size_t total = 0;
    size_t i;
    for(i = 0; i < hist->num; i++)
        total += hist->buckets[i].count;
    return total;
}
/**
 * Estimate the q'th quantile (0<q<1) of the recorded time values by
 * locating the bucket containing the q*total'th sample and linearly
 * interpolating inside it. Returns 0.0 when there are fewer than 4
 * samples or hist is NULL/empty.
 */
double
timehist_quartile(struct timehist* hist, double q)
{
    double lookfor, passed, res;
    double low = 0, up = 0;
    size_t i;
    if(!hist || hist->num == 0)
        return 0.;
    /* look for i'th element, interpolated */
    lookfor = (double)timehist_count(hist);
    if(lookfor < 4)
        return 0.; /* not enough elements for a good estimate */
    lookfor *= q;
    passed = 0;
    i = 0;
    /* walk buckets until the cumulative count reaches the target; at
     * the stop, passed < lookfor <= passed+count, so count > 0 and the
     * division below cannot be by zero */
    while(i+1 < hist->num &&
        passed+(double)hist->buckets[i].count < lookfor) {
        passed += (double)hist->buckets[i++].count;
    }
    /* got the right bucket */
#ifndef S_SPLINT_S
    low = (double)hist->buckets[i].lower.tv_sec +
        (double)hist->buckets[i].lower.tv_usec/1000000.;
    up = (double)hist->buckets[i].upper.tv_sec +
        (double)hist->buckets[i].upper.tv_usec/1000000.;
#endif
    /* linear interpolation of the target position inside the bucket */
    res = (lookfor - passed)*(up-low)/((double)hist->buckets[i].count);
    return low+res;
}
/** copy up to sz bucket counters into array; NULL hist is a no-op */
void
timehist_export(struct timehist* hist, size_t* array, size_t sz)
{
    size_t i, n;
    if(hist == NULL)
        return;
    n = (sz < hist->num) ? sz : hist->num;
    for(i = 0; i < n; i++)
        array[i] = hist->buckets[i].count;
}
/** load up to sz bucket counters from array; NULL hist is a no-op */
void
timehist_import(struct timehist* hist, size_t* array, size_t sz)
{
    size_t i, n;
    if(hist == NULL)
        return;
    n = (sz < hist->num) ? sz : hist->num;
    for(i = 0; i < n; i++)
        hist->buckets[i].count = array[i];
}

134
external/unbound/util/timehist.h vendored Normal file
View File

@@ -0,0 +1,134 @@
/*
* util/timehist.h - make histogram of time values.
*
* Copyright (c) 2007, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains functions to make a histogram of time values.
*/
#ifndef UTIL_TIMEHIST_H
#define UTIL_TIMEHIST_H
/** Number of buckets in a histogram */
#define NUM_BUCKETS_HIST 40
/**
 * Bucket of time history information.
 * Bounds are set up exponentially (see dosetup in timehist.c); a
 * bucket's lower bound equals the previous bucket's upper bound.
 */
struct th_buck {
    /** lower bound */
    struct timeval lower;
    /** upper bound */
    struct timeval upper;
    /** number of items counted in this bucket */
    size_t count;
};
/**
 * Keep histogram of time values.
 */
struct timehist {
    /** number of buckets */
    size_t num;
    /** bucket array, of length num */
    struct th_buck* buckets;
};
/**
* Setup a histogram, default
* @return histogram or NULL on malloc failure.
*/
struct timehist* timehist_setup(void);
/**
* Delete histogram
* @param hist: to delete
*/
void timehist_delete(struct timehist* hist);
/**
* Clear histogram
* @param hist: to clear all data from
*/
void timehist_clear(struct timehist* hist);
/**
* Add time value to histogram.
* @param hist: histogram
* @param tv: time value
*/
void timehist_insert(struct timehist* hist, struct timeval* tv);
/**
* Find time value for given quartile, such as 0.25, 0.50, 0.75.
* The looks up the value for the i-th element in the sorted list of time
* values, as approximated using the histogram.
* @param hist: histogram. Interpolated information is used from it.
* @param q: quartile, 0.50 results in the median. Must be >0 and <1.
* @return: the time in seconds for that percentage.
*/
double timehist_quartile(struct timehist* hist, double q);
/**
* Printout histogram
* @param hist: histogram
*/
void timehist_print(struct timehist* hist);
/**
* Log histogram, print it to the logfile.
* @param hist: histogram
* @param name: the name of the value column
*/
void timehist_log(struct timehist* hist, const char* name);
/**
* Export histogram to an array.
* @param hist: histogram
* @param array: the array to export to.
* @param sz: number of items in array.
*/
void timehist_export(struct timehist* hist, size_t* array, size_t sz);
/**
* Import histogram from an array.
* @param hist: histogram
* @param array: the array to import from.
* @param sz: number of items in array.
*/
void timehist_import(struct timehist* hist, size_t* array, size_t sz);
#endif /* UTIL_TIMEHIST_H */

727
external/unbound/util/tube.c vendored Normal file
View File

@@ -0,0 +1,727 @@
/*
* util/tube.c - pipe service
*
* Copyright (c) 2008, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains pipe service functions.
*/
#include "config.h"
#include "util/tube.h"
#include "util/log.h"
#include "util/net_help.h"
#include "util/netevent.h"
#include "util/fptr_wlist.h"
#ifndef USE_WINSOCK
/* on unix */
#ifndef HAVE_SOCKETPAIR
/** no socketpair() available, like on Minix 3.1.7, use pipe */
#define socketpair(f, t, p, sv) pipe(sv)
#endif /* HAVE_SOCKETPAIR */
/**
 * Create a pipe (unix): a nonblocking socketpair, sv[0] becomes the
 * read end and sv[1] the write end. errno is saved around log_err
 * calls so the caller observes the original failure cause.
 * @return new tube, or NULL on error with errno set.
 */
struct tube* tube_create(void)
{
    struct tube* tube = (struct tube*)calloc(1, sizeof(*tube));
    int sv[2];
    if(!tube) {
        int err = errno;
        log_err("tube_create: out of memory");
        errno = err;
        return NULL;
    }
    /* mark fds invalid so tube_delete can be used for cleanup below */
    tube->sr = -1;
    tube->sw = -1;
    if(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        int err = errno;
        log_err("socketpair: %s", strerror(errno));
        free(tube);
        errno = err;
        return NULL;
    }
    tube->sr = sv[0];
    tube->sw = sv[1];
    if(!fd_set_nonblock(tube->sr) || !fd_set_nonblock(tube->sw)) {
        int err = errno;
        log_err("tube: cannot set nonblocking");
        tube_delete(tube);
        errno = err;
        return NULL;
    }
    return tube;
}
/** delete the tube: unregister events first, then close the fds.
 * NULL is a no-op. */
void tube_delete(struct tube* tube)
{
    if(tube == NULL)
        return;
    tube_remove_bg_listen(tube);
    tube_remove_bg_write(tube);
    /* close fds only after deleting the commpoints, to be sure;
     * epoll also does not like closing an fd before event_del */
    tube_close_read(tube);
    tube_close_write(tube);
    free(tube);
}
/** close the read end of the pipe, if still open */
void tube_close_read(struct tube* tube)
{
    if(tube->sr == -1)
        return; /* already closed */
    close(tube->sr);
    tube->sr = -1;
}
/** close the write end of the pipe, if still open */
void tube_close_write(struct tube* tube)
{
    if(tube->sw == -1)
        return; /* already closed */
    close(tube->sw);
    tube->sw = -1;
}
/** tear down the background read: delete the commpoint and drop any
 * partially received command buffer */
void tube_remove_bg_listen(struct tube* tube)
{
    if(tube->listen_com != NULL) {
        comm_point_delete(tube->listen_com);
        tube->listen_com = NULL;
    }
    free(tube->cmd_msg); /* free(NULL) is a no-op */
    tube->cmd_msg = NULL;
}
/** tear down the background write: delete the commpoint and discard
 * the queue of unwritten results */
void tube_remove_bg_write(struct tube* tube)
{
    struct tube_res_list* p;
    if(tube->res_com != NULL) {
        comm_point_delete(tube->res_com);
        tube->res_com = NULL;
    }
    p = tube->res_list;
    tube->res_list = NULL;
    tube->res_last = NULL;
    while(p != NULL) {
        struct tube_res_list* next = p->next;
        free(p->buf);
        free(p);
        p = next;
    }
}
/**
 * Event callback: read one command message from the pipe in the
 * background. The wire format is a native uint32 length followed by
 * that many bytes. Reads may be partial; progress is carried across
 * invocations in tube->cmd_read (bytes read so far, including the
 * length prefix), tube->cmd_len and tube->cmd_msg. When a message is
 * complete, the listen callback receives the malloced buffer and is
 * expected to free it.
 * @param c: commpoint; c->fd is the read end of the pipe.
 * @param arg: the tube.
 * @param error: NETEVENT_NOERROR or an error code passed to the callback.
 * @return 0 always (event handled).
 */
int
tube_handle_listen(struct comm_point* c, void* arg, int error,
    struct comm_reply* ATTR_UNUSED(reply_info))
{
    struct tube* tube = (struct tube*)arg;
    ssize_t r;
    if(error != NETEVENT_NOERROR) {
        fptr_ok(fptr_whitelist_tube_listen(tube->listen_cb));
        (*tube->listen_cb)(tube, NULL, 0, error, tube->listen_arg);
        return 0;
    }
    if(tube->cmd_read < sizeof(tube->cmd_len)) {
        /* complete reading the length of control msg */
        r = read(c->fd, ((uint8_t*)&tube->cmd_len) + tube->cmd_read,
            sizeof(tube->cmd_len) - tube->cmd_read);
        if(r==0) {
            /* error has happened or */
            /* parent closed pipe, must have exited somehow */
            fptr_ok(fptr_whitelist_tube_listen(tube->listen_cb));
            (*tube->listen_cb)(tube, NULL, 0, NETEVENT_CLOSED,
                tube->listen_arg);
            return 0;
        }
        if(r==-1) {
            if(errno != EAGAIN && errno != EINTR) {
                log_err("rpipe error: %s", strerror(errno));
            }
            /* nothing to read now, try later */
            return 0;
        }
        tube->cmd_read += r;
        if(tube->cmd_read < sizeof(tube->cmd_len)) {
            /* not complete, try later */
            return 0;
        }
        /* length prefix complete: allocate the message buffer */
        tube->cmd_msg = (uint8_t*)calloc(1, tube->cmd_len);
        if(!tube->cmd_msg) {
            log_err("malloc failure");
            tube->cmd_read = 0;
            return 0;
        }
    }
    /* cmd_len has been read, read remainder */
    r = read(c->fd, tube->cmd_msg+tube->cmd_read-sizeof(tube->cmd_len),
        tube->cmd_len - (tube->cmd_read - sizeof(tube->cmd_len)));
    if(r==0) {
        /* error has happened or */
        /* parent closed pipe, must have exited somehow */
        fptr_ok(fptr_whitelist_tube_listen(tube->listen_cb));
        (*tube->listen_cb)(tube, NULL, 0, NETEVENT_CLOSED,
            tube->listen_arg);
        return 0;
    }
    if(r==-1) {
        /* nothing to read now, try later */
        if(errno != EAGAIN && errno != EINTR) {
            log_err("rpipe error: %s", strerror(errno));
        }
        return 0;
    }
    tube->cmd_read += r;
    if(tube->cmd_read < sizeof(tube->cmd_len) + tube->cmd_len) {
        /* not complete, try later */
        return 0;
    }
    /* full message received: reset state and hand it to the callback */
    tube->cmd_read = 0;
    fptr_ok(fptr_whitelist_tube_listen(tube->listen_cb));
    (*tube->listen_cb)(tube, tube->cmd_msg, tube->cmd_len,
        NETEVENT_NOERROR, tube->listen_arg);
    /* also frees the buf */
    tube->cmd_msg = NULL;
    return 0;
}
/**
 * Event callback: write the head of the result queue to the pipe in the
 * background. Writes may be partial; tube->res_write counts the bytes
 * of the current item (length prefix included) written so far. When an
 * item completes it is freed and removed; when the queue drains, the
 * commpoint stops listening for writability.
 * @param c: commpoint; c->fd is the write end of the pipe.
 * @param arg: the tube.
 * @param error: NETEVENT_NOERROR or an error code (logged, then ignored).
 * @return 0 always (event handled).
 */
int
tube_handle_write(struct comm_point* c, void* arg, int error,
    struct comm_reply* ATTR_UNUSED(reply_info))
{
    struct tube* tube = (struct tube*)arg;
    struct tube_res_list* item = tube->res_list;
    ssize_t r;
    if(error != NETEVENT_NOERROR) {
        log_err("tube_handle_write net error %d", error);
        return 0;
    }
    if(!item) {
        /* queue is empty, nothing to write */
        comm_point_stop_listening(c);
        return 0;
    }
    if(tube->res_write < sizeof(item->len)) {
        /* still writing the length prefix of the head item */
        r = write(c->fd, ((uint8_t*)&item->len) + tube->res_write,
            sizeof(item->len) - tube->res_write);
        if(r == -1) {
            if(errno != EAGAIN && errno != EINTR) {
                log_err("wpipe error: %s", strerror(errno));
            }
            return 0; /* try again later */
        }
        if(r == 0) {
            /* error on pipe, must have exited somehow */
            /* cannot signal this to pipe user */
            return 0;
        }
        tube->res_write += r;
        if(tube->res_write < sizeof(item->len))
            return 0;
    }
    /* length prefix done: write the payload */
    r = write(c->fd, item->buf + tube->res_write - sizeof(item->len),
        item->len - (tube->res_write - sizeof(item->len)));
    if(r == -1) {
        if(errno != EAGAIN && errno != EINTR) {
            log_err("wpipe error: %s", strerror(errno));
        }
        return 0; /* try again later */
    }
    if(r == 0) {
        /* error on pipe, must have exited somehow */
        /* cannot signal this to pipe user */
        return 0;
    }
    tube->res_write += r;
    if(tube->res_write < sizeof(item->len) + item->len)
        return 0;
    /* done this result, remove it */
    free(item->buf);
    item->buf = NULL;
    tube->res_list = tube->res_list->next;
    free(item);
    if(!tube->res_list) {
        tube->res_last = NULL;
        comm_point_stop_listening(c);
    }
    tube->res_write = 0;
    return 0;
}
/**
 * Write a length-prefixed message to the pipe (unix, foreground).
 * Optionally attempts the first write nonblocking, then switches the fd
 * to blocking to finish the length prefix and payload, and restores
 * nonblocking mode on exit.
 * @param tube: tube; its write fd is used.
 * @param buf: message payload.
 * @param len: payload length, written first as a native uint32.
 * @param nonblock: if true, the first write may fail with -1 (EAGAIN).
 * @return 1 on success, 0 on error (fd blocking state then unknown),
 *	-1 when nonblock and the first write would block.
 */
int tube_write_msg(struct tube* tube, uint8_t* buf, uint32_t len,
    int nonblock)
{
    ssize_t r, d;
    int fd = tube->sw;

    /* test */
    if(nonblock) {
        r = write(fd, &len, sizeof(len));
        if(r == -1) {
            if(errno==EINTR || errno==EAGAIN)
                return -1;
            log_err("tube msg write failed: %s", strerror(errno));
            return -1; /* can still continue, perhaps */
        }
    } else r = 0;
    /* switch to blocking to complete the message without spinning */
    if(!fd_set_block(fd))
        return 0;

    /* write remainder of the length prefix, d counts bytes done */
    d = r;
    while(d != (ssize_t)sizeof(len)) {
        if((r=write(fd, ((char*)&len)+d, sizeof(len)-d)) == -1) {
            log_err("tube msg write failed: %s", strerror(errno));
            (void)fd_set_nonblock(fd);
            return 0;
        }
        d += r;
    }
    /* write the payload */
    d = 0;
    while(d != (ssize_t)len) {
        if((r=write(fd, buf+d, len-d)) == -1) {
            log_err("tube msg write failed: %s", strerror(errno));
            (void)fd_set_nonblock(fd);
            return 0;
        }
        d += r;
    }
    /* restore nonblocking mode */
    if(!fd_set_nonblock(fd))
        return 0;
    return 1;
}
/**
 * Read a length-prefixed message from the pipe (unix, foreground).
 * Optionally attempts the first read nonblocking, then switches the fd
 * to blocking to finish the prefix and payload, restoring nonblocking
 * mode on exit. The returned buffer is malloced; caller frees.
 * @param tube: tube; its read fd is used.
 * @param buf: receives the malloced message on success.
 * @param len: receives the payload length.
 * @param nonblock: if true, the first read may fail with -1 (EAGAIN).
 * @return 1 on success, 0 on error/EOF (fd state then unknown),
 *	-1 when nonblock and nothing was available.
 */
int tube_read_msg(struct tube* tube, uint8_t** buf, uint32_t* len,
    int nonblock)
{
    ssize_t r, d;
    int fd = tube->sr;

    /* test */
    *len = 0;
    if(nonblock) {
        r = read(fd, len, sizeof(*len));
        if(r == -1) {
            if(errno==EINTR || errno==EAGAIN)
                return -1;
            log_err("tube msg read failed: %s", strerror(errno));
            return -1; /* we can still continue, perhaps */
        }
        if(r == 0) /* EOF */
            return 0;
    } else r = 0;
    /* switch to blocking to complete the message without spinning */
    if(!fd_set_block(fd))
        return 0;

    /* read remainder of the length prefix, d counts bytes done */
    d = r;
    while(d != (ssize_t)sizeof(*len)) {
        if((r=read(fd, ((char*)len)+d, sizeof(*len)-d)) == -1) {
            log_err("tube msg read failed: %s", strerror(errno));
            (void)fd_set_nonblock(fd);
            return 0;
        }
        if(r == 0) /* EOF */ {
            (void)fd_set_nonblock(fd);
            return 0;
        }
        d += r;
    }
    /* sanity bound on message size before allocating */
    log_assert(*len < 65536*2);
    *buf = (uint8_t*)malloc(*len);
    if(!*buf) {
        log_err("tube read out of memory");
        (void)fd_set_nonblock(fd);
        return 0;
    }
    /* read the payload */
    d = 0;
    while(d < (ssize_t)*len) {
        if((r=read(fd, (*buf)+d, (size_t)((ssize_t)*len)-d)) == -1) {
            log_err("tube msg read failed: %s", strerror(errno));
            (void)fd_set_nonblock(fd);
            free(*buf);
            return 0;
        }
        if(r == 0) { /* EOF */
            (void)fd_set_nonblock(fd);
            free(*buf);
            return 0;
        }
        d += r;
    }
    /* restore nonblocking mode */
    if(!fd_set_nonblock(fd)) {
        free(*buf);
        return 0;
    }
    return 1;
}
/** perform a select() on the fd: wait (until timeout t, or forever when
 * t is NULL) for it to become readable; returns 1 if readable, 0 on
 * timeout or select error */
static int
pollit(int fd, struct timeval* t)
{
    fd_set rset;
#ifndef S_SPLINT_S
    FD_ZERO(&rset);
    FD_SET(FD_SET_T fd, &rset);
#endif
    if(select(fd+1, &rset, NULL, NULL, t) == -1)
        return 0;
    errno = 0;
    return (int)(FD_ISSET(fd, &rset));
}
/** nonblocking check whether the tube has data to read */
int tube_poll(struct tube* tube)
{
    struct timeval now;
    memset(&now, 0, sizeof(now)); /* zero timeout: do not block */
    return pollit(tube->sr, &now);
}
/** block until the tube has data to read (no timeout) */
int tube_wait(struct tube* tube)
{
    /* a NULL timeout makes select block indefinitely */
    return pollit(tube->sr, NULL);
}
/** return the file descriptor that becomes readable when information
 * arrives on the tube (the read end of the socketpair) */
int tube_read_fd(struct tube* tube)
{
    return tube->sr;
}
/** register a raw commpoint on the read fd so cb gets complete messages
 * in the background; returns 0 on failure with errno preserved */
int tube_setup_bg_listen(struct tube* tube, struct comm_base* base,
    tube_callback_t* cb, void* arg)
{
    tube->listen_cb = cb;
    tube->listen_arg = arg;
    tube->listen_com = comm_point_create_raw(base, tube->sr, 0,
        tube_handle_listen, tube);
    if(tube->listen_com == NULL) {
        int err = errno; /* keep errno across log_err */
        log_err("tube_setup_bg_l: commpoint creation failed");
        errno = err;
        return 0;
    }
    return 1;
}
int tube_setup_bg_write(struct tube* tube, struct comm_base* base)
{
if(!(tube->res_com = comm_point_create_raw(base, tube->sw,
1, tube_handle_write, tube))) {
int err = errno;
log_err("tube_setup_bg_w: commpoint creation failed");
errno = err;
return 0;
}
return 1;
}
/** append a message to the background write queue; takes ownership of
 * msg (freed on failure and after sending); returns 0 on malloc fail */
int tube_queue_item(struct tube* tube, uint8_t* msg, size_t len)
{
    struct tube_res_list* item;
    item = (struct tube_res_list*)malloc(sizeof(*item));
    if(item == NULL) {
        free(msg); /* ownership of msg was passed to us */
        log_err("out of memory for async answer");
        return 0;
    }
    item->buf = msg;
    item->len = len;
    item->next = NULL;
    /* append at the tail: the head may be partially written already */
    if(tube->res_last != NULL)
        tube->res_last->next = item;
    else    tube->res_list = item;
    tube->res_last = item;
    if(tube->res_list == tube->res_last) {
        /* queue was empty before: kick off the background write */
        comm_point_start_listening(tube->res_com, -1, -1);
    }
    return 1;
}
/** unix variant is never called: the signal-event mechanism is only
 * used by the windows implementation, so reaching here is a bug */
void tube_handle_signal(int ATTR_UNUSED(fd), short ATTR_UNUSED(events),
    void* ATTR_UNUSED(arg))
{
    log_assert(0);
}
#else /* USE_WINSOCK */
/* on windows */
/**
 * Create a pipe (windows): windows does not have forks like unix, so we
 * only support threads on windows. Thus the pipe need only connect
 * threads: a mutex-protected list of datagrams plus a WSA event to
 * signal readers.
 * @return new tube, or NULL on error.
 */
struct tube* tube_create(void)
{
    struct tube* tube = (struct tube*)calloc(1, sizeof(*tube));
    if(!tube) {
        int err = errno;
        log_err("tube_create: out of memory");
        errno = err;
        return NULL;
    }
    tube->event = WSACreateEvent();
    if(tube->event == WSA_INVALID_EVENT) {
        log_err("WSACreateEvent: %s", wsa_strerror(WSAGetLastError()));
        free(tube);
        /* BUGFIX: the original fell through after free(tube) and then
         * dereferenced the freed tube (use-after-free) and returned a
         * broken handle; report the failure to the caller instead. */
        return NULL;
    }
    if(!WSAResetEvent(tube->event)) {
        log_err("WSAResetEvent: %s", wsa_strerror(WSAGetLastError()));
    }
    lock_basic_init(&tube->res_lock);
    verbose(VERB_ALGO, "tube created");
    return tube;
}
/** delete the tube (windows): drop queued datagrams, close the WSA
 * event handle and destroy the lock; NULL is a no-op */
void tube_delete(struct tube* tube)
{
    if(!tube) return;
    tube_remove_bg_listen(tube);
    tube_remove_bg_write(tube);
    tube_close_read(tube);
    tube_close_write(tube);
    if(!WSACloseEvent(tube->event))
        log_err("WSACloseEvent: %s", wsa_strerror(WSAGetLastError()));
    lock_basic_destroy(&tube->res_lock);
    verbose(VERB_ALGO, "tube deleted");
    free(tube);
}
/** close read end (windows): there is no fd to close, log only */
void tube_close_read(struct tube* ATTR_UNUSED(tube))
{
    verbose(VERB_ALGO, "tube close_read");
}
void tube_close_write(struct tube* ATTR_UNUSED(tube))
{
verbose(VERB_ALGO, "tube close_write");
/* wake up waiting reader with an empty queue */
if(!WSASetEvent(tube->event)) {
log_err("WSASetEvent: %s", wsa_strerror(WSAGetLastError()));
}
}
/** remove bg listen (windows): unregister the wsa event from the base */
void tube_remove_bg_listen(struct tube* tube)
{
    verbose(VERB_ALGO, "tube remove_bg_listen");
    winsock_unregister_wsaevent(&tube->ev_listen);
}
/** remove bg write (windows): discard all queued result datagrams */
void tube_remove_bg_write(struct tube* tube)
{
    verbose(VERB_ALGO, "tube remove_bg_write");
    if(tube->res_list) {
        struct tube_res_list* np, *p = tube->res_list;
        tube->res_list = NULL;
        tube->res_last = NULL;
        while(p) {
            np = p->next;
            free(p->buf);
            free(p);
            p = np;
        }
    }
}
/** write a message (windows): copy the buffer and queue it as one
 * datagram; always nonblocking, this in-memory pipe cannot get full */
int tube_write_msg(struct tube* tube, uint8_t* buf, uint32_t len,
    int ATTR_UNUSED(nonblock))
{
    uint8_t* a;
    verbose(VERB_ALGO, "tube write_msg len %d", (int)len);
    a = (uint8_t*)memdup(buf, len);
    if(!a) {
        log_err("out of memory in tube_write_msg");
        return 0;
    }
    /* always nonblocking, this pipe cannot get full */
    return tube_queue_item(tube, a, len);
}
/** read a message (windows): pop the head datagram off the locked
 * queue; blocks via the event unless nonblock. Caller frees *buf. */
int tube_read_msg(struct tube* tube, uint8_t** buf, uint32_t* len,
    int nonblock)
{
    struct tube_res_list* item = NULL;
    verbose(VERB_ALGO, "tube read_msg %s", nonblock?"nonblock":"blocking");
    *buf = NULL;
    if(!tube_poll(tube)) {
        verbose(VERB_ALGO, "tube read_msg nodata");
        /* nothing ready right now, wait if we want to */
        if(nonblock)
            return -1; /* would block waiting for items */
        if(!tube_wait(tube))
            return 0;
    }
    lock_basic_lock(&tube->res_lock);
    if(tube->res_list) {
        item = tube->res_list;
        tube->res_list = item->next;
        if(tube->res_last == item) {
            /* the list is now empty */
            tube->res_last = NULL;
            verbose(VERB_ALGO, "tube read_msg lastdata");
            /* reset the event so a later read blocks again */
            if(!WSAResetEvent(tube->event)) {
                log_err("WSAResetEvent: %s",
                    wsa_strerror(WSAGetLastError()));
            }
        }
    }
    lock_basic_unlock(&tube->res_lock);
    if(!item)
        return 0; /* would block waiting for items */
    *buf = item->buf;
    *len = item->len;
    free(item);
    verbose(VERB_ALGO, "tube read_msg len %d", (int)*len);
    return 1;
}
/** nonblocking check (windows): peek at the locked queue head */
int tube_poll(struct tube* tube)
{
    struct tube_res_list* item = NULL;
    lock_basic_lock(&tube->res_lock);
    item = tube->res_list;
    lock_basic_unlock(&tube->res_lock);
    if(item)
        return 1;
    return 0;
}
/** block (windows) on the event handle until a writer signals it */
int tube_wait(struct tube* tube)
{
    /* block on eventhandle */
    DWORD res = WSAWaitForMultipleEvents(
        1 /* one event in array */,
        &tube->event /* the event to wait for, our pipe signal */,
        0 /* wait for all events is false */,
        WSA_INFINITE /* wait, no timeout */,
        0 /* we are not alertable for IO completion routines */
        );
    if(res == WSA_WAIT_TIMEOUT) {
        return 0;
    }
    if(res == WSA_WAIT_IO_COMPLETION) {
        /* a bit unexpected, since we were not alertable */
        return 0;
    }
    return 1;
}
/** there is no readable fd on windows; callers get -1 */
int tube_read_fd(struct tube* ATTR_UNUSED(tube))
{
    /* nothing sensible on Windows */
    return -1;
}
/** commpoint callbacks are unix-only; reaching them on windows is a bug */
int
tube_handle_listen(struct comm_point* ATTR_UNUSED(c), void* ATTR_UNUSED(arg),
    int ATTR_UNUSED(error), struct comm_reply* ATTR_UNUSED(reply_info))
{
    log_assert(0);
    return 0;
}
int
tube_handle_write(struct comm_point* ATTR_UNUSED(c), void* ATTR_UNUSED(arg),
    int ATTR_UNUSED(error), struct comm_reply* ATTR_UNUSED(reply_info))
{
    log_assert(0);
    return 0;
}
/** register the tube's wsa event with the event base (windows) */
int tube_setup_bg_listen(struct tube* tube, struct comm_base* base,
    tube_callback_t* cb, void* arg)
{
    tube->listen_cb = cb;
    tube->listen_arg = arg;
    if(!comm_base_internal(base))
        return 1; /* ignore when no comm base - testing */
    return winsock_register_wsaevent(comm_base_internal(base),
        &tube->ev_listen, tube->event, &tube_handle_signal, tube);
}
/** no background writer needed on windows: queueing signals the event */
int tube_setup_bg_write(struct tube* ATTR_UNUSED(tube),
    struct comm_base* ATTR_UNUSED(base))
{
    /* the queue item routine performs the signaling */
    return 1;
}
/** append a datagram to the locked queue (windows) and signal the
 * event; takes ownership of msg (freed on failure) */
int tube_queue_item(struct tube* tube, uint8_t* msg, size_t len)
{
    struct tube_res_list* item =
        (struct tube_res_list*)malloc(sizeof(*item));
    verbose(VERB_ALGO, "tube queue_item len %d", (int)len);
    if(!item) {
        free(msg);
        log_err("out of memory for async answer");
        return 0;
    }
    item->buf = msg;
    item->len = len;
    item->next = NULL;
    lock_basic_lock(&tube->res_lock);
    /* add at back of list, since the first one may be partially written */
    if(tube->res_last)
        tube->res_last->next = item;
    else tube->res_list = item;
    tube->res_last = item;
    /* signal the eventhandle */
    if(!WSASetEvent(tube->event)) {
        log_err("WSASetEvent: %s", wsa_strerror(WSAGetLastError()));
    }
    lock_basic_unlock(&tube->res_lock);
    return 1;
}
/** event-base callback (windows): drain all queued messages and hand
 * each one to the listen callback (which frees the buffer) */
void tube_handle_signal(int ATTR_UNUSED(fd), short ATTR_UNUSED(events),
    void* arg)
{
    struct tube* tube = (struct tube*)arg;
    uint8_t* buf;
    uint32_t len = 0;
    verbose(VERB_ALGO, "tube handle_signal");
    while(tube_poll(tube)) {
        if(tube_read_msg(tube, &buf, &len, 1)) {
            fptr_ok(fptr_whitelist_tube_listen(tube->listen_cb));
            (*tube->listen_cb)(tube, buf, len, NETEVENT_NOERROR,
                tube->listen_arg);
        }
    }
}
#endif /* USE_WINSOCK */

273
external/unbound/util/tube.h vendored Normal file
View File

@@ -0,0 +1,273 @@
/*
* util/tube.h - pipe service
*
* Copyright (c) 2008, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains pipe service functions.
*/
#ifndef UTIL_TUBE_H
#define UTIL_TUBE_H
struct comm_reply;
struct comm_point;
struct comm_base;
struct tube;
struct tube_res_list;
#ifdef USE_WINSOCK
#include "util/locks.h"
#include "util/winsock_event.h"
#endif
/**
* Callback from pipe listen function
* void mycallback(tube, msg, len, error, user_argument);
* if error is true (NETEVENT_*), msg is probably NULL.
*/
typedef void tube_callback_t(struct tube*, uint8_t*, size_t, int, void*);
/**
* A pipe
*/
struct tube {
#ifndef USE_WINSOCK
/** pipe end to read from */
int sr;
/** pipe end to write on */
int sw;
/** listen commpoint */
struct comm_point* listen_com;
/** listen callback */
tube_callback_t* listen_cb;
/** listen callback user arg */
void* listen_arg;
/** are we currently reading a command, 0 if not, else bytecount */
size_t cmd_read;
/** size of current read command, may be partially read */
uint32_t cmd_len;
/** the current read command content, malloced, can be partially read*/
uint8_t* cmd_msg;
/** background write queue, commpoint to write results back */
struct comm_point* res_com;
/** are we curently writing a result, 0 if not, else bytecount into
* the res_list first entry. */
size_t res_write;
/** list of outstanding results to be written back */
struct tube_res_list* res_list;
/** last in list */
struct tube_res_list* res_last;
#else /* USE_WINSOCK */
/** listen callback */
tube_callback_t* listen_cb;
/** listen callback user arg */
void* listen_arg;
/** the windows sockets event (signaled if items in pipe) */
WSAEVENT event;
/** winsock event storage when registered with event base */
struct event ev_listen;
/** lock on the list of outstanding items */
lock_basic_t res_lock;
/** list of outstanding results on pipe */
struct tube_res_list* res_list;
/** last in list */
struct tube_res_list* res_last;
#endif /* USE_WINSOCK */
};
/**
 * List of results (arbitrary command serializations) to write back.
 * Entries and their buffers are malloced; the writer frees them once
 * fully sent (see tube_handle_write / tube_read_msg in tube.c).
 */
struct tube_res_list {
    /** next in list */
    struct tube_res_list* next;
    /** serialized buffer to write */
    uint8_t* buf;
    /** length to write */
    uint32_t len;
};
/**
* Create a pipe
* @return: new tube struct or NULL on error.
*/
struct tube* tube_create(void);
/**
* Delete and destroy a pipe
* @param tube: to delete
*/
void tube_delete(struct tube* tube);
/**
* Write length bytes followed by message.
* @param tube: the tube to write on.
* If that tube is a pipe, its write fd is used as
* the socket to write on. Is nonblocking.
* Set to blocking by the function,
* and back to non-blocking at exit of function.
* @param buf: the message.
* @param len: length of message.
* @param nonblock: if set to true, the first write is nonblocking.
* If the first write fails the function returns -1.
* If set false, the first write is blocking.
* @return: all remainder writes are nonblocking.
* return 0 on error, in that case blocking/nonblocking of socket is
* unknown.
* return 1 if all OK.
*/
int tube_write_msg(struct tube* tube, uint8_t* buf, uint32_t len,
int nonblock);
/**
* Read length bytes followed by message.
* @param tube: The tube to read on.
* If that tube is a pipe, its read fd is used as
* the socket to read on. Is nonblocking.
* Set to blocking by the function,
* and back to non-blocking at exit of function.
* @param buf: the message, malloced.
* @param len: length of message, returned.
* @param nonblock: if set to true, the first read is nonblocking.
* If the first read fails the function returns -1.
* If set false, the first read is blocking.
* @return: all remainder reads are nonblocking.
* return 0 on error, in that case blocking/nonblocking of socket is
* unknown. On EOF 0 is returned.
* return 1 if all OK.
*/
int tube_read_msg(struct tube* tube, uint8_t** buf, uint32_t* len,
int nonblock);
/**
* Close read part of the pipe.
* The tube can no longer be read from.
* @param tube: tube to operate on.
*/
void tube_close_read(struct tube* tube);
/**
* Close write part of the pipe.
* The tube can no longer be written to.
* @param tube: tube to operate on.
*/
void tube_close_write(struct tube* tube);
/**
* See if data is ready for reading on the tube without blocking.
* @param tube: tube to check for readable items
* @return true if readable items are present. False if not (or error).
* true on pipe_closed.
*/
int tube_poll(struct tube* tube);
/**
* Wait for data to be ready for reading on the tube. is blocking.
* No timeout.
* @param tube: the tube to wait on.
* @return: if there was something to read (false on error).
* true on pipe_closed.
*/
int tube_wait(struct tube* tube);
/**
* Get FD that is readable when new information arrives.
* @param tube
* @return file descriptor.
*/
int tube_read_fd(struct tube* tube);
/**
* Start listening for information over the pipe.
* Background registration of a read listener, callback when read completed.
* Do not mix with tube_read_msg style direct reads from the pipe.
* @param tube: tube to listen on
* @param base: what base to register event callback.
* @param cb: callback routine.
* @param arg: user argument for callback routine.
* @return true if successful, false on error.
*/
int tube_setup_bg_listen(struct tube* tube, struct comm_base* base,
tube_callback_t* cb, void* arg);
/**
* Remove bg listen setup from event base.
* @param tube: what tube to cleanup
*/
void tube_remove_bg_listen(struct tube* tube);
/**
* Start background write handler for the pipe.
* Do not mix with tube_write_msg style direct writes to the pipe.
* @param tube: tube to write on
* @param base: what base to register event handler on.
* @return true if successful, false on error.
*/
int tube_setup_bg_write(struct tube* tube, struct comm_base* base);
/**
* Remove bg write setup from event base.
* @param tube: what tube to cleanup
*/
void tube_remove_bg_write(struct tube* tube);
/**
* Append data item to background list of writes.
* Mallocs a list entry behind the scenes.
* Not locked behind the scenes, call from one thread or lock on outside.
* @param tube: what tube to queue on.
* @param msg: memory message to send. Is free()d after use.
* Put at the end of the to-send queue.
* @param len: length of item.
* @return 0 on failure (msg freed).
*/
int tube_queue_item(struct tube* tube, uint8_t* msg, size_t len);
/** for fptr wlist, callback function */
int tube_handle_listen(struct comm_point* c, void* arg, int error,
struct comm_reply* reply_info);
/** for fptr wlist, callback function */
int tube_handle_write(struct comm_point* c, void* arg, int error,
struct comm_reply* reply_info);
/** for fptr wlist, winsock signal event callback function */
void tube_handle_signal(int fd, short events, void* arg);
#endif /* UTIL_TUBE_H */

696
external/unbound/util/winsock_event.c vendored Normal file
View File

@@ -0,0 +1,696 @@
/*
* util/winsock_event.c - implementation of the unbound winsock event handler.
*
* Copyright (c) 2008, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
* Implementation of the unbound WinSock2 API event notification handler
* for the Windows port.
*/
#include "config.h"
#ifdef USE_WINSOCK
#include <signal.h>
#ifdef HAVE_TIME_H
#include <time.h>
#endif
#include <sys/time.h>
#include "util/winsock_event.h"
#include "util/fptr_wlist.h"
/** compare two events: primarily by absolute timeout, with the pointer
 * value as a tiebreak so every event is a unique key in the rbtree */
int mini_ev_cmp(const void* a, const void* b)
{
	const struct event* x = (const struct event*)a;
	const struct event* y = (const struct event*)b;
	/* earlier absolute timeout sorts first */
	if(x->ev_timeout.tv_sec != y->ev_timeout.tv_sec)
		return (x->ev_timeout.tv_sec < y->ev_timeout.tv_sec)? -1: 1;
	if(x->ev_timeout.tv_usec != y->ev_timeout.tv_usec)
		return (x->ev_timeout.tv_usec < y->ev_timeout.tv_usec)? -1: 1;
	/* identical timeout values: order on identity for uniqueness */
	if(x != y)
		return (x < y)? -1: 1;
	return 0;
}
/** set time: refresh the cached wall-clock time stored in the base.
 * @param base: event base whose time_tv and time_secs are updated.
 * @return 0 on success, -1 if gettimeofday fails. */
static int
settime(struct event_base* base)
{
	if(gettimeofday(base->time_tv, NULL) < 0) {
		return -1;
	}
#ifndef S_SPLINT_S
	/* keep the seconds-only cache in sync with the full timeval */
	*base->time_secs = (time_t)base->time_tv->tv_sec;
#endif
	return 0;
}
#ifdef UNBOUND_DEBUG
/**
* Find a fd in the list of items.
* Note that not all items have a fd associated (those are -1).
* Signals are stored separately, and not searched.
* @param base: event base to look in.
* @param fd: what socket to look for.
* @return the index in the array, or -1 on failure.
*/
static int
find_fd(struct event_base* base, int fd)
{
	int idx;
	/* linear scan; the array holds at most WSK_MAX_ITEMS entries */
	for(idx = 0; idx < base->max; idx++) {
		if(base->items[idx]->ev_fd == fd)
			return idx;
	}
	return -1;
}
#endif
/** zero out every occurrence of the handle x in the waitfor array */
static void
zero_waitfor(WSAEVENT waitfor[], WSAEVENT x)
{
	int idx;
	for(idx = 0; idx < WSK_MAX_ITEMS; idx++) {
		if(waitfor[idx] == x)
			waitfor[idx] = 0;
	}
}
/** create a new winsock event base.
 * @param time_secs: caller-owned seconds cache updated by the base.
 * @param time_tv: caller-owned timeval updated by the base.
 * @return the new base, or NULL on allocation/time failure. */
void *event_init(time_t* time_secs, struct timeval* time_tv)
{
	/* calloc gives a zero-filled struct, replacing malloc+memset */
	struct event_base* b = (struct event_base*)calloc(1,
		sizeof(struct event_base));
	if(!b)
		return NULL;
	b->time_secs = time_secs;
	b->time_tv = time_tv;
	if(settime(b) < 0) {
		event_base_free(b);
		return NULL;
	}
	b->items = (struct event**)calloc(WSK_MAX_ITEMS,
		sizeof(struct event*));
	if(!b->items) {
		event_base_free(b);
		return NULL;
	}
	b->cap = WSK_MAX_ITEMS;
	b->max = 0;
	b->times = rbtree_create(mini_ev_cmp);
	if(!b->times) {
		event_base_free(b);
		return NULL;
	}
	b->signals = (struct event**)calloc(MAX_SIG, sizeof(struct event*));
	if(!b->signals) {
		event_base_free(b);
		return NULL;
	}
	b->tcp_stickies = 0;
	b->tcp_reinvigorated = 0;
	verbose(VERB_CLIENT, "winsock_event inited");
	return b;
}
/** return the version string of this event implementation */
const char *event_get_version(void)
{
	return "winsock-event-"PACKAGE_VERSION;
}
/** return the name of the polling mechanism in use */
const char *event_get_method(void)
{
	return "WSAWaitForMultipleEvents";
}
/** call handlers of expired timeouts; set *wait to the time until the
 * next pending timeout, or to tv_sec == -1 when none is pending.
 * @param base: event base with the timeout rbtree.
 * @param now: current time.
 * @param wait: output, how long to block in the next select. */
static void handle_timeouts(struct event_base* base, struct timeval* now,
	struct timeval* wait)
{
	struct event* p;
#ifndef S_SPLINT_S
	/* default: nothing pending, encoded as -1 seconds */
	wait->tv_sec = (time_t)-1;
#endif
	verbose(VERB_CLIENT, "winsock_event handle_timeouts");
	/* the rbtree is ordered on absolute timeout, so repeatedly take
	 * the smallest entry and stop at the first one in the future */
	while((rbnode_t*)(p = (struct event*)rbtree_first(base->times))
		!=RBTREE_NULL) {
#ifndef S_SPLINT_S
		if(p->ev_timeout.tv_sec > now->tv_sec ||
			(p->ev_timeout.tv_sec==now->tv_sec &&
			p->ev_timeout.tv_usec > now->tv_usec)) {
			/* there is a next larger timeout. wait for it */
			wait->tv_sec = p->ev_timeout.tv_sec - now->tv_sec;
			if(now->tv_usec > p->ev_timeout.tv_usec) {
				/* borrow a second for the usec subtraction */
				wait->tv_sec--;
				wait->tv_usec = 1000000 - (now->tv_usec -
					p->ev_timeout.tv_usec);
			} else {
				wait->tv_usec = p->ev_timeout.tv_usec
					- now->tv_usec;
			}
			verbose(VERB_CLIENT, "winsock_event wait=" ARG_LL "d.%6.6d",
				(long long)wait->tv_sec, (int)wait->tv_usec);
			return;
		}
#endif
		/* event times out, remove it */
		(void)rbtree_delete(base->times, p);
		p->ev_events &= ~EV_TIMEOUT;
		fptr_ok(fptr_whitelist_event(p->ev_callback));
		(*p->ev_callback)(p->ev_fd, EV_TIMEOUT, p->ev_arg);
	}
	verbose(VERB_CLIENT, "winsock_event wait=(-1)");
}
/** poll a user-signalling event; if it fired, reset it and run its
 * callback (the callback may set the signal again) */
static void handle_signal(struct event* ev)
{
	DWORD status;
	log_assert(ev->is_signal && ev->hEvent);
	/* probe the single event object without blocking */
	status = WSAWaitForMultipleEvents(1, &ev->hEvent, 0 /* any object */,
		0 /* return immediately */, 0 /* not alertable for IOcomple*/);
	if(status == WSA_WAIT_TIMEOUT)
		return; /* not signalled */
	if(status == WSA_WAIT_IO_COMPLETION || status == WSA_WAIT_FAILED) {
		log_err("WSAWaitForMultipleEvents(signal) failed: %s",
			wsa_strerror(WSAGetLastError()));
		return;
	}
	/* clear the signal before the callback runs */
	if(!WSAResetEvent(ev->hEvent))
		log_err("WSAResetEvent failed: %s",
			wsa_strerror(WSAGetLastError()));
	fptr_ok(fptr_whitelist_event(ev->ev_callback));
	(*ev->ev_callback)(ev->ev_fd, ev->ev_events, ev->ev_arg);
}
/** wait for events with WSAWaitForMultipleEvents and dispatch callbacks.
 * Handles the winsock edge-notify quirk for TCP by remembering "sticky"
 * events until the handler reports WOULDBLOCK (see file header).
 * @param base: event base with the items to wait on.
 * @param wait: how long to block; tv_sec == -1 means block indefinitely.
 * @return 0 on success, -1 on error. */
static int handle_select(struct event_base* base, struct timeval* wait)
{
	DWORD timeout = 0; /* in milliseconds */
	DWORD ret;
	struct event* eventlist[WSK_MAX_ITEMS];
	WSANETWORKEVENTS netev;
	int i, numwait = 0, startidx = 0, was_timeout = 0;
	int newstickies = 0;
	struct timeval nultm;
	verbose(VERB_CLIENT, "winsock_event handle_select");
#ifndef S_SPLINT_S
	if(wait->tv_sec==(time_t)-1)
		wait = NULL;
	if(wait)
		timeout = wait->tv_sec*1000 + wait->tv_usec/1000;
	if(base->tcp_stickies) {
		wait = &nultm;
		nultm.tv_sec = 0;
		nultm.tv_usec = 0;
		timeout = 0; /* no waiting, we have sticky events */
	}
#endif
	/* prepare event array; timer-only events have no handle to wait on */
	for(i=0; i<base->max; i++) {
		if(base->items[i]->ev_fd == -1 && !base->items[i]->is_signal)
			continue; /* skip timer only events */
		eventlist[numwait] = base->items[i];
		base->waitfor[numwait++] = base->items[i]->hEvent;
		if(numwait == WSK_MAX_ITEMS)
			break; /* sanity check */
	}
	log_assert(numwait <= WSA_MAXIMUM_WAIT_EVENTS);
	/* NOTE(review): the (int)wait cast truncates a pointer on 64-bit;
	 * debug output only, but %x with a pointer is not portable. */
	verbose(VERB_CLIENT, "winsock_event bmax=%d numwait=%d wait=%x "
		"timeout=%d", base->max, numwait, (int)wait, (int)timeout);
	/* do the wait */
	if(numwait == 0) {
		/* WSAWaitFor.. doesn't like 0 event objects */
		if(wait) {
			Sleep(timeout);
		}
		was_timeout = 1;
	} else {
		ret = WSAWaitForMultipleEvents(numwait, base->waitfor,
			0 /* do not wait for all, just one will do */,
			wait?timeout:WSA_INFINITE,
			0); /* we are not alertable (IO completion events) */
		if(ret == WSA_WAIT_IO_COMPLETION) {
			log_err("WSAWaitForMultipleEvents failed: WSA_WAIT_IO_COMPLETION");
			return -1;
		} else if(ret == WSA_WAIT_FAILED) {
			log_err("WSAWaitForMultipleEvents failed: %s",
				wsa_strerror(WSAGetLastError()));
			return -1;
		} else if(ret == WSA_WAIT_TIMEOUT) {
			was_timeout = 1;
		} else
			startidx = ret - WSA_WAIT_EVENT_0;
	}
	verbose(VERB_CLIENT, "winsock_event wake was_timeout=%d startidx=%d",
		was_timeout, startidx);
	/* get new time after wait */
	if(settime(base) < 0)
		return -1;
	/* callbacks: WSAWaitForMultipleEvents only reports the lowest
	 * signalled index, so everything from startidx onward is checked */
	if(base->tcp_stickies)
		startidx = 0; /* process all events, some are sticky */
	/* mark the candidates; a callback may delete/re-add events, and the
	 * just_checked flag protects against processing those twice */
	for(i=startidx; i<numwait; i++)
		eventlist[i]->just_checked = 1;
	verbose(VERB_CLIENT, "winsock_event signals");
	for(i=startidx; i<numwait; i++) {
		if(!base->waitfor[i])
			continue; /* was deleted */
		if(eventlist[i]->is_signal) {
			eventlist[i]->just_checked = 0;
			handle_signal(eventlist[i]);
		}
	}
	/* early exit - do not process network, exit quickly */
	if(base->need_to_exit)
		return 0;
	verbose(VERB_CLIENT, "winsock_event net");
	for(i=startidx; i<numwait; i++) {
		short bits = 0;
		/* eventlist[i] fired */
		/* see if eventlist[i] is still valid and just checked from
		 * WSAWaitForEvents */
		if(!base->waitfor[i])
			continue; /* was deleted */
		if(!eventlist[i]->just_checked)
			continue; /* added by other callback */
		if(eventlist[i]->is_signal)
			continue; /* not a network event at all */
		eventlist[i]->just_checked = 0;
		if(WSAEnumNetworkEvents(eventlist[i]->ev_fd,
			base->waitfor[i], /* reset the event handle */
			/*NULL,*/ /* do not reset the event handle */
			&netev) != 0) {
			log_err("WSAEnumNetworkEvents failed: %s",
				wsa_strerror(WSAGetLastError()));
			return -1;
		}
		/* translate FD_* network events into EV_READ/EV_WRITE bits */
		if((netev.lNetworkEvents & FD_READ)) {
			if(netev.iErrorCode[FD_READ_BIT] != 0)
				verbose(VERB_ALGO, "FD_READ_BIT error: %s",
					wsa_strerror(netev.iErrorCode[FD_READ_BIT]));
			bits |= EV_READ;
		}
		if((netev.lNetworkEvents & FD_WRITE)) {
			if(netev.iErrorCode[FD_WRITE_BIT] != 0)
				verbose(VERB_ALGO, "FD_WRITE_BIT error: %s",
					wsa_strerror(netev.iErrorCode[FD_WRITE_BIT]));
			bits |= EV_WRITE;
		}
		if((netev.lNetworkEvents & FD_CONNECT)) {
			if(netev.iErrorCode[FD_CONNECT_BIT] != 0)
				verbose(VERB_ALGO, "FD_CONNECT_BIT error: %s",
					wsa_strerror(netev.iErrorCode[FD_CONNECT_BIT]));
			bits |= EV_READ;
			bits |= EV_WRITE;
		}
		if((netev.lNetworkEvents & FD_ACCEPT)) {
			if(netev.iErrorCode[FD_ACCEPT_BIT] != 0)
				verbose(VERB_ALGO, "FD_ACCEPT_BIT error: %s",
					wsa_strerror(netev.iErrorCode[FD_ACCEPT_BIT]));
			bits |= EV_READ;
		}
		if((netev.lNetworkEvents & FD_CLOSE)) {
			if(netev.iErrorCode[FD_CLOSE_BIT] != 0)
				verbose(VERB_ALGO, "FD_CLOSE_BIT error: %s",
					wsa_strerror(netev.iErrorCode[FD_CLOSE_BIT]));
			bits |= EV_READ;
			bits |= EV_WRITE;
		}
		/* merge previously remembered (sticky) TCP events back in */
		if(eventlist[i]->is_tcp && eventlist[i]->stick_events) {
			verbose(VERB_ALGO, "winsock %d pass sticky %s%s",
				eventlist[i]->ev_fd,
				(eventlist[i]->old_events&EV_READ)?"EV_READ":"",
				(eventlist[i]->old_events&EV_WRITE)?"EV_WRITE":"");
			bits |= eventlist[i]->old_events;
		}
		/* remember TCP events as sticky until wouldblock is seen */
		if(eventlist[i]->is_tcp && bits) {
			eventlist[i]->old_events = bits;
			eventlist[i]->stick_events = 1;
			if((eventlist[i]->ev_events & bits)) {
				newstickies = 1;
			}
			verbose(VERB_ALGO, "winsock %d store sticky %s%s",
				eventlist[i]->ev_fd,
				(eventlist[i]->old_events&EV_READ)?"EV_READ":"",
				(eventlist[i]->old_events&EV_WRITE)?"EV_WRITE":"");
		}
		/* dispatch only the bits the event is interested in */
		if((bits & eventlist[i]->ev_events)) {
			verbose(VERB_ALGO, "winsock event callback %p fd=%d "
				"%s%s%s%s%s ; %s%s%s",
				eventlist[i], eventlist[i]->ev_fd,
				(netev.lNetworkEvents&FD_READ)?" FD_READ":"",
				(netev.lNetworkEvents&FD_WRITE)?" FD_WRITE":"",
				(netev.lNetworkEvents&FD_CONNECT)?
				" FD_CONNECT":"",
				(netev.lNetworkEvents&FD_ACCEPT)?
				" FD_ACCEPT":"",
				(netev.lNetworkEvents&FD_CLOSE)?" FD_CLOSE":"",
				(bits&EV_READ)?" EV_READ":"",
				(bits&EV_WRITE)?" EV_WRITE":"",
				(bits&EV_TIMEOUT)?" EV_TIMEOUT":"");
			fptr_ok(fptr_whitelist_event(
				eventlist[i]->ev_callback));
			(*eventlist[i]->ev_callback)(eventlist[i]->ev_fd,
				bits & eventlist[i]->ev_events,
				eventlist[i]->ev_arg);
		}
		if(eventlist[i]->is_tcp && bits)
			verbose(VERB_ALGO, "winsock %d got sticky %s%s",
				eventlist[i]->ev_fd,
				(eventlist[i]->old_events&EV_READ)?"EV_READ":"",
				(eventlist[i]->old_events&EV_WRITE)?"EV_WRITE":"");
	}
	verbose(VERB_CLIENT, "winsock_event net");
	if(base->tcp_reinvigorated) {
		verbose(VERB_CLIENT, "winsock_event reinvigorated");
		base->tcp_reinvigorated = 0;
		newstickies = 1;
	}
	base->tcp_stickies = newstickies;
	verbose(VERB_CLIENT, "winsock_event handle_select end");
	return 0;
}
/** run the event loop until event_base_loopexit sets need_to_exit.
 * @param base: the event base to run.
 * @return 0 on clean exit, -1 on error. */
int event_base_dispatch(struct event_base *base)
{
	struct timeval wait;
	if(settime(base) < 0)
		return -1;
	for(;;) {
		if(base->need_to_exit)
			return 0;
		/* fire expired timeouts; compute how long to block next */
		handle_timeouts(base, base->time_tv, &wait);
		if(base->need_to_exit)
			return 0;
		/* block for events and dispatch their callbacks */
		if(handle_select(base, &wait) < 0)
			return base->need_to_exit ? 0 : -1;
	}
}
/** request the dispatch loop to exit; the tv argument is ignored and
 * the exit happens at the next loop iteration.
 * @param base: the event base to stop.
 * @return 0. */
int event_base_loopexit(struct event_base *base,
	struct timeval * ATTR_UNUSED(tv))
{
	verbose(VERB_CLIENT, "winsock_event loopexit");
	/* event_base_dispatch tests this flag every iteration */
	base->need_to_exit = 1;
	return 0;
}
/** free the event base; the events themselves are not freed and remain
 * owned by the caller. Safe to call with NULL or a partially
 * initialised base (as done by event_init on failure).
 * @param base: the base to free, may be NULL. */
void event_base_free(struct event_base *base)
{
	verbose(VERB_CLIENT, "winsock_event event_base_free");
	if(!base)
		return;
	/* free(NULL) is a no-op, so the member guards were redundant */
	free(base->items);
	free(base->times);
	free(base->signals);
	free(base);
}
/** fill in the user-settable fields of an event structure.
 * @param ev: event to set up.
 * @param fd: socket, or -1 for timer-only events.
 * @param bits: EV_.. interest bits.
 * @param cb: callback(fd, eventbits, arg).
 * @param arg: user argument for the callback. */
void event_set(struct event *ev, int fd, short bits,
	void (*cb)(int, short, void *), void *arg)
{
	ev->node.key = ev;	/* the event is its own rbtree key */
	ev->ev_fd = fd;
	ev->ev_arg = arg;
	ev->ev_events = bits;
	ev->just_checked = 0;
	ev->added = 0;
	ev->ev_callback = cb;
	/* the callback must be on the function pointer whitelist */
	fptr_ok(fptr_whitelist_event(ev->ev_callback));
}
/** attach an event to a base, clearing the sticky bookkeeping.
 * @param base: base to attach to.
 * @param ev: the event.
 * @return 0. */
int event_base_set(struct event_base *base, struct event *ev)
{
	ev->stick_events = 0;
	ev->old_events = 0;
	ev->added = 0;
	ev->ev_base = base;
	return 0;
}
/** activate an event: register its socket with WSAEventSelect and/or
 * insert its timeout into the rbtree. After this, do not call event_set.
 * @param ev: the event (previously set up with event_set/event_base_set).
 * @param tv: relative timeout, used when EV_TIMEOUT is in ev_events.
 * @return 0 on success, -1 on error (item array full). */
int event_add(struct event *ev, struct timeval *tv)
{
	verbose(VERB_ALGO, "event_add %p added=%d fd=%d tv=" ARG_LL "d %s%s%s",
		ev, ev->added, ev->ev_fd,
		(tv?(long long)tv->tv_sec*1000+(long long)tv->tv_usec/1000:-1),
		(ev->ev_events&EV_READ)?" EV_READ":"",
		(ev->ev_events&EV_WRITE)?" EV_WRITE":"",
		(ev->ev_events&EV_TIMEOUT)?" EV_TIMEOUT":"");
	if(ev->added)
		event_del(ev);
	log_assert(ev->ev_fd==-1 || find_fd(ev->ev_base, ev->ev_fd) == -1);
	ev->is_tcp = 0;
	ev->is_signal = 0;
	ev->just_checked = 0;
	if((ev->ev_events&(EV_READ|EV_WRITE)) && ev->ev_fd != -1) {
		BOOL b=0;
		int t, l;
		long events = 0;
		if(ev->ev_base->max == ev->ev_base->cap)
			return -1;
		/* append to the active items array */
		ev->idx = ev->ev_base->max++;
		ev->ev_base->items[ev->idx] = ev;
		if( (ev->ev_events&EV_READ) )
			events |= FD_READ;
		if( (ev->ev_events&EV_WRITE) )
			events |= FD_WRITE;
		/* detect TCP sockets, they need sticky-event treatment */
		l = sizeof(t);
		if(getsockopt(ev->ev_fd, SOL_SOCKET, SO_TYPE,
			(void*)&t, &l) != 0)
			log_err("getsockopt(SO_TYPE) failed: %s",
				wsa_strerror(WSAGetLastError()));
		if(t == SOCK_STREAM) {
			/* TCP socket */
			ev->is_tcp = 1;
			events |= FD_CLOSE;
			if( (ev->ev_events&EV_WRITE) )
				events |= FD_CONNECT;
			l = sizeof(b);
			if(getsockopt(ev->ev_fd, SOL_SOCKET, SO_ACCEPTCONN,
				(void*)&b, &l) != 0)
				log_err("getsockopt(SO_ACCEPTCONN) failed: %s",
					wsa_strerror(WSAGetLastError()));
			if(b) /* TCP accept socket */
				events |= FD_ACCEPT;
		}
		ev->hEvent = WSACreateEvent();
		if(ev->hEvent == WSA_INVALID_EVENT)
			log_err("WSACreateEvent failed: %s",
				wsa_strerror(WSAGetLastError()));
		/* automatically sets fd to nonblocking mode.
		 * nonblocking cannot be disabled, until wsaES(fd, NULL, 0) */
		if(WSAEventSelect(ev->ev_fd, ev->hEvent, events) != 0) {
			log_err("WSAEventSelect failed: %s",
				wsa_strerror(WSAGetLastError()));
		}
		if(ev->is_tcp && ev->stick_events &&
			(ev->ev_events & ev->old_events)) {
			/* go to processing the sticky event right away */
			ev->ev_base->tcp_reinvigorated = 1;
		}
	}
	if(tv && (ev->ev_events&EV_TIMEOUT)) {
#ifndef S_SPLINT_S
		struct timeval *now = ev->ev_base->time_tv;
		ev->ev_timeout.tv_sec = tv->tv_sec + now->tv_sec;
		ev->ev_timeout.tv_usec = tv->tv_usec + now->tv_usec;
		/* normalize so tv_usec ends up in [0, 999999]; the previous
		 * '>' test could leave tv_usec == 1000000, which makes
		 * equal timeouts compare unequal in mini_ev_cmp */
		while(ev->ev_timeout.tv_usec >= 1000000) {
			ev->ev_timeout.tv_usec -= 1000000;
			ev->ev_timeout.tv_sec++;
		}
#endif
		(void)rbtree_insert(ev->ev_base->times, &ev->node);
	}
	ev->added = 1;
	return 0;
}
/** deactivate an event: remove its timeout from the rbtree and detach
 * its socket from the wait set; inverse of event_add. The event struct
 * may be changed with event_set again afterwards.
 * @param ev: the event to remove.
 * @return 0. */
int event_del(struct event *ev)
{
	verbose(VERB_ALGO, "event_del %p added=%d fd=%d tv=" ARG_LL "d %s%s%s",
		ev, ev->added, ev->ev_fd,
		(ev->ev_events&EV_TIMEOUT)?(long long)ev->ev_timeout.tv_sec*1000+
		(long long)ev->ev_timeout.tv_usec/1000:-1,
		(ev->ev_events&EV_READ)?" EV_READ":"",
		(ev->ev_events&EV_WRITE)?" EV_WRITE":"",
		(ev->ev_events&EV_TIMEOUT)?" EV_TIMEOUT":"");
	if(!ev->added)
		return 0;
	log_assert(ev->added);
	if((ev->ev_events&EV_TIMEOUT))
		(void)rbtree_delete(ev->ev_base->times, &ev->node);
	if((ev->ev_events&(EV_READ|EV_WRITE)) && ev->ev_fd != -1) {
		log_assert(ev->ev_base->max > 0);
		/* remove item and compact the list: the last item is moved
		 * into this slot and its idx is fixed up */
		ev->ev_base->items[ev->idx] =
			ev->ev_base->items[ev->ev_base->max-1];
		ev->ev_base->items[ev->ev_base->max-1] = NULL;
		ev->ev_base->max--;
		if(ev->idx < ev->ev_base->max)
			ev->ev_base->items[ev->idx]->idx = ev->idx;
		/* clear our handle out of the in-progress wait array so the
		 * select loop skips this (now deleted) slot */
		zero_waitfor(ev->ev_base->waitfor, ev->hEvent);
		if(WSAEventSelect(ev->ev_fd, ev->hEvent, 0) != 0)
			log_err("WSAEventSelect(disable) failed: %s",
				wsa_strerror(WSAGetLastError()));
		if(!WSACloseEvent(ev->hEvent))
			log_err("WSACloseEvent failed: %s",
				wsa_strerror(WSAGetLastError()));
	}
	ev->just_checked = 0;
	ev->added = 0;
	return 0;
}
/** which base gets to handle signals; only one base at a time, set by
 * the most recent signal_add call */
static struct event_base* signal_base = NULL;

/** signal handler: dispatch to the event registered for this signal
 * number, if any. Does nothing when no base or event is registered. */
static RETSIGTYPE sigh(int sig)
{
	struct event* ev;
	if(!signal_base || sig < 0 || sig >= MAX_SIG)
		return;
	ev = signal_base->signals[sig];
	if(!ev)
		return;
	fptr_ok(fptr_whitelist_event(ev->ev_callback));
	(*ev->ev_callback)(sig, EV_SIGNAL, ev->ev_arg);
}
int signal_add(struct event *ev, struct timeval * ATTR_UNUSED(tv))
{
if(ev->ev_fd == -1 || ev->ev_fd >= MAX_SIG)
return -1;
signal_base = ev->ev_base;
ev->ev_base->signals[ev->ev_fd] = ev;
ev->added = 1;
if(signal(ev->ev_fd, sigh) == SIG_ERR) {
return -1;
}
return 0;
}
int signal_del(struct event *ev)
{
if(ev->ev_fd == -1 || ev->ev_fd >= MAX_SIG)
return -1;
ev->ev_base->signals[ev->ev_fd] = NULL;
ev->added = 0;
return 0;
}
/** the TCP handler reports that recv/send gave WSAEWOULDBLOCK for the
 * given EV_READ or EV_WRITE bit; forget its sticky status.
 * @param ev: the TCP event.
 * @param eventbits: EV_READ or EV_WRITE that blocked. */
void winsock_tcp_wouldblock(struct event* ev, int eventbits)
{
	verbose(VERB_ALGO, "winsock: tcp wouldblock %s",
		eventbits==EV_READ?"EV_READ":"EV_WRITE");
	/* drop the blocked bits from the remembered sticky set */
	ev->old_events &= ~eventbits;
	if(!ev->old_events)
		ev->stick_events = 0;
	/* in case this is the last sticky event, we could
	 * possibly run an empty handler loop to reset the base
	 * tcp_stickies variable
	 */
}
/** register a user-signalled WSAEVENT; the callback runs whenever the
 * event object is signalled. The caller owns and closes the WSAEVENT.
 * @param base: base to register on.
 * @param ev: caller storage, may be uninitialised.
 * @param wsaevent: the handle to wait on.
 * @param cb: callback routine.
 * @param arg: user argument for the callback.
 * @return 1 on success, 0 when the item array is full. */
int winsock_register_wsaevent(struct event_base* base, struct event* ev,
	WSAEVENT wsaevent, void (*cb)(int, short, void*), void* arg)
{
	if(base->max == base->cap)
		return 0; /* no room for another item */
	memset(ev, 0, sizeof(*ev));
	ev->ev_base = base;
	ev->ev_fd = -1;		/* not a socket */
	ev->ev_events = EV_READ;
	ev->ev_callback = cb;
	ev->ev_arg = arg;
	ev->hEvent = wsaevent;
	ev->is_signal = 1;	/* user-signalled event object */
	ev->added = 1;
	/* append to the active items array */
	ev->idx = base->max++;
	base->items[ev->idx] = ev;
	return 1;
}
/** unregister a user WSAEVENT added with winsock_register_wsaevent.
 * The WSAEVENT handle itself is not closed; the caller owns it.
 * @param ev: event storage to remove, may be NULL or not-added. */
void winsock_unregister_wsaevent(struct event* ev)
{
	if(!ev || !ev->added) return;
	/* fixed: the log_assert statement was missing its semicolon */
	log_assert(ev->added && ev->ev_base->max > 0);
	/* remove item and compact the list */
	ev->ev_base->items[ev->idx] = ev->ev_base->items[ev->ev_base->max-1];
	ev->ev_base->items[ev->ev_base->max-1] = NULL;
	ev->ev_base->max--;
	if(ev->idx < ev->ev_base->max)
		ev->ev_base->items[ev->idx]->idx = ev->idx;
	ev->added = 0;
}
#else /* USE_WINSOCK */
/** symbol so this codefile defines symbols. pleasing ranlib on OSX 10.5 */
int winsock_unused_symbol = 1;
#endif /* USE_WINSOCK */

264
external/unbound/util/winsock_event.h vendored Normal file
View File

@@ -0,0 +1,264 @@
/*
* util/winsock_event.h - unbound event handling for winsock on windows
*
* Copyright (c) 2008, NLnet Labs. All rights reserved.
*
* This software is open source.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the NLNET LABS nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file
*
* This file contains interface functions with the WinSock2 API on Windows.
* It uses the winsock WSAWaitForMultipleEvents interface on a number of
* sockets.
*
* Note that windows can only wait for max 64 events at one time.
*
* Also, file descriptors cannot be waited for.
*
* Named pipes are not easily available (and are not usable in select() ).
* For interprocess communication, it is possible to wait for a hEvent to
* be signaled by another thread.
*
* When a socket becomes readable, then it will not be flagged as
* readable again until you have gotten WOULDBLOCK from a recv routine.
* That means the event handler must store the readability (edge notify)
* and process the incoming data until it blocks.
* The function performing recv then has to inform the event handler that
* the socket has blocked, and the event handler can mark it as such.
* Thus, this file transforms the edge notify from windows to a level notify
* that is compatible with UNIX.
* The WSAEventSelect page says that it does do level notify, as long
* as you call a recv/write/accept at least once when it is signalled.
* This last bit is not true, even though documented in server2008 api docs
* from microsoft, it does not happen at all. Instead you have to test for
* WSAEWOULDBLOCK on a tcp stream, and only then retest the socket.
* And before that remember the previous result as still valid.
*
* To stay 'fair', instead of emptying a socket completely, the event handler
* can test the other (marked as blocking) sockets for new events.
*
* Additionally, TCP accept sockets get special event support.
*
* Socket numbers are not starting small, they can be any number (say 33060).
* Therefore, bitmaps are not used, but arrays.
*
* on winsock, you must use recv() and send() for TCP reads and writes,
* not read() and write(), those work only on files.
*
* Also fseek and fseeko do not work if a FILE is not fopen-ed in binary mode.
*
* When under a high load windows gives out lots of errors, from recvfrom
* on udp sockets for example (WSAECONNRESET). Even though the udp socket
* has no connection per se.
*/
#ifndef UTIL_WINSOCK_EVENT_H
#define UTIL_WINSOCK_EVENT_H
#ifdef USE_WINSOCK
#ifndef HAVE_EVENT_BASE_FREE
#define HAVE_EVENT_BASE_FREE
#endif
/** event timeout */
#define EV_TIMEOUT 0x01
/** event fd readable */
#define EV_READ 0x02
/** event fd writable */
#define EV_WRITE 0x04
/** event signal */
#define EV_SIGNAL 0x08
/** event must persist */
#define EV_PERSIST 0x10
/* needs our redblack tree */
#include "rbtree.h"
/** max number of signals to support */
#define MAX_SIG 32
/** The number of items that the winsock event handler can service.
* Windows cannot handle more anyway */
#define WSK_MAX_ITEMS 64
/**
 * event base for winsock event handler
 */
struct event_base
{
	/** sorted by timeout (absolute), ptr */
	rbtree_t* times;
	/** array (first part in use) of handles to work on */
	struct event** items;
	/** number of items in use in array */
	int max;
	/** capacity of array, size of array in items */
	int cap;
	/** array of 0 - maxsig of ptr to event for it */
	struct event** signals;
	/** if we need to exit */
	int need_to_exit;
	/** where to store time in seconds */
	time_t* time_secs;
	/** where to store time as a full timeval */
	struct timeval* time_tv;
	/**
	 * TCP streams have sticky events to them, these are not
	 * reported by the windows event system anymore, we have to
	 * keep reporting those events as present until wouldblock() is
	 * signalled by the handler back to us.
	 */
	int tcp_stickies;
	/**
	 * should next cycle process reinvigorated stickies,
	 * these are stickies that have been stored, but due to a new
	 * event_add a sudden interest in the event has incepted.
	 */
	int tcp_reinvigorated;
	/** The list of events that is currently being processed. */
	WSAEVENT waitfor[WSK_MAX_ITEMS];
};
/**
 * Event structure. Has some of the event elements.
 */
struct event {
	/** node in timeout rbtree */
	rbnode_t node;
	/** is event already added */
	int added;

	/** event base it belongs to */
	struct event_base *ev_base;
	/** fd to poll or -1 for timeouts. signal number for sigs. */
	int ev_fd;
	/** what events this event is interested in, see EV_.. above. */
	short ev_events;
	/** timeout value (absolute, when added) */
	struct timeval ev_timeout;
	/** callback to call: fd, eventbits, userarg */
	void (*ev_callback)(int, short, void *);
	/** callback user arg */
	void *ev_arg;

	/* ----- nonpublic part, for winsock_event only ----- */
	/** index of this event in the items array (if added) */
	int idx;
	/** the event handle to wait for new events to become ready */
	WSAEVENT hEvent;
	/** true if this file descriptor is a TCP socket and needs special
	 * attention */
	int is_tcp;
	/** remembered EV_ values */
	short old_events;
	/** should remembered EV_ values be used for TCP streams.
	 * Reset after WOULDBLOCK is signaled using the function. */
	int stick_events;
	/** true if this event is a signaling WSAEvent by the user.
	 * User created and user closed WSAEvent. Only signaled/unsignaled,
	 * no read/write distinctions needed. */
	int is_signal;
	/** used during callbacks to see which events were just checked */
	int just_checked;
};
/** create event base */
void *event_init(time_t* time_secs, struct timeval* time_tv);
/** get version */
const char *event_get_version(void);
/** get polling method (select,epoll) */
const char *event_get_method(void);
/** run select in a loop */
int event_base_dispatch(struct event_base *);
/** exit that loop */
int event_base_loopexit(struct event_base *, struct timeval *);
/** free event base. Free events yourself */
void event_base_free(struct event_base *);
/** set content of event */
void event_set(struct event *, int, short, void (*)(int, short, void *), void *);
/** add event to a base. You *must* call this for every event. */
int event_base_set(struct event_base *, struct event *);
/** add event to make it active. You may not change it with event_set anymore */
int event_add(struct event *, struct timeval *);
/** remove event. You may change it again */
int event_del(struct event *);
#define evtimer_add(ev, tv) event_add(ev, tv)
#define evtimer_del(ev) event_del(ev)
/* uses different implementation. Cannot mix fd/timeouts and signals inside
* the same struct event. create several event structs for that. */
/** install signal handler */
int signal_add(struct event *, struct timeval *);
/** set signal event contents */
#define signal_set(ev, x, cb, arg) \
event_set(ev, x, EV_SIGNAL|EV_PERSIST, cb, arg)
/** remove signal handler */
int signal_del(struct event *);
/** compare events in tree, based on timevalue, ptr for uniqueness */
int mini_ev_cmp(const void* a, const void* b);
/**
* Routine for windows only, where the handling layer can signal that
* a TCP stream encountered WSAEWOULDBLOCK for a stream and thus needs
* retesting the event.
* Pass if EV_READ or EV_WRITE gave wouldblock.
*/
void winsock_tcp_wouldblock(struct event* ev, int eventbit);
/**
* Routine for windows only. where you pass a signal WSAEvent that
* you wait for. When the event is signaled, the callback gets called.
* The callback has to WSAResetEvent to disable the signal.
* @param base: the event base.
* @param ev: the event structure for data storage
* can be passed uninitialised.
* @param wsaevent: the WSAEvent that gets signaled.
* @param cb: callback routine.
* @param arg: user argument to callback routine.
* @return false on error.
*/
int winsock_register_wsaevent(struct event_base* base, struct event* ev,
WSAEVENT wsaevent, void (*cb)(int, short, void*), void* arg);
/**
* Unregister a wsaevent. User has to close the WSAEVENT itself.
* @param ev: event data storage.
*/
void winsock_unregister_wsaevent(struct event* ev);
#endif /* USE_WINSOCK */
#endif /* UTIL_WINSOCK_EVENT_H */