/*
 * daemon/worker.c - worker that handles a pending list of requests.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file implements the worker that handles callbacks on events, for
 * pending requests.
 */
#include "config.h"
#include "util/log.h"
#include "util/net_help.h"
#include "util/random.h"
#include "daemon/worker.h"
#include "daemon/daemon.h"
#include "daemon/remote.h"
#include "daemon/acl_list.h"
#include "util/netevent.h"
#include "util/config_file.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/storage/slabhash.h"
#include "services/listen_dnsport.h"
#include "services/outside_network.h"
#include "services/outbound_list.h"
#include "services/cache/rrset.h"
#include "services/cache/infra.h"
#include "services/cache/dns.h"
#include "services/mesh.h"
#include "services/localzone.h"
#include "util/data/msgparse.h"
#include "util/data/msgencode.h"
#include "util/data/dname.h"
#include "util/fptr_wlist.h"
#include "util/tube.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
#include "validator/autotrust.h"
#include "validator/val_anchor.h"
#include "libunbound/context.h"
#include "libunbound/libworker.h"
#include "ldns/sbuffer.h"
#ifdef HAVE_SYS_TYPES_H
#  include <sys/types.h>
#endif
#ifdef UB_ON_WINDOWS
#include "winrc/win_svc.h"
#endif
/** Size of an UDP datagram */
#define NORMAL_UDP_SIZE	512 /* bytes */

/**
 * seconds to add to prefetch leeway.  This is a TTL that expires old rrsets
 * earlier than they should in order to put the new update into the cache.
 * This additional value is to make sure that if not all TTLs are equal in
 * the message to be updated(and replaced), that rrsets with up to this much
 * extra TTL are also replaced.  This means that the resulting new message
 * will have (most likely) this TTL at least, avoiding very small 'split
 * second' TTLs due to operators choosing relative primes for TTLs (or so).
 * Also has to be at least one to break ties (and overwrite cached entry).
 */
#define PREFETCH_EXPIRY_ADD 60
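/* Illustration (the numbers are only an example, not taken from the code):
 * when a cache hit is past its prefetch TTL, the reply is sent first and a
 * prefetch is started with leeway = remaining message TTL.  If 9 seconds
 * remain, the prefetch runs with a leeway of 9 + PREFETCH_EXPIRY_ADD = 69
 * seconds, so rrsets of the old message with up to 69 seconds of TTL left
 * are treated as expired and overwritten when the fresh answer arrives. */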
#ifdef UNBOUND_ALLOC_STATS
/** measure memory leakage */
static void
debug_memleak(size_t accounted, size_t heap,
	size_t total_alloc, size_t total_free)
{
	static int init = 0;
	static size_t base_heap, base_accounted, base_alloc, base_free;
	size_t base_af, cur_af, grow_af, grow_acc;
	if(!init) {
		init = 1;
		base_heap = heap;
		base_accounted = accounted;
		base_alloc = total_alloc;
		base_free = total_free;
	}
	base_af = base_alloc - base_free;
	cur_af = total_alloc - total_free;
	grow_af = cur_af - base_af;
	grow_acc = accounted - base_accounted;
	log_info("Leakage: %d leaked. growth: %u use, %u acc, %u heap",
		(int)(grow_af - grow_acc), (unsigned)grow_af,
		(unsigned)grow_acc, (unsigned)(heap - base_heap));
}

/** give debug heap size indication */
static void
debug_total_mem(size_t calctotal)
{
#ifdef HAVE_SBRK
	extern void* unbound_start_brk;
	extern size_t unbound_mem_alloc, unbound_mem_freed;
	void* cur = sbrk(0);
	int total = cur-unbound_start_brk;
	log_info("Total heap memory estimate: %u  total-alloc: %u  "
		"total-free: %u", (unsigned)total,
		(unsigned)unbound_mem_alloc, (unsigned)unbound_mem_freed);
	debug_memleak(calctotal, (size_t)total,
		unbound_mem_alloc, unbound_mem_freed);
#else /* no sbrk */
	(void)calctotal;
#endif /* HAVE_SBRK */
}
#endif /* UNBOUND_ALLOC_STATS */
/** Report on memory usage by this thread and global */
static void
worker_mem_report(struct worker* ATTR_UNUSED(worker),
	struct serviced_query* ATTR_UNUSED(cur_serv))
{
#ifdef UNBOUND_ALLOC_STATS
	/* debug func in validator module */
	size_t total, front, back, mesh, msg, rrset, infra, ac, superac;
	size_t me, iter, val, anch;
	int i;
	if(verbosity < VERB_ALGO)
		return;
	front = listen_get_mem(worker->front);
	back = outnet_get_mem(worker->back);
	msg = slabhash_get_mem(worker->env.msg_cache);
	rrset = slabhash_get_mem(&worker->env.rrset_cache->table);
	infra = infra_get_mem(worker->env.infra_cache);
	mesh = mesh_get_mem(worker->env.mesh);
	ac = alloc_get_mem(&worker->alloc);
	superac = alloc_get_mem(&worker->daemon->superalloc);
	anch = anchors_get_mem(worker->env.anchors);
	iter = 0;
	val = 0;
	for(i=0; i<worker->env.mesh->mods.num; i++) {
		fptr_ok(fptr_whitelist_mod_get_mem(worker->env.mesh->
			mods.mod[i]->get_mem));
		if(strcmp(worker->env.mesh->mods.mod[i]->name, "validator")==0)
			val += (*worker->env.mesh->mods.mod[i]->get_mem)
				(&worker->env, i);
		else	iter += (*worker->env.mesh->mods.mod[i]->get_mem)
				(&worker->env, i);
	}
	me = sizeof(*worker) + sizeof(*worker->base) + sizeof(*worker->comsig)
		+ comm_point_get_mem(worker->cmd_com)
		+ sizeof(worker->rndstate)
		+ regional_get_mem(worker->scratchpad)
		+ sizeof(*worker->env.scratch_buffer)
		+ sldns_buffer_capacity(worker->env.scratch_buffer)
		+ forwards_get_mem(worker->env.fwds)
		+ hints_get_mem(worker->env.hints);
	if(worker->thread_num == 0)
		me += acl_list_get_mem(worker->daemon->acl);
	if(cur_serv) {
		me += serviced_get_mem(cur_serv);
	}
	total = front+back+mesh+msg+rrset+infra+iter+val+ac+superac+me;
	log_info("Memory conditions: %u front=%u back=%u mesh=%u msg=%u "
		"rrset=%u infra=%u iter=%u val=%u anchors=%u "
		"alloccache=%u globalalloccache=%u me=%u",
		(unsigned)total, (unsigned)front, (unsigned)back,
		(unsigned)mesh, (unsigned)msg, (unsigned)rrset,
		(unsigned)infra, (unsigned)iter, (unsigned)val, (unsigned)anch,
		(unsigned)ac, (unsigned)superac, (unsigned)me);
	debug_total_mem(total);
#else /* no UNBOUND_ALLOC_STATS */
	size_t val = 0;
	int i;
	if(verbosity < VERB_QUERY)
		return;
	for(i=0; i<worker->env.mesh->mods.num; i++) {
		fptr_ok(fptr_whitelist_mod_get_mem(worker->env.mesh->
			mods.mod[i]->get_mem));
		if(strcmp(worker->env.mesh->mods.mod[i]->name, "validator")==0)
			val += (*worker->env.mesh->mods.mod[i]->get_mem)
				(&worker->env, i);
	}
	verbose(VERB_QUERY, "cache memory msg=%u rrset=%u infra=%u val=%u",
		(unsigned)slabhash_get_mem(worker->env.msg_cache),
		(unsigned)slabhash_get_mem(&worker->env.rrset_cache->table),
		(unsigned)infra_get_mem(worker->env.infra_cache),
		(unsigned)val);
#endif /* UNBOUND_ALLOC_STATS */
}
void
worker_send_cmd(struct worker* worker, enum worker_commands cmd)
{
	uint32_t c = (uint32_t)htonl(cmd);
	if(!tube_write_msg(worker->cmd, (uint8_t*)&c, sizeof(c), 0)) {
		log_err("worker send cmd %d failed", (int)cmd);
	}
}
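/** handle a reply that arrives for an outgoing query; arg is the module
 * qstate that asked for it.  The packet is sanity checked and reported to
 * the mesh as an error, a timeout or a normal reply. */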
int
worker_handle_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct module_qstate* q = (struct module_qstate*)arg;
	struct worker* worker = q->env->worker;
	struct outbound_entry e;
	e.qstate = q;
	e.qsent = NULL;

	if(error != 0) {
		mesh_report_reply(worker->env.mesh, &e, reply_info, error);
		worker_mem_report(worker, NULL);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(worker->env.mesh, &e, reply_info,
			NETEVENT_TIMEOUT);
		worker_mem_report(worker, NULL);
		return 0;
	}
	mesh_report_reply(worker->env.mesh, &e, reply_info, NETEVENT_NOERROR);
	worker_mem_report(worker, NULL);
	return 0;
}
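/** handle a reply that arrives for a serviced (shared) outgoing query;
 * arg is the outbound_entry of the waiting query state.  The packet is
 * sanity checked and reported to the mesh as an error, a timeout or a
 * normal reply. */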
int
worker_handle_service_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct outbound_entry* e = (struct outbound_entry*)arg;
	struct worker* worker = e->qstate->env->worker;
	struct serviced_query *sq = e->qsent;

	verbose(VERB_ALGO, "worker svcd callback for qstate %p", e->qstate);
	if(error != 0) {
		mesh_report_reply(worker->env.mesh, e, reply_info, error);
		worker_mem_report(worker, sq);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		verbose(VERB_ALGO, "worker: bad reply handled as timeout");
		mesh_report_reply(worker->env.mesh, e, reply_info,
			NETEVENT_TIMEOUT);
		worker_mem_report(worker, sq);
		return 0;
	}
	mesh_report_reply(worker->env.mesh, e, reply_info, NETEVENT_NOERROR);
	worker_mem_report(worker, sq);
	return 0;
}
/** check request sanity.
 * @param pkt: the wire packet to examine for sanity.
 * @param worker: parameters for checking.
 * @return error code, 0 OK, or -1 discard.
 */
static int
worker_check_request(sldns_buffer* pkt, struct worker* worker)
{
	if(sldns_buffer_limit(pkt) < LDNS_HEADER_SIZE) {
		verbose(VERB_QUERY, "request too short, discarded");
		return -1;
	}
	if(sldns_buffer_limit(pkt) > NORMAL_UDP_SIZE &&
		worker->daemon->cfg->harden_large_queries) {
		verbose(VERB_QUERY, "request too large, discarded");
		return -1;
	}
	if(LDNS_QR_WIRE(sldns_buffer_begin(pkt))) {
		verbose(VERB_QUERY, "request has QR bit on, discarded");
		return -1;
	}
	if(LDNS_TC_WIRE(sldns_buffer_begin(pkt))) {
		LDNS_TC_CLR(sldns_buffer_begin(pkt));
		verbose(VERB_QUERY, "request bad, has TC bit on");
		return LDNS_RCODE_FORMERR;
	}
	if(LDNS_OPCODE_WIRE(sldns_buffer_begin(pkt)) != LDNS_PACKET_QUERY) {
		verbose(VERB_QUERY, "request unknown opcode %d",
			LDNS_OPCODE_WIRE(sldns_buffer_begin(pkt)));
		return LDNS_RCODE_NOTIMPL;
	}
	if(LDNS_QDCOUNT(sldns_buffer_begin(pkt)) != 1) {
		verbose(VERB_QUERY, "request wrong nr qd=%d",
			LDNS_QDCOUNT(sldns_buffer_begin(pkt)));
		return LDNS_RCODE_FORMERR;
	}
	if(LDNS_ANCOUNT(sldns_buffer_begin(pkt)) != 0) {
		verbose(VERB_QUERY, "request wrong nr an=%d",
			LDNS_ANCOUNT(sldns_buffer_begin(pkt)));
		return LDNS_RCODE_FORMERR;
	}
	if(LDNS_NSCOUNT(sldns_buffer_begin(pkt)) != 0) {
		verbose(VERB_QUERY, "request wrong nr ns=%d",
			LDNS_NSCOUNT(sldns_buffer_begin(pkt)));
		return LDNS_RCODE_FORMERR;
	}
	if(LDNS_ARCOUNT(sldns_buffer_begin(pkt)) > 1) {
		verbose(VERB_QUERY, "request wrong nr ar=%d",
			LDNS_ARCOUNT(sldns_buffer_begin(pkt)));
		return LDNS_RCODE_FORMERR;
	}
	return 0;
}
void
worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube), uint8_t* msg,
	size_t len, int error, void* arg)
{
	struct worker* worker = (struct worker*)arg;
	enum worker_commands cmd;
	if(error != NETEVENT_NOERROR) {
		free(msg);
		if(error == NETEVENT_CLOSED)
			comm_base_exit(worker->base);
		else	log_info("control event: %d", error);
		return;
	}
	if(len != sizeof(uint32_t)) {
		fatal_exit("bad control msg length %d", (int)len);
	}
	cmd = sldns_read_uint32(msg);
	free(msg);
	switch(cmd) {
	case worker_cmd_quit:
		verbose(VERB_ALGO, "got control cmd quit");
		comm_base_exit(worker->base);
		break;
	case worker_cmd_stats:
		verbose(VERB_ALGO, "got control cmd stats");
		server_stats_reply(worker, 1);
		break;
	case worker_cmd_stats_noreset:
		verbose(VERB_ALGO, "got control cmd stats_noreset");
		server_stats_reply(worker, 0);
		break;
	case worker_cmd_remote:
		verbose(VERB_ALGO, "got control cmd remote");
		daemon_remote_exec(worker);
		break;
	default:
		log_err("bad command %d", (int)cmd);
		break;
	}
}
/** check if a delegation is secure */
static enum sec_status
check_delegation_secure(struct reply_info *rep)
{
	/* return smallest security status */
	size_t i;
	enum sec_status sec = sec_status_secure;
	enum sec_status s;
	size_t num = rep->an_numrrsets + rep->ns_numrrsets;
	/* check if answer and authority are OK */
	for(i=0; i<num; i++) {
		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
			->security;
		if(s < sec)
			sec = s;
	}
	/* in additional, only unchecked triggers revalidation */
	for(i=num; i<rep->rrset_count; i++) {
		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
			->security;
		if(s == sec_status_unchecked)
			return s;
	}
	return sec;
}
/** remove nonsecure from a delegation referral additional section */
static void
deleg_remove_nonsecure_additional(struct reply_info* rep)
{
	/* we can simply edit it, since we are working in the scratch region */
	size_t i;
	enum sec_status s;

	for(i = rep->an_numrrsets+rep->ns_numrrsets; i<rep->rrset_count; i++) {
		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
			->security;
		if(s != sec_status_secure) {
			memmove(rep->rrsets+i, rep->rrsets+i+1,
				sizeof(struct ub_packed_rrset_key*)*
				(rep->rrset_count - i - 1));
			rep->ar_numrrsets--;
			rep->rrset_count--;
			i--;
		}
	}
}
/** answer nonrecursive query from the cache */
static int
answer_norec_from_cache(struct worker* worker, struct query_info* qinfo,
	uint16_t id, uint16_t flags, struct comm_reply* repinfo,
	struct edns_data* edns)
{
	/* for a nonrecursive query return either:
	 * 	o an error (servfail; we try to avoid this)
	 * 	o a delegation (closest we have; this routine tries that)
	 * 	o the answer (checked by answer_from_cache)
	 *
	 * So, grab a delegation from the rrset cache.
	 * Then check if it needs validation, if so, this routine fails,
	 * so that iterator can prime and validator can verify rrsets.
	 */
	uint16_t udpsize = edns->udp_size;
	int secure = 0;
	time_t timenow = *worker->env.now;
	int must_validate = (!(flags&BIT_CD) || worker->env.cfg->ignore_cd)
		&& worker->env.need_to_validate;
	struct dns_msg *msg = NULL;
	struct delegpt *dp;

	dp = dns_cache_find_delegation(&worker->env, qinfo->qname,
		qinfo->qname_len, qinfo->qtype, qinfo->qclass,
		worker->scratchpad, &msg, timenow);
	if(!dp) { /* no delegation, need to reprime */
		regional_free_all(worker->scratchpad);
		return 0;
	}
	if(must_validate) {
		switch(check_delegation_secure(msg->rep)) {
		case sec_status_unchecked:
			/* some rrsets have not been verified yet, go and
			 * let validator do that */
			regional_free_all(worker->scratchpad);
			return 0;
		case sec_status_bogus:
			/* some rrsets are bogus, reply servfail */
			edns->edns_version = EDNS_ADVERTISED_VERSION;
			edns->udp_size = EDNS_ADVERTISED_SIZE;
			edns->ext_rcode = 0;
			edns->bits &= EDNS_DO;
			error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
				&msg->qinfo, id, flags, edns);
			regional_free_all(worker->scratchpad);
			if(worker->stats.extended) {
				worker->stats.ans_bogus++;
				worker->stats.ans_rcode[LDNS_RCODE_SERVFAIL]++;
			}
			return 1;
		case sec_status_secure:
			/* all rrsets are secure */
			/* remove non-secure rrsets from the add. section*/
			if(worker->env.cfg->val_clean_additional)
				deleg_remove_nonsecure_additional(msg->rep);
			secure = 1;
			break;
		case sec_status_indeterminate:
		case sec_status_insecure:
		default:
			/* not secure */
			secure = 0;
			break;
		}
	}
	/* return this delegation from the cache */
	edns->edns_version = EDNS_ADVERTISED_VERSION;
	edns->udp_size = EDNS_ADVERTISED_SIZE;
	edns->ext_rcode = 0;
	edns->bits &= EDNS_DO;
	msg->rep->flags |= BIT_QR|BIT_RA;
	if(!reply_info_answer_encode(&msg->qinfo, msg->rep, id, flags,
		repinfo->c->buffer, 0, 1, worker->scratchpad,
		udpsize, edns, (int)(edns->bits & EDNS_DO), secure)) {
		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
			&msg->qinfo, id, flags, edns);
	}
	regional_free_all(worker->scratchpad);
	if(worker->stats.extended) {
		if(secure) worker->stats.ans_secure++;
		server_stats_insrcode(&worker->stats, repinfo->c->buffer);
	}
	return 1;
}
525 answer_from_cache(struct worker
* worker
, struct query_info
* qinfo
,
526 struct reply_info
* rep
, uint16_t id
, uint16_t flags
,
527 struct comm_reply
* repinfo
, struct edns_data
* edns
)
529 time_t timenow
= *worker
->env
.now
;
530 uint16_t udpsize
= edns
->udp_size
;
532 int must_validate
= (!(flags
&BIT_CD
) || worker
->env
.cfg
->ignore_cd
)
533 && worker
->env
.need_to_validate
;
534 /* see if it is possible */
535 if(rep
->ttl
< timenow
) {
536 /* the rrsets may have been updated in the meantime.
537 * we will refetch the message format from the
538 * authoritative server
542 if(!rrset_array_lock(rep
->ref
, rep
->rrset_count
, timenow
))
544 /* locked and ids and ttls are OK. */
545 /* check CNAME chain (if any) */
546 if(rep
->an_numrrsets
> 0 && (rep
->rrsets
[0]->rk
.type
==
547 htons(LDNS_RR_TYPE_CNAME
) || rep
->rrsets
[0]->rk
.type
==
548 htons(LDNS_RR_TYPE_DNAME
))) {
549 if(!reply_check_cname_chain(rep
)) {
550 /* cname chain invalid, redo iterator steps */
551 verbose(VERB_ALGO
, "Cache reply: cname chain broken");
553 rrset_array_unlock_touch(worker
->env
.rrset_cache
,
554 worker
->scratchpad
, rep
->ref
, rep
->rrset_count
);
555 regional_free_all(worker
->scratchpad
);
559 /* check security status of the cached answer */
560 if( rep
->security
== sec_status_bogus
&& must_validate
) {
562 edns
->edns_version
= EDNS_ADVERTISED_VERSION
;
563 edns
->udp_size
= EDNS_ADVERTISED_SIZE
;
565 edns
->bits
&= EDNS_DO
;
566 error_encode(repinfo
->c
->buffer
, LDNS_RCODE_SERVFAIL
,
567 qinfo
, id
, flags
, edns
);
568 rrset_array_unlock_touch(worker
->env
.rrset_cache
,
569 worker
->scratchpad
, rep
->ref
, rep
->rrset_count
);
570 regional_free_all(worker
->scratchpad
);
571 if(worker
->stats
.extended
) {
572 worker
->stats
.ans_bogus
++;
573 worker
->stats
.ans_rcode
[LDNS_RCODE_SERVFAIL
] ++;
576 } else if( rep
->security
== sec_status_unchecked
&& must_validate
) {
577 verbose(VERB_ALGO
, "Cache reply: unchecked entry needs "
579 goto bail_out
; /* need to validate cache entry first */
580 } else if(rep
->security
== sec_status_secure
) {
581 if(reply_all_rrsets_secure(rep
))
585 verbose(VERB_ALGO
, "Cache reply: secure entry"
587 goto bail_out
; /* rrset changed, re-verify */
593 edns
->edns_version
= EDNS_ADVERTISED_VERSION
;
594 edns
->udp_size
= EDNS_ADVERTISED_SIZE
;
596 edns
->bits
&= EDNS_DO
;
597 if(!reply_info_answer_encode(qinfo
, rep
, id
, flags
,
598 repinfo
->c
->buffer
, timenow
, 1, worker
->scratchpad
,
599 udpsize
, edns
, (int)(edns
->bits
& EDNS_DO
), secure
)) {
600 error_encode(repinfo
->c
->buffer
, LDNS_RCODE_SERVFAIL
,
601 qinfo
, id
, flags
, edns
);
603 /* cannot send the reply right now, because blocking network syscall
604 * is bad while holding locks. */
605 rrset_array_unlock_touch(worker
->env
.rrset_cache
, worker
->scratchpad
,
606 rep
->ref
, rep
->rrset_count
);
607 regional_free_all(worker
->scratchpad
);
608 if(worker
->stats
.extended
) {
609 if(secure
) worker
->stats
.ans_secure
++;
610 server_stats_insrcode(&worker
->stats
, repinfo
->c
->buffer
);
612 /* go and return this buffer to the client */
/** Reply to client and perform prefetch to keep cache up to date */
static void
reply_and_prefetch(struct worker* worker, struct query_info* qinfo,
	uint16_t flags, struct comm_reply* repinfo, time_t leeway)
{
	/* first send answer to client to keep its latency
	 * as small as a cachereply */
	comm_point_send_reply(repinfo);
	server_stats_prefetch(&worker->stats, worker);

	/* create the prefetch in the mesh as a normal lookup without
	 * client addrs waiting, which has the cache blacklisted (to bypass
	 * the cache and go to the network for the data). */
	/* this (potentially) runs the mesh for the new query */
	mesh_new_prefetch(worker->env.mesh, qinfo, flags, leeway +
		PREFETCH_EXPIRY_ADD);
}
/**
 * Fill CH class answer into buffer. Keeps query.
 * @param pkt: buffer
 * @param str: string to put into text record (<255).
 * @param edns: edns reply information.
 */
static void
chaos_replystr(sldns_buffer* pkt, const char* str, struct edns_data* edns)
{
	size_t len = strlen(str);
	unsigned int rd = LDNS_RD_WIRE(sldns_buffer_begin(pkt));
	unsigned int cd = LDNS_CD_WIRE(sldns_buffer_begin(pkt));
	if(len>255) len=255; /* cap size of TXT record */
	sldns_buffer_clear(pkt);
	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip id */
	sldns_buffer_write_u16(pkt, (uint16_t)(BIT_QR|BIT_RA));
	if(rd) LDNS_RD_SET(sldns_buffer_begin(pkt));
	if(cd) LDNS_CD_SET(sldns_buffer_begin(pkt));
	sldns_buffer_write_u16(pkt, 1); /* qdcount */
	sldns_buffer_write_u16(pkt, 1); /* ancount */
	sldns_buffer_write_u16(pkt, 0); /* nscount */
	sldns_buffer_write_u16(pkt, 0); /* arcount */
	(void)query_dname_len(pkt); /* skip qname */
	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip qtype */
	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip qclass */
	sldns_buffer_write_u16(pkt, 0xc00c); /* compr ptr to query */
	sldns_buffer_write_u16(pkt, LDNS_RR_TYPE_TXT);
	sldns_buffer_write_u16(pkt, LDNS_RR_CLASS_CH);
	sldns_buffer_write_u32(pkt, 0); /* TTL */
	sldns_buffer_write_u16(pkt, sizeof(uint8_t) + len);
	sldns_buffer_write_u8(pkt, len);
	sldns_buffer_write(pkt, str, len);
	sldns_buffer_flip(pkt);
	edns->edns_version = EDNS_ADVERTISED_VERSION;
	edns->udp_size = EDNS_ADVERTISED_SIZE;
	edns->bits &= EDNS_DO;
	attach_edns_record(pkt, edns);
}
/**
 * Answer CH class queries.
 * @param w: worker
 * @param qinfo: query info. Pointer into packet buffer.
 * @param edns: edns info from query.
 * @param pkt: packet buffer.
 * @return: true if a reply is to be sent.
 */
static int
answer_chaos(struct worker* w, struct query_info* qinfo,
	struct edns_data* edns, sldns_buffer* pkt)
{
	struct config_file* cfg = w->env.cfg;
	if(qinfo->qtype != LDNS_RR_TYPE_ANY && qinfo->qtype != LDNS_RR_TYPE_TXT)
		return 0;
	if(query_dname_compare(qinfo->qname,
		(uint8_t*)"\002id\006server") == 0 ||
		query_dname_compare(qinfo->qname,
		(uint8_t*)"\010hostname\004bind") == 0)
	{
		if(cfg->hide_identity)
			return 0;
		if(cfg->identity==NULL || cfg->identity[0]==0) {
			char buf[MAXHOSTNAMELEN+1];
			if (gethostname(buf, MAXHOSTNAMELEN) == 0) {
				buf[MAXHOSTNAMELEN] = 0;
				chaos_replystr(pkt, buf, edns);
			} else	{
				log_err("gethostname: %s", strerror(errno));
				chaos_replystr(pkt, "no hostname", edns);
			}
		}
		else	chaos_replystr(pkt, cfg->identity, edns);
		return 1;
	}
	if(query_dname_compare(qinfo->qname,
		(uint8_t*)"\007version\006server") == 0 ||
		query_dname_compare(qinfo->qname,
		(uint8_t*)"\007version\004bind") == 0)
	{
		if(cfg->hide_version)
			return 0;
		if(cfg->version==NULL || cfg->version[0]==0)
			chaos_replystr(pkt, PACKAGE_STRING, edns);
		else	chaos_replystr(pkt, cfg->version, edns);
		return 1;
	}
	return 0;
}
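/** Check an incoming query against an access control pair.  Returns 0 when
 * the query has been dropped (deny, or a malformed refused query), 1 when a
 * REFUSED reply has been prepared in the buffer for the caller to send, and
 * -1 when normal processing should continue. */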
static int
deny_refuse(struct comm_point* c, enum acl_access acl,
	enum acl_access deny, enum acl_access refuse,
	struct worker* worker, struct comm_reply* repinfo)
{
	if(acl == deny) {
		comm_point_drop_reply(repinfo);
		if(worker->stats.extended)
			worker->stats.unwanted_queries++;
		return 0;
	} else if(acl == refuse) {
		log_addr(VERB_ALGO, "refused query from",
			&repinfo->addr, repinfo->addrlen);
		log_buf(VERB_ALGO, "refuse", c->buffer);
		if(worker->stats.extended)
			worker->stats.unwanted_queries++;
		if(worker_check_request(c->buffer, worker) == -1) {
			comm_point_drop_reply(repinfo);
			return 0; /* discard this */
		}
		sldns_buffer_set_limit(c->buffer, LDNS_HEADER_SIZE);
		sldns_buffer_write_at(c->buffer, 4,
			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
			LDNS_RCODE_REFUSED);
		sldns_buffer_flip(c->buffer);
		return 1;
	}

	return -1;
}
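/** apply the access-control deny and refuse settings that cover all queries */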
static int
deny_refuse_all(struct comm_point* c, enum acl_access acl,
	struct worker* worker, struct comm_reply* repinfo)
{
	return deny_refuse(c, acl, acl_deny, acl_refuse, worker, repinfo);
}
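/** apply the deny-non-local and refuse-non-local acl settings, used after
 * local zones have had the chance to answer the query */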
static int
deny_refuse_non_local(struct comm_point* c, enum acl_access acl,
	struct worker* worker, struct comm_reply* repinfo)
{
	return deny_refuse(c, acl, acl_deny_non_local, acl_refuse_non_local, worker, repinfo);
}
int
worker_handle_request(struct comm_point* c, void* arg, int error,
	struct comm_reply* repinfo)
{
	struct worker* worker = (struct worker*)arg;
	int ret;
	hashvalue_t h;
	struct lruhash_entry* e;
	struct query_info qinfo;
	struct edns_data edns;
	enum acl_access acl;
	int rc = 0;

	if(error != NETEVENT_NOERROR) {
		/* some bad tcp query DNS formats give these error calls */
		verbose(VERB_ALGO, "handle request called with err=%d", error);
		return 0;
	}
#ifdef USE_DNSTAP
	if(worker->dtenv.log_client_query_messages)
		dt_msg_send_client_query(&worker->dtenv, &repinfo->addr, c->type,
			c->buffer);
#endif
	acl = acl_list_lookup(worker->daemon->acl, &repinfo->addr,
		repinfo->addrlen);
	if((ret=deny_refuse_all(c, acl, worker, repinfo)) != -1)
	{
		if(ret == 1)
			goto send_reply;
		return ret;
	}
	if((ret=worker_check_request(c->buffer, worker)) != 0) {
		verbose(VERB_ALGO, "worker check request: bad query.");
		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
		if(ret != -1) {
			LDNS_QR_SET(sldns_buffer_begin(c->buffer));
			LDNS_RCODE_SET(sldns_buffer_begin(c->buffer), ret);
			return 1;
		}
		comm_point_drop_reply(repinfo);
		return 0;
	}
	worker->stats.num_queries++;
	/* see if query is in the cache */
	if(!query_info_parse(&qinfo, c->buffer)) {
		verbose(VERB_ALGO, "worker parse request: formerror.");
		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
		sldns_buffer_rewind(c->buffer);
		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
			LDNS_RCODE_FORMERR);
		server_stats_insrcode(&worker->stats, c->buffer);
		goto send_reply;
	}
	if(worker->env.cfg->log_queries) {
		char ip[128];
		addr_to_str(&repinfo->addr, repinfo->addrlen, ip, sizeof(ip));
		log_nametypeclass(0, ip, qinfo.qname, qinfo.qtype, qinfo.qclass);
	}
	if(qinfo.qtype == LDNS_RR_TYPE_AXFR ||
		qinfo.qtype == LDNS_RR_TYPE_IXFR) {
		verbose(VERB_ALGO, "worker request: refused zone transfer.");
		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
		sldns_buffer_rewind(c->buffer);
		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
			LDNS_RCODE_REFUSED);
		if(worker->stats.extended) {
			worker->stats.qtype[qinfo.qtype]++;
			server_stats_insrcode(&worker->stats, c->buffer);
		}
		goto send_reply;
	}
	if((ret=parse_edns_from_pkt(c->buffer, &edns)) != 0) {
		verbose(VERB_ALGO, "worker parse edns: formerror.");
		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
		sldns_buffer_rewind(c->buffer);
		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer), ret);
		server_stats_insrcode(&worker->stats, c->buffer);
		goto send_reply;
	}
	if(edns.edns_present && edns.edns_version != 0) {
		edns.ext_rcode = (uint8_t)(EDNS_RCODE_BADVERS>>4);
		edns.edns_version = EDNS_ADVERTISED_VERSION;
		edns.udp_size = EDNS_ADVERTISED_SIZE;
		edns.bits &= EDNS_DO;
		verbose(VERB_ALGO, "query with bad edns version.");
		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
		error_encode(c->buffer, EDNS_RCODE_BADVERS&0xf, &qinfo,
			*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
			sldns_buffer_read_u16_at(c->buffer, 2), NULL);
		attach_edns_record(c->buffer, &edns);
		goto send_reply;
	}
	if(edns.edns_present && edns.udp_size < NORMAL_UDP_SIZE &&
		worker->daemon->cfg->harden_short_bufsize) {
		verbose(VERB_QUERY, "worker request: EDNS bufsize %d ignored",
			(int)edns.udp_size);
		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
		edns.udp_size = NORMAL_UDP_SIZE;
	}
	if(edns.udp_size > worker->daemon->cfg->max_udp_size &&
		c->type == comm_udp) {
		verbose(VERB_QUERY,
			"worker request: max UDP reply size modified"
			" (%d to max-udp-size)", (int)edns.udp_size);
		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
		edns.udp_size = worker->daemon->cfg->max_udp_size;
	}
	if(edns.udp_size < LDNS_HEADER_SIZE) {
		verbose(VERB_ALGO, "worker request: edns is too small.");
		log_addr(VERB_CLIENT, "from", &repinfo->addr, repinfo->addrlen);
		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
		LDNS_TC_SET(sldns_buffer_begin(c->buffer));
		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
			LDNS_RCODE_SERVFAIL);
		sldns_buffer_set_position(c->buffer, LDNS_HEADER_SIZE);
		sldns_buffer_write_at(c->buffer, 4,
			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
		sldns_buffer_flip(c->buffer);
		goto send_reply;
	}
	if(worker->stats.extended)
		server_stats_insquery(&worker->stats, c, qinfo.qtype,
			qinfo.qclass, &edns, repinfo);
	if(c->type != comm_udp)
		edns.udp_size = 65535; /* max size for TCP replies */
	if(qinfo.qclass == LDNS_RR_CLASS_CH && answer_chaos(worker, &qinfo,
		&edns, c->buffer)) {
		server_stats_insrcode(&worker->stats, c->buffer);
		goto send_reply;
	}
	if(local_zones_answer(worker->daemon->local_zones, &qinfo, &edns,
		c->buffer, worker->scratchpad)) {
		regional_free_all(worker->scratchpad);
		if(sldns_buffer_limit(c->buffer) == 0) {
			comm_point_drop_reply(repinfo);
			return 0;
		}
		server_stats_insrcode(&worker->stats, c->buffer);
		goto send_reply;
	}

	/* We've looked in our local zones. If the answer isn't there, we
	 * might need to bail out based on ACLs now. */
	if((ret=deny_refuse_non_local(c, acl, worker, repinfo)) != -1)
	{
		if(ret == 1)
			goto send_reply;
		return ret;
	}

	/* If this request does not have the recursion bit set, verify
	 * ACLs allow the snooping. */
	if(!(LDNS_RD_WIRE(sldns_buffer_begin(c->buffer))) &&
		acl != acl_allow_snoop ) {
		sldns_buffer_set_limit(c->buffer, LDNS_HEADER_SIZE);
		sldns_buffer_write_at(c->buffer, 4,
			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
			LDNS_RCODE_REFUSED);
		sldns_buffer_flip(c->buffer);
		server_stats_insrcode(&worker->stats, c->buffer);
		log_addr(VERB_ALGO, "refused nonrec (cache snoop) query from",
			&repinfo->addr, repinfo->addrlen);
		goto send_reply;
	}
	h = query_info_hash(&qinfo, sldns_buffer_read_u16_at(c->buffer, 2));
	if((e=slabhash_lookup(worker->env.msg_cache, h, &qinfo, 0))) {
		/* answer from cache - we have acquired a readlock on it */
		if(answer_from_cache(worker, &qinfo,
			(struct reply_info*)e->data,
			*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
			sldns_buffer_read_u16_at(c->buffer, 2), repinfo,
			&edns)) {
			/* prefetch it if the prefetch TTL expired */
			if(worker->env.cfg->prefetch && *worker->env.now >=
				((struct reply_info*)e->data)->prefetch_ttl) {
				time_t leeway = ((struct reply_info*)e->
					data)->ttl - *worker->env.now;
				lock_rw_unlock(&e->lock);
				reply_and_prefetch(worker, &qinfo,
					sldns_buffer_read_u16_at(c->buffer, 2),
					repinfo, leeway);
				rc = 0;
				goto send_reply_rc;
			}
			lock_rw_unlock(&e->lock);
			goto send_reply;
		}
		verbose(VERB_ALGO, "answer from the cache failed");
		lock_rw_unlock(&e->lock);
	}
	if(!LDNS_RD_WIRE(sldns_buffer_begin(c->buffer))) {
		if(answer_norec_from_cache(worker, &qinfo,
			*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
			sldns_buffer_read_u16_at(c->buffer, 2), repinfo,
			&edns)) {
			goto send_reply;
		}
		verbose(VERB_ALGO, "answer norec from cache -- "
			"need to validate or not primed");
	}
	sldns_buffer_rewind(c->buffer);
	server_stats_querymiss(&worker->stats, worker);

	if(verbosity >= VERB_CLIENT) {
		if(c->type == comm_udp)
			log_addr(VERB_CLIENT, "udp request from",
				&repinfo->addr, repinfo->addrlen);
		else	log_addr(VERB_CLIENT, "tcp request from",
				&repinfo->addr, repinfo->addrlen);
	}

	/* grab a work request structure for this new request */
	mesh_new_client(worker->env.mesh, &qinfo,
		sldns_buffer_read_u16_at(c->buffer, 2),
		&edns, repinfo, *(uint16_t*)(void *)sldns_buffer_begin(c->buffer));
	worker_mem_report(worker, NULL);
	return 0;

send_reply:
	rc = 1;
send_reply_rc:
#ifdef USE_DNSTAP
	if(worker->dtenv.log_client_response_messages)
		dt_msg_send_client_response(&worker->dtenv, &repinfo->addr,
			c->type, c->buffer);
#endif
	return rc;
}
void
worker_sighandler(int sig, void* arg)
{
	/* note that log, print, syscalls here give race conditions.
	 * And cause hangups if the log-lock is held by the application. */
	struct worker* worker = (struct worker*)arg;
	switch(sig) {
#ifdef SIGHUP
		case SIGHUP:
			comm_base_exit(worker->base);
			break;
#endif
		case SIGINT:
			worker->need_to_exit = 1;
			comm_base_exit(worker->base);
			break;
#ifdef SIGQUIT
		case SIGQUIT:
			worker->need_to_exit = 1;
			comm_base_exit(worker->base);
			break;
#endif
		case SIGTERM:
			worker->need_to_exit = 1;
			comm_base_exit(worker->base);
			break;
		default:
			/* unknown signal, ignored */
			break;
	}
}
/** restart statistics timer for worker, if enabled */
static void
worker_restart_timer(struct worker* worker)
{
	if(worker->env.cfg->stat_interval > 0) {
		struct timeval tv;
#ifndef S_SPLINT_S
		tv.tv_sec = worker->env.cfg->stat_interval;
		tv.tv_usec = 0;
#endif
		comm_timer_set(worker->stat_timer, &tv);
	}
}
void worker_stat_timer_cb(void* arg)
{
	struct worker* worker = (struct worker*)arg;
	server_stats_log(&worker->stats, worker, worker->thread_num);
	mesh_stats(worker->env.mesh, "mesh has");
	worker_mem_report(worker, NULL);
	if(!worker->daemon->cfg->stat_cumulative) {
		worker_stats_clear(worker);
	}
	/* start next timer */
	worker_restart_timer(worker);
}
void worker_probe_timer_cb(void* arg)
{
	struct worker* worker = (struct worker*)arg;
	struct timeval tv;
#ifndef S_SPLINT_S
	tv.tv_sec = (time_t)autr_probe_timer(&worker->env);
	tv.tv_usec = 0;
#endif
	if(tv.tv_sec != 0)
		comm_timer_set(worker->env.probe_timer, &tv);
}
struct worker*
worker_create(struct daemon* daemon, int id, int* ports, int n)
{
	unsigned int seed;
	struct worker* worker = (struct worker*)calloc(1,
		sizeof(struct worker));
	if(!worker)
		return NULL;
	worker->numports = n;
	worker->ports = (int*)memdup(ports, sizeof(int)*n);
	if(!worker->ports) {
		free(worker);
		return NULL;
	}
	worker->daemon = daemon;
	worker->thread_num = id;
	if(!(worker->cmd = tube_create())) {
		free(worker->ports);
		free(worker);
		return NULL;
	}
	/* create random state here to avoid locking trouble in RAND_bytes */
	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
		(((unsigned int)worker->thread_num)<<17);
		/* shift thread_num so it does not match our pid bits */
	if(!(worker->rndstate = ub_initstate(seed, daemon->rand))) {
		seed = 0;
		log_err("could not init random numbers.");
		tube_delete(worker->cmd);
		free(worker->ports);
		free(worker);
		return NULL;
	}
	seed = 0;
#ifdef USE_DNSTAP
	if(daemon->cfg->dnstap) {
		log_assert(daemon->dtenv != NULL);
		memcpy(&worker->dtenv, daemon->dtenv, sizeof(struct dt_env));
		if(!dt_init(&worker->dtenv))
			fatal_exit("dt_init failed");
	}
#endif
	return worker;
}
int
worker_init(struct worker* worker, struct config_file *cfg,
	struct listen_port* ports, int do_sigs)
{
#ifdef USE_DNSTAP
	struct dt_env* dtenv = &worker->dtenv;
#else
	void* dtenv = NULL;
#endif
	worker->need_to_exit = 0;
	worker->base = comm_base_create(do_sigs);
	if(!worker->base) {
		log_err("could not create event handling base");
		worker_delete(worker);
		return 0;
	}
	comm_base_set_slow_accept_handlers(worker->base, &worker_stop_accept,
		&worker_start_accept, worker);
	if(do_sigs) {
#ifdef SIGHUP
		ub_thread_sig_unblock(SIGHUP);
#endif
		ub_thread_sig_unblock(SIGINT);
#ifdef SIGQUIT
		ub_thread_sig_unblock(SIGQUIT);
#endif
		ub_thread_sig_unblock(SIGTERM);
#ifndef LIBEVENT_SIGNAL_PROBLEM
		worker->comsig = comm_signal_create(worker->base,
			worker_sighandler, worker);
		if(!worker->comsig
#ifdef SIGHUP
			|| !comm_signal_bind(worker->comsig, SIGHUP)
#endif
#ifdef SIGQUIT
			|| !comm_signal_bind(worker->comsig, SIGQUIT)
#endif
			|| !comm_signal_bind(worker->comsig, SIGTERM)
			|| !comm_signal_bind(worker->comsig, SIGINT)) {
			log_err("could not create signal handlers");
			worker_delete(worker);
			return 0;
		}
#endif /* LIBEVENT_SIGNAL_PROBLEM */
		if(!daemon_remote_open_accept(worker->daemon->rc,
			worker->daemon->rc_ports, worker)) {
			worker_delete(worker);
			return 0;
		}
#ifdef UB_ON_WINDOWS
		wsvc_setup_worker(worker);
#endif /* UB_ON_WINDOWS */
	} else { /* !do_sigs */
		worker->comsig = NULL;
	}
	worker->front = listen_create(worker->base, ports,
		cfg->msg_buffer_size, (int)cfg->incoming_num_tcp,
		worker->daemon->listen_sslctx, dtenv, worker_handle_request,
		worker);
	if(!worker->front) {
		log_err("could not create listening sockets");
		worker_delete(worker);
		return 0;
	}
	worker->back = outside_network_create(worker->base,
		cfg->msg_buffer_size, (size_t)cfg->outgoing_num_ports,
		cfg->out_ifs, cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
		cfg->do_tcp?cfg->outgoing_num_tcp:0,
		worker->daemon->env->infra_cache, worker->rndstate,
		cfg->use_caps_bits_for_id, worker->ports, worker->numports,
		cfg->unwanted_threshold, &worker_alloc_cleanup, worker,
		cfg->do_udp, worker->daemon->connect_sslctx, cfg->delay_close,
		dtenv);
	if(!worker->back) {
		log_err("could not create outgoing sockets");
		worker_delete(worker);
		return 0;
	}
	/* start listening to commands */
	if(!tube_setup_bg_listen(worker->cmd, worker->base,
		&worker_handle_control_cmd, worker)) {
		log_err("could not create control compt.");
		worker_delete(worker);
		return 0;
	}
	worker->stat_timer = comm_timer_create(worker->base,
		worker_stat_timer_cb, worker);
	if(!worker->stat_timer) {
		log_err("could not create statistics timer");
	}

	/* we use the msg_buffer_size as a good estimate for what the
	 * user wants for memory usage sizes */
	worker->scratchpad = regional_create_custom(cfg->msg_buffer_size);
	if(!worker->scratchpad) {
		log_err("malloc failure");
		worker_delete(worker);
		return 0;
	}

	server_stats_init(&worker->stats, cfg);
	alloc_init(&worker->alloc, &worker->daemon->superalloc,
		worker->thread_num);
	alloc_set_id_cleanup(&worker->alloc, &worker_alloc_cleanup, worker);
	worker->env = *worker->daemon->env;
	comm_base_timept(worker->base, &worker->env.now, &worker->env.now_tv);
	if(worker->thread_num == 0)
		log_set_time(worker->env.now);
	worker->env.worker = worker;
	worker->env.send_query = &worker_send_query;
	worker->env.alloc = &worker->alloc;
	worker->env.rnd = worker->rndstate;
	worker->env.scratch = worker->scratchpad;
	worker->env.mesh = mesh_create(&worker->daemon->mods, &worker->env);
	worker->env.detach_subs = &mesh_detach_subs;
	worker->env.attach_sub = &mesh_attach_sub;
	worker->env.kill_sub = &mesh_state_delete;
	worker->env.detect_cycle = &mesh_detect_cycle;
	worker->env.scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
	if(!(worker->env.fwds = forwards_create()) ||
		!forwards_apply_cfg(worker->env.fwds, cfg)) {
		log_err("Could not set forward zones");
		worker_delete(worker);
		return 0;
	}
	if(!(worker->env.hints = hints_create()) ||
		!hints_apply_cfg(worker->env.hints, cfg)) {
		log_err("Could not set root or stub hints");
		worker_delete(worker);
		return 0;
	}
	/* one probe timer per process -- if we have 5011 anchors */
	if(autr_get_num_anchors(worker->env.anchors) > 0
#ifndef THREADS_DISABLED
		&& worker->thread_num == 0
#endif
		) {
		struct timeval tv;
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		worker->env.probe_timer = comm_timer_create(worker->base,
			worker_probe_timer_cb, worker);
		if(!worker->env.probe_timer) {
			log_err("could not create 5011-probe timer");
		} else {
			/* let timer fire, then it can reset itself */
			comm_timer_set(worker->env.probe_timer, &tv);
		}
	}
	if(!worker->env.mesh || !worker->env.scratch_buffer) {
		worker_delete(worker);
		return 0;
	}
	worker_mem_report(worker, NULL);
	/* if statistics enabled start timer */
	if(worker->env.cfg->stat_interval > 0) {
		verbose(VERB_ALGO, "set statistics interval %d secs",
			worker->env.cfg->stat_interval);
		worker_restart_timer(worker);
	}
	return 1;
}
void
worker_work(struct worker* worker)
{
	comm_base_dispatch(worker->base);
}
void
worker_delete(struct worker* worker)
{
	if(!worker)
		return;
	if(worker->env.mesh && verbosity >= VERB_OPS) {
		server_stats_log(&worker->stats, worker, worker->thread_num);
		mesh_stats(worker->env.mesh, "mesh has");
		worker_mem_report(worker, NULL);
	}
	outside_network_quit_prepare(worker->back);
	mesh_delete(worker->env.mesh);
	sldns_buffer_free(worker->env.scratch_buffer);
	forwards_delete(worker->env.fwds);
	hints_delete(worker->env.hints);
	listen_delete(worker->front);
	outside_network_delete(worker->back);
	comm_signal_delete(worker->comsig);
	tube_delete(worker->cmd);
	comm_timer_delete(worker->stat_timer);
	comm_timer_delete(worker->env.probe_timer);
	free(worker->ports);
	if(worker->thread_num == 0) {
		log_set_time(NULL);
#ifdef UB_ON_WINDOWS
		wsvc_desetup_worker(worker);
#endif /* UB_ON_WINDOWS */
	}
	comm_base_delete(worker->base);
	ub_randfree(worker->rndstate);
	alloc_clear(&worker->alloc);
	regional_destroy(worker->scratchpad);
	free(worker);
}
struct outbound_entry*
worker_send_query(uint8_t* qname, size_t qnamelen, uint16_t qtype,
	uint16_t qclass, uint16_t flags, int dnssec, int want_dnssec,
	int nocaps, struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen, struct module_qstate* q)
{
	struct worker* worker = q->env->worker;
	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
		q->region, sizeof(*e));
	if(!e)
		return NULL;
	e->qstate = q;
	e->qsent = outnet_serviced_query(worker->back, qname,
		qnamelen, qtype, qclass, flags, dnssec, want_dnssec, nocaps,
		q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, addr,
		addrlen, zone, zonelen, worker_handle_service_reply, e,
		worker->back->udp_buff);
	if(e->qsent == NULL) {
		return NULL;
	}
	return e;
}
void
worker_alloc_cleanup(void* arg)
{
	struct worker* worker = (struct worker*)arg;
	slabhash_clear(&worker->env.rrset_cache->table);
	slabhash_clear(worker->env.msg_cache);
}
void worker_stats_clear(struct worker* worker)
{
	server_stats_init(&worker->stats, worker->env.cfg);
	mesh_stats_clear(worker->env.mesh);
	worker->back->unwanted_replies = 0;
	worker->back->num_tcp_outgoing = 0;
}
void worker_start_accept(void* arg)
{
	struct worker* worker = (struct worker*)arg;
	listen_start_accept(worker->front);
	if(worker->thread_num == 0)
		daemon_remote_start_accept(worker->daemon->rc);
}
void worker_stop_accept(void* arg)
{
	struct worker* worker = (struct worker*)arg;
	listen_stop_accept(worker->front);
	if(worker->thread_num == 0)
		daemon_remote_stop_accept(worker->daemon->rc);
}
/* --- fake callbacks for fptr_wlist to work --- */
struct outbound_entry* libworker_send_query(uint8_t* ATTR_UNUSED(qname),
	size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
	uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
	int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
	int ATTR_UNUSED(nocaps), struct sockaddr_storage* ATTR_UNUSED(addr),
	socklen_t ATTR_UNUSED(addrlen), uint8_t* ATTR_UNUSED(zone),
	size_t ATTR_UNUSED(zonelen), struct module_qstate* ATTR_UNUSED(q))
{
	log_assert(0);
	return 0;
}

int libworker_handle_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int libworker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

void libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
	int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void libworker_fg_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
	sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
	char* ATTR_UNUSED(why_bogus))
{
	log_assert(0);
}

void libworker_bg_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
	sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
	char* ATTR_UNUSED(why_bogus))
{
	log_assert(0);
}

void libworker_event_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
	sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
	char* ATTR_UNUSED(why_bogus))
{
	log_assert(0);
}

int context_query_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
{
	log_assert(0);
	return 0;
}

int codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}