/*
 * daemon/stats.c - collect runtime performance indicators.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file describes the data structure used to collect runtime performance
 * numbers. These 'statistics' may be of interest to the operator.
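 *
 * A minimal, hedged sketch of how a worker thread might drive these
 * routines. The worker variable and its thread number are assumptions
 * about the caller's context, not defined in this file:
 *
 *	struct server_stats st;
 *	server_stats_init(&st, worker->env.cfg);
 *	// on every query that misses the cache:
 *	server_stats_querymiss(&st, worker);
 *	// periodically, or at shutdown:
 *	server_stats_log(&st, worker, worker->thread_num);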
 */
#include "config.h"
#ifdef HAVE_TIME_H
#include <time.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#include "daemon/stats.h"
#include "daemon/worker.h"
#include "daemon/daemon.h"
#include "services/mesh.h"
#include "services/outside_network.h"
#include "services/listen_dnsport.h"
#include "util/config_file.h"
#include "util/tube.h"
#include "util/timehist.h"
#include "util/net_help.h"
#include "validator/validator.h"
#include "sldns/sbuffer.h"
#include "services/cache/rrset.h"
#include "services/cache/infra.h"
#include "validator/val_kcache.h"

/** add two timers; the result is normalized so it does not overflow or
 * become negative */
static void
timeval_add(struct timeval* d, const struct timeval* add)
{
#ifndef S_SPLINT_S
	d->tv_sec += add->tv_sec;
	d->tv_usec += add->tv_usec;
	if(d->tv_usec >= 1000000) {
		d->tv_usec -= 1000000;
		d->tv_sec++;
	}
#endif
}

void server_stats_init(struct server_stats* stats, struct config_file* cfg)
{
	memset(stats, 0, sizeof(*stats));
	stats->extended = cfg->stat_extended;
}

void server_stats_querymiss(struct server_stats* stats, struct worker* worker)
{
	stats->num_queries_missed_cache++;
	stats->sum_query_list_size += worker->env.mesh->all.count;
	if(worker->env.mesh->all.count > stats->max_query_list_size)
		stats->max_query_list_size = worker->env.mesh->all.count;
}

void server_stats_prefetch(struct server_stats* stats, struct worker* worker)
{
	stats->num_queries_prefetch++;
	/* this changes the query list size, so account for it like a
	 * querymiss */
	stats->sum_query_list_size += worker->env.mesh->all.count;
	if(worker->env.mesh->all.count > stats->max_query_list_size)
		stats->max_query_list_size = worker->env.mesh->all.count;
}

void server_stats_log(struct server_stats* stats, struct worker* worker,
	int threadnum)
{
	log_info("server stats for thread %d: %u queries, "
		"%u answers from cache, %u recursions, %u prefetch",
		threadnum, (unsigned)stats->num_queries,
		(unsigned)(stats->num_queries -
			stats->num_queries_missed_cache),
		(unsigned)stats->num_queries_missed_cache,
		(unsigned)stats->num_queries_prefetch);
	log_info("server stats for thread %d: requestlist max %u avg %g "
		"exceeded %u jostled %u", threadnum,
		(unsigned)stats->max_query_list_size,
		(stats->num_queries_missed_cache+stats->num_queries_prefetch)?
		(double)stats->sum_query_list_size/
		(stats->num_queries_missed_cache+
			stats->num_queries_prefetch) : 0.0,
		(unsigned)worker->env.mesh->stats_dropped,
		(unsigned)worker->env.mesh->stats_jostled);
}

/** get the number of rrsets marked bogus by the validator */
static size_t
get_rrset_bogus(struct worker* worker)
{
	int m = modstack_find(&worker->env.mesh->mods, "validator");
	struct val_env* ve;
	size_t r;
	if(m == -1)
		return 0;
	ve = (struct val_env*)worker->env.modinfo[m];
	lock_basic_lock(&ve->bogus_lock);
	r = ve->num_rrset_bogus;
	if(!worker->env.cfg->stat_cumulative)
		ve->num_rrset_bogus = 0;
	lock_basic_unlock(&ve->bogus_lock);
	return r;
}

void
server_stats_compile(struct worker* worker, struct stats_info* s, int reset)
{
	int i;
	struct listen_list* lp;

	s->svr = worker->stats;
	s->mesh_num_states = worker->env.mesh->all.count;
	s->mesh_num_reply_states = worker->env.mesh->num_reply_states;
	s->mesh_jostled = worker->env.mesh->stats_jostled;
	s->mesh_dropped = worker->env.mesh->stats_dropped;
	s->mesh_replies_sent = worker->env.mesh->replies_sent;
	s->mesh_replies_sum_wait = worker->env.mesh->replies_sum_wait;
	s->mesh_time_median = timehist_quartile(worker->env.mesh->histogram,
		0.50);

	/* add in the values from the mesh */
	s->svr.ans_secure += worker->env.mesh->ans_secure;
	s->svr.ans_bogus += worker->env.mesh->ans_bogus;
	s->svr.ans_rcode_nodata += worker->env.mesh->ans_nodata;
	for(i=0; i<16; i++)
		s->svr.ans_rcode[i] += worker->env.mesh->ans_rcode[i];
	timehist_export(worker->env.mesh->histogram, s->svr.hist,
		NUM_BUCKETS_HIST);
	/* values from outside network */
	s->svr.unwanted_replies = worker->back->unwanted_replies;
	s->svr.qtcp_outgoing = worker->back->num_tcp_outgoing;

	/* get and reset validator rrset bogus number */
	s->svr.rrset_bogus = get_rrset_bogus(worker);

	/* get cache sizes */
	s->svr.msg_cache_count = count_slabhash_entries(worker->env.msg_cache);
	s->svr.rrset_cache_count = count_slabhash_entries(
		&worker->env.rrset_cache->table);
	s->svr.infra_cache_count = count_slabhash_entries(
		worker->env.infra_cache->hosts);
	if(worker->env.key_cache)
		s->svr.key_cache_count = count_slabhash_entries(
			worker->env.key_cache->slab);
	else	s->svr.key_cache_count = 0;

	/* get tcp accept usage */
	s->svr.tcp_accept_usage = 0;
	for(lp = worker->front->cps; lp; lp = lp->next) {
		if(lp->com->type == comm_tcp_accept)
			s->svr.tcp_accept_usage += lp->com->cur_tcp_count;
	}

	if(reset && !worker->env.cfg->stat_cumulative) {
		worker_stats_clear(worker);
	}
}

void server_stats_obtain(struct worker* worker, struct worker* who,
	struct stats_info* s, int reset)
{
	uint8_t *reply = NULL;
	uint32_t len = 0;
	if(worker == who) {
		/* just fill it in */
		server_stats_compile(worker, s, reset);
		return;
	}
	/* communicate over tube */
	verbose(VERB_ALGO, "write stats cmd");
	if(reset)
		worker_send_cmd(who, worker_cmd_stats);
	else	worker_send_cmd(who, worker_cmd_stats_noreset);
	verbose(VERB_ALGO, "wait for stats reply");
	if(!tube_read_msg(worker->cmd, &reply, &len, 0))
		fatal_exit("failed to read stats over cmd channel");
	if(len != (uint32_t)sizeof(*s))
		fatal_exit("stats on cmd channel wrong length %d %d",
			(int)len, (int)sizeof(*s));
	memcpy(s, reply, (size_t)len);
	free(reply);
}
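
/*
 * Hedged aggregation sketch, illustrative only: it shows how per-worker
 * statistics gathered with server_stats_obtain() can be folded into one
 * total with server_stats_add(). The daemon->num field and the workers
 * array used below are assumptions about the surrounding daemon code,
 * based on how this file already reaches worker->daemon->workers:
 *
 *	struct stats_info total, part;
 *	int i;
 *	server_stats_obtain(worker, worker->daemon->workers[0], &total, 1);
 *	for(i = 1; i < worker->daemon->num; i++) {
 *		server_stats_obtain(worker, worker->daemon->workers[i],
 *			&part, 1);
 *		server_stats_add(&total, &part);
 *	}
 */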

void server_stats_reply(struct worker* worker, int reset)
{
	struct stats_info s;
	server_stats_compile(worker, &s, reset);
	verbose(VERB_ALGO, "write stats replymsg");
	if(!tube_write_msg(worker->daemon->workers[0]->cmd,
		(uint8_t*)&s, sizeof(s), 0))
		fatal_exit("could not write stat values over cmd channel");
}

void server_stats_add(struct stats_info* total, struct stats_info* a)
{
	total->svr.num_queries += a->svr.num_queries;
	total->svr.num_queries_missed_cache += a->svr.num_queries_missed_cache;
	total->svr.num_queries_prefetch += a->svr.num_queries_prefetch;
	total->svr.sum_query_list_size += a->svr.sum_query_list_size;
	/* the max size reached is the higher of the two */
	if(a->svr.max_query_list_size > total->svr.max_query_list_size)
		total->svr.max_query_list_size = a->svr.max_query_list_size;

	if(a->svr.extended) {
		int i;
		total->svr.qtype_big += a->svr.qtype_big;
		total->svr.qclass_big += a->svr.qclass_big;
		total->svr.qtcp += a->svr.qtcp;
		total->svr.qtcp_outgoing += a->svr.qtcp_outgoing;
		total->svr.qipv6 += a->svr.qipv6;
		total->svr.qbit_QR += a->svr.qbit_QR;
		total->svr.qbit_AA += a->svr.qbit_AA;
		total->svr.qbit_TC += a->svr.qbit_TC;
		total->svr.qbit_RD += a->svr.qbit_RD;
		total->svr.qbit_RA += a->svr.qbit_RA;
		total->svr.qbit_Z += a->svr.qbit_Z;
		total->svr.qbit_AD += a->svr.qbit_AD;
		total->svr.qbit_CD += a->svr.qbit_CD;
		total->svr.qEDNS += a->svr.qEDNS;
		total->svr.qEDNS_DO += a->svr.qEDNS_DO;
		total->svr.ans_rcode_nodata += a->svr.ans_rcode_nodata;
		total->svr.ans_secure += a->svr.ans_secure;
		total->svr.ans_bogus += a->svr.ans_bogus;
		total->svr.rrset_bogus += a->svr.rrset_bogus;
		total->svr.unwanted_replies += a->svr.unwanted_replies;
		total->svr.unwanted_queries += a->svr.unwanted_queries;
		total->svr.tcp_accept_usage += a->svr.tcp_accept_usage;
		for(i=0; i<STATS_QTYPE_NUM; i++)
			total->svr.qtype[i] += a->svr.qtype[i];
		for(i=0; i<STATS_QCLASS_NUM; i++)
			total->svr.qclass[i] += a->svr.qclass[i];
		for(i=0; i<STATS_OPCODE_NUM; i++)
			total->svr.qopcode[i] += a->svr.qopcode[i];
		for(i=0; i<STATS_RCODE_NUM; i++)
			total->svr.ans_rcode[i] += a->svr.ans_rcode[i];
		for(i=0; i<NUM_BUCKETS_HIST; i++)
			total->svr.hist[i] += a->svr.hist[i];
	}

	total->mesh_num_states += a->mesh_num_states;
	total->mesh_num_reply_states += a->mesh_num_reply_states;
	total->mesh_jostled += a->mesh_jostled;
	total->mesh_dropped += a->mesh_dropped;
	total->mesh_replies_sent += a->mesh_replies_sent;
	timeval_add(&total->mesh_replies_sum_wait, &a->mesh_replies_sum_wait);
	/* the medians are averaged together; this is not as accurate as
	 * taking the median over all of the data, but it is good and fast.
	 * They are added up here; the division happens later. */
	total->mesh_time_median += a->mesh_time_median;
}

void server_stats_insquery(struct server_stats* stats, struct comm_point* c,
	uint16_t qtype, uint16_t qclass, struct edns_data* edns,
	struct comm_reply* repinfo)
{
	uint16_t flags = sldns_buffer_read_u16_at(c->buffer, 2);
	if(qtype < STATS_QTYPE_NUM)
		stats->qtype[qtype]++;
	else	stats->qtype_big++;
	if(qclass < STATS_QCLASS_NUM)
		stats->qclass[qclass]++;
	else	stats->qclass_big++;
	stats->qopcode[ LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) ]++;
	if(c->type != comm_udp)
		stats->qtcp++;
	if(repinfo && addr_is_ip6(&repinfo->addr, repinfo->addrlen))
		stats->qipv6++;
	if( (flags&BIT_QR) )
		stats->qbit_QR++;
	if( (flags&BIT_AA) )
		stats->qbit_AA++;
	if( (flags&BIT_TC) )
		stats->qbit_TC++;
	if( (flags&BIT_RD) )
		stats->qbit_RD++;
	if( (flags&BIT_RA) )
		stats->qbit_RA++;
	if( (flags&BIT_Z) )
		stats->qbit_Z++;
	if( (flags&BIT_AD) )
		stats->qbit_AD++;
	if( (flags&BIT_CD) )
		stats->qbit_CD++;
	if(edns->edns_present) {
		stats->qEDNS++;
		if( (edns->bits & EDNS_DO) )
			stats->qEDNS_DO++;
	}
}

void server_stats_insrcode(struct server_stats* stats, sldns_buffer* buf)
{
	if(stats->extended && sldns_buffer_limit(buf) != 0) {
		int r = (int)LDNS_RCODE_WIRE( sldns_buffer_begin(buf) );
		stats->ans_rcode[r] ++;
		if(r == 0 && LDNS_ANCOUNT( sldns_buffer_begin(buf) ) == 0)
			stats->ans_rcode_nodata ++;
	}
}
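
/*
 * Hedged usage sketch for the per-query counters above. The qinfo, edns
 * and repinfo variables are assumptions about the caller's request
 * handling context, not defined in this file: the incoming query is
 * recorded when it has been parsed, and the rcode of the answer is
 * recorded once the reply buffer has been filled.
 *
 *	server_stats_insquery(&worker->stats, c, qinfo.qtype, qinfo.qclass,
 *		&edns, repinfo);
 *	// ...answer from cache or run the recursion...
 *	server_stats_insrcode(&worker->stats, c->buffer);
 */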