#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>

#include "ntp_assert.h"
#include "ntp_syslog.h"
#include "ntp_stdlib.h"
#include "ntp_lists.h"
#include "recvbuff.h"
#include "iosignal.h"

#if (RECV_INC & (RECV_INC - 1))
# error RECV_INC not a power of 2!
#endif
#if (RECV_BATCH & (RECV_BATCH - 1))
# error RECV_BATCH not a power of 2!
#endif
#if (RECV_BATCH < RECV_INC)
# error RECV_BATCH must be >= RECV_INC!
#endif

/*
 * Memory allocation
 */
static u_long volatile full_recvbufs;	/* recvbufs on full_recv_fifo */
static u_long volatile free_recvbufs;	/* recvbufs on free_recv_list */
static u_long volatile total_recvbufs;	/* total recvbufs currently in use */
static u_long volatile lowater_adds;	/* number of times we have added memory */
static u_long volatile buffer_shortfall;/* number of missed free receive buffers
					   between replenishments */
static u_long limit_recvbufs;		/* maximum total of receive buffers */
static u_long emerg_recvbufs;		/* emergency/urgent buffers to keep */

static DECL_FIFO_ANCHOR(recvbuf_t) full_recv_fifo;
static recvbuf_t *free_recv_list;

#if defined(SYS_WINNT)

/*
 * For Windows we need to set up locks to manipulate the recv
 * buffers to prevent corruption.  We hold each lock for as short
 * a time as possible.
 */
static CRITICAL_SECTION RecvLock;
static CRITICAL_SECTION FreeLock;
# define LOCK_R()	EnterCriticalSection(&RecvLock)
# define UNLOCK_R()	LeaveCriticalSection(&RecvLock)
# define LOCK_F()	EnterCriticalSection(&FreeLock)
# define UNLOCK_F()	LeaveCriticalSection(&FreeLock)
#else
# define LOCK_R()	do {} while (FALSE)
# define UNLOCK_R()	do {} while (FALSE)
# define LOCK_F()	do {} while (FALSE)
# define UNLOCK_F()	do {} while (FALSE)
#endif

#ifdef DEBUG
static void uninit_recvbuff(void);
#endif


u_long
free_recvbuffs(void)
{
	return free_recvbufs;
}

u_long
full_recvbuffs(void)
{
	return full_recvbufs;
}

u_long
total_recvbuffs(void)
{
	return total_recvbufs;
}

u_long
lowater_additions(void)
{
	return lowater_adds;
}

static inline void
initialise_buffer(recvbuf_t *buff)
{
	ZERO(*buff);
}

/*
 * create_buffers - allocate nbufs additional receive buffers (plus any
 *		    recorded shortfall), rounded up to the allocation
 *		    granularity and capped at limit_recvbufs, and link
 *		    them onto the free list.
 */
static void
create_buffers(
	size_t	nbufs
	)
{
# ifndef DEBUG
	static const u_int chunk = RECV_INC;
# else
	/* Allocate each buffer individually so they can be free()d
	 * during ntpd shutdown on DEBUG builds to keep them out of
	 * heap leak reports.
	 */
	static const u_int chunk = 1;
# endif

	register recvbuf_t *bufp;
	u_int	i;
	size_t	abuf;

	if (limit_recvbufs <= total_recvbufs)
		return;

	abuf = nbufs + buffer_shortfall;
	buffer_shortfall = 0;

	if (abuf < nbufs || abuf > RECV_BATCH)
		abuf = RECV_BATCH;			/* clamp on overflow */
	else
		abuf += (~abuf + 1) & (RECV_INC - 1);	/* round up */

	if (abuf > (limit_recvbufs - total_recvbufs))
		abuf = limit_recvbufs - total_recvbufs;
	abuf += (~abuf + 1) & (chunk - 1);		/* round up */

	while (abuf) {
		bufp = calloc(chunk, sizeof(*bufp));
		if (!bufp) {
			limit_recvbufs = total_recvbufs;
			break;
		}
		for (i = chunk; i; --i, ++bufp) {
			LINK_SLIST(free_recv_list, bufp, link);
		}
		free_recvbufs += chunk;
		total_recvbufs += chunk;
		abuf -= chunk;
	}
	++lowater_adds;
}
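
/*
 * A note on the "round up" arithmetic above (explanatory only): with
 * 'chunk' and RECV_INC both powers of two, the expression
 *
 *	abuf += (~abuf + 1) & (chunk - 1);
 *
 * rounds abuf up to the next multiple of 'chunk'.  (~abuf + 1) is the
 * two's-complement negation of abuf, and masking it with (chunk - 1)
 * leaves exactly the shortfall to the next multiple.  For example,
 * with chunk == 4 and abuf == 5, (~5 + 1) & 3 == 3, so abuf becomes 8.
 */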


void
init_recvbuff(int nbufs)
{
	/*
	 * Init buffer free list and stat counters
	 */
	free_recvbufs = total_recvbufs = 0;
	full_recvbufs = lowater_adds = 0;

	limit_recvbufs = RECV_TOOMANY;
	emerg_recvbufs = RECV_CLOCK;	/* the emergency reserve */

	create_buffers(nbufs);

# if defined(SYS_WINNT)
	InitializeCriticalSection(&RecvLock);
	InitializeCriticalSection(&FreeLock);
# endif

# ifdef DEBUG
	atexit(&uninit_recvbuff);
# endif
}


#ifdef DEBUG
static void
uninit_recvbuff(void)
{
	recvbuf_t *rbunlinked;

	for (;;) {
		UNLINK_FIFO(rbunlinked, full_recv_fifo, link);
		if (rbunlinked == NULL)
			break;
		free(rbunlinked);
	}

	for (;;) {
		UNLINK_HEAD_SLIST(rbunlinked, free_recv_list, link);
		if (rbunlinked == NULL)
			break;
		free(rbunlinked);
	}
# if defined(SYS_WINNT)
	DeleteCriticalSection(&FreeLock);
	DeleteCriticalSection(&RecvLock);
# endif
}
#endif	/* DEBUG */


/*
 * freerecvbuf - make a single recvbuf available for reuse
 */
void
freerecvbuf(recvbuf_t *rb)
{
	if (rb) {
		if (--rb->used != 0) {
			msyslog(LOG_ERR, "******** freerecvbuff non-zero usage: %d *******", rb->used);
			rb->used = 0;
		}
		LOCK_F();
		LINK_SLIST(free_recv_list, rb, link);
		++free_recvbufs;
		UNLOCK_F();
	}
}


/*
 * add_full_recv_buffer - queue a filled buffer on the full FIFO for
 *			  later processing by get_full_recv_buffer()
 */
void
add_full_recv_buffer(recvbuf_t *rb)
{
	if (rb == NULL) {
		msyslog(LOG_ERR, "add_full_recv_buffer received NULL buffer");
		return;
	}
	LOCK_R();
	LINK_FIFO(full_recv_fifo, rb, link);
	++full_recvbufs;
	UNLOCK_R();
}


/*
 * get_free_recv_buffer - pull a buffer from the free list.  Non-urgent
 *			  requests leave emerg_recvbufs buffers in
 *			  reserve; urgent requests may drain the list.
 */
recvbuf_t *
get_free_recv_buffer(
	int /*BOOL*/ urgent
	)
{
	recvbuf_t *buffer = NULL;

	LOCK_F();
	if (free_recvbufs > (urgent ? 0 : emerg_recvbufs)) {
		UNLINK_HEAD_SLIST(buffer, free_recv_list, link);
	}

	if (buffer != NULL) {
		if (free_recvbufs)
			--free_recvbufs;
		initialise_buffer(buffer);
		++buffer->used;
	} else {
		++buffer_shortfall;
	}
	UNLOCK_F();

	return buffer;
}
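

#ifdef RECVBUFF_EXAMPLE	/* hypothetical guard: illustrative sketch only, never built */
/*
 * Producer-side sketch (an assumption for illustration, not ntpd's
 * real receive path): obtain a buffer without touching the emergency
 * reserve, record which descriptor it belongs to, and queue it for
 * later processing.
 */
static void
example_queue_packet(int fd)
{
	recvbuf_t *rb;

	rb = get_free_recv_buffer(FALSE);	/* non-urgent request */
	if (rb == NULL)
		return;		/* shortfall recorded; create_buffers() catches up later */
	rb->fd = fd;
	/* ... copy the received datagram and its metadata into *rb ... */
	add_full_recv_buffer(rb);	/* now visible to get_full_recv_buffer() */
}
#endif	/* RECVBUFF_EXAMPLE */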


#ifdef HAVE_IO_COMPLETION_PORT
/*
 * get_free_recv_buffer_alloc - get a free buffer, replenishing the
 *				free list first if it is running low.
 */
recvbuf_t *
get_free_recv_buffer_alloc(
	int /*BOOL*/ urgent
	)
{
	LOCK_F();
	if (free_recvbufs <= emerg_recvbufs || buffer_shortfall > 0)
		create_buffers(RECV_INC);
	UNLOCK_F();
	return get_free_recv_buffer(urgent);
}
#endif


recvbuf_t *
get_full_recv_buffer(void)
{
	recvbuf_t *rbuf;

	/*
	 * make sure there are free buffers when we wander off to do
	 * lengthy packet processing with any buffer we grab from the
	 * full list.
	 *
	 * fixes malloc() interrupted by SIGIO risk (Bug 889)
	 */
	LOCK_F();
	if (free_recvbufs <= emerg_recvbufs || buffer_shortfall > 0)
		create_buffers(RECV_INC);
	UNLOCK_F();

	/*
	 * try to grab a full buffer
	 */
	LOCK_R();
	UNLINK_FIFO(rbuf, full_recv_fifo, link);
	if (rbuf != NULL && full_recvbufs)
		--full_recvbufs;
	UNLOCK_R();

	return rbuf;
}


/*
 * purge_recv_buffers_for_fd() - purges any previously-received input
 * from a given file descriptor.
 */
void
purge_recv_buffers_for_fd(
	int	fd
	)
{
	recvbuf_t *rbufp;
	recvbuf_t *next;
	recvbuf_t *punlinked;
	recvbuf_t *freelist = NULL;

	/* We want to hold only one lock at a time.  So we do a scan on
	 * the full buffer queue, collecting items as we go, and when
	 * done we spool the collected items to freerecvbuf().
	 */
	LOCK_R();

	for (rbufp = HEAD_FIFO(full_recv_fifo);
	     rbufp != NULL;
	     rbufp = next)
	{
		next = rbufp->link;
# ifdef HAVE_IO_COMPLETION_PORT
		if (rbufp->dstadr == NULL && rbufp->fd == fd)
# else
		if (rbufp->fd == fd)
# endif
		{
			UNLINK_MID_FIFO(punlinked, full_recv_fifo,
					rbufp, link, recvbuf_t);
			INSIST(punlinked == rbufp);
			if (full_recvbufs)
				--full_recvbufs;
			rbufp->link = freelist;
			freelist = rbufp;
		}
	}

	UNLOCK_R();

	while (freelist) {
		next = freelist->link;
		freerecvbuf(freelist);
		freelist = next;
	}
}


/*
 * Checks to see if there are buffers to process
 */
isc_boolean_t
has_full_recv_buffer(void)
{
	if (HEAD_FIFO(full_recv_fifo) != NULL)
		return (ISC_TRUE);
	else
		return (ISC_FALSE);
}


#ifdef NTP_DEBUG_LISTS_H
/*
 * check_gen_fifo_consistency - verify that a generic FIFO's tail
 *				pointer agrees with a walk of its nodes.
 */
void
check_gen_fifo_consistency(void *fifo)
{
	gen_fifo *pf;
	gen_node *pthis;
	gen_node **pptail;

	pf = fifo;
	REQUIRE((NULL == pf->phead && NULL == pf->pptail) ||
		(NULL != pf->phead && NULL != pf->pptail));

	pptail = &pf->phead;
	for (pthis = pf->phead;
	     pthis != NULL;
	     pthis = pthis->link)
		if (NULL != pthis->link)
			pptail = &pthis->link;

	REQUIRE(NULL == pf->pptail || pptail == pf->pptail);
}
#endif	/* NTP_DEBUG_LISTS_H */
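

#ifdef RECVBUFF_EXAMPLE	/* hypothetical guard: illustrative sketch only, never built */
/*
 * Consumer-side sketch (an assumption for illustration): drain the
 * full FIFO, hand each packet to a caller-supplied handler, and return
 * every buffer to the free list.  example_handle_packet() is
 * hypothetical and declared here only so the sketch is self-contained.
 */
extern void example_handle_packet(recvbuf_t *);

static void
example_drain_full_fifo(void)
{
	recvbuf_t *rbuf;

	while ((rbuf = get_full_recv_buffer()) != NULL) {
		example_handle_packet(rbuf);	/* hypothetical handler */
		freerecvbuf(rbuf);		/* back onto free_recv_list */
	}
}
#endif	/* RECVBUFF_EXAMPLE */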