xref: /freebsd/contrib/ntp/libntp/recvbuff.c (revision f5f40dd63bc7acbb5312b26ac1ea1103c12352a6)
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>

#include "ntp_assert.h"
#include "ntp_syslog.h"
#include "ntp_stdlib.h"
#include "ntp_lists.h"
#include "recvbuff.h"
#include "iosignal.h"

#if (RECV_INC & (RECV_INC-1))
# error RECV_INC not a power of 2!
#endif
#if (RECV_BATCH & (RECV_BATCH - 1))
# error RECV_BATCH not a power of 2!
#endif
#if (RECV_BATCH < RECV_INC)
# error RECV_BATCH must be >= RECV_INC!
#endif

/*
 * Memory allocation
 */
static u_long volatile full_recvbufs;	/* recvbufs on full_recv_fifo */
static u_long volatile free_recvbufs;	/* recvbufs on free_recv_list */
static u_long volatile total_recvbufs;	/* total recvbufs currently in use */
static u_long volatile lowater_adds;	/* number of times we have added memory */
static u_long volatile buffer_shortfall;/* number of missed free receive buffers
					   between replenishments */
static u_long limit_recvbufs;		/* maximum total of receive buffers */
static u_long emerg_recvbufs;		/* emergency/urgent buffers to keep */

static DECL_FIFO_ANCHOR(recvbuf_t) full_recv_fifo;
static recvbuf_t *		   free_recv_list;

#if defined(SYS_WINNT)

/*
 * For Windows we need locks to protect the receive buffer lists
 * from concurrent access and corruption.  We hold each lock for
 * as short a time as possible.
 */
static CRITICAL_SECTION RecvLock;
static CRITICAL_SECTION FreeLock;
# define LOCK_R()	EnterCriticalSection(&RecvLock)
# define UNLOCK_R()	LeaveCriticalSection(&RecvLock)
# define LOCK_F()	EnterCriticalSection(&FreeLock)
# define UNLOCK_F()	LeaveCriticalSection(&FreeLock)
#else
# define LOCK_R()	do {} while (FALSE)
# define UNLOCK_R()	do {} while (FALSE)
# define LOCK_F()	do {} while (FALSE)
# define UNLOCK_F()	do {} while (FALSE)
#endif

#ifdef DEBUG
static void uninit_recvbuff(void);
#endif


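/*
 * free_recvbuffs - return the number of recvbufs on the free list
 */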
u_long
free_recvbuffs (void)
{
	return free_recvbufs;
}

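/*
 * full_recvbuffs - return the number of recvbufs on the full FIFO
 */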
u_long
full_recvbuffs (void)
{
	return full_recvbufs;
}

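/*
 * total_recvbuffs - return the total number of recvbufs currently allocated
 */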
u_long
total_recvbuffs (void)
{
	return total_recvbufs;
}

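/*
 * lowater_additions - return the number of times we have added
 *		       receive buffers
 */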
u_long
lowater_additions(void)
{
	return lowater_adds;
}

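/*
 * initialise_buffer - zero a recvbuf before (re)use
 */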
static inline void
initialise_buffer(recvbuf_t *buff)
{
	ZERO(*buff);
}

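/*
 * create_buffers - allocate a batch of receive buffers, covering any
 *		    recorded shortfall, and add them to the free list
 *		    without exceeding limit_recvbufs.
 */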
static void
create_buffers(
	size_t			nbufs
	)
{
	static const u_int	chunk =
#   ifndef DEBUG
					RECV_INC;
#   else
	/* Allocate each buffer individually so they can be free()d
	 * during ntpd shutdown on DEBUG builds to keep them out of heap
	 * leak reports.
	 */
					1;
#   endif
	static int/*BOOL*/	doneonce;
	recvbuf_t *		bufp;
	u_int			i;
	size_t			abuf;

	/*[bug 3666]: followup -- reset shortfalls in all cases */
	abuf = nbufs + buffer_shortfall;
	buffer_shortfall = 0;

	if (limit_recvbufs <= total_recvbufs) {
		if (!doneonce) {
			msyslog(LOG_CRIT, "Unable to allocate receive"
					  " buffer, %lu/%lu",
				total_recvbufs, limit_recvbufs);
			doneonce = TRUE;
		}
		return;
	}

	if (abuf < nbufs || abuf > RECV_BATCH) {
		abuf = RECV_BATCH;	/* clamp on overflow */
	} else {
		abuf += (~abuf + 1) & (RECV_INC - 1);	/* round up */
	}
	if (abuf > (limit_recvbufs - total_recvbufs)) {
		abuf = limit_recvbufs - total_recvbufs;
	}
	abuf += (~abuf + 1) & (chunk - 1);		/* round up */

	while (abuf) {
		bufp = calloc(chunk, sizeof(*bufp));
		if (!bufp) {
			msyslog(LOG_CRIT, "Out of memory, allocating "
					  "%u recvbufs, %lu bytes",
				chunk, (u_long)sizeof(*bufp) * chunk);
			limit_recvbufs = total_recvbufs;
			break;
		}
		for (i = chunk; i; --i,++bufp) {
			LINK_SLIST(free_recv_list, bufp, link);
		}
		free_recvbufs += chunk;
		total_recvbufs += chunk;
		abuf -= chunk;
	}
	++lowater_adds;
}

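/*
 * init_recvbuff - initialize the free list and statistics counters and
 *		   allocate the initial receive buffers.
 */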
void
init_recvbuff(int nbufs)
{

	/*
	 * Init buffer free list and stat counters
	 */
	free_recvbufs = total_recvbufs = 0;
	full_recvbufs = lowater_adds = 0;

	limit_recvbufs = RECV_TOOMANY;
	emerg_recvbufs = RECV_CLOCK;

	create_buffers(nbufs);

#   if defined(SYS_WINNT)
	InitializeCriticalSection(&RecvLock);
	InitializeCriticalSection(&FreeLock);
#   endif

#   ifdef DEBUG
	atexit(&uninit_recvbuff);
#   endif
}


#ifdef DEBUG
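/*
 * uninit_recvbuff - free all buffers at exit so DEBUG builds do not
 *		     report them as heap leaks.
 */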
static void
uninit_recvbuff(void)
{
	recvbuf_t *rbunlinked;

	for (;;) {
		UNLINK_FIFO(rbunlinked, full_recv_fifo, link);
		if (rbunlinked == NULL)
			break;
		free(rbunlinked);
	}

	for (;;) {
		UNLINK_HEAD_SLIST(rbunlinked, free_recv_list, link);
		if (rbunlinked == NULL)
			break;
		free(rbunlinked);
	}
#   if defined(SYS_WINNT)
	DeleteCriticalSection(&FreeLock);
	DeleteCriticalSection(&RecvLock);
#   endif
}
#endif	/* DEBUG */


/*
 * freerecvbuf - make a single recvbuf available for reuse
 */
void
freerecvbuf(recvbuf_t *rb)
{
	if (rb) {
		if (--rb->used != 0) {
			msyslog(LOG_ERR, "******** freerecvbuf non-zero usage: %d *******", rb->used);
			rb->used = 0;
		}
		LOCK_F();
		LINK_SLIST(free_recv_list, rb, link);
		++free_recvbufs;
		UNLOCK_F();
	}
}


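/*
 * add_full_recv_buffer - queue a filled recvbuf on the full FIFO for
 *			  later processing.
 */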
void
add_full_recv_buffer(recvbuf_t *rb)
{
	if (rb == NULL) {
		msyslog(LOG_ERR, "add_full_recv_buffer received NULL buffer");
		return;
	}
	LOCK_R();
	LINK_FIFO(full_recv_fifo, rb, link);
	++full_recvbufs;
	UNLOCK_R();
}


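/*
 * get_free_recv_buffer - pull a buffer from the free list, or return
 *			  NULL if none is available.  Non-urgent callers
 *			  leave emerg_recvbufs buffers in reserve.
 */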
recvbuf_t *
get_free_recv_buffer(
    int /*BOOL*/ urgent
    )
{
	recvbuf_t *buffer = NULL;

	LOCK_F();
	if (free_recvbufs > (urgent ? 0 : emerg_recvbufs)) {
		UNLINK_HEAD_SLIST(buffer, free_recv_list, link);
	}

	if (buffer != NULL) {
		if (free_recvbufs)
			--free_recvbufs;
		initialise_buffer(buffer);
		++buffer->used;
	} else {
		++buffer_shortfall;
	}
	UNLOCK_F();

	return buffer;
}


#ifdef HAVE_IO_COMPLETION_PORT
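/*
 * get_free_recv_buffer_alloc - as get_free_recv_buffer(), but first
 *				replenish the free list if it has run low.
 */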
recvbuf_t *
get_free_recv_buffer_alloc(
    int /*BOOL*/ urgent
    )
{
	LOCK_F();
	if (free_recvbufs <= emerg_recvbufs || buffer_shortfall > 0)
		create_buffers(RECV_INC);
	UNLOCK_F();
	return get_free_recv_buffer(urgent);
}
#endif


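/*
 * get_full_recv_buffer - return the next filled buffer from the full
 *			  FIFO, or NULL if it is empty.  Tops up the
 *			  free list first if it is running low.
 */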
recvbuf_t *
get_full_recv_buffer(void)
{
	recvbuf_t *	rbuf;

	/*
	 * Make sure there are free buffers before we wander off to do
	 * lengthy packet processing with any buffer we grab from the
	 * full list.
	 *
	 * This avoids the risk of malloc() being interrupted by SIGIO
	 * (Bug 889).
	 */
	LOCK_F();
	if (free_recvbufs <= emerg_recvbufs || buffer_shortfall > 0)
		create_buffers(RECV_INC);
	UNLOCK_F();

	/*
	 * try to grab a full buffer
	 */
	LOCK_R();
	UNLINK_FIFO(rbuf, full_recv_fifo, link);
	if (rbuf != NULL && full_recvbufs)
		--full_recvbufs;
	UNLOCK_R();

	return rbuf;
}


/*
 * purge_recv_buffers_for_fd() - purges any previously-received input
 *				 from a given file descriptor.
 */
void
purge_recv_buffers_for_fd(
	int	fd
	)
{
	recvbuf_t *rbufp;
	recvbuf_t *next;
	recvbuf_t *punlinked;
	recvbuf_t *freelist = NULL;

	/* We want to hold only one lock at a time.  So we scan the
	 * full buffer queue, collecting matching items as we go, and
	 * when done we spool the collected items to freerecvbuf().
	 */
	LOCK_R();

	for (rbufp = HEAD_FIFO(full_recv_fifo);
	     rbufp != NULL;
	     rbufp = next)
	{
		next = rbufp->link;
#	    ifdef HAVE_IO_COMPLETION_PORT
		if (rbufp->dstadr == NULL && rbufp->fd == fd)
#	    else
		if (rbufp->fd == fd)
#	    endif
		{
			UNLINK_MID_FIFO(punlinked, full_recv_fifo,
					rbufp, link, recvbuf_t);
			INSIST(punlinked == rbufp);
			if (full_recvbufs)
				--full_recvbufs;
			rbufp->link = freelist;
			freelist = rbufp;
		}
	}

	UNLOCK_R();

	while (freelist) {
		next = freelist->link;
		freerecvbuf(freelist);
		freelist = next;
	}
}


/*
 * Checks to see if there are buffers to process
 */
isc_boolean_t has_full_recv_buffer(void)
{
	if (HEAD_FIFO(full_recv_fifo) != NULL)
		return (ISC_TRUE);
	else
		return (ISC_FALSE);
}


#ifdef NTP_DEBUG_LISTS_H
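/*
 * check_gen_fifo_consistency - verify that a generic FIFO's head and
 *				tail pointers are consistent with a
 *				walk of the list.
 */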
void
check_gen_fifo_consistency(void *fifo)
{
	gen_fifo *pf;
	gen_node *pthis;
	gen_node **pptail;

	pf = fifo;
	REQUIRE((NULL == pf->phead && NULL == pf->pptail) ||
		(NULL != pf->phead && NULL != pf->pptail));

	pptail = &pf->phead;
	for (pthis = pf->phead;
	     pthis != NULL;
	     pthis = pthis->link)
		if (NULL != pthis->link)
			pptail = &pthis->link;

	REQUIRE(NULL == pf->pptail || pptail == pf->pptail);
}
#endif	/* NTP_DEBUG_LISTS_H */