/* xref: /freebsd/contrib/ntp/libntp/recvbuff.c (revision 9a14aa017b21c292740c00ee098195cd46642730) */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>
#include "ntp_machine.h"
#include "ntp_fp.h"
#include "ntp_syslog.h"
#include "ntp_stdlib.h"
#include "ntp_io.h"
#include "recvbuff.h"
#include "iosignal.h"

#include <isc/list.h>
/*
 * Memory allocation
 */
static u_long volatile full_recvbufs;	/* number of recvbufs on the full list */
static u_long volatile free_recvbufs;	/* number of recvbufs on the free list */
static u_long volatile total_recvbufs;	/* total recvbufs currently allocated */
static u_long volatile lowater_adds;	/* number of times we have added memory */
static u_long volatile buffer_shortfall;/* number of missed free receive buffers
                                           between replenishments */

static ISC_LIST(recvbuf_t)	full_recv_list;	/* Currently used recv buffers */
static ISC_LIST(recvbuf_t)	free_recv_list;	/* Currently unused buffers */

#if defined(SYS_WINNT)

/*
 * For Windows we need to set up a lock to prevent corruption while
 * manipulating the recv buffers.  We keep it locked for as short a
 * time as possible.
 */
static CRITICAL_SECTION RecvLock;
# define LOCK()		EnterCriticalSection(&RecvLock)
# define UNLOCK()	LeaveCriticalSection(&RecvLock)
#else
# define LOCK()
# define UNLOCK()
#endif

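/*
 * Accessor functions for the buffer statistics kept above.
 */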
u_long
free_recvbuffs (void)
{
	return free_recvbufs;
}

u_long
full_recvbuffs (void)
{
	return full_recvbufs;
}

u_long
total_recvbuffs (void)
{
	return total_recvbufs;
}

u_long
lowater_additions(void)
{
	return lowater_adds;
}

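/*
 * initialise_buffer - zero a buffer and, under Windows, point its
 * WSABUF at the receive space
 */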
static void
initialise_buffer(recvbuf_t *buff)
{
	memset((char *) buff, 0, sizeof(recvbuf_t));

#if defined SYS_WINNT
	buff->wsabuff.len = RX_BUFF_SIZE;
	buff->wsabuff.buf = (char *) buff->recv_buffer;
#endif
}

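/*
 * create_buffers - allocate nbufs new buffers (plus any recorded
 * shortfall), add them to the free list, and update the counters
 */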
static void
create_buffers(int nbufs)
{
	register recvbuf_t *bufp;
	int i, abuf;

	abuf = nbufs + buffer_shortfall;
	buffer_shortfall = 0;

	bufp = (recvbuf_t *) emalloc(abuf * sizeof(recvbuf_t));

	for (i = 0; i < abuf; i++)
	{
		memset((char *) bufp, 0, sizeof(recvbuf_t));
		ISC_LIST_APPEND(free_recv_list, bufp, link);
		bufp++;
		free_recvbufs++;
		total_recvbufs++;
	}
	lowater_adds++;
}

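/*
 * init_recvbuff - initialise the lists and counters and allocate the
 * initial pool of receive buffers
 */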
void
init_recvbuff(int nbufs)
{

	/*
	 * Init buffer free list and stat counters
	 */
	ISC_LIST_INIT(full_recv_list);
	ISC_LIST_INIT(free_recv_list);
	free_recvbufs = total_recvbufs = 0;
	full_recvbufs = lowater_adds = 0;

	create_buffers(nbufs);

#if defined(SYS_WINNT)
	InitializeCriticalSection(&RecvLock);
#endif

}

/*
 * freerecvbuf - make a single recvbuf available for reuse
 */
void
freerecvbuf(recvbuf_t *rb)
{
	if (rb == NULL) {
		msyslog(LOG_ERR, "freerecvbuf received NULL buffer");
		return;
	}

	LOCK();
	(rb->used)--;
	if (rb->used != 0)
		msyslog(LOG_ERR, "******** freerecvbuf non-zero usage: %d *******", rb->used);
	ISC_LIST_APPEND(free_recv_list, rb, link);
#if defined SYS_WINNT
	rb->wsabuff.len = RX_BUFF_SIZE;
	rb->wsabuff.buf = (char *) rb->recv_buffer;
#endif
	free_recvbufs++;
	UNLOCK();
}


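/*
 * add_full_recv_buffer - append a filled buffer to the full list for
 * later processing
 */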
void
add_full_recv_buffer(recvbuf_t *rb)
{
	if (rb == NULL) {
		msyslog(LOG_ERR, "add_full_recv_buffer received NULL buffer");
		return;
	}
	LOCK();
	ISC_LIST_APPEND(full_recv_list, rb, link);
	full_recvbufs++;
	UNLOCK();
}

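/*
 * get_free_recv_buffer - take a buffer from the free list, or record a
 * shortfall if none is available
 */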
recvbuf_t *
get_free_recv_buffer(void)
{
	recvbuf_t * buffer = NULL;
	LOCK();
	buffer = ISC_LIST_HEAD(free_recv_list);
	if (buffer != NULL)
	{
		ISC_LIST_DEQUEUE(free_recv_list, buffer, link);
		free_recvbufs--;
		initialise_buffer(buffer);
		(buffer->used)++;
	}
	else
	{
		buffer_shortfall++;
	}
	UNLOCK();
	return (buffer);
}

#ifdef HAVE_IO_COMPLETION_PORT
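/*
 * get_free_recv_buffer_alloc - as above, but grow the pool when the
 * free list is empty (built only when I/O completion ports are in use)
 */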
recvbuf_t *
get_free_recv_buffer_alloc(void)
{
	recvbuf_t * buffer = get_free_recv_buffer();
	if (buffer == NULL)
	{
		create_buffers(RECV_INC);
		buffer = get_free_recv_buffer();
	}
	return (buffer);
}
#endif

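/*
 * get_full_recv_buffer - dequeue the next filled buffer; with signaled
 * I/O, replenish the free list first (see the Bug 889 note below)
 */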
recvbuf_t *
get_full_recv_buffer(void)
{
	recvbuf_t *rbuf;
	LOCK();

#ifdef HAVE_SIGNALED_IO
	/*
	 * Make sure there are free buffers when we wander off to do
	 * lengthy packet processing with any buffer we grab from the
	 * full list.
	 *
	 * Fixes the risk of malloc() being interrupted by SIGIO
	 * (Bug 889).
	 */
	rbuf = ISC_LIST_HEAD(free_recv_list);
	if (rbuf == NULL || buffer_shortfall > 0) {
		/*
		 * try to get us some more buffers
		 */
		create_buffers(RECV_INC);
	}
#endif

	/*
	 * try to grab a full buffer
	 */
	rbuf = ISC_LIST_HEAD(full_recv_list);
	if (rbuf != NULL)
	{
		ISC_LIST_DEQUEUE(full_recv_list, rbuf, link);
		--full_recvbufs;
	}
	else
	{
		/*
		 * Make sure we reset the full count to 0
		 */
		full_recvbufs = 0;
	}
	UNLOCK();
	return (rbuf);
}

/*
 * has_full_recv_buffer - checks to see if there are buffers to process
 */
isc_boolean_t
has_full_recv_buffer(void)
{
	if (ISC_LIST_HEAD(full_recv_list) != NULL)
		return (ISC_TRUE);
	else
		return (ISC_FALSE);
}
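
/*
 * Illustrative sketch (not part of the original source): the buffer
 * life cycle implied by the routines above.  An I/O producer takes an
 * empty buffer from the free list, fills it, and queues it on the full
 * list; a consumer later drains the full list and returns each buffer
 * with freerecvbuf().  The function name and the fill step are
 * hypothetical, and the block is compiled out.
 */
#if 0
static void
example_recvbuf_cycle(void)
{
	recvbuf_t *rb;

	/* producer: obtain an empty buffer (NULL records a shortfall) */
	rb = get_free_recv_buffer();
	if (rb == NULL)
		return;

	/* ... copy the received packet into rb here ... */

	/* hand the filled buffer over for processing */
	add_full_recv_buffer(rb);

	/* consumer: drain the full list and recycle each buffer */
	while ((rb = get_full_recv_buffer()) != NULL) {
		/* ... process the packet held by rb ... */
		freerecvbuf(rb);
	}
}
#endif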
247