/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both CONFIG_ELOOP_POLL and CONFIG_ELOOP_EPOLL
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	eloop_event_type type;
	int changed;
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;
	int epoll_max_fd;
	struct eloop_sock *epoll_table;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};

static struct eloop_data eloop;


#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


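/*
 * eloop_init - Initialize the global event loop state.
 * With the epoll backend, this also creates the epoll file descriptor.
 * Returns 0 on success, -1 on failure.
 */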
int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s\n",
			   __func__, strerror(errno));
		return -1;
	}
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}


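/*
 * Add a socket to the given table, growing the table and the backend
 * specific bookkeeping arrays (poll fd map/array or epoll table/event
 * buffer) as needed. With the epoll backend, the fd is also added to the
 * epoll set. Returns 0 on success, -1 on failure.
 */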
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
                                     int sock, eloop_sock_handler handler,
                                     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct eloop_sock *temp_table;
	struct epoll_event ev, *temp_events;
	int next;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	if (new_max_sock >= eloop.epoll_max_fd) {
		next = eloop.epoll_max_fd == 0 ? 16 : eloop.epoll_max_fd * 2;
		temp_table = os_realloc_array(eloop.epoll_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.epoll_max_fd = next;
		eloop.epoll_table = temp_table;
	}

	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. "
				   "%s\n", __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#ifdef CONFIG_ELOOP_EPOLL
	os_memset(&ev, 0, sizeof(ev));
	switch (table->type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return -1;
	}
	os_memcpy(&eloop.epoll_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
	return 0;
}


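/*
 * Remove a socket from the given table, compacting the table in place.
 * With the epoll backend, the fd is also removed from the epoll set and its
 * epoll_table slot is cleared.
 */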
static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
                                         int sock)
{
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.epoll_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
}


#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


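/*
 * Build the pollfd array and the fd -> pollfd lookup map from the reader,
 * writer, and exception tables. Returns the number of pollfd entries in use.
 */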
static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor; update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor; just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
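/*
 * Dispatch handlers for the file descriptors reported by epoll_wait().
 * Stop early if any socket table was modified by a handler, since the
 * remaining events may refer to sockets that are no longer registered.
 */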
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.epoll_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
#endif /* CONFIG_ELOOP_EPOLL */


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}


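/*
 * Register a timeout to trigger secs + usecs from now. The timeout list is
 * kept sorted by increasing expiry time. A timeout large enough to overflow
 * os_time_t is logged at debug level and dropped (it would never trigger),
 * and 0 is returned.
 */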
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume a timeout this long is
		 * effectively infinite, i.e., it would never trigger.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


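/*
 * Cancel all registered timeouts that match handler, eloop_data, and
 * user_data; ELOOP_ALL_CTX acts as a wildcard for either data pointer.
 * Returns the number of timeouts removed.
 */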
int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}


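/*
 * Shorten a matching registered timeout: if the requested time is earlier
 * than the current remaining time, re-register the timeout with the shorter
 * value. Returns 1 if the timeout was shortened, 0 if it was left unchanged,
 * and -1 if no matching timeout was found.
 */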
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


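/*
 * Extend a matching registered timeout: if the current remaining time is
 * shorter than the requested time, re-register the timeout with the longer
 * value. Returns 1 if the timeout was extended, 0 if it was left unchanged,
 * and -1 if no matching timeout was found.
 */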
int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


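/*
 * Run handlers for any signals received since the last call. This is called
 * from the main loop so that the registered handlers run in normal
 * (non-signal) context.
 */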
static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}


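/*
 * Main event loop: wait for registered sockets, timeouts, and signals using
 * the configured backend (poll, select, or epoll) and dispatch the matching
 * handlers. Runs until eloop_terminate() is called or until no sockets or
 * timeouts remain registered.
 */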
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			 /*
			  * Sockets may have been closed and reopened with the
			  * same FD in the signal or timeout handlers, so we
			  * must skip the previous results and check again
			  * whether any of the currently registered sockets have
			  * events.
			  */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
}


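/*
 * Free all remaining eloop resources: leftover timeouts (logged with their
 * remaining time), the socket tables, registered signals, and the backend
 * specific allocations.
 */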
void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_table);
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
}


int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We could use epoll() here, but that would require 4 system calls:
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and close() for
	 * the epoll fd. select() performs better here.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */
1143