1  /*
2   * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3   * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4   *
5   * Redistribution and use in source and binary forms, with or without
6   * modification, are permitted provided that the following conditions
7   * are met:
8   * 1. Redistributions of source code must retain the above copyright
9   *    notice, this list of conditions and the following disclaimer.
10   * 2. Redistributions in binary form must reproduce the above copyright
11   *    notice, this list of conditions and the following disclaimer in the
12   *    documentation and/or other materials provided with the distribution.
13   * 3. The name of the author may not be used to endorse or promote products
14   *    derived from this software without specific prior written permission.
15   *
16   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17   * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19   * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20   * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22   * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23   * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24   * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25   * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26   */
27  #include "event2/event-config.h"
28  #include "evconfig-private.h"
29  
30  #ifdef _WIN32
31  #include <winsock2.h>
32  #define WIN32_LEAN_AND_MEAN
33  #include <windows.h>
34  #undef WIN32_LEAN_AND_MEAN
35  #endif
36  #include <sys/types.h>
37  #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
38  #include <sys/time.h>
39  #endif
40  #include <sys/queue.h>
41  #ifdef EVENT__HAVE_SYS_SOCKET_H
42  #include <sys/socket.h>
43  #endif
44  #include <stdio.h>
45  #include <stdlib.h>
46  #ifdef EVENT__HAVE_UNISTD_H
47  #include <unistd.h>
48  #endif
49  #include <ctype.h>
50  #include <errno.h>
51  #include <signal.h>
52  #include <string.h>
53  #include <time.h>
54  #include <limits.h>
55  
56  #include "event2/event.h"
57  #include "event2/event_struct.h"
58  #include "event2/event_compat.h"
59  #include "event-internal.h"
60  #include "defer-internal.h"
61  #include "evthread-internal.h"
62  #include "event2/thread.h"
63  #include "event2/util.h"
64  #include "log-internal.h"
65  #include "evmap-internal.h"
66  #include "iocp-internal.h"
67  #include "changelist-internal.h"
68  #define HT_NO_CACHE_HASH_VALUES
69  #include "ht-internal.h"
70  #include "util-internal.h"
71  
72  
73  #ifdef EVENT__HAVE_WORKING_KQUEUE
74  #include "kqueue-internal.h"
75  #endif
76  
77  #ifdef EVENT__HAVE_EVENT_PORTS
78  extern const struct eventop evportops;
79  #endif
80  #ifdef EVENT__HAVE_SELECT
81  extern const struct eventop selectops;
82  #endif
83  #ifdef EVENT__HAVE_POLL
84  extern const struct eventop pollops;
85  #endif
86  #ifdef EVENT__HAVE_EPOLL
87  extern const struct eventop epollops;
88  #endif
89  #ifdef EVENT__HAVE_WORKING_KQUEUE
90  extern const struct eventop kqops;
91  #endif
92  #ifdef EVENT__HAVE_DEVPOLL
93  extern const struct eventop devpollops;
94  #endif
95  #ifdef _WIN32
96  extern const struct eventop win32ops;
97  #endif
98  
99  /* Array of backends in order of preference. */
100  static const struct eventop *eventops[] = {
101  #ifdef EVENT__HAVE_EVENT_PORTS
102  	&evportops,
103  #endif
104  #ifdef EVENT__HAVE_WORKING_KQUEUE
105  	&kqops,
106  #endif
107  #ifdef EVENT__HAVE_EPOLL
108  	&epollops,
109  #endif
110  #ifdef EVENT__HAVE_DEVPOLL
111  	&devpollops,
112  #endif
113  #ifdef EVENT__HAVE_POLL
114  	&pollops,
115  #endif
116  #ifdef EVENT__HAVE_SELECT
117  	&selectops,
118  #endif
119  #ifdef _WIN32
120  	&win32ops,
121  #endif
122  	NULL
123  };
124  
125  /* Global state; deprecated */
126  struct event_base *event_global_current_base_ = NULL;
127  #define current_base event_global_current_base_
128  
129  /* Global state */
130  
131  static void *event_self_cbarg_ptr_ = NULL;
132  
133  /* Prototypes */
134  static void	event_queue_insert_active(struct event_base *, struct event_callback *);
135  static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
136  static void	event_queue_insert_timeout(struct event_base *, struct event *);
137  static void	event_queue_insert_inserted(struct event_base *, struct event *);
138  static void	event_queue_remove_active(struct event_base *, struct event_callback *);
139  static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
140  static void	event_queue_remove_timeout(struct event_base *, struct event *);
141  static void	event_queue_remove_inserted(struct event_base *, struct event *);
142  static void event_queue_make_later_events_active(struct event_base *base);
143  
144  static int evthread_make_base_notifiable_nolock_(struct event_base *base);
145  static int event_del_(struct event *ev, int blocking);
146  
147  #ifdef USE_REINSERT_TIMEOUT
148  /* This code seems buggy; only turn it on if we find out what the trouble is. */
149  static void	event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
150  #endif
151  
152  static int	event_haveevents(struct event_base *);
153  
154  static int	event_process_active(struct event_base *);
155  
156  static int	timeout_next(struct event_base *, struct timeval **);
157  static void	timeout_process(struct event_base *);
158  
159  static inline void	event_signal_closure(struct event_base *, struct event *ev);
160  static inline void	event_persist_closure(struct event_base *, struct event *ev);
161  
162  static int	evthread_notify_base(struct event_base *base);
163  
164  static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
165      struct event *ev);
166  
167  #ifndef EVENT__DISABLE_DEBUG_MODE
168  /* These functions implement a hashtable of which 'struct event *' structures
169   * have been setup or added.  We don't want to trust the content of the struct
170   * event itself, since we're trying to work through cases where an event gets
171   * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
172   */
173  
174  struct event_debug_entry {
175  	HT_ENTRY(event_debug_entry) node;
176  	const struct event *ptr;
177  	unsigned added : 1;
178  };
179  
180  static inline unsigned
181  hash_debug_entry(const struct event_debug_entry *e)
182  {
183  	/* We need to do this silliness to convince compilers that we
184  	 * honestly mean to cast e->ptr to an integer, and discard any
185  	 * part of it that doesn't fit in an unsigned.
186  	 */
187  	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
188  	/* Our hashtable implementation is pretty sensitive to low bits,
189  	 * and every struct event is over 64 bytes in size, so we can
190  	 * just say >>6. */
191  	return (u >> 6);
192  }
193  
194  static inline int
195  eq_debug_entry(const struct event_debug_entry *a,
196      const struct event_debug_entry *b)
197  {
198  	return a->ptr == b->ptr;
199  }
200  
201  int event_debug_mode_on_ = 0;
202  
203  
204  #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
205  /**
206   * @brief debug mode variable which is set for any function/structure that needs
207   *        to be shared across threads (if thread support is enabled).
208   *
209   *        When and if evthreads are initialized, this variable will be evaluated,
210   *        and if set to something other than zero, this means the evthread setup
211   *        functions were called out of order.
212   *
213   *        See: "Locks and threading" in the documentation.
214   */
215  int event_debug_created_threadable_ctx_ = 0;
216  #endif
217  
218  /* Set if it's too late to enable event_debug_mode. */
219  static int event_debug_mode_too_late = 0;
220  #ifndef EVENT__DISABLE_THREAD_SUPPORT
221  static void *event_debug_map_lock_ = NULL;
222  #endif
223  static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
224  	HT_INITIALIZER();
225  
226  HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
227      eq_debug_entry)
228  HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
229      eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
230  
231  /* Macro: record that ev is now setup (that is, ready for an add) */
232  #define event_debug_note_setup_(ev) do {				\
233  	if (event_debug_mode_on_) {					\
234  		struct event_debug_entry *dent,find;			\
235  		find.ptr = (ev);					\
236  		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
237  		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
238  		if (dent) {						\
239  			dent->added = 0;				\
240  		} else {						\
241  			dent = mm_malloc(sizeof(*dent));		\
242  			if (!dent)					\
243  				event_err(1,				\
244  				    "Out of memory in debugging code");	\
245  			dent->ptr = (ev);				\
246  			dent->added = 0;				\
247  			HT_INSERT(event_debug_map, &global_debug_map, dent); \
248  		}							\
249  		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
250  	}								\
251  	event_debug_mode_too_late = 1;					\
252  	} while (0)
253  /* Macro: record that ev is no longer setup */
254  #define event_debug_note_teardown_(ev) do {				\
255  	if (event_debug_mode_on_) {					\
256  		struct event_debug_entry *dent,find;			\
257  		find.ptr = (ev);					\
258  		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
259  		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
260  		if (dent)						\
261  			mm_free(dent);					\
262  		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
263  	}								\
264  	event_debug_mode_too_late = 1;					\
265  	} while (0)
266  /* Macro: record that ev is now added */
267  #define event_debug_note_add_(ev)	do {				\
268  	if (event_debug_mode_on_) {					\
269  		struct event_debug_entry *dent,find;			\
270  		find.ptr = (ev);					\
271  		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
272  		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
273  		if (dent) {						\
274  			dent->added = 1;				\
275  		} else {						\
276  			event_errx(EVENT_ERR_ABORT_,			\
277  			    "%s: noting an add on a non-setup event %p" \
278  			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
279  			    ", flags: 0x%x)",				\
280  			    __func__, (ev), (ev)->ev_events,		\
281  			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
282  		}							\
283  		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
284  	}								\
285  	event_debug_mode_too_late = 1;					\
286  	} while (0)
287  /* Macro: record that ev is no longer added */
288  #define event_debug_note_del_(ev) do {					\
289  	if (event_debug_mode_on_) {					\
290  		struct event_debug_entry *dent,find;			\
291  		find.ptr = (ev);					\
292  		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
293  		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
294  		if (dent) {						\
295  			dent->added = 0;				\
296  		} else {						\
297  			event_errx(EVENT_ERR_ABORT_,			\
298  			    "%s: noting a del on a non-setup event %p"	\
299  			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
300  			    ", flags: 0x%x)",				\
301  			    __func__, (ev), (ev)->ev_events,		\
302  			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
303  		}							\
304  		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
305  	}								\
306  	event_debug_mode_too_late = 1;					\
307  	} while (0)
308  /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
309  #define event_debug_assert_is_setup_(ev) do {				\
310  	if (event_debug_mode_on_) {					\
311  		struct event_debug_entry *dent,find;			\
312  		find.ptr = (ev);					\
313  		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
314  		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
315  		if (!dent) {						\
316  			event_errx(EVENT_ERR_ABORT_,			\
317  			    "%s called on a non-initialized event %p"	\
318  			    " (events: 0x%x, fd: "EV_SOCK_FMT\
319  			    ", flags: 0x%x)",				\
320  			    __func__, (ev), (ev)->ev_events,		\
321  			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
322  		}							\
323  		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
324  	}								\
325  	} while (0)
326  /* Macro: assert that ev is not added (i.e., okay to tear down or set
327   * up again) */
328  #define event_debug_assert_not_added_(ev) do {				\
329  	if (event_debug_mode_on_) {					\
330  		struct event_debug_entry *dent,find;			\
331  		find.ptr = (ev);					\
332  		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
333  		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
334  		if (dent && dent->added) {				\
335  			event_errx(EVENT_ERR_ABORT_,			\
336  			    "%s called on an already added event %p"	\
337  			    " (events: 0x%x, fd: "EV_SOCK_FMT", "	\
338  			    "flags: 0x%x)",				\
339  			    __func__, (ev), (ev)->ev_events,		\
340  			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
341  		}							\
342  		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
343  	}								\
344  	} while (0)
345  #else
346  #define event_debug_note_setup_(ev) \
347  	((void)0)
348  #define event_debug_note_teardown_(ev) \
349  	((void)0)
350  #define event_debug_note_add_(ev) \
351  	((void)0)
352  #define event_debug_note_del_(ev) \
353  	((void)0)
354  #define event_debug_assert_is_setup_(ev) \
355  	((void)0)
356  #define event_debug_assert_not_added_(ev) \
357  	((void)0)
358  #endif
359  
360  #define EVENT_BASE_ASSERT_LOCKED(base)		\
361  	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
362  
363  /* How often (in seconds) do we check for changes in wall clock time relative
364   * to monotonic time?  Set this to -1 for 'never.' */
365  #define CLOCK_SYNC_INTERVAL 5
366  
367  /** Set 'tp' to the current time according to 'base'.  We must hold the lock
368   * on 'base'.  If there is a cached time, return it.  Otherwise, use
369   * clock_gettime or gettimeofday as appropriate to find out the right time.
370   * Return 0 on success, -1 on failure.
371   */
372  static int
373  gettime(struct event_base *base, struct timeval *tp)
374  {
375  	EVENT_BASE_ASSERT_LOCKED(base);
376  
377  	if (base->tv_cache.tv_sec) {
378  		*tp = base->tv_cache;
379  		return (0);
380  	}
381  
382  	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
383  		return -1;
384  	}
385  
386  	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
387  	    < tp->tv_sec) {
388  		struct timeval tv;
389  		evutil_gettimeofday(&tv,NULL);
390  		evutil_timersub(&tv, tp, &base->tv_clock_diff);
391  		base->last_updated_clock_diff = tp->tv_sec;
392  	}
393  
394  	return 0;
395  }
396  
397  int
398  event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
399  {
400  	int r;
401  	if (!base) {
402  		base = current_base;
403  		if (!current_base)
404  			return evutil_gettimeofday(tv, NULL);
405  	}
406  
407  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
408  	if (base->tv_cache.tv_sec == 0) {
409  		r = evutil_gettimeofday(tv, NULL);
410  	} else {
411  		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
412  		r = 0;
413  	}
414  	EVBASE_RELEASE_LOCK(base, th_base_lock);
415  	return r;
416  }
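/* Illustration (added): inside a running loop it is cheaper to read the
 * cached wall-clock time than to call evutil_gettimeofday() again.
 * Hypothetical callback, public API only: */
#if 0
static void
example_timestamp_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval now;
	/* Returns the cached time when available, otherwise falls back to
	 * a real gettimeofday(). */
	event_base_gettimeofday_cached(base, &now);
	(void)fd; (void)what; (void)now;
}
#endif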
417  
418  /** Make 'base' have no current cached time. */
419  static inline void
420  clear_time_cache(struct event_base *base)
421  {
422  	base->tv_cache.tv_sec = 0;
423  }
424  
425  /** Replace the cached time in 'base' with the current time. */
426  static inline void
427  update_time_cache(struct event_base *base)
428  {
429  	base->tv_cache.tv_sec = 0;
430  	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
431  	    gettime(base, &base->tv_cache);
432  }
433  
434  int
435  event_base_update_cache_time(struct event_base *base)
436  {
437  
438  	if (!base) {
439  		base = current_base;
440  		if (!current_base)
441  			return -1;
442  	}
443  
444  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
445  	if (base->running_loop)
446  		update_time_cache(base);
447  	EVBASE_RELEASE_LOCK(base, th_base_lock);
448  	return 0;
449  }
450  
451  static inline struct event *
452  event_callback_to_event(struct event_callback *evcb)
453  {
454  	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
455  	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
456  }
457  
458  static inline struct event_callback *
459  event_to_event_callback(struct event *ev)
460  {
461  	return &ev->ev_evcallback;
462  }
463  
464  struct event_base *
465  event_init(void)
466  {
467  	struct event_base *base = event_base_new_with_config(NULL);
468  
469  	if (base == NULL) {
470  		event_errx(1, "%s: Unable to construct event_base", __func__);
471  		return NULL;
472  	}
473  
474  	current_base = base;
475  
476  	return (base);
477  }
478  
479  struct event_base *
480  event_base_new(void)
481  {
482  	struct event_base *base = NULL;
483  	struct event_config *cfg = event_config_new();
484  	if (cfg) {
485  		base = event_base_new_with_config(cfg);
486  		event_config_free(cfg);
487  	}
488  	return base;
489  }
490  
491  /** Return true iff 'method' is the name of a method that 'cfg' tells us to
492   * avoid. */
493  static int
494  event_config_is_avoided_method(const struct event_config *cfg,
495      const char *method)
496  {
497  	struct event_config_entry *entry;
498  
499  	TAILQ_FOREACH(entry, &cfg->entries, next) {
500  		if (entry->avoid_method != NULL &&
501  		    strcmp(entry->avoid_method, method) == 0)
502  			return (1);
503  	}
504  
505  	return (0);
506  }
507  
508  /** Return true iff 'method' is disabled according to the environment. */
509  static int
510  event_is_method_disabled(const char *name)
511  {
512  	char environment[64];
513  	int i;
514  
515  	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
516  	for (i = 8; environment[i] != '\0'; ++i)
517  		environment[i] = EVUTIL_TOUPPER_(environment[i]);
518  	/* Note that evutil_getenv_() ignores the environment entirely if
519  	 * we're setuid */
520  	return (evutil_getenv_(environment) != NULL);
521  }
522  
523  int
524  event_base_get_features(const struct event_base *base)
525  {
526  	return base->evsel->features;
527  }
528  
529  void
530  event_enable_debug_mode(void)
531  {
532  #ifndef EVENT__DISABLE_DEBUG_MODE
533  	if (event_debug_mode_on_)
534  		event_errx(1, "%s was called twice!", __func__);
535  	if (event_debug_mode_too_late)
536  		event_errx(1, "%s must be called *before* creating any events "
537  		    "or event_bases",__func__);
538  
539  	event_debug_mode_on_ = 1;
540  
541  	HT_INIT(event_debug_map, &global_debug_map);
542  #endif
543  }
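/* Illustration (added): debug mode must be switched on before any event or
 * event_base exists, or the event_debug_mode_too_late check above aborts.
 * Hypothetical call order; libevent_global_shutdown() is the 2.1+ helper
 * that releases libevent's global state, including the debug map above: */
#if 0
int
main(void)
{
	struct event_base *base;
	event_enable_debug_mode();	/* first, before any base or event */
	base = event_base_new();	/* now safe to create bases/events */
	/* ... assign, add and dispatch events; misuse is reported
	 * through event_errx() by the event_debug_* macros above ... */
	event_base_free(base);
	libevent_global_shutdown();
	return 0;
}
#endif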
544  
545  void
546  event_disable_debug_mode(void)
547  {
548  #ifndef EVENT__DISABLE_DEBUG_MODE
549  	struct event_debug_entry **ent, *victim;
550  
551  	EVLOCK_LOCK(event_debug_map_lock_, 0);
552  	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
553  		victim = *ent;
554  		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
555  		mm_free(victim);
556  	}
557  	HT_CLEAR(event_debug_map, &global_debug_map);
558  	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
559  
560  	event_debug_mode_on_  = 0;
561  #endif
562  }
563  
564  struct event_base *
565  event_base_new_with_config(const struct event_config *cfg)
566  {
567  	int i;
568  	struct event_base *base;
569  	int should_check_environment;
570  
571  #ifndef EVENT__DISABLE_DEBUG_MODE
572  	event_debug_mode_too_late = 1;
573  #endif
574  
575  	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
576  		event_warn("%s: calloc", __func__);
577  		return NULL;
578  	}
579  
580  	if (cfg)
581  		base->flags = cfg->flags;
582  
583  	should_check_environment =
584  	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
585  
586  	{
587  		struct timeval tmp;
588  		int precise_time =
589  		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
590  		int flags;
591  		if (should_check_environment && !precise_time) {
592  			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
593  			base->flags |= precise_time ? EVENT_BASE_FLAG_PRECISE_TIMER : 0; /* only set when actually enabled */
594  		}
595  		flags = precise_time ? EV_MONOT_PRECISE : 0;
596  		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
597  
598  		gettime(base, &tmp);
599  	}
600  
601  	min_heap_ctor_(&base->timeheap);
602  
603  	base->sig.ev_signal_pair[0] = -1;
604  	base->sig.ev_signal_pair[1] = -1;
605  	base->th_notify_fd[0] = -1;
606  	base->th_notify_fd[1] = -1;
607  
608  	TAILQ_INIT(&base->active_later_queue);
609  
610  	evmap_io_initmap_(&base->io);
611  	evmap_signal_initmap_(&base->sigmap);
612  	event_changelist_init_(&base->changelist);
613  
614  	base->evbase = NULL;
615  
616  	if (cfg) {
617  		memcpy(&base->max_dispatch_time,
618  		    &cfg->max_dispatch_interval, sizeof(struct timeval));
619  		base->limit_callbacks_after_prio =
620  		    cfg->limit_callbacks_after_prio;
621  	} else {
622  		base->max_dispatch_time.tv_sec = -1;
623  		base->limit_callbacks_after_prio = 1;
624  	}
625  	if (cfg && cfg->max_dispatch_callbacks >= 0) {
626  		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
627  	} else {
628  		base->max_dispatch_callbacks = INT_MAX;
629  	}
630  	if (base->max_dispatch_callbacks == INT_MAX &&
631  	    base->max_dispatch_time.tv_sec == -1)
632  		base->limit_callbacks_after_prio = INT_MAX;
633  
634  	for (i = 0; eventops[i] && !base->evbase; i++) {
635  		if (cfg != NULL) {
636  			/* determine if this backend should be avoided */
637  			if (event_config_is_avoided_method(cfg,
638  				eventops[i]->name))
639  				continue;
640  			if ((eventops[i]->features & cfg->require_features)
641  			    != cfg->require_features)
642  				continue;
643  		}
644  
645  		/* also obey the environment variables */
646  		if (should_check_environment &&
647  		    event_is_method_disabled(eventops[i]->name))
648  			continue;
649  
650  		base->evsel = eventops[i];
651  
652  		base->evbase = base->evsel->init(base);
653  	}
654  
655  	if (base->evbase == NULL) {
656  		event_warnx("%s: no event mechanism available",
657  		    __func__);
658  		base->evsel = NULL;
659  		event_base_free(base);
660  		return NULL;
661  	}
662  
663  	if (evutil_getenv_("EVENT_SHOW_METHOD"))
664  		event_msgx("libevent using: %s", base->evsel->name);
665  
666  	/* allocate a single active event queue */
667  	if (event_base_priority_init(base, 1) < 0) {
668  		event_base_free(base);
669  		return NULL;
670  	}
671  
672  	/* prepare for threading */
673  
674  #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
675  	event_debug_created_threadable_ctx_ = 1;
676  #endif
677  
678  #ifndef EVENT__DISABLE_THREAD_SUPPORT
679  	if (EVTHREAD_LOCKING_ENABLED() &&
680  	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
681  		int r;
682  		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
683  		EVTHREAD_ALLOC_COND(base->current_event_cond);
684  		r = evthread_make_base_notifiable(base);
685  		if (r<0) {
686  			event_warnx("%s: Unable to make base notifiable.", __func__);
687  			event_base_free(base);
688  			return NULL;
689  		}
690  	}
691  #endif
692  
693  #ifdef _WIN32
694  	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
695  		event_base_start_iocp_(base, cfg->n_cpus_hint);
696  #endif
697  
698  	return (base);
699  }
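/* Illustration (added): a typical caller of event_base_new_with_config().
 * EV_FEATURE_O1 and EVENT_BASE_FLAG_PRECISE_TIMER come from
 * <event2/event.h>; the particular combination is just an example and the
 * example_* name is hypothetical. */
#if 0
static struct event_base *
example_configured_base(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (!cfg)
		return NULL;
	/* Require an O(1) backend such as epoll or kqueue. */
	event_config_require_features(cfg, EV_FEATURE_O1);
	/* Same effect as the EVENT_PRECISE_TIMER environment check above. */
	event_config_set_flag(cfg, EVENT_BASE_FLAG_PRECISE_TIMER);
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	return base;	/* NULL if no backend satisfies the requirements */
}
#endif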
700  
701  int
702  event_base_start_iocp_(struct event_base *base, int n_cpus)
703  {
704  #ifdef _WIN32
705  	if (base->iocp)
706  		return 0;
707  	base->iocp = event_iocp_port_launch_(n_cpus);
708  	if (!base->iocp) {
709  		event_warnx("%s: Couldn't launch IOCP", __func__);
710  		return -1;
711  	}
712  	return 0;
713  #else
714  	return -1;
715  #endif
716  }
717  
718  void
719  event_base_stop_iocp_(struct event_base *base)
720  {
721  #ifdef _WIN32
722  	int rv;
723  
724  	if (!base->iocp)
725  		return;
726  	rv = event_iocp_shutdown_(base->iocp, -1);
727  	EVUTIL_ASSERT(rv >= 0);
728  	base->iocp = NULL;
729  #endif
730  }
731  
732  static int
733  event_base_cancel_single_callback_(struct event_base *base,
734      struct event_callback *evcb,
735      int run_finalizers)
736  {
737  	int result = 0;
738  
739  	if (evcb->evcb_flags & EVLIST_INIT) {
740  		struct event *ev = event_callback_to_event(evcb);
741  		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
742  			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
743  			result = 1;
744  		}
745  	} else {
746  		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
747  		event_callback_cancel_nolock_(base, evcb, 1);
748  		EVBASE_RELEASE_LOCK(base, th_base_lock);
749  		result = 1;
750  	}
751  
752  	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
753  		switch (evcb->evcb_closure) {
754  		case EV_CLOSURE_EVENT_FINALIZE:
755  		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
756  			struct event *ev = event_callback_to_event(evcb);
757  			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
758  			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
759  				mm_free(ev);
760  			break;
761  		}
762  		case EV_CLOSURE_CB_FINALIZE:
763  			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
764  			break;
765  		default:
766  			break;
767  		}
768  	}
769  	return result;
770  }
771  
772  static int event_base_free_queues_(struct event_base *base, int run_finalizers)
773  {
774  	int deleted = 0, i;
775  
776  	for (i = 0; i < base->nactivequeues; ++i) {
777  		struct event_callback *evcb, *next;
778  		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
779  			next = TAILQ_NEXT(evcb, evcb_active_next);
780  			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
781  			evcb = next;
782  		}
783  	}
784  
785  	{
786  		struct event_callback *evcb;
787  		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
788  			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
789  		}
790  	}
791  
792  	return deleted;
793  }
794  
795  static void
796  event_base_free_(struct event_base *base, int run_finalizers)
797  {
798  	int i, n_deleted=0;
799  	struct event *ev;
800  	/* XXXX grab the lock? If there is contention when one thread frees
801  	 * the base, then the contending thread will be very sad soon. */
802  
803  	/* event_base_free(NULL) is how to free the current_base if we
804  	 * made it with event_init and forgot to hold a reference to it. */
805  	if (base == NULL && current_base)
806  		base = current_base;
807  	/* Don't actually free NULL. */
808  	if (base == NULL) {
809  		event_warnx("%s: no base to free", __func__);
810  		return;
811  	}
812  	/* XXX(niels) - check for internal events first */
813  
814  #ifdef _WIN32
815  	event_base_stop_iocp_(base);
816  #endif
817  
818  	/* threading fds if we have them */
819  	if (base->th_notify_fd[0] != -1) {
820  		event_del(&base->th_notify);
821  		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
822  		if (base->th_notify_fd[1] != -1)
823  			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
824  		base->th_notify_fd[0] = -1;
825  		base->th_notify_fd[1] = -1;
826  		event_debug_unassign(&base->th_notify);
827  	}
828  
829  	/* Delete all non-internal events. */
830  	evmap_delete_all_(base);
831  
832  	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
833  		event_del(ev);
834  		++n_deleted;
835  	}
836  	for (i = 0; i < base->n_common_timeouts; ++i) {
837  		struct common_timeout_list *ctl =
838  		    base->common_timeout_queues[i];
839  		event_del(&ctl->timeout_event); /* Internal; doesn't count */
840  		event_debug_unassign(&ctl->timeout_event);
841  		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
842  			struct event *next = TAILQ_NEXT(ev,
843  			    ev_timeout_pos.ev_next_with_common_timeout);
844  			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
845  				event_del(ev);
846  				++n_deleted;
847  			}
848  			ev = next;
849  		}
850  		mm_free(ctl);
851  	}
852  	if (base->common_timeout_queues)
853  		mm_free(base->common_timeout_queues);
854  
855  	for (;;) {
856  		/* A finalizer can register yet another finalizer from within
857  		 * itself; if that finalizer lands in active_later_queue it can
858  		 * be moved onto activequeues, and we would then have events in
859  		 * activequeues after this function returns, which is not what
860  		 * we want (we even have an assertion for this).
861  		 *
862  		 * A simple case is a bufferevent with an underlying one (i.e. filters).
863  		 */
864  		int i = event_base_free_queues_(base, run_finalizers);
865  		if (!i) {
866  			break;
867  		}
868  		n_deleted += i;
869  	}
870  
871  	if (n_deleted)
872  		event_debug(("%s: %d events were still set in base",
873  			__func__, n_deleted));
874  
875  	while (LIST_FIRST(&base->once_events)) {
876  		struct event_once *eonce = LIST_FIRST(&base->once_events);
877  		LIST_REMOVE(eonce, next_once);
878  		mm_free(eonce);
879  	}
880  
881  	if (base->evsel != NULL && base->evsel->dealloc != NULL)
882  		base->evsel->dealloc(base);
883  
884  	for (i = 0; i < base->nactivequeues; ++i)
885  		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
886  
887  	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
888  	min_heap_dtor_(&base->timeheap);
889  
890  	mm_free(base->activequeues);
891  
892  	evmap_io_clear_(&base->io);
893  	evmap_signal_clear_(&base->sigmap);
894  	event_changelist_freemem_(&base->changelist);
895  
896  	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
897  	EVTHREAD_FREE_COND(base->current_event_cond);
898  
899  	/* If we're freeing current_base, there won't be a current_base. */
900  	if (base == current_base)
901  		current_base = NULL;
902  	mm_free(base);
903  }
904  
905  void
906  event_base_free_nofinalize(struct event_base *base)
907  {
908  	event_base_free_(base, 0);
909  }
910  
911  void
912  event_base_free(struct event_base *base)
913  {
914  	event_base_free_(base, 1);
915  }
916  
917  /* Fake eventop; used to disable the backend temporarily inside event_reinit
918   * so that we can call event_del() on an event without telling the backend.
919   */
920  static int
921  nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
922      short events, void *fdinfo)
923  {
924  	return 0;
925  }
926  const struct eventop nil_eventop = {
927  	"nil",
928  	NULL, /* init: unused. */
929  	NULL, /* add: unused. */
930  	nil_backend_del, /* del: used, so needs to be killed. */
931  	NULL, /* dispatch: unused. */
932  	NULL, /* dealloc: unused. */
933  	0, 0, 0
934  };
935  
936  /* reinitialize the event base after a fork */
937  int
938  event_reinit(struct event_base *base)
939  {
940  	const struct eventop *evsel;
941  	int res = 0;
942  	int was_notifiable = 0;
943  	int had_signal_added = 0;
944  
945  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
946  
947  	evsel = base->evsel;
948  
949  	/* check if this event mechanism requires reinit on the backend */
950  	if (evsel->need_reinit) {
951  		/* We're going to call event_del() on our notify events (the
952  		 * ones that tell about signals and wakeup events).  But we
953  		 * don't actually want to tell the backend to change its
954  		 * state, since it might still share some resource (a kqueue,
955  		 * an epoll fd) with the parent process, and we don't want to
956  		 * delete the fds from _that_ backend, so we temporarily stub out
957  		 * the evsel with a replacement.
958  		 */
959  		base->evsel = &nil_eventop;
960  	}
961  
962  	/* We need to re-create a new signal-notification fd and a new
963  	 * thread-notification fd.  Otherwise, we'll still share those with
964  	 * the parent process, which would make any notification sent to them
965  	 * get received by one or both of the event loops, more or less at
966  	 * random.
967  	 */
968  	if (base->sig.ev_signal_added) {
969  		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
970  		event_debug_unassign(&base->sig.ev_signal);
971  		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
972  		had_signal_added = 1;
973  		base->sig.ev_signal_added = 0;
974  	}
975  	if (base->sig.ev_signal_pair[0] != -1)
976  		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
977  	if (base->sig.ev_signal_pair[1] != -1)
978  		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
979  	if (base->th_notify_fn != NULL) {
980  		was_notifiable = 1;
981  		base->th_notify_fn = NULL;
982  	}
983  	if (base->th_notify_fd[0] != -1) {
984  		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
985  		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
986  		if (base->th_notify_fd[1] != -1)
987  			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
988  		base->th_notify_fd[0] = -1;
989  		base->th_notify_fd[1] = -1;
990  		event_debug_unassign(&base->th_notify);
991  	}
992  
993  	/* Replace the original evsel. */
994          base->evsel = evsel;
995  
996  	if (evsel->need_reinit) {
997  		/* Reconstruct the backend through brute-force, so that we do
998  		 * not share any structures with the parent process. For some
999  		 * backends, this is necessary: epoll and kqueue, for
1000  		 * instance, have events associated with a kernel
1001  		 * structure. If we didn't reinitialize, we'd share that
1002  		 * structure with the parent process, and any changes made by
1003  		 * the parent would affect our backend's behavior (and vice
1004  		 * versa).
1005  		 */
1006  		if (base->evsel->dealloc != NULL)
1007  			base->evsel->dealloc(base);
1008  		base->evbase = evsel->init(base);
1009  		if (base->evbase == NULL) {
1010  			event_errx(1,
1011  			   "%s: could not reinitialize event mechanism",
1012  			   __func__);
1013  			res = -1;
1014  			goto done;
1015  		}
1016  
1017  		/* Empty out the changelist (if any): we are starting from a
1018  		 * blank slate. */
1019  		event_changelist_freemem_(&base->changelist);
1020  
1021  		/* Tell the event maps to re-inform the backend about all
1022  		 * pending events. This will make the signal notification
1023  		 * event get re-created if necessary. */
1024  		if (evmap_reinit_(base) < 0)
1025  			res = -1;
1026  	} else {
1027  		res = evsig_init_(base);
1028  		if (res == 0 && had_signal_added) {
1029  			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
1030  			if (res == 0)
1031  				base->sig.ev_signal_added = 1;
1032  		}
1033  	}
1034  
1035  	/* If we were notifiable before, and nothing just exploded, become
1036  	 * notifiable again. */
1037  	if (was_notifiable && res == 0)
1038  		res = evthread_make_base_notifiable_nolock_(base);
1039  
1040  done:
1041  	EVBASE_RELEASE_LOCK(base, th_base_lock);
1042  	return (res);
1043  }
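/* Illustration (added): the usual reason to call event_reinit() is in the
 * child after fork(), so the child stops sharing backend state and
 * notification fds with its parent.  Hedged sketch, POSIX only: */
#if 0
static void
example_child_after_fork(struct event_base *base)
{
	pid_t pid = fork();
	if (pid == 0) {
		/* Child: rebuild the backend before touching the base. */
		if (event_reinit(base) == -1)
			event_warnx("%s: event_reinit failed", __func__);
		event_base_dispatch(base);
		exit(0);
	}
	/* Parent keeps using 'base' unchanged. */
}
#endif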
1044  
1045  /* Get the monotonic time for this event_base' timer */
1046  int
1047  event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1048  {
1049    int rv = -1;
1050  
1051    if (base && tv) {
1052      EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1053      rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1054      EVBASE_RELEASE_LOCK(base, th_base_lock);
1055    }
1056  
1057    return rv;
1058  }
1059  
1060  const char **
1061  event_get_supported_methods(void)
1062  {
1063  	static const char **methods = NULL;
1064  	const struct eventop **method;
1065  	const char **tmp;
1066  	int i = 0, k;
1067  
1068  	/* count all methods */
1069  	for (method = &eventops[0]; *method != NULL; ++method) {
1070  		++i;
1071  	}
1072  
1073  	/* allocate one more than we need for the NULL pointer */
1074  	tmp = mm_calloc((i + 1), sizeof(char *));
1075  	if (tmp == NULL)
1076  		return (NULL);
1077  
1078  	/* populate the array with the supported methods */
1079  	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1080  		tmp[i++] = eventops[k]->name;
1081  	}
1082  	tmp[i] = NULL;
1083  
1084  	if (methods != NULL)
1085  		mm_free((char**)methods);
1086  
1087  	methods = tmp;
1088  
1089  	return (methods);
1090  }
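/* Illustration (added): enumerating the compiled-in backends.  The returned
 * array is NULL-terminated and owned by libevent; callers must not free it.
 * Hypothetical helper: */
#if 0
static void
example_list_methods(void)
{
	int i;
	const char **methods = event_get_supported_methods();
	if (!methods)
		return;
	for (i = 0; methods[i] != NULL; ++i)
		printf("supported backend: %s\n", methods[i]);
}
#endif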
1091  
1092  struct event_config *
1093  event_config_new(void)
1094  {
1095  	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1096  
1097  	if (cfg == NULL)
1098  		return (NULL);
1099  
1100  	TAILQ_INIT(&cfg->entries);
1101  	cfg->max_dispatch_interval.tv_sec = -1;
1102  	cfg->max_dispatch_callbacks = INT_MAX;
1103  	cfg->limit_callbacks_after_prio = 1;
1104  
1105  	return (cfg);
1106  }
1107  
1108  static void
1109  event_config_entry_free(struct event_config_entry *entry)
1110  {
1111  	if (entry->avoid_method != NULL)
1112  		mm_free((char *)entry->avoid_method);
1113  	mm_free(entry);
1114  }
1115  
1116  void
1117  event_config_free(struct event_config *cfg)
1118  {
1119  	struct event_config_entry *entry;
1120  
1121  	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1122  		TAILQ_REMOVE(&cfg->entries, entry, next);
1123  		event_config_entry_free(entry);
1124  	}
1125  	mm_free(cfg);
1126  }
1127  
1128  int
1129  event_config_set_flag(struct event_config *cfg, int flag)
1130  {
1131  	if (!cfg)
1132  		return -1;
1133  	cfg->flags |= flag;
1134  	return 0;
1135  }
1136  
1137  int
1138  event_config_avoid_method(struct event_config *cfg, const char *method)
1139  {
1140  	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1141  	if (entry == NULL)
1142  		return (-1);
1143  
1144  	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1145  		mm_free(entry);
1146  		return (-1);
1147  	}
1148  
1149  	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1150  
1151  	return (0);
1152  }
1153  
1154  int
1155  event_config_require_features(struct event_config *cfg,
1156      int features)
1157  {
1158  	if (!cfg)
1159  		return (-1);
1160  	cfg->require_features = features;
1161  	return (0);
1162  }
1163  
1164  int
1165  event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1166  {
1167  	if (!cfg)
1168  		return (-1);
1169  	cfg->n_cpus_hint = cpus;
1170  	return (0);
1171  }
1172  
1173  int
1174  event_config_set_max_dispatch_interval(struct event_config *cfg,
1175      const struct timeval *max_interval, int max_callbacks, int min_priority)
1176  {
1177  	if (max_interval)
1178  		memcpy(&cfg->max_dispatch_interval, max_interval,
1179  		    sizeof(struct timeval));
1180  	else
1181  		cfg->max_dispatch_interval.tv_sec = -1;
1182  	cfg->max_dispatch_callbacks =
1183  	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
1184  	if (min_priority < 0)
1185  		min_priority = 0;
1186  	cfg->limit_callbacks_after_prio = min_priority;
1187  	return (0);
1188  }
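/* Illustration (added): capping how long one pass over the active queues may
 * run.  The numbers are arbitrary examples: for callbacks at priority >= 1,
 * stop after 10 ms or 16 callbacks and go back to look for new events. */
#if 0
static void
example_limit_dispatch(struct event_config *cfg)
{
	struct timeval max_interval = { 0, 10000 };	/* 10 ms */
	event_config_set_max_dispatch_interval(cfg, &max_interval, 16, 1);
}
#endif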
1189  
1190  int
1191  event_priority_init(int npriorities)
1192  {
1193  	return event_base_priority_init(current_base, npriorities);
1194  }
1195  
1196  int
1197  event_base_priority_init(struct event_base *base, int npriorities)
1198  {
1199  	int i, r;
1200  	r = -1;
1201  
1202  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1203  
1204  	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1205  	    || npriorities >= EVENT_MAX_PRIORITIES)
1206  		goto err;
1207  
1208  	if (npriorities == base->nactivequeues)
1209  		goto ok;
1210  
1211  	if (base->nactivequeues) {
1212  		mm_free(base->activequeues);
1213  		base->nactivequeues = 0;
1214  	}
1215  
1216  	/* Allocate our priority queues */
1217  	base->activequeues = (struct evcallback_list *)
1218  	  mm_calloc(npriorities, sizeof(struct evcallback_list));
1219  	if (base->activequeues == NULL) {
1220  		event_warn("%s: calloc", __func__);
1221  		goto err;
1222  	}
1223  	base->nactivequeues = npriorities;
1224  
1225  	for (i = 0; i < base->nactivequeues; ++i) {
1226  		TAILQ_INIT(&base->activequeues[i]);
1227  	}
1228  
1229  ok:
1230  	r = 0;
1231  err:
1232  	EVBASE_RELEASE_LOCK(base, th_base_lock);
1233  	return (r);
1234  }
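/* Illustration (added): priorities must be set up while no callbacks are
 * active; numerically lower priorities run first (see event_process_active
 * below).  Hypothetical helper using the public event_priority_set(): */
#if 0
static void
example_use_priorities(struct event_base *base, struct event *urgent,
    struct event *bulk)
{
	event_base_priority_init(base, 2);	/* priorities 0 and 1 */
	event_priority_set(urgent, 0);		/* runs before priority 1 */
	event_priority_set(bulk, 1);
}
#endif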
1235  
1236  int
1237  event_base_get_npriorities(struct event_base *base)
1238  {
1239  
1240  	int n;
1241  	if (base == NULL)
1242  		base = current_base;
1243  
1244  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1245  	n = base->nactivequeues;
1246  	EVBASE_RELEASE_LOCK(base, th_base_lock);
1247  	return (n);
1248  }
1249  
1250  int
1251  event_base_get_num_events(struct event_base *base, unsigned int type)
1252  {
1253  	int r = 0;
1254  
1255  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1256  
1257  	if (type & EVENT_BASE_COUNT_ACTIVE)
1258  		r += base->event_count_active;
1259  
1260  	if (type & EVENT_BASE_COUNT_VIRTUAL)
1261  		r += base->virtual_event_count;
1262  
1263  	if (type & EVENT_BASE_COUNT_ADDED)
1264  		r += base->event_count;
1265  
1266  	EVBASE_RELEASE_LOCK(base, th_base_lock);
1267  
1268  	return r;
1269  }
1270  
1271  int
1272  event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1273  {
1274  	int r = 0;
1275  
1276  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1277  
1278  	if (type & EVENT_BASE_COUNT_ACTIVE) {
1279  		r += base->event_count_active_max;
1280  		if (clear)
1281  			base->event_count_active_max = 0;
1282  	}
1283  
1284  	if (type & EVENT_BASE_COUNT_VIRTUAL) {
1285  		r += base->virtual_event_count_max;
1286  		if (clear)
1287  			base->virtual_event_count_max = 0;
1288  	}
1289  
1290  	if (type & EVENT_BASE_COUNT_ADDED) {
1291  		r += base->event_count_max;
1292  		if (clear)
1293  			base->event_count_max = 0;
1294  	}
1295  
1296  	EVBASE_RELEASE_LOCK(base, th_base_lock);
1297  
1298  	return r;
1299  }
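/* Illustration (added): these counters are convenient for lightweight
 * monitoring of a running base.  Hypothetical reporting helper: */
#if 0
static void
example_report_counts(struct event_base *base)
{
	int added  = event_base_get_num_events(base, EVENT_BASE_COUNT_ADDED);
	int active = event_base_get_num_events(base, EVENT_BASE_COUNT_ACTIVE);
	/* Peak number of simultaneously active callbacks, then reset it. */
	int peak = event_base_get_max_events(base, EVENT_BASE_COUNT_ACTIVE, 1);
	printf("added=%d active=%d peak_active=%d\n", added, active, peak);
}
#endif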
1300  
1301  /* Returns true iff we're currently watching any events. */
1302  static int
1303  event_haveevents(struct event_base *base)
1304  {
1305  	/* Caller must hold th_base_lock */
1306  	return (base->virtual_event_count > 0 || base->event_count > 0);
1307  }
1308  
1309  /* "closure" function called when processing active signal events */
1310  static inline void
1311  event_signal_closure(struct event_base *base, struct event *ev)
1312  {
1313  	short ncalls;
1314  	int should_break;
1315  
1316  	/* Allows deletes to work */
1317  	ncalls = ev->ev_ncalls;
1318  	if (ncalls != 0)
1319  		ev->ev_pncalls = &ncalls;
1320  	EVBASE_RELEASE_LOCK(base, th_base_lock);
1321  	while (ncalls) {
1322  		ncalls--;
1323  		ev->ev_ncalls = ncalls;
1324  		if (ncalls == 0)
1325  			ev->ev_pncalls = NULL;
1326  		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1327  
1328  		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1329  		should_break = base->event_break;
1330  		EVBASE_RELEASE_LOCK(base, th_base_lock);
1331  
1332  		if (should_break) {
1333  			if (ncalls != 0)
1334  				ev->ev_pncalls = NULL;
1335  			return;
1336  		}
1337  	}
1338  }
1339  
1340  /* Common timeouts are special timeouts that are handled as queues rather than
1341   * in the minheap.  This is more efficient than the minheap if we happen to
1342   * know that we're going to get several thousand timeout events all with
1343   * the same timeout value.
1344   *
1345   * Since all our timeout handling code assumes timevals can be copied,
1346   * assigned, etc., we can't use a "magic pointer" to encode these common
1347   * timeouts.  Searching through a list to see if every timeout is common could
1348   * also get inefficient.  Instead, we take advantage of the fact that tv_usec
1349   * is 32 bits long, but only uses 20 of those bits (since it can never be over
1350   * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
1351   * of index into the event_base's array of common timeouts.
1352   */
1353  
1354  #define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
1355  #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1356  #define COMMON_TIMEOUT_IDX_SHIFT 20
1357  #define COMMON_TIMEOUT_MASK     0xf0000000
1358  #define COMMON_TIMEOUT_MAGIC    0x50000000
1359  
1360  #define COMMON_TIMEOUT_IDX(tv) \
1361  	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1362  
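/* Worked example (added): a 2.5 s common timeout stored at index 3 is encoded
 * as tv_usec = COMMON_TIMEOUT_MAGIC | (3 << COMMON_TIMEOUT_IDX_SHIFT) | 500000
 *            = 0x50000000 | 0x00300000 | 0x0007a120 = 0x5037a120,
 * so COMMON_TIMEOUT_IDX() recovers 3, (tv_usec & MICROSECONDS_MASK) recovers
 * the real 500000 microseconds, and tv_sec stays an ordinary 2. */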
1363  /** Return true iff 'tv' is a common timeout in 'base' */
1364  static inline int
1365  is_common_timeout(const struct timeval *tv,
1366      const struct event_base *base)
1367  {
1368  	int idx;
1369  	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1370  		return 0;
1371  	idx = COMMON_TIMEOUT_IDX(tv);
1372  	return idx < base->n_common_timeouts;
1373  }
1374  
1375  /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1376   * one is a common timeout. */
1377  static inline int
1378  is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1379  {
1380  	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1381  	    (tv2->tv_usec & ~MICROSECONDS_MASK);
1382  }
1383  
1384  /** Requires that 'tv' is a common timeout.  Return the corresponding
1385   * common_timeout_list. */
1386  static inline struct common_timeout_list *
1387  get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1388  {
1389  	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1390  }
1391  
1392  #if 0
1393  static inline int
1394  common_timeout_ok(const struct timeval *tv,
1395      struct event_base *base)
1396  {
1397  	const struct timeval *expect =
1398  	    &get_common_timeout_list(base, tv)->duration;
1399  	return tv->tv_sec == expect->tv_sec &&
1400  	    tv->tv_usec == expect->tv_usec;
1401  }
1402  #endif
1403  
1404  /* Add the timeout for the first event in given common timeout list to the
1405   * event_base's minheap. */
1406  static void
1407  common_timeout_schedule(struct common_timeout_list *ctl,
1408      const struct timeval *now, struct event *head)
1409  {
1410  	struct timeval timeout = head->ev_timeout;
1411  	timeout.tv_usec &= MICROSECONDS_MASK;
1412  	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1413  }
1414  
1415  /* Callback: invoked when the timeout for a common timeout queue triggers.
1416   * This means that (at least) the first event in that queue should be run,
1417   * and the timeout should be rescheduled if there are more events. */
1418  static void
1419  common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1420  {
1421  	struct timeval now;
1422  	struct common_timeout_list *ctl = arg;
1423  	struct event_base *base = ctl->base;
1424  	struct event *ev = NULL;
1425  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1426  	gettime(base, &now);
1427  	while (1) {
1428  		ev = TAILQ_FIRST(&ctl->events);
1429  		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1430  		    (ev->ev_timeout.tv_sec == now.tv_sec &&
1431  			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1432  			break;
1433  		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1434  		event_active_nolock_(ev, EV_TIMEOUT, 1);
1435  	}
1436  	if (ev)
1437  		common_timeout_schedule(ctl, &now, ev);
1438  	EVBASE_RELEASE_LOCK(base, th_base_lock);
1439  }
1440  
1441  #define MAX_COMMON_TIMEOUTS 256
1442  
1443  const struct timeval *
1444  event_base_init_common_timeout(struct event_base *base,
1445      const struct timeval *duration)
1446  {
1447  	int i;
1448  	struct timeval tv;
1449  	const struct timeval *result=NULL;
1450  	struct common_timeout_list *new_ctl;
1451  
1452  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1453  	if (duration->tv_usec > 1000000) {
1454  		memcpy(&tv, duration, sizeof(struct timeval));
1455  		if (is_common_timeout(duration, base))
1456  			tv.tv_usec &= MICROSECONDS_MASK;
1457  		tv.tv_sec += tv.tv_usec / 1000000;
1458  		tv.tv_usec %= 1000000;
1459  		duration = &tv;
1460  	}
1461  	for (i = 0; i < base->n_common_timeouts; ++i) {
1462  		const struct common_timeout_list *ctl =
1463  		    base->common_timeout_queues[i];
1464  		if (duration->tv_sec == ctl->duration.tv_sec &&
1465  		    duration->tv_usec ==
1466  		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1467  			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1468  			result = &ctl->duration;
1469  			goto done;
1470  		}
1471  	}
1472  	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1473  		event_warnx("%s: Too many common timeouts already in use; "
1474  		    "we only support %d per event_base", __func__,
1475  		    MAX_COMMON_TIMEOUTS);
1476  		goto done;
1477  	}
1478  	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1479  		int n = base->n_common_timeouts < 16 ? 16 :
1480  		    base->n_common_timeouts*2;
1481  		struct common_timeout_list **newqueues =
1482  		    mm_realloc(base->common_timeout_queues,
1483  			n*sizeof(struct common_timeout_queue *));
1484  		if (!newqueues) {
1485  			event_warn("%s: realloc",__func__);
1486  			goto done;
1487  		}
1488  		base->n_common_timeouts_allocated = n;
1489  		base->common_timeout_queues = newqueues;
1490  	}
1491  	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1492  	if (!new_ctl) {
1493  		event_warn("%s: calloc",__func__);
1494  		goto done;
1495  	}
1496  	TAILQ_INIT(&new_ctl->events);
1497  	new_ctl->duration.tv_sec = duration->tv_sec;
1498  	new_ctl->duration.tv_usec =
1499  	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1500  	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1501  	evtimer_assign(&new_ctl->timeout_event, base,
1502  	    common_timeout_callback, new_ctl);
1503  	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1504  	event_priority_set(&new_ctl->timeout_event, 0);
1505  	new_ctl->base = base;
1506  	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1507  	result = &new_ctl->duration;
1508  
1509  done:
1510  	if (result)
1511  		EVUTIL_ASSERT(is_common_timeout(result, base));
1512  
1513  	EVBASE_RELEASE_LOCK(base, th_base_lock);
1514  	return result;
1515  }
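/* Illustration (added): how the "magic" timeval returned above is meant to be
 * used.  Every event added with it shares one queue, and only the queue's
 * internal timeout_event sits in the minheap.  Hypothetical helper: */
#if 0
static void
example_add_with_common_timeout(struct event_base *base, struct event *ev)
{
	struct timeval ten_sec = { 10, 0 };
	const struct timeval *common =
	    event_base_init_common_timeout(base, &ten_sec);
	if (common)
		event_add(ev, common);	/* queued in O(1), not in the heap */
	else
		event_add(ev, &ten_sec);	/* fall back to the normal path */
}
#endif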
1516  
1517  /* Closure function invoked when we're activating a persistent event. */
1518  static inline void
1519  event_persist_closure(struct event_base *base, struct event *ev)
1520  {
1521  	void (*evcb_callback)(evutil_socket_t, short, void *);
1522  
1523          // Other fields of *ev that must be stored before executing
1524          evutil_socket_t evcb_fd;
1525          short evcb_res;
1526          void *evcb_arg;
1527  
1528  	/* reschedule the persistent event if we have a timeout. */
1529  	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1530  		/* If there was a timeout, we want it to run at an interval of
1531  		 * ev_io_timeout after the last time it was _scheduled_ for,
1532  		 * not ev_io_timeout after _now_.  If it fired for another
1533  		 * reason, though, the timeout ought to start ticking _now_. */
1534  		struct timeval run_at, relative_to, delay, now;
1535  		ev_uint32_t usec_mask = 0;
1536  		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1537  			&ev->ev_io_timeout));
1538  		gettime(base, &now);
1539  		if (is_common_timeout(&ev->ev_timeout, base)) {
1540  			delay = ev->ev_io_timeout;
1541  			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1542  			delay.tv_usec &= MICROSECONDS_MASK;
1543  			if (ev->ev_res & EV_TIMEOUT) {
1544  				relative_to = ev->ev_timeout;
1545  				relative_to.tv_usec &= MICROSECONDS_MASK;
1546  			} else {
1547  				relative_to = now;
1548  			}
1549  		} else {
1550  			delay = ev->ev_io_timeout;
1551  			if (ev->ev_res & EV_TIMEOUT) {
1552  				relative_to = ev->ev_timeout;
1553  			} else {
1554  				relative_to = now;
1555  			}
1556  		}
1557  		evutil_timeradd(&relative_to, &delay, &run_at);
1558  		if (evutil_timercmp(&run_at, &now, <)) {
1559  			/* Looks like we missed at least one invocation due to
1560  			 * a clock jump, not running the event loop for a
1561  			 * while, really slow callbacks, or
1562  			 * something. Reschedule relative to now.
1563  			 */
1564  			evutil_timeradd(&now, &delay, &run_at);
1565  		}
1566  		run_at.tv_usec |= usec_mask;
1567  		event_add_nolock_(ev, &run_at, 1);
1568  	}
1569  
1570  	// Save our callback before we release the lock
1571  	evcb_callback = ev->ev_callback;
1572          evcb_fd = ev->ev_fd;
1573          evcb_res = ev->ev_res;
1574          evcb_arg = ev->ev_arg;
1575  
1576  	// Release the lock
1577   	EVBASE_RELEASE_LOCK(base, th_base_lock);
1578  
1579  	// Execute the callback
1580          (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1581  }
1582  
1583  /*
1584    Helper for event_process_active to process all the events in a single queue,
1585    releasing the lock as we go.  This function requires that the lock be held
1586    when it's invoked.  Returns -1 if we get a signal or an event_break that
1587    means we should stop processing any active events now.  Otherwise returns
1588    the number of non-internal event_callbacks that we processed.
1589  */
1590  static int
1591  event_process_active_single_queue(struct event_base *base,
1592      struct evcallback_list *activeq,
1593      int max_to_process, const struct timeval *endtime)
1594  {
1595  	struct event_callback *evcb;
1596  	int count = 0;
1597  
1598  	EVUTIL_ASSERT(activeq != NULL);
1599  
1600  	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1601  		struct event *ev=NULL;
1602  		if (evcb->evcb_flags & EVLIST_INIT) {
1603  			ev = event_callback_to_event(evcb);
1604  
1605  			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1606  				event_queue_remove_active(base, evcb);
1607  			else
1608  				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1609  			event_debug((
1610  			    "event_process_active: event: %p, %s%s%scall %p",
1611  			    ev,
1612  			    ev->ev_res & EV_READ ? "EV_READ " : " ",
1613  			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1614  			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1615  			    ev->ev_callback));
1616  		} else {
1617  			event_queue_remove_active(base, evcb);
1618  			event_debug(("event_process_active: event_callback %p, "
1619  				"closure %d, call %p",
1620  				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1621  		}
1622  
1623  		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1624  			++count;
1625  
1626  
1627  		base->current_event = evcb;
1628  #ifndef EVENT__DISABLE_THREAD_SUPPORT
1629  		base->current_event_waiters = 0;
1630  #endif
1631  
1632  		switch (evcb->evcb_closure) {
1633  		case EV_CLOSURE_EVENT_SIGNAL:
1634  			EVUTIL_ASSERT(ev != NULL);
1635  			event_signal_closure(base, ev);
1636  			break;
1637  		case EV_CLOSURE_EVENT_PERSIST:
1638  			EVUTIL_ASSERT(ev != NULL);
1639  			event_persist_closure(base, ev);
1640  			break;
1641  		case EV_CLOSURE_EVENT: {
1642  			void (*evcb_callback)(evutil_socket_t, short, void *);
1643  			EVUTIL_ASSERT(ev != NULL);
1644  			evcb_callback = *ev->ev_callback;
1645  			EVBASE_RELEASE_LOCK(base, th_base_lock);
1646  			evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
1647  		}
1648  		break;
1649  		case EV_CLOSURE_CB_SELF: {
1650  			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1651  			EVBASE_RELEASE_LOCK(base, th_base_lock);
1652  			evcb_selfcb(evcb, evcb->evcb_arg);
1653  		}
1654  		break;
1655  		case EV_CLOSURE_EVENT_FINALIZE:
1656  		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1657  			void (*evcb_evfinalize)(struct event *, void *);
1658  			int evcb_closure = evcb->evcb_closure;
1659  			EVUTIL_ASSERT(ev != NULL);
1660  			base->current_event = NULL;
1661  			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1662  			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1663  			EVBASE_RELEASE_LOCK(base, th_base_lock);
1664  			evcb_evfinalize(ev, ev->ev_arg);
1665  			event_debug_note_teardown_(ev);
1666  			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1667  				mm_free(ev);
1668  		}
1669  		break;
1670  		case EV_CLOSURE_CB_FINALIZE: {
1671  			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1672  			base->current_event = NULL;
1673  			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1674  			EVBASE_RELEASE_LOCK(base, th_base_lock);
1675  			evcb_cbfinalize(evcb, evcb->evcb_arg);
1676  		}
1677  		break;
1678  		default:
1679  			EVUTIL_ASSERT(0);
1680  		}
1681  
1682  		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1683  		base->current_event = NULL;
1684  #ifndef EVENT__DISABLE_THREAD_SUPPORT
1685  		if (base->current_event_waiters) {
1686  			base->current_event_waiters = 0;
1687  			EVTHREAD_COND_BROADCAST(base->current_event_cond);
1688  		}
1689  #endif
1690  
1691  		if (base->event_break)
1692  			return -1;
1693  		if (count >= max_to_process)
1694  			return count;
1695  		if (count && endtime) {
1696  			struct timeval now;
1697  			update_time_cache(base);
1698  			gettime(base, &now);
1699  			if (evutil_timercmp(&now, endtime, >=))
1700  				return count;
1701  		}
1702  		if (base->event_continue)
1703  			break;
1704  	}
1705  	return count;
1706  }
1707  
1708  /*
1709   * Active events are stored in priority queues.  Lower priority numbers are
1710   * always processed before higher-numbered ones, so events at a lower (more
1711   * urgent) priority can starve those at a higher-numbered priority.
1712   */
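
/* Usage sketch (illustrative, not part of event.c): how these priority queues
 * are driven from the public API.  Lower numbers run first, so a busy
 * priority-0 event can starve a priority-1 one.  sock_a and sock_b are
 * assumed to be already-connected descriptors.
 *
 *	#include <event2/event.h>
 *
 *	static void urgent_cb(evutil_socket_t fd, short what, void *arg) { }
 *	static void bulk_cb(evutil_socket_t fd, short what, void *arg) { }
 *
 *	static void setup(struct event_base *base,
 *	    evutil_socket_t sock_a, evutil_socket_t sock_b)
 *	{
 *		event_base_priority_init(base, 2);  // queues 0 (urgent) and 1 (bulk)
 *		struct event *urgent = event_new(base, sock_a,
 *		    EV_READ|EV_PERSIST, urgent_cb, NULL);
 *		struct event *bulk = event_new(base, sock_b,
 *		    EV_READ|EV_PERSIST, bulk_cb, NULL);
 *		event_priority_set(urgent, 0);      // must happen before the event is active
 *		event_priority_set(bulk, 1);
 *		event_add(urgent, NULL);
 *		event_add(bulk, NULL);
 *	}
 */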
1713  
1714  static int
1715  event_process_active(struct event_base *base)
1716  {
1717  	/* Caller must hold th_base_lock */
1718  	struct evcallback_list *activeq = NULL;
1719  	int i, c = 0;
1720  	const struct timeval *endtime;
1721  	struct timeval tv;
1722  	const int maxcb = base->max_dispatch_callbacks;
1723  	const int limit_after_prio = base->limit_callbacks_after_prio;
1724  	if (base->max_dispatch_time.tv_sec >= 0) {
1725  		update_time_cache(base);
1726  		gettime(base, &tv);
1727  		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1728  		endtime = &tv;
1729  	} else {
1730  		endtime = NULL;
1731  	}
1732  
1733  	for (i = 0; i < base->nactivequeues; ++i) {
1734  		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1735  			base->event_running_priority = i;
1736  			activeq = &base->activequeues[i];
1737  			if (i < limit_after_prio)
1738  				c = event_process_active_single_queue(base, activeq,
1739  				    INT_MAX, NULL);
1740  			else
1741  				c = event_process_active_single_queue(base, activeq,
1742  				    maxcb, endtime);
1743  			if (c < 0) {
1744  				goto done;
1745  			} else if (c > 0)
1746  				break; /* Processed a real event; do not
1747  					* consider lower-priority events */
1748  			/* If we get here, all of the events we processed
1749  			 * were internal.  Continue. */
1750  		}
1751  	}
1752  
1753  done:
1754  	base->event_running_priority = -1;
1755  
1756  	return c;
1757  }
1758  
1759  /*
1760   * Wait continuously for events.  We exit only if no events are left.
1761   */
1762  
1763  int
1764  event_dispatch(void)
1765  {
1766  	return (event_loop(0));
1767  }
1768  
1769  int
1770  event_base_dispatch(struct event_base *event_base)
1771  {
1772  	return (event_base_loop(event_base, 0));
1773  }
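
/* Usage sketch (illustrative, not part of event.c): event_base_dispatch()
 * blocks until no events remain pending or active, or until
 * event_base_loopexit()/event_base_loopbreak() is called.
 *
 *	#include <event2/event.h>
 *	#include <stdio.h>
 *
 *	static void on_timer(evutil_socket_t fd, short what, void *arg)
 *	{
 *		puts("timer fired");            // afterwards no events are left
 *	}
 *
 *	int main(void)
 *	{
 *		struct event_base *base = event_base_new();
 *		struct timeval one_sec = { 1, 0 };
 *		struct event *t = evtimer_new(base, on_timer, NULL);
 *		evtimer_add(t, &one_sec);
 *		event_base_dispatch(base);      // returns once the timer has run
 *		event_free(t);
 *		event_base_free(base);
 *		return 0;
 *	}
 */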
1774  
1775  const char *
1776  event_base_get_method(const struct event_base *base)
1777  {
1778  	EVUTIL_ASSERT(base);
1779  	return (base->evsel->name);
1780  }
1781  
1782  /** Callback: used to implement event_base_loopexit by telling the event_base
1783   * that it's time to exit its loop. */
1784  static void
1785  event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1786  {
1787  	struct event_base *base = arg;
1788  	base->event_gotterm = 1;
1789  }
1790  
1791  int
1792  event_loopexit(const struct timeval *tv)
1793  {
1794  	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1795  		    current_base, tv));
1796  }
1797  
1798  int
1799  event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1800  {
1801  	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1802  		    event_base, tv));
1803  }
1804  
1805  int
1806  event_loopbreak(void)
1807  {
1808  	return (event_base_loopbreak(current_base));
1809  }
1810  
1811  int
1812  event_base_loopbreak(struct event_base *event_base)
1813  {
1814  	int r = 0;
1815  	if (event_base == NULL)
1816  		return (-1);
1817  
1818  	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1819  	event_base->event_break = 1;
1820  
1821  	if (EVBASE_NEED_NOTIFY(event_base)) {
1822  		r = evthread_notify_base(event_base);
1823  	} else {
1824  		r = (0);
1825  	}
1826  	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1827  	return r;
1828  }
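
/* Usage sketch (illustrative, not part of event.c): the difference between the
 * two ways of stopping a loop.  "base" is assumed to be the application's
 * event_base; these calls are typically made from a callback or another thread.
 *
 *	struct timeval ten_sec = { 10, 0 };
 *
 *	event_base_loopexit(base, &ten_sec);  // stop roughly ten seconds from now
 *	event_base_loopexit(base, NULL);      // stop after the active callbacks run
 *	event_base_loopbreak(base);           // stop as soon as the current callback returns
 */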
1829  
1830  int
1831  event_base_loopcontinue(struct event_base *event_base)
1832  {
1833  	int r = 0;
1834  	if (event_base == NULL)
1835  		return (-1);
1836  
1837  	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1838  	event_base->event_continue = 1;
1839  
1840  	if (EVBASE_NEED_NOTIFY(event_base)) {
1841  		r = evthread_notify_base(event_base);
1842  	} else {
1843  		r = (0);
1844  	}
1845  	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1846  	return r;
1847  }
1848  
1849  int
1850  event_base_got_break(struct event_base *event_base)
1851  {
1852  	int res;
1853  	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1854  	res = event_base->event_break;
1855  	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1856  	return res;
1857  }
1858  
1859  int
1860  event_base_got_exit(struct event_base *event_base)
1861  {
1862  	int res;
1863  	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1864  	res = event_base->event_gotterm;
1865  	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1866  	return res;
1867  }
1868  
1869  /* not thread safe */
1870  
1871  int
1872  event_loop(int flags)
1873  {
1874  	return event_base_loop(current_base, flags);
1875  }
1876  
1877  int
1878  event_base_loop(struct event_base *base, int flags)
1879  {
1880  	const struct eventop *evsel = base->evsel;
1881  	struct timeval tv;
1882  	struct timeval *tv_p;
1883  	int res, done, retval = 0;
1884  
1885  	/* Grab the lock.  We will release it inside evsel.dispatch, and again
1886  	 * as we invoke user callbacks. */
1887  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1888  
1889  	if (base->running_loop) {
1890  		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
1891  		    " can run on each event_base at once.", __func__);
1892  		EVBASE_RELEASE_LOCK(base, th_base_lock);
1893  		return -1;
1894  	}
1895  
1896  	base->running_loop = 1;
1897  
1898  	clear_time_cache(base);
1899  
1900  	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1901  		evsig_set_base_(base);
1902  
1903  	done = 0;
1904  
1905  #ifndef EVENT__DISABLE_THREAD_SUPPORT
1906  	base->th_owner_id = EVTHREAD_GET_ID();
1907  #endif
1908  
1909  	base->event_gotterm = base->event_break = 0;
1910  
1911  	while (!done) {
1912  		base->event_continue = 0;
1913  		base->n_deferreds_queued = 0;
1914  
1915  		/* Terminate the loop if we have been asked to */
1916  		if (base->event_gotterm) {
1917  			break;
1918  		}
1919  
1920  		if (base->event_break) {
1921  			break;
1922  		}
1923  
1924  		tv_p = &tv;
1925  		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1926  			timeout_next(base, &tv_p);
1927  		} else {
1928  			/*
1929  			 * if we have active events, we just poll new events
1930  			 * without waiting.
1931  			 */
1932  			evutil_timerclear(&tv);
1933  		}
1934  
1935  		/* If we have no events, we just exit */
1936  		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1937  		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1938  			event_debug(("%s: no events registered.", __func__));
1939  			retval = 1;
1940  			goto done;
1941  		}
1942  
1943  		event_queue_make_later_events_active(base);
1944  
1945  		clear_time_cache(base);
1946  
1947  		res = evsel->dispatch(base, tv_p);
1948  
1949  		if (res == -1) {
1950  			event_debug(("%s: dispatch returned unsuccessfully.",
1951  				__func__));
1952  			retval = -1;
1953  			goto done;
1954  		}
1955  
1956  		update_time_cache(base);
1957  
1958  		timeout_process(base);
1959  
1960  		if (N_ACTIVE_CALLBACKS(base)) {
1961  			int n = event_process_active(base);
1962  			if ((flags & EVLOOP_ONCE)
1963  			    && N_ACTIVE_CALLBACKS(base) == 0
1964  			    && n != 0)
1965  				done = 1;
1966  		} else if (flags & EVLOOP_NONBLOCK)
1967  			done = 1;
1968  	}
1969  	event_debug(("%s: asked to terminate loop.", __func__));
1970  
1971  done:
1972  	clear_time_cache(base);
1973  	base->running_loop = 0;
1974  
1975  	EVBASE_RELEASE_LOCK(base, th_base_lock);
1976  
1977  	return (retval);
1978  }
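
/* Usage sketch (illustrative, not part of event.c): the flags accepted by
 * event_base_loop() when driving the loop manually instead of through
 * event_base_dispatch().  "base" is assumed to be an initialized event_base.
 *
 *	// Wait for events, run one batch of callbacks, then return:
 *	event_base_loop(base, EVLOOP_ONCE);
 *
 *	// Poll: run whatever is ready right now and never block:
 *	event_base_loop(base, EVLOOP_NONBLOCK);
 *
 *	// Keep looping even when no events are registered; exit only via
 *	// event_base_loopexit()/event_base_loopbreak():
 *	event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);
 */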
1979  
1980  /* One-time callback to implement event_base_once: invokes the user callback,
1981   * then deletes the allocated storage */
1982  static void
1983  event_once_cb(evutil_socket_t fd, short events, void *arg)
1984  {
1985  	struct event_once *eonce = arg;
1986  
1987  	(*eonce->cb)(fd, events, eonce->arg);
1988  	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
1989  	LIST_REMOVE(eonce, next_once);
1990  	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
1991  	event_debug_unassign(&eonce->ev);
1992  	mm_free(eonce);
1993  }
1994  
1995  /* not threadsafe; schedules the event to run once. */
1996  int
1997  event_once(evutil_socket_t fd, short events,
1998      void (*callback)(evutil_socket_t, short, void *),
1999      void *arg, const struct timeval *tv)
2000  {
2001  	return event_base_once(current_base, fd, events, callback, arg, tv);
2002  }
2003  
2004  /* Schedules an event once */
2005  int
2006  event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2007      void (*callback)(evutil_socket_t, short, void *),
2008      void *arg, const struct timeval *tv)
2009  {
2010  	struct event_once *eonce;
2011  	int res = 0;
2012  	int activate = 0;
2013  
2014  	/* We cannot support signals that just fire once, or persistent
2015  	 * events. */
2016  	if (events & (EV_SIGNAL|EV_PERSIST))
2017  		return (-1);
2018  
2019  	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
2020  		return (-1);
2021  
2022  	eonce->cb = callback;
2023  	eonce->arg = arg;
2024  
2025  	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
2026  		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
2027  
2028  		if (tv == NULL || ! evutil_timerisset(tv)) {
2029  			/* If the event is going to become active immediately,
2030  			 * don't put it on the timeout queue.  This is one
2031  			 * idiom for scheduling a callback, so let's make
2032  			 * it fast (and order-preserving). */
2033  			activate = 1;
2034  		}
2035  	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
2036  		events &= EV_READ|EV_WRITE|EV_CLOSED;
2037  
2038  		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2039  	} else {
2040  		/* Bad event combination */
2041  		mm_free(eonce);
2042  		return (-1);
2043  	}
2044  
2045  	if (res == 0) {
2046  		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2047  		if (activate)
2048  			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2049  		else
2050  			res = event_add_nolock_(&eonce->ev, tv, 0);
2051  
2052  		if (res != 0) {
2053  			mm_free(eonce);
2054  			return (res);
2055  		} else {
2056  			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2057  		}
2058  		EVBASE_RELEASE_LOCK(base, th_base_lock);
2059  	}
2060  
2061  	return (0);
2062  }
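
/* Usage sketch (illustrative, not part of event.c): scheduling a one-shot
 * callback with event_base_once().  The caller never manages a struct event;
 * the internal storage is freed after the callback runs.  As checked above,
 * EV_SIGNAL and EV_PERSIST are rejected.  "base" is assumed to exist.
 *
 *	static void once_cb(evutil_socket_t fd, short what, void *arg)
 *	{
 *		// runs exactly once, about two seconds after being scheduled
 *	}
 *
 *	struct timeval two_sec = { 2, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, once_cb, NULL, &two_sec);
 */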
2063  
2064  int
2065  event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2066  {
2067  	if (!base)
2068  		base = current_base;
2069  	if (arg == &event_self_cbarg_ptr_)
2070  		arg = ev;
2071  
2072  	event_debug_assert_not_added_(ev);
2073  
2074  	ev->ev_base = base;
2075  
2076  	ev->ev_callback = callback;
2077  	ev->ev_arg = arg;
2078  	ev->ev_fd = fd;
2079  	ev->ev_events = events;
2080  	ev->ev_res = 0;
2081  	ev->ev_flags = EVLIST_INIT;
2082  	ev->ev_ncalls = 0;
2083  	ev->ev_pncalls = NULL;
2084  
2085  	if (events & EV_SIGNAL) {
2086  		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2087  			event_warnx("%s: EV_SIGNAL is not compatible with "
2088  			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2089  			return -1;
2090  		}
2091  		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2092  	} else {
2093  		if (events & EV_PERSIST) {
2094  			evutil_timerclear(&ev->ev_io_timeout);
2095  			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2096  		} else {
2097  			ev->ev_closure = EV_CLOSURE_EVENT;
2098  		}
2099  	}
2100  
2101  	min_heap_elem_init_(ev);
2102  
2103  	if (base != NULL) {
2104  		/* by default, we put new events into the middle priority */
2105  		ev->ev_pri = base->nactivequeues / 2;
2106  	}
2107  
2108  	event_debug_note_setup_(ev);
2109  
2110  	return 0;
2111  }
2112  
2113  int
2114  event_base_set(struct event_base *base, struct event *ev)
2115  {
2116  	/* Only innocent events may be assigned to a different base */
2117  	if (ev->ev_flags != EVLIST_INIT)
2118  		return (-1);
2119  
2120  	event_debug_assert_is_setup_(ev);
2121  
2122  	ev->ev_base = base;
2123  	ev->ev_pri = base->nactivequeues/2;
2124  
2125  	return (0);
2126  }
2127  
2128  void
2129  event_set(struct event *ev, evutil_socket_t fd, short events,
2130  	  void (*callback)(evutil_socket_t, short, void *), void *arg)
2131  {
2132  	int r;
2133  	r = event_assign(ev, current_base, fd, events, callback, arg);
2134  	EVUTIL_ASSERT(r == 0);
2135  }
2136  
2137  void *
2138  event_self_cbarg(void)
2139  {
2140  	return &event_self_cbarg_ptr_;
2141  }
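
/* Usage sketch (illustrative, not part of event.c): event_self_cbarg() makes
 * the event itself the callback argument, which is convenient for a
 * persistent event that must delete or re-add itself.  "base" is assumed to
 * be an existing event_base.
 *
 *	static void countdown_cb(evutil_socket_t fd, short what, void *arg)
 *	{
 *		struct event *self = arg;       // the event created below
 *		static int remaining = 3;
 *		if (--remaining == 0)
 *			event_del(self);
 *	}
 *
 *	struct timeval one_sec = { 1, 0 };
 *	struct event *tick = event_new(base, -1, EV_PERSIST, countdown_cb,
 *	    event_self_cbarg());
 *	event_add(tick, &one_sec);
 */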
2142  
2143  struct event *
2144  event_base_get_running_event(struct event_base *base)
2145  {
2146  	struct event *ev = NULL;
2147  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2148  	if (EVBASE_IN_THREAD(base)) {
2149  		struct event_callback *evcb = base->current_event;
2150  		if (evcb->evcb_flags & EVLIST_INIT)
2151  			ev = event_callback_to_event(evcb);
2152  	}
2153  	EVBASE_RELEASE_LOCK(base, th_base_lock);
2154  	return ev;
2155  }
2156  
2157  struct event *
2158  event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2159  {
2160  	struct event *ev;
2161  	ev = mm_malloc(sizeof(struct event));
2162  	if (ev == NULL)
2163  		return (NULL);
2164  	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2165  		mm_free(ev);
2166  		return (NULL);
2167  	}
2168  
2169  	return (ev);
2170  }
2171  
2172  void
2173  event_free(struct event *ev)
2174  {
2175  	/* This is disabled so that events which have been finalized remain a
2176  	 * valid target for event_free(). */
2177  	// event_debug_assert_is_setup_(ev);
2178  
2179  	/* make sure that this event won't be coming back to haunt us. */
2180  	event_del(ev);
2181  	event_debug_note_teardown_(ev);
2182  	mm_free(ev);
2183  
2184  }
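
/* Usage sketch (illustrative, not part of event.c): because event_free()
 * deletes first, it is safe to call on an event that is still pending.
 * "sock" and "read_cb" are assumed to exist elsewhere.
 *
 *	struct event *ev = event_new(base, sock, EV_READ|EV_PERSIST, read_cb, NULL);
 *	event_add(ev, NULL);
 *	// ... later, during shutdown:
 *	event_free(ev);         // implicitly event_del()s, then releases the memory
 */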
2185  
2186  void
2187  event_debug_unassign(struct event *ev)
2188  {
2189  	event_debug_assert_not_added_(ev);
2190  	event_debug_note_teardown_(ev);
2191  
2192  	ev->ev_flags &= ~EVLIST_INIT;
2193  }
2194  
2195  #define EVENT_FINALIZE_FREE_ 0x10000
2196  static int
2197  event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2198  {
2199  	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2200  	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2201  
2202  	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2203  	ev->ev_closure = closure;
2204  	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2205  	event_active_nolock_(ev, EV_FINALIZE, 1);
2206  	ev->ev_flags |= EVLIST_FINALIZING;
2207  	return 0;
2208  }
2209  
2210  static int
2211  event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2212  {
2213  	int r;
2214  	struct event_base *base = ev->ev_base;
2215  	if (EVUTIL_FAILURE_CHECK(!base)) {
2216  		event_warnx("%s: event has no event_base set.", __func__);
2217  		return -1;
2218  	}
2219  
2220  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2221  	r = event_finalize_nolock_(base, flags, ev, cb);
2222  	EVBASE_RELEASE_LOCK(base, th_base_lock);
2223  	return r;
2224  }
2225  
2226  int
2227  event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2228  {
2229  	return event_finalize_impl_(flags, ev, cb);
2230  }
2231  
2232  int
2233  event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2234  {
2235  	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2236  }
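
/* Usage sketch (illustrative, not part of event.c): finalizers are the safe
 * way to tear down an event whose callback may still be running in the loop
 * thread.  The finalize callback runs from the loop once the event can no
 * longer fire; with event_free_finalize() the struct event is then freed as
 * well.  "conn" is a hypothetical per-connection structure that was passed as
 * the event's callback argument.
 *
 *	static void conn_finalize(struct event *ev, void *arg)
 *	{
 *		struct connection *conn = arg;  // the event's callback argument
 *		free(conn);                     // safe: the callback cannot run again
 *	}
 *
 *	// instead of event_del() + event_free() from a foreign thread:
 *	event_free_finalize(0, conn->read_event, conn_finalize);
 */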
2237  
2238  void
2239  event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2240  {
2241  	struct event *ev = NULL;
2242  	if (evcb->evcb_flags & EVLIST_INIT) {
2243  		ev = event_callback_to_event(evcb);
2244  		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2245  	} else {
2246  		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2247  	}
2248  
2249  	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2250  	evcb->evcb_cb_union.evcb_cbfinalize = cb;
2251  	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2252  	evcb->evcb_flags |= EVLIST_FINALIZING;
2253  }
2254  
2255  void
2256  event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2257  {
2258  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2259  	event_callback_finalize_nolock_(base, flags, evcb, cb);
2260  	EVBASE_RELEASE_LOCK(base, th_base_lock);
2261  }
2262  
2263  /** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
2264   * callback will be invoked on *one of them*, after they have *all* been
2265   * finalized. */
2266  int
2267  event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2268  {
2269  	int n_pending = 0, i;
2270  
2271  	if (base == NULL)
2272  		base = current_base;
2273  
2274  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2275  
2276  	event_debug(("%s: %d events finalizing", __func__, n_cbs));
2277  
2278  	/* At most one can be currently executing; the rest we just
2279  	 * cancel... But we always make sure that the finalize callback
2280  	 * runs. */
2281  	for (i = 0; i < n_cbs; ++i) {
2282  		struct event_callback *evcb = evcbs[i];
2283  		if (evcb == base->current_event) {
2284  			event_callback_finalize_nolock_(base, 0, evcb, cb);
2285  			++n_pending;
2286  		} else {
2287  			event_callback_cancel_nolock_(base, evcb, 0);
2288  		}
2289  	}
2290  
2291  	if (n_pending == 0) {
2292  		/* Just do the first one. */
2293  		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2294  	}
2295  
2296  	EVBASE_RELEASE_LOCK(base, th_base_lock);
2297  	return 0;
2298  }
2299  
2300  /*
2301   * Sets the priority of an event - if an event is already active,
2302   * changing the priority will fail.
2303   */
2304  
2305  int
2306  event_priority_set(struct event *ev, int pri)
2307  {
2308  	event_debug_assert_is_setup_(ev);
2309  
2310  	if (ev->ev_flags & EVLIST_ACTIVE)
2311  		return (-1);
2312  	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2313  		return (-1);
2314  
2315  	ev->ev_pri = pri;
2316  
2317  	return (0);
2318  }
2319  
2320  /*
2321   * Checks if a specific event is pending or scheduled.
2322   */
2323  
2324  int
2325  event_pending(const struct event *ev, short event, struct timeval *tv)
2326  {
2327  	int flags = 0;
2328  
2329  	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2330  		event_warnx("%s: event has no event_base set.", __func__);
2331  		return 0;
2332  	}
2333  
2334  	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2335  	event_debug_assert_is_setup_(ev);
2336  
2337  	if (ev->ev_flags & EVLIST_INSERTED)
2338  		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2339  	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2340  		flags |= ev->ev_res;
2341  	if (ev->ev_flags & EVLIST_TIMEOUT)
2342  		flags |= EV_TIMEOUT;
2343  
2344  	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2345  
2346  	/* See if there is a timeout that we should report */
2347  	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2348  		struct timeval tmp = ev->ev_timeout;
2349  		tmp.tv_usec &= MICROSECONDS_MASK;
2350  		/* correctly remap to real time */
2351  		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2352  	}
2353  
2354  	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2355  
2356  	return (flags & event);
2357  }
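
/* Usage sketch (illustrative, not part of event.c): querying what an event is
 * waiting for and when its timeout expires.  "ev" is assumed to have been
 * added with a timeout.
 *
 *	struct timeval expires;
 *	if (event_pending(ev, EV_READ|EV_WRITE|EV_TIMEOUT, &expires)) {
 *		// the event is pending; if EV_TIMEOUT is among the returned
 *		// flags, "expires" now holds the absolute expiry time
 *	}
 */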
2358  
2359  int
2360  event_initialized(const struct event *ev)
2361  {
2362  	if (!(ev->ev_flags & EVLIST_INIT))
2363  		return 0;
2364  
2365  	return 1;
2366  }
2367  
2368  void
2369  event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2370  {
2371  	event_debug_assert_is_setup_(event);
2372  
2373  	if (base_out)
2374  		*base_out = event->ev_base;
2375  	if (fd_out)
2376  		*fd_out = event->ev_fd;
2377  	if (events_out)
2378  		*events_out = event->ev_events;
2379  	if (callback_out)
2380  		*callback_out = event->ev_callback;
2381  	if (arg_out)
2382  		*arg_out = event->ev_arg;
2383  }
2384  
2385  size_t
2386  event_get_struct_event_size(void)
2387  {
2388  	return sizeof(struct event);
2389  }
2390  
2391  evutil_socket_t
2392  event_get_fd(const struct event *ev)
2393  {
2394  	event_debug_assert_is_setup_(ev);
2395  	return ev->ev_fd;
2396  }
2397  
2398  struct event_base *
2399  event_get_base(const struct event *ev)
2400  {
2401  	event_debug_assert_is_setup_(ev);
2402  	return ev->ev_base;
2403  }
2404  
2405  short
2406  event_get_events(const struct event *ev)
2407  {
2408  	event_debug_assert_is_setup_(ev);
2409  	return ev->ev_events;
2410  }
2411  
2412  event_callback_fn
2413  event_get_callback(const struct event *ev)
2414  {
2415  	event_debug_assert_is_setup_(ev);
2416  	return ev->ev_callback;
2417  }
2418  
2419  void *
2420  event_get_callback_arg(const struct event *ev)
2421  {
2422  	event_debug_assert_is_setup_(ev);
2423  	return ev->ev_arg;
2424  }
2425  
2426  int
2427  event_get_priority(const struct event *ev)
2428  {
2429  	event_debug_assert_is_setup_(ev);
2430  	return ev->ev_pri;
2431  }
2432  
2433  int
2434  event_add(struct event *ev, const struct timeval *tv)
2435  {
2436  	int res;
2437  
2438  	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2439  		event_warnx("%s: event has no event_base set.", __func__);
2440  		return -1;
2441  	}
2442  
2443  	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2444  
2445  	res = event_add_nolock_(ev, tv, 0);
2446  
2447  	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2448  
2449  	return (res);
2450  }
2451  
2452  /* Helper callback: wake an event_base from another thread.  This version
2453   * works by writing a byte to one end of a socketpair, so that the event_base
2454   * listening on the other end will wake up as the corresponding event
2455   * triggers */
2456  static int
2457  evthread_notify_base_default(struct event_base *base)
2458  {
2459  	char buf[1];
2460  	int r;
2461  	buf[0] = (char) 0;
2462  #ifdef _WIN32
2463  	r = send(base->th_notify_fd[1], buf, 1, 0);
2464  #else
2465  	r = write(base->th_notify_fd[1], buf, 1);
2466  #endif
2467  	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2468  }
2469  
2470  #ifdef EVENT__HAVE_EVENTFD
2471  /* Helper callback: wake an event_base from another thread.  This version
2472   * assumes that you have a working eventfd() implementation. */
2473  static int
2474  evthread_notify_base_eventfd(struct event_base *base)
2475  {
2476  	ev_uint64_t msg = 1;
2477  	int r;
2478  	do {
2479  		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2480  	} while (r < 0 && errno == EAGAIN);
2481  
2482  	return (r < 0) ? -1 : 0;
2483  }
2484  #endif
2485  
2486  
2487  /** Tell the thread currently running the event_loop for base (if any) that it
2488   * needs to stop waiting in its dispatch function (if it is) and process all
2489   * active callbacks. */
2490  static int
2491  evthread_notify_base(struct event_base *base)
2492  {
2493  	EVENT_BASE_ASSERT_LOCKED(base);
2494  	if (!base->th_notify_fn)
2495  		return -1;
2496  	if (base->is_notify_pending)
2497  		return 0;
2498  	base->is_notify_pending = 1;
2499  	return base->th_notify_fn(base);
2500  }
2501  
2502  /* Implementation function to remove a timeout on a currently pending event.
2503   */
2504  int
2505  event_remove_timer_nolock_(struct event *ev)
2506  {
2507  	struct event_base *base = ev->ev_base;
2508  
2509  	EVENT_BASE_ASSERT_LOCKED(base);
2510  	event_debug_assert_is_setup_(ev);
2511  
2512  	event_debug(("event_remove_timer_nolock: event: %p", ev));
2513  
2514  	/* If it's not pending on a timeout, we don't need to do anything. */
2515  	if (ev->ev_flags & EVLIST_TIMEOUT) {
2516  		event_queue_remove_timeout(base, ev);
2517  		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2518  	}
2519  
2520  	return (0);
2521  }
2522  
2523  int
2524  event_remove_timer(struct event *ev)
2525  {
2526  	int res;
2527  
2528  	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2529  		event_warnx("%s: event has no event_base set.", __func__);
2530  		return -1;
2531  	}
2532  
2533  	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2534  
2535  	res = event_remove_timer_nolock_(ev);
2536  
2537  	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2538  
2539  	return (res);
2540  }
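
/* Usage sketch (illustrative, not part of event.c): event_remove_timer()
 * drops only the timeout and leaves any EV_READ/EV_WRITE registration in
 * place, e.g. to cancel an idle timeout once data has arrived.  "read_ev" is
 * assumed to be a pending read event.
 *
 *	struct timeval idle = { 30, 0 };
 *	event_add(read_ev, &idle);      // fire EV_TIMEOUT after 30s of silence
 *	// ... once traffic is flowing again:
 *	event_remove_timer(read_ev);    // keep waiting for EV_READ, no timeout
 */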
2541  
2542  /* Implementation function to add an event.  Works just like event_add,
2543   * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2544   * we treat tv as an absolute time, not as an interval to add to the current
2545   * time */
2546  int
2547  event_add_nolock_(struct event *ev, const struct timeval *tv,
2548      int tv_is_absolute)
2549  {
2550  	struct event_base *base = ev->ev_base;
2551  	int res = 0;
2552  	int notify = 0;
2553  
2554  	EVENT_BASE_ASSERT_LOCKED(base);
2555  	event_debug_assert_is_setup_(ev);
2556  
2557  	event_debug((
2558  		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2559  		 ev,
2560  		 EV_SOCK_ARG(ev->ev_fd),
2561  		 ev->ev_events & EV_READ ? "EV_READ " : " ",
2562  		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2563  		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2564  		 tv ? "EV_TIMEOUT " : " ",
2565  		 ev->ev_callback));
2566  
2567  	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2568  
2569  	if (ev->ev_flags & EVLIST_FINALIZING) {
2570  		/* XXXX debug */
2571  		return (-1);
2572  	}
2573  
2574  	/*
2575  	 * prepare for timeout insertion further below; if we get a
2576  	 * failure at any step, we should not change any state.
2577  	 */
2578  	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2579  		if (min_heap_reserve_(&base->timeheap,
2580  			1 + min_heap_size_(&base->timeheap)) == -1)
2581  			return (-1);  /* ENOMEM == errno */
2582  	}
2583  
2584  	/* If the main thread is currently executing a signal event's
2585  	 * callback, and we are not the main thread, then we want to wait
2586  	 * until the callback is done before we mess with the event, or else
2587  	 * we can race on ev_ncalls and ev_pncalls below. */
2588  #ifndef EVENT__DISABLE_THREAD_SUPPORT
2589  	if (base->current_event == event_to_event_callback(ev) &&
2590  	    (ev->ev_events & EV_SIGNAL)
2591  	    && !EVBASE_IN_THREAD(base)) {
2592  		++base->current_event_waiters;
2593  		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2594  	}
2595  #endif
2596  
2597  	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2598  	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2599  		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2600  			res = evmap_io_add_(base, ev->ev_fd, ev);
2601  		else if (ev->ev_events & EV_SIGNAL)
2602  			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2603  		if (res != -1)
2604  			event_queue_insert_inserted(base, ev);
2605  		if (res == 1) {
2606  			/* evmap says we need to notify the main thread. */
2607  			notify = 1;
2608  			res = 0;
2609  		}
2610  	}
2611  
2612  	/*
2613  	 * we should change the timeout state only if the previous event
2614  	 * addition succeeded.
2615  	 */
2616  	if (res != -1 && tv != NULL) {
2617  		struct timeval now;
2618  		int common_timeout;
2619  #ifdef USE_REINSERT_TIMEOUT
2620  		int was_common;
2621  		int old_timeout_idx;
2622  #endif
2623  
2624  		/*
2625  		 * for persistent timeout events, we remember the
2626  		 * timeout value and re-add the event.
2627  		 *
2628  		 * If tv_is_absolute, this was already set.
2629  		 */
2630  		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2631  			ev->ev_io_timeout = *tv;
2632  
2633  #ifndef USE_REINSERT_TIMEOUT
2634  		if (ev->ev_flags & EVLIST_TIMEOUT) {
2635  			event_queue_remove_timeout(base, ev);
2636  		}
2637  #endif
2638  
2639  		/* Check if it is active due to a timeout.  Rescheduling
2640  		 * this timeout before the callback can be executed
2641  		 * removes it from the active list. */
2642  		if ((ev->ev_flags & EVLIST_ACTIVE) &&
2643  		    (ev->ev_res & EV_TIMEOUT)) {
2644  			if (ev->ev_events & EV_SIGNAL) {
2645  				/* See if we are just active executing
2646  				 * this event in a loop
2647  				 */
2648  				if (ev->ev_ncalls && ev->ev_pncalls) {
2649  					/* Abort loop */
2650  					*ev->ev_pncalls = 0;
2651  				}
2652  			}
2653  
2654  			event_queue_remove_active(base, event_to_event_callback(ev));
2655  		}
2656  
2657  		gettime(base, &now);
2658  
2659  		common_timeout = is_common_timeout(tv, base);
2660  #ifdef USE_REINSERT_TIMEOUT
2661  		was_common = is_common_timeout(&ev->ev_timeout, base);
2662  		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2663  #endif
2664  
2665  		if (tv_is_absolute) {
2666  			ev->ev_timeout = *tv;
2667  		} else if (common_timeout) {
2668  			struct timeval tmp = *tv;
2669  			tmp.tv_usec &= MICROSECONDS_MASK;
2670  			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2671  			ev->ev_timeout.tv_usec |=
2672  			    (tv->tv_usec & ~MICROSECONDS_MASK);
2673  		} else {
2674  			evutil_timeradd(&now, tv, &ev->ev_timeout);
2675  		}
2676  
2677  		event_debug((
2678  			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2679  			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2680  
2681  #ifdef USE_REINSERT_TIMEOUT
2682  		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2683  #else
2684  		event_queue_insert_timeout(base, ev);
2685  #endif
2686  
2687  		if (common_timeout) {
2688  			struct common_timeout_list *ctl =
2689  			    get_common_timeout_list(base, &ev->ev_timeout);
2690  			if (ev == TAILQ_FIRST(&ctl->events)) {
2691  				common_timeout_schedule(ctl, &now, ev);
2692  			}
2693  		} else {
2694  			struct event* top = NULL;
2695  			/* See if the earliest timeout is now earlier than it
2696  			 * was before: if so, we will need to tell the main
2697  			 * thread to wake up earlier than it would otherwise.
2698  			 * We double check the timeout of the top element to
2699  			 * handle time distortions due to system suspension.
2700  			 */
2701  			if (min_heap_elt_is_top_(ev))
2702  				notify = 1;
2703  			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2704  					 evutil_timercmp(&top->ev_timeout, &now, <))
2705  				notify = 1;
2706  		}
2707  	}
2708  
2709  	/* if we are not in the right thread, we need to wake up the loop */
2710  	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2711  		evthread_notify_base(base);
2712  
2713  	event_debug_note_add_(ev);
2714  
2715  	return (res);
2716  }
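
/* Usage sketch (illustrative, not part of event.c): the common-timeout path
 * above is reached from the public API through
 * event_base_init_common_timeout(), which is worthwhile when very many events
 * share the same interval.  "conn->ev" is a hypothetical pending event.
 *
 *	struct timeval duration = { 60, 0 };
 *	const struct timeval *common =
 *	    event_base_init_common_timeout(base, &duration);
 *
 *	// Use the returned timeval (its tv_usec carries the magic tag checked
 *	// by is_common_timeout()) for every event sharing the 60s timeout:
 *	event_add(conn->ev, common);
 */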
2717  
2718  static int
2719  event_del_(struct event *ev, int blocking)
2720  {
2721  	int res;
2722  
2723  	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2724  		event_warnx("%s: event has no event_base set.", __func__);
2725  		return -1;
2726  	}
2727  
2728  	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2729  
2730  	res = event_del_nolock_(ev, blocking);
2731  
2732  	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2733  
2734  	return (res);
2735  }
2736  
2737  int
2738  event_del(struct event *ev)
2739  {
2740  	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2741  }
2742  
2743  int
2744  event_del_block(struct event *ev)
2745  {
2746  	return event_del_(ev, EVENT_DEL_BLOCK);
2747  }
2748  
2749  int
2750  event_del_noblock(struct event *ev)
2751  {
2752  	return event_del_(ev, EVENT_DEL_NOBLOCK);
2753  }
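
/* Usage sketch (illustrative, not part of event.c): choosing a deletion
 * variant from a thread other than the one running the loop.
 * event_del_block() waits for a currently-running callback to return, so its
 * resources can be freed immediately afterwards; event_del_noblock() never
 * waits; plain event_del() waits unless the event was created with
 * EV_FINALIZE.  "conn" is a hypothetical per-connection structure.
 *
 *	event_del_block(conn->read_event);
 *	free(conn);                     // safe: the callback is no longer running
 */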
2754  
2755  /** Helper for event_del: always called with th_base_lock held.
2756   *
2757   * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2758   * EVEN_IF_FINALIZING} values. See those for more information.
2759   */
2760  int
2761  event_del_nolock_(struct event *ev, int blocking)
2762  {
2763  	struct event_base *base;
2764  	int res = 0, notify = 0;
2765  
2766  	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2767  		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2768  
2769  	/* An event without a base has not been added */
2770  	if (ev->ev_base == NULL)
2771  		return (-1);
2772  
2773  	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2774  
2775  	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2776  		if (ev->ev_flags & EVLIST_FINALIZING) {
2777  			/* XXXX Debug */
2778  			return 0;
2779  		}
2780  	}
2781  
2782  	/* If the main thread is currently executing this event's callback,
2783  	 * and we are not the main thread, then we want to wait until the
2784  	 * callback is done before we start removing the event.  That way,
2785  	 * when this function returns, it will be safe to free the
2786  	 * user-supplied argument. */
2787  	base = ev->ev_base;
2788  #ifndef EVENT__DISABLE_THREAD_SUPPORT
2789  	if (blocking != EVENT_DEL_NOBLOCK &&
2790  	    base->current_event == event_to_event_callback(ev) &&
2791  	    !EVBASE_IN_THREAD(base) &&
2792  	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2793  		++base->current_event_waiters;
2794  		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2795  	}
2796  #endif
2797  
2798  	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2799  
2800  	/* See if we are just active executing this event in a loop */
2801  	if (ev->ev_events & EV_SIGNAL) {
2802  		if (ev->ev_ncalls && ev->ev_pncalls) {
2803  			/* Abort loop */
2804  			*ev->ev_pncalls = 0;
2805  		}
2806  	}
2807  
2808  	if (ev->ev_flags & EVLIST_TIMEOUT) {
2809  		/* NOTE: We never need to notify the main thread because of a
2810  		 * deleted timeout event: all that could happen if we don't is
2811  		 * that the dispatch loop might wake up too early.  But the
2812  		 * point of notifying the main thread _is_ to wake up the
2813  		 * dispatch loop early anyway, so we wouldn't gain anything by
2814  		 * doing it.
2815  		 */
2816  		event_queue_remove_timeout(base, ev);
2817  	}
2818  
2819  	if (ev->ev_flags & EVLIST_ACTIVE)
2820  		event_queue_remove_active(base, event_to_event_callback(ev));
2821  	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2822  		event_queue_remove_active_later(base, event_to_event_callback(ev));
2823  
2824  	if (ev->ev_flags & EVLIST_INSERTED) {
2825  		event_queue_remove_inserted(base, ev);
2826  		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2827  			res = evmap_io_del_(base, ev->ev_fd, ev);
2828  		else
2829  			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2830  		if (res == 1) {
2831  			/* evmap says we need to notify the main thread. */
2832  			notify = 1;
2833  			res = 0;
2834  		}
2835  	}
2836  
2837  	/* if we are not in the right thread, we need to wake up the loop */
2838  	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2839  		evthread_notify_base(base);
2840  
2841  	event_debug_note_del_(ev);
2842  
2843  	return (res);
2844  }
2845  
2846  void
2847  event_active(struct event *ev, int res, short ncalls)
2848  {
2849  	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2850  		event_warnx("%s: event has no event_base set.", __func__);
2851  		return;
2852  	}
2853  
2854  	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2855  
2856  	event_debug_assert_is_setup_(ev);
2857  
2858  	event_active_nolock_(ev, res, ncalls);
2859  
2860  	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2861  }
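
/* Usage sketch (illustrative, not part of event.c): event_active() injects an
 * event by hand, as if its conditions had occurred.  With threading support
 * initialized (e.g. evthread_use_pthreads()), it is a common way to hand work
 * to the loop thread.  "work_queue", "queue_push", "job" and "wakeup_ev" are
 * hypothetical names.
 *
 *	// producer thread:
 *	queue_push(work_queue, job);            // make the work visible
 *	event_active(wakeup_ev, EV_READ, 0);    // run wakeup_ev's callback soon
 */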
2862  
2863  
2864  void
2865  event_active_nolock_(struct event *ev, int res, short ncalls)
2866  {
2867  	struct event_base *base;
2868  
2869  	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2870  		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2871  
2872  	base = ev->ev_base;
2873  	EVENT_BASE_ASSERT_LOCKED(base);
2874  
2875  	if (ev->ev_flags & EVLIST_FINALIZING) {
2876  		/* XXXX debug */
2877  		return;
2878  	}
2879  
2880  	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2881  	default:
2882  	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2883  		EVUTIL_ASSERT(0);
2884  		break;
2885  	case EVLIST_ACTIVE:
2886  		/* We get different kinds of events, add them together */
2887  		ev->ev_res |= res;
2888  		return;
2889  	case EVLIST_ACTIVE_LATER:
2890  		ev->ev_res |= res;
2891  		break;
2892  	case 0:
2893  		ev->ev_res = res;
2894  		break;
2895  	}
2896  
2897  	if (ev->ev_pri < base->event_running_priority)
2898  		base->event_continue = 1;
2899  
2900  	if (ev->ev_events & EV_SIGNAL) {
2901  #ifndef EVENT__DISABLE_THREAD_SUPPORT
2902  		if (base->current_event == event_to_event_callback(ev) &&
2903  		    !EVBASE_IN_THREAD(base)) {
2904  			++base->current_event_waiters;
2905  			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2906  		}
2907  #endif
2908  		ev->ev_ncalls = ncalls;
2909  		ev->ev_pncalls = NULL;
2910  	}
2911  
2912  	event_callback_activate_nolock_(base, event_to_event_callback(ev));
2913  }
2914  
2915  void
2916  event_active_later_(struct event *ev, int res)
2917  {
2918  	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2919  	event_active_later_nolock_(ev, res);
2920  	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2921  }
2922  
2923  void
2924  event_active_later_nolock_(struct event *ev, int res)
2925  {
2926  	struct event_base *base = ev->ev_base;
2927  	EVENT_BASE_ASSERT_LOCKED(base);
2928  
2929  	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2930  		/* We get different kinds of events, add them together */
2931  		ev->ev_res |= res;
2932  		return;
2933  	}
2934  
2935  	ev->ev_res = res;
2936  
2937  	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2938  }
2939  
2940  int
2941  event_callback_activate_(struct event_base *base,
2942      struct event_callback *evcb)
2943  {
2944  	int r;
2945  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2946  	r = event_callback_activate_nolock_(base, evcb);
2947  	EVBASE_RELEASE_LOCK(base, th_base_lock);
2948  	return r;
2949  }
2950  
2951  int
2952  event_callback_activate_nolock_(struct event_base *base,
2953      struct event_callback *evcb)
2954  {
2955  	int r = 1;
2956  
2957  	if (evcb->evcb_flags & EVLIST_FINALIZING)
2958  		return 0;
2959  
2960  	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2961  	default:
2962  		EVUTIL_ASSERT(0);
2963  	case EVLIST_ACTIVE_LATER:
2964  		event_queue_remove_active_later(base, evcb);
2965  		r = 0;
2966  		break;
2967  	case EVLIST_ACTIVE:
2968  		return 0;
2969  	case 0:
2970  		break;
2971  	}
2972  
2973  	event_queue_insert_active(base, evcb);
2974  
2975  	if (EVBASE_NEED_NOTIFY(base))
2976  		evthread_notify_base(base);
2977  
2978  	return r;
2979  }
2980  
2981  int
2982  event_callback_activate_later_nolock_(struct event_base *base,
2983      struct event_callback *evcb)
2984  {
2985  	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2986  		return 0;
2987  
2988  	event_queue_insert_active_later(base, evcb);
2989  	if (EVBASE_NEED_NOTIFY(base))
2990  		evthread_notify_base(base);
2991  	return 1;
2992  }
2993  
2994  void
2995  event_callback_init_(struct event_base *base,
2996      struct event_callback *cb)
2997  {
2998  	memset(cb, 0, sizeof(*cb));
2999  	cb->evcb_pri = base->nactivequeues - 1;
3000  }
3001  
3002  int
3003  event_callback_cancel_(struct event_base *base,
3004      struct event_callback *evcb)
3005  {
3006  	int r;
3007  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3008  	r = event_callback_cancel_nolock_(base, evcb, 0);
3009  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3010  	return r;
3011  }
3012  
3013  int
3014  event_callback_cancel_nolock_(struct event_base *base,
3015      struct event_callback *evcb, int even_if_finalizing)
3016  {
3017  	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3018  		return 0;
3019  
3020  	if (evcb->evcb_flags & EVLIST_INIT)
3021  		return event_del_nolock_(event_callback_to_event(evcb),
3022  		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3023  
3024  	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3025  	default:
3026  	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3027  		EVUTIL_ASSERT(0);
3028  		break;
3029  	case EVLIST_ACTIVE:
3030  		/* We get different kinds of events, add them together */
3031  		event_queue_remove_active(base, evcb);
3032  		return 0;
3033  	case EVLIST_ACTIVE_LATER:
3034  		event_queue_remove_active_later(base, evcb);
3035  		break;
3036  	case 0:
3037  		break;
3038  	}
3039  
3040  	return 0;
3041  }
3042  
3043  void
3044  event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3045  {
3046  	memset(cb, 0, sizeof(*cb));
3047  	cb->evcb_cb_union.evcb_selfcb = fn;
3048  	cb->evcb_arg = arg;
3049  	cb->evcb_pri = priority;
3050  	cb->evcb_closure = EV_CLOSURE_CB_SELF;
3051  }
3052  
3053  void
3054  event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3055  {
3056  	cb->evcb_pri = priority;
3057  }
3058  
3059  void
3060  event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3061  {
3062  	if (!base)
3063  		base = current_base;
3064  	event_callback_cancel_(base, cb);
3065  }
3066  
3067  #define MAX_DEFERREDS_QUEUED 32
3068  int
3069  event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3070  {
3071  	int r = 1;
3072  	if (!base)
3073  		base = current_base;
3074  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3075  	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3076  		r = event_callback_activate_later_nolock_(base, cb);
3077  	} else {
3078  		r = event_callback_activate_nolock_(base, cb);
3079  		if (r) {
3080  			++base->n_deferreds_queued;
3081  		}
3082  	}
3083  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3084  	return r;
3085  }
3086  
3087  static int
3088  timeout_next(struct event_base *base, struct timeval **tv_p)
3089  {
3090  	/* Caller must hold th_base_lock */
3091  	struct timeval now;
3092  	struct event *ev;
3093  	struct timeval *tv = *tv_p;
3094  	int res = 0;
3095  
3096  	ev = min_heap_top_(&base->timeheap);
3097  
3098  	if (ev == NULL) {
3099  		/* if no time-based events are active wait for I/O */
3100  		*tv_p = NULL;
3101  		goto out;
3102  	}
3103  
3104  	if (gettime(base, &now) == -1) {
3105  		res = -1;
3106  		goto out;
3107  	}
3108  
3109  	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3110  		evutil_timerclear(tv);
3111  		goto out;
3112  	}
3113  
3114  	evutil_timersub(&ev->ev_timeout, &now, tv);
3115  
3116  	EVUTIL_ASSERT(tv->tv_sec >= 0);
3117  	EVUTIL_ASSERT(tv->tv_usec >= 0);
3118  	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3119  
3120  out:
3121  	return (res);
3122  }
3123  
3124  /* Activate every event whose timeout has elapsed. */
3125  static void
3126  timeout_process(struct event_base *base)
3127  {
3128  	/* Caller must hold lock. */
3129  	struct timeval now;
3130  	struct event *ev;
3131  
3132  	if (min_heap_empty_(&base->timeheap)) {
3133  		return;
3134  	}
3135  
3136  	gettime(base, &now);
3137  
3138  	while ((ev = min_heap_top_(&base->timeheap))) {
3139  		if (evutil_timercmp(&ev->ev_timeout, &now, >))
3140  			break;
3141  
3142  		/* delete this event from the I/O queues */
3143  		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3144  
3145  		event_debug(("timeout_process: event: %p, call %p",
3146  			 ev, ev->ev_callback));
3147  		event_active_nolock_(ev, EV_TIMEOUT, 1);
3148  	}
3149  }
3150  
3151  #if (EVLIST_INTERNAL >> 4) != 1
3152  #error "Mismatch for value of EVLIST_INTERNAL"
3153  #endif
3154  
3155  #ifndef MAX
3156  #define MAX(a,b) (((a)>(b))?(a):(b))
3157  #endif
3158  
3159  #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3160  
3161  /* These are a fancy way to spell
3162       if (!(flags & EVLIST_INTERNAL))
3163           base->event_count--/++;
3164  */
3165  #define DECR_EVENT_COUNT(base,flags) \
3166  	((base)->event_count -= (~((flags) >> 4) & 1))
3167  #define INCR_EVENT_COUNT(base,flags) do {					\
3168  	((base)->event_count += (~((flags) >> 4) & 1));				\
3169  	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);		\
3170  } while (0)
3171  
3172  static void
3173  event_queue_remove_inserted(struct event_base *base, struct event *ev)
3174  {
3175  	EVENT_BASE_ASSERT_LOCKED(base);
3176  	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3177  		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3178  		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3179  		return;
3180  	}
3181  	DECR_EVENT_COUNT(base, ev->ev_flags);
3182  	ev->ev_flags &= ~EVLIST_INSERTED;
3183  }
3184  static void
3185  event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3186  {
3187  	EVENT_BASE_ASSERT_LOCKED(base);
3188  	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3189  		event_errx(1, "%s: %p not on queue %x", __func__,
3190  			   evcb, EVLIST_ACTIVE);
3191  		return;
3192  	}
3193  	DECR_EVENT_COUNT(base, evcb->evcb_flags);
3194  	evcb->evcb_flags &= ~EVLIST_ACTIVE;
3195  	base->event_count_active--;
3196  
3197  	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3198  	    evcb, evcb_active_next);
3199  }
3200  static void
3201  event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3202  {
3203  	EVENT_BASE_ASSERT_LOCKED(base);
3204  	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3205  		event_errx(1, "%s: %p not on queue %x", __func__,
3206  			   evcb, EVLIST_ACTIVE_LATER);
3207  		return;
3208  	}
3209  	DECR_EVENT_COUNT(base, evcb->evcb_flags);
3210  	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3211  	base->event_count_active--;
3212  
3213  	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3214  }
3215  static void
3216  event_queue_remove_timeout(struct event_base *base, struct event *ev)
3217  {
3218  	EVENT_BASE_ASSERT_LOCKED(base);
3219  	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3220  		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3221  		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3222  		return;
3223  	}
3224  	DECR_EVENT_COUNT(base, ev->ev_flags);
3225  	ev->ev_flags &= ~EVLIST_TIMEOUT;
3226  
3227  	if (is_common_timeout(&ev->ev_timeout, base)) {
3228  		struct common_timeout_list *ctl =
3229  		    get_common_timeout_list(base, &ev->ev_timeout);
3230  		TAILQ_REMOVE(&ctl->events, ev,
3231  		    ev_timeout_pos.ev_next_with_common_timeout);
3232  	} else {
3233  		min_heap_erase_(&base->timeheap, ev);
3234  	}
3235  }
3236  
3237  #ifdef USE_REINSERT_TIMEOUT
3238  /* Remove and reinsert 'ev' into the timeout queue. */
3239  static void
3240  event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3241      int was_common, int is_common, int old_timeout_idx)
3242  {
3243  	struct common_timeout_list *ctl;
3244  	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3245  		event_queue_insert_timeout(base, ev);
3246  		return;
3247  	}
3248  
3249  	switch ((was_common<<1) | is_common) {
3250  	case 3: /* Changing from one common timeout to another */
3251  		ctl = base->common_timeout_queues[old_timeout_idx];
3252  		TAILQ_REMOVE(&ctl->events, ev,
3253  		    ev_timeout_pos.ev_next_with_common_timeout);
3254  		ctl = get_common_timeout_list(base, &ev->ev_timeout);
3255  		insert_common_timeout_inorder(ctl, ev);
3256  		break;
3257  	case 2: /* Was common; is no longer common */
3258  		ctl = base->common_timeout_queues[old_timeout_idx];
3259  		TAILQ_REMOVE(&ctl->events, ev,
3260  		    ev_timeout_pos.ev_next_with_common_timeout);
3261  		min_heap_push_(&base->timeheap, ev);
3262  		break;
3263  	case 1: /* Wasn't common; has become common. */
3264  		min_heap_erase_(&base->timeheap, ev);
3265  		ctl = get_common_timeout_list(base, &ev->ev_timeout);
3266  		insert_common_timeout_inorder(ctl, ev);
3267  		break;
3268  	case 0: /* was in heap; is still on heap. */
3269  		min_heap_adjust_(&base->timeheap, ev);
3270  		break;
3271  	default:
3272  		EVUTIL_ASSERT(0); /* unreachable */
3273  		break;
3274  	}
3275  }
3276  #endif
3277  
3278  /* Add 'ev' to the common timeout list in 'ev'. */
3279  static void
3280  insert_common_timeout_inorder(struct common_timeout_list *ctl,
3281      struct event *ev)
3282  {
3283  	struct event *e;
3284  	/* By all logic, we should just be able to append 'ev' to the end of
3285  	 * ctl->events, since the timeout on each 'ev' is set to {the common
3286  	 * timeout} + {the time when we add the event}, and so the events
3287  	 * should arrive in order of their timeouts.  But just in case
3288  	 * there's some wacky threading issue going on, we do a search from
3289  	 * the end of 'ctl->events' to find the right insertion point.
3290  	 */
3291  	TAILQ_FOREACH_REVERSE(e, &ctl->events,
3292  	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3293  		/* This timercmp is a little sneaky, since both ev and e have
3294  		 * magic values in tv_usec.  Fortunately, they ought to have
3295  		 * the _same_ magic values in tv_usec.  Let's assert for that.
3296  		 */
3297  		EVUTIL_ASSERT(
3298  			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3299  		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3300  			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3301  			    ev_timeout_pos.ev_next_with_common_timeout);
3302  			return;
3303  		}
3304  	}
3305  	TAILQ_INSERT_HEAD(&ctl->events, ev,
3306  	    ev_timeout_pos.ev_next_with_common_timeout);
3307  }
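/* Illustrative sketch: how calling code typically ends up on one of the
 * common-timeout lists kept sorted by the function above.  The duration
 * and the event 'ev' are placeholders; event_base_init_common_timeout()
 * and event_add() are the real public entry points.
 *
 *   struct timeval five_sec = { 5, 0 };
 *   const struct timeval *common =
 *       event_base_init_common_timeout(base, &five_sec);
 *   event_add(ev, common);   // lands on the shared, sorted list above
 *                            // instead of the general min-heap
 */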
3308  
3309  static void
3310  event_queue_insert_inserted(struct event_base *base, struct event *ev)
3311  {
3312  	EVENT_BASE_ASSERT_LOCKED(base);
3313  
3314  	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3315  		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3316  		    ev, EV_SOCK_ARG(ev->ev_fd));
3317  		return;
3318  	}
3319  
3320  	INCR_EVENT_COUNT(base, ev->ev_flags);
3321  
3322  	ev->ev_flags |= EVLIST_INSERTED;
3323  }
3324  
3325  static void
3326  event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3327  {
3328  	EVENT_BASE_ASSERT_LOCKED(base);
3329  
3330  	if (evcb->evcb_flags & EVLIST_ACTIVE) {
3331  		/* Double insertion is possible for active events */
3332  		return;
3333  	}
3334  
3335  	INCR_EVENT_COUNT(base, evcb->evcb_flags);
3336  
3337  	evcb->evcb_flags |= EVLIST_ACTIVE;
3338  
3339  	base->event_count_active++;
3340  	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3341  	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3342  	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3343  	    evcb, evcb_active_next);
3344  }
3345  
3346  static void
3347  event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3348  {
3349  	EVENT_BASE_ASSERT_LOCKED(base);
3350  	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3351  		/* Double insertion is possible */
3352  		return;
3353  	}
3354  
3355  	INCR_EVENT_COUNT(base, evcb->evcb_flags);
3356  	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3357  	base->event_count_active++;
3358  	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3359  	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3360  	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3361  }
3362  
3363  static void
3364  event_queue_insert_timeout(struct event_base *base, struct event *ev)
3365  {
3366  	EVENT_BASE_ASSERT_LOCKED(base);
3367  
3368  	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3369  		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3370  		    ev, EV_SOCK_ARG(ev->ev_fd));
3371  		return;
3372  	}
3373  
3374  	INCR_EVENT_COUNT(base, ev->ev_flags);
3375  
3376  	ev->ev_flags |= EVLIST_TIMEOUT;
3377  
3378  	if (is_common_timeout(&ev->ev_timeout, base)) {
3379  		struct common_timeout_list *ctl =
3380  		    get_common_timeout_list(base, &ev->ev_timeout);
3381  		insert_common_timeout_inorder(ctl, ev);
3382  	} else {
3383  		min_heap_push_(&base->timeheap, ev);
3384  	}
3385  }
3386  
3387  static void
3388  event_queue_make_later_events_active(struct event_base *base)
3389  {
3390  	struct event_callback *evcb;
3391  	EVENT_BASE_ASSERT_LOCKED(base);
3392  
3393  	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3394  		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3395  		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3396  		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3397  		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3398  		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3399  	}
3400  }
3401  
3402  /* Functions for debugging */
3403  
3404  const char *
3405  event_get_version(void)
3406  {
3407  	return (EVENT__VERSION);
3408  }
3409  
3410  ev_uint32_t
3411  event_get_version_number(void)
3412  {
3413  	return (EVENT__NUMERIC_VERSION);
3414  }
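/* Illustrative sketch: comparing the compile-time and run-time versions
 * with the two accessors above (LIBEVENT_VERSION and
 * LIBEVENT_VERSION_NUMBER come from event2/event.h).
 *
 *   if (event_get_version_number() != LIBEVENT_VERSION_NUMBER)
 *       fprintf(stderr, "built against %s, running %s\n",
 *           LIBEVENT_VERSION, event_get_version());
 */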
3415  
3416  /*
3417   * No thread-safe interface needed - the information should be the same
3418   * for all threads.
3419   */
3420  
3421  const char *
3422  event_get_method(void)
3423  {
3424  	return (current_base->evsel->name);
3425  }
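/* Illustrative sketch: event_get_method() reports the backend of the
 * legacy global current_base; for a particular base, the per-base
 * accessor is the usual choice.
 *
 *   printf("backend: %s\n", event_base_get_method(base));  // e.g. "epoll"
 */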
3426  
3427  #ifndef EVENT__DISABLE_MM_REPLACEMENT
3428  static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3429  static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3430  static void (*mm_free_fn_)(void *p) = NULL;
3431  
3432  void *
3433  event_mm_malloc_(size_t sz)
3434  {
3435  	if (sz == 0)
3436  		return NULL;
3437  
3438  	if (mm_malloc_fn_)
3439  		return mm_malloc_fn_(sz);
3440  	else
3441  		return malloc(sz);
3442  }
3443  
3444  void *
3445  event_mm_calloc_(size_t count, size_t size)
3446  {
3447  	if (count == 0 || size == 0)
3448  		return NULL;
3449  
3450  	if (mm_malloc_fn_) {
3451  		size_t sz = count * size;
3452  		void *p = NULL;
3453  		if (count > EV_SIZE_MAX / size)
3454  			goto error;
3455  		p = mm_malloc_fn_(sz);
3456  		if (p)
3457  			return memset(p, 0, sz);
3458  	} else {
3459  		void *p = calloc(count, size);
3460  #ifdef _WIN32
3461  		/* Windows calloc doesn't reliably set ENOMEM */
3462  		if (p == NULL)
3463  			goto error;
3464  #endif
3465  		return p;
3466  	}
3467  
3468  error:
3469  	errno = ENOMEM;
3470  	return NULL;
3471  }
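/* Worked example for the overflow guard above, assuming a 32-bit size_t:
 * with count = 0x10000 and size = 0x10001, count * size would wrap to
 * 0x10000, but EV_SIZE_MAX / size == 0xFFFF, so count > EV_SIZE_MAX / size
 * and the request is rejected before a truncated allocation can occur.
 */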
3472  
3473  char *
3474  event_mm_strdup_(const char *str)
3475  {
3476  	if (!str) {
3477  		errno = EINVAL;
3478  		return NULL;
3479  	}
3480  
3481  	if (mm_malloc_fn_) {
3482  		size_t ln = strlen(str);
3483  		void *p = NULL;
3484  		if (ln == EV_SIZE_MAX)
3485  			goto error;
3486  		p = mm_malloc_fn_(ln+1);
3487  		if (p)
3488  			return memcpy(p, str, ln+1);
3489  	} else
3490  #ifdef _WIN32
3491  		return _strdup(str);
3492  #else
3493  		return strdup(str);
3494  #endif
3495  
3496  error:
3497  	errno = ENOMEM;
3498  	return NULL;
3499  }
3500  
3501  void *
3502  event_mm_realloc_(void *ptr, size_t sz)
3503  {
3504  	if (mm_realloc_fn_)
3505  		return mm_realloc_fn_(ptr, sz);
3506  	else
3507  		return realloc(ptr, sz);
3508  }
3509  
3510  void
3511  event_mm_free_(void *ptr)
3512  {
3513  	if (mm_free_fn_)
3514  		mm_free_fn_(ptr);
3515  	else
3516  		free(ptr);
3517  }
3518  
3519  void
3520  event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3521  			void *(*realloc_fn)(void *ptr, size_t sz),
3522  			void (*free_fn)(void *ptr))
3523  {
3524  	mm_malloc_fn_ = malloc_fn;
3525  	mm_realloc_fn_ = realloc_fn;
3526  	mm_free_fn_ = free_fn;
3527  }
3528  #endif
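/* Illustrative sketch: replacing the allocator from application code.
 * my_malloc/my_realloc/my_free are hypothetical application-provided
 * functions; the replacement has to happen before libevent allocates
 * anything.
 *
 *   void *my_malloc(size_t sz);
 *   void *my_realloc(void *p, size_t sz);
 *   void my_free(void *p);
 *
 *   event_set_mem_functions(my_malloc, my_realloc, my_free);
 *   struct event_base *base = event_base_new();
 */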
3529  
3530  #ifdef EVENT__HAVE_EVENTFD
3531  static void
3532  evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3533  {
3534  	ev_uint64_t msg;
3535  	ev_ssize_t r;
3536  	struct event_base *base = arg;
3537  
3538  	r = read(fd, (void*) &msg, sizeof(msg));
3539  	if (r<0 && errno != EAGAIN) {
3540  		event_sock_warn(fd, "Error reading from eventfd");
3541  	}
3542  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3543  	base->is_notify_pending = 0;
3544  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3545  }
3546  #endif
3547  
3548  static void
3549  evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3550  {
3551  	unsigned char buf[1024];
3552  	struct event_base *base = arg;
3553  #ifdef _WIN32
3554  	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3555  		;
3556  #else
3557  	while (read(fd, (char*)buf, sizeof(buf)) > 0)
3558  		;
3559  #endif
3560  
3561  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3562  	base->is_notify_pending = 0;
3563  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3564  }
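/* This is the reader half of the classic self-pipe wakeup pattern: the
 * loop drains every pending byte so repeated notifications cannot leave
 * the descriptor permanently readable.  A generic sketch of the writer
 * half (not libevent's exact code, assuming a nonblocking pipe):
 *
 *   static void wake(int write_fd)
 *   {
 *       char byte = 0;
 *       if (write(write_fd, &byte, 1) < 0)
 *           ;  // EAGAIN just means a wakeup is already pending
 *   }
 */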
3565  
3566  int
3567  evthread_make_base_notifiable(struct event_base *base)
3568  {
3569  	int r;
3570  	if (!base)
3571  		return -1;
3572  
3573  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3574  	r = evthread_make_base_notifiable_nolock_(base);
3575  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3576  	return r;
3577  }
3578  
3579  static int
3580  evthread_make_base_notifiable_nolock_(struct event_base *base)
3581  {
3582  	void (*cb)(evutil_socket_t, short, void *);
3583  	int (*notify)(struct event_base *);
3584  
3585  	if (base->th_notify_fn != NULL) {
3586  		/* The base is already notifiable: we're doing fine. */
3587  		return 0;
3588  	}
3589  
3590  #if defined(EVENT__HAVE_WORKING_KQUEUE)
3591  	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3592  		base->th_notify_fn = event_kq_notify_base_;
3593  		/* No need to add an event here; the backend can wake
3594  		 * itself up just fine. */
3595  		return 0;
3596  	}
3597  #endif
3598  
3599  #ifdef EVENT__HAVE_EVENTFD
3600  	base->th_notify_fd[0] = evutil_eventfd_(0,
3601  	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3602  	if (base->th_notify_fd[0] >= 0) {
3603  		base->th_notify_fd[1] = -1;
3604  		notify = evthread_notify_base_eventfd;
3605  		cb = evthread_notify_drain_eventfd;
3606  	} else
3607  #endif
3608  	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3609  		notify = evthread_notify_base_default;
3610  		cb = evthread_notify_drain_default;
3611  	} else {
3612  		return -1;
3613  	}
3614  
3615  	base->th_notify_fn = notify;
3616  
3617  	/* prepare an event that we can use for wakeup */
3618  	event_assign(&base->th_notify, base, base->th_notify_fd[0],
3619  				 EV_READ|EV_PERSIST, cb, base);
3620  
3621  	/* we need to mark this as an internal event */
3622  	base->th_notify.ev_flags |= EVLIST_INTERNAL;
3623  	event_priority_set(&base->th_notify, 0);
3624  
3625  	return event_add_nolock_(&base->th_notify, NULL, 0);
3626  }
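/* Illustrative sketch: applications rarely call the functions above
 * directly.  Enabling threading support before creating the base makes
 * the base notifiable when it is created, and cross-thread calls then
 * use th_notify_fn to wake the loop.  'some_event' is a placeholder.
 *
 *   #include <event2/thread.h>
 *
 *   evthread_use_pthreads();                   // needs libevent_pthreads
 *   struct event_base *base = event_base_new();
 *   // ... later, from another thread:
 *   event_active(some_event, EV_TIMEOUT, 0);   // wakes the dispatch loop
 */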
3627  
3628  int
3629  event_base_foreach_event_nolock_(struct event_base *base,
3630      event_base_foreach_event_cb fn, void *arg)
3631  {
3632  	int r, i;
3633  	unsigned u;
3634  	struct event *ev;
3635  
3636  	/* Start out with all the EVLIST_INSERTED events. */
3637  	if ((r = evmap_foreach_event_(base, fn, arg)))
3638  		return r;
3639  
3640  	/* Okay, now we deal with those events that have timeouts and are in
3641  	 * the min-heap. */
3642  	for (u = 0; u < base->timeheap.n; ++u) {
3643  		ev = base->timeheap.p[u];
3644  		if (ev->ev_flags & EVLIST_INSERTED) {
3645  			/* we already processed this one */
3646  			continue;
3647  		}
3648  		if ((r = fn(base, ev, arg)))
3649  			return r;
3650  	}
3651  
3652  	/* Now for the events in one of the common-timeout queues
3653  	 * (rather than the min-heap). */
3654  	for (i = 0; i < base->n_common_timeouts; ++i) {
3655  		struct common_timeout_list *ctl =
3656  		    base->common_timeout_queues[i];
3657  		TAILQ_FOREACH(ev, &ctl->events,
3658  		    ev_timeout_pos.ev_next_with_common_timeout) {
3659  			if (ev->ev_flags & EVLIST_INSERTED) {
3660  				/* we already processed this one */
3661  				continue;
3662  			}
3663  			if ((r = fn(base, ev, arg)))
3664  				return r;
3665  		}
3666  	}
3667  
3668  	/* Finally, we deal with all the active events that we haven't touched
3669  	 * yet. */
3670  	for (i = 0; i < base->nactivequeues; ++i) {
3671  		struct event_callback *evcb;
3672  		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3673  			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3674  			/* This isn't an event (EVLIST_INIT clear), or
3675  			 * we already processed it (EVLIST_INSERTED or
3676  			 * EVLIST_TIMEOUT set). */
3677  				continue;
3678  			}
3679  			ev = event_callback_to_event(evcb);
3680  			if ((r = fn(base, ev, arg)))
3681  				return r;
3682  		}
3683  	}
3684  
3685  	return 0;
3686  }
3687  
3688  /* Helper for event_base_dump_events: called on each event in the event base;
3689   * dumps only the inserted events. */
3690  static int
3691  dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3692  {
3693  	FILE *output = arg;
3694  	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3695  	    "sig" : "fd ";
3696  
3697  	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3698  		return 0;
3699  
3700  	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
3701  	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3702  	    (e->ev_events&EV_READ)?" Read":"",
3703  	    (e->ev_events&EV_WRITE)?" Write":"",
3704  	    (e->ev_events&EV_CLOSED)?" EOF":"",
3705  	    (e->ev_events&EV_SIGNAL)?" Signal":"",
3706  	    (e->ev_events&EV_PERSIST)?" Persist":"",
3707  	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3708  	if (e->ev_flags & EVLIST_TIMEOUT) {
3709  		struct timeval tv;
3710  		tv.tv_sec = e->ev_timeout.tv_sec;
3711  		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3712  		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3713  		fprintf(output, " Timeout=%ld.%06d",
3714  		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3715  	}
3716  	fputc('\n', output);
3717  
3718  	return 0;
3719  }
3720  
3721  /* Helper for event_base_dump_events: called on each event in the event base;
3722   * dumps only the active events. */
3723  static int
3724  dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3725  {
3726  	FILE *output = arg;
3727  	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3728  	    "sig" : "fd ";
3729  
3730  	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3731  		return 0;
3732  
3733  	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3734  	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3735  	    (e->ev_res&EV_READ)?" Read":"",
3736  	    (e->ev_res&EV_WRITE)?" Write":"",
3737  	    (e->ev_res&EV_CLOSED)?" EOF":"",
3738  	    (e->ev_res&EV_SIGNAL)?" Signal":"",
3739  	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3740  	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3741  	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3742  
3743  	return 0;
3744  }
3745  
3746  int
3747  event_base_foreach_event(struct event_base *base,
3748      event_base_foreach_event_cb fn, void *arg)
3749  {
3750  	int r;
3751  	if ((!fn) || (!base)) {
3752  		return -1;
3753  	}
3754  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3755  	r = event_base_foreach_event_nolock_(base, fn, arg);
3756  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3757  	return r;
3758  }
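/* Illustrative sketch: a minimal caller of the iterator above.
 * count_cb and n are placeholder names; returning non-zero from the
 * callback stops the walk early.
 *
 *   static int count_cb(const struct event_base *base,
 *       const struct event *ev, void *arg)
 *   {
 *       ++*(int *)arg;
 *       return 0;             // 0 == keep iterating
 *   }
 *
 *   int n = 0;
 *   event_base_foreach_event(base, count_cb, &n);
 */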
3759  
3760  
3761  void
3762  event_base_dump_events(struct event_base *base, FILE *output)
3763  {
3764  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3765  	fprintf(output, "Inserted events:\n");
3766  	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3767  
3768  	fprintf(output, "Active events:\n");
3769  	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3770  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3771  }
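/* Illustrative sketch: a typical debugging call site for the dumper
 * above.  Each inserted event prints one line in the format produced by
 * dump_inserted_event_fn(), roughly
 * "  0x7f...10 [fd  7] Read Persist Timeout=12.000340".
 *
 *   event_base_dump_events(base, stderr);
 */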
3772  
3773  void
3774  event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3775  {
3776  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3777  	evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3778  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3779  }
3780  
3781  void
3782  event_base_active_by_signal(struct event_base *base, int sig)
3783  {
3784  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3785  	evmap_signal_active_(base, sig, 1);
3786  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3787  }
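/* Illustrative sketch: these entry points let a caller run the
 * registered callbacks as if the kernel had reported activity, without
 * the descriptor or signal actually firing.  'sock_fd' is a placeholder
 * for a descriptor that has events registered on it.
 *
 *   event_base_active_by_fd(base, sock_fd, EV_READ);  // run read callbacks
 *   event_base_active_by_signal(base, SIGHUP);        // run SIGHUP handlers
 */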
3788  
3789  
3790  void
3791  event_base_add_virtual_(struct event_base *base)
3792  {
3793  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3794  	base->virtual_event_count++;
3795  	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3796  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3797  }
3798  
3799  void
3800  event_base_del_virtual_(struct event_base *base)
3801  {
3802  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3803  	EVUTIL_ASSERT(base->virtual_event_count > 0);
3804  	base->virtual_event_count--;
3805  	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3806  		evthread_notify_base(base);
3807  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3808  }
3809  
3810  static void
3811  event_free_debug_globals_locks(void)
3812  {
3813  #ifndef EVENT__DISABLE_THREAD_SUPPORT
3814  #ifndef EVENT__DISABLE_DEBUG_MODE
3815  	if (event_debug_map_lock_ != NULL) {
3816  		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3817  		event_debug_map_lock_ = NULL;
3818  		evthreadimpl_disable_lock_debugging_();
3819  	}
3820  #endif /* EVENT__DISABLE_DEBUG_MODE */
3821  #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3822  	return;
3823  }
3824  
3825  static void
3826  event_free_debug_globals(void)
3827  {
3828  	event_free_debug_globals_locks();
3829  }
3830  
3831  static void
3832  event_free_evsig_globals(void)
3833  {
3834  	evsig_free_globals_();
3835  }
3836  
3837  static void
3838  event_free_evutil_globals(void)
3839  {
3840  	evutil_free_globals_();
3841  }
3842  
3843  static void
3844  event_free_globals(void)
3845  {
3846  	event_free_debug_globals();
3847  	event_free_evsig_globals();
3848  	event_free_evutil_globals();
3849  }
3850  
3851  void
3852  libevent_global_shutdown(void)
3853  {
3854  	event_disable_debug_mode();
3855  	event_free_globals();
3856  }
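/* Illustrative sketch of the intended teardown order: free every event
 * and every base first; no libevent function may be called after the
 * global shutdown returns.
 *
 *   event_free(ev);
 *   event_base_free(base);
 *   libevent_global_shutdown();
 */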
3857  
3858  #ifndef EVENT__DISABLE_THREAD_SUPPORT
3859  int
3860  event_global_setup_locks_(const int enable_locks)
3861  {
3862  #ifndef EVENT__DISABLE_DEBUG_MODE
3863  	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3864  #endif
3865  	if (evsig_global_setup_locks_(enable_locks) < 0)
3866  		return -1;
3867  	if (evutil_global_setup_locks_(enable_locks) < 0)
3868  		return -1;
3869  	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3870  		return -1;
3871  	return 0;
3872  }
3873  #endif
3874  
3875  void
3876  event_base_assert_ok_(struct event_base *base)
3877  {
3878  	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3879  	event_base_assert_ok_nolock_(base);
3880  	EVBASE_RELEASE_LOCK(base, th_base_lock);
3881  }
3882  
3883  void
3884  event_base_assert_ok_nolock_(struct event_base *base)
3885  {
3886  	int i;
3887  	int count;
3888  
3889  	/* First do checks on the per-fd and per-signal lists */
3890  	evmap_check_integrity_(base);
3891  
3892  	/* Check the heap property */
3893  	for (i = 1; i < (int)base->timeheap.n; ++i) {
3894  		int parent = (i - 1) / 2;
3895  		struct event *ev, *p_ev;
3896  		ev = base->timeheap.p[i];
3897  		p_ev = base->timeheap.p[parent];
3898  		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3899  		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3900  		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3901  	}
3902  
3903  	/* Check that the common timeouts are fine */
3904  	for (i = 0; i < base->n_common_timeouts; ++i) {
3905  		struct common_timeout_list *ctl = base->common_timeout_queues[i];
3906  		struct event *last=NULL, *ev;
3907  
3908  		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3909  
3910  		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3911  			if (last)
3912  				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3913  			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3914  			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3915  			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3916  			last = ev;
3917  		}
3918  	}
3919  
3920  	/* Check the active queues. */
3921  	count = 0;
3922  	for (i = 0; i < base->nactivequeues; ++i) {
3923  		struct event_callback *evcb;
3924  		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
3925  		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3926  			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
3927  			EVUTIL_ASSERT(evcb->evcb_pri == i);
3928  			++count;
3929  		}
3930  	}
3931  
3932  	{
3933  		struct event_callback *evcb;
3934  		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
3935  			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
3936  			++count;
3937  		}
3938  	}
3939  	EVUTIL_ASSERT(count == base->event_count_active);
3940  }
3941
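/* Note on the heap check in event_base_assert_ok_nolock_(): the min-heap
 * uses the standard 0-based array layout, where the children of index p
 * live at 2p+1 and 2p+2, so the parent of index i is (i-1)/2.  For
 * example, i = 5 and i = 6 both map back to parent (5-1)/2 == (6-1)/2 == 2,
 * and each such child's timeout must compare >= its parent's timeout.
 */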