LCOV - code coverage report
Current view: top level - core/mainloop - mainloop.c (source / functions)
Test: lcov.info
Date: 2021-11-24 03:28:48

                   Hit    Total    Coverage
Lines:             315      894      35.2 %
Functions:          37       95      38.9 %

          Line data    Source code
       1             : /* Copyright (c) 2001 Matej Pfajfar.
       2             :  * Copyright (c) 2001-2004, Roger Dingledine.
       3             :  * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
       4             :  * Copyright (c) 2007-2021, The Tor Project, Inc. */
       5             : /* See LICENSE for licensing information */
       6             : 
       7             : /**
       8             :  * \file mainloop.c
       9             :  * \brief Toplevel module. Handles signals, multiplexes between
      10             :  *     connections, implements main loop, and drives scheduled events.
      11             :  *
       12             :  * For the main loop itself, see run_main_loop_once().  It invokes the rest of
      13             :  * Tor mostly through Libevent callbacks.  Libevent callbacks can happen when
      14             :  * a timer elapses, a signal is received, a socket is ready to read or write,
      15             :  * or an event is manually activated.
      16             :  *
      17             :  * Most events in Tor are driven from these callbacks:
      18             :  *  <ul>
      19             :  *   <li>conn_read_callback() and conn_write_callback() here, which are
      20             :  *     invoked when a socket is ready to read or write respectively.
      21             :  *   <li>signal_callback(), which handles incoming signals.
      22             :  *  </ul>
      23             :  * Other events are used for specific purposes, or for building more complex
      24             :  * control structures.  If you search for usage of tor_libevent_new(), you
      25             :  * will find all the events that we construct in Tor.
      26             :  *
      27             :  * Tor has numerous housekeeping operations that need to happen
      28             :  * regularly. They are handled in different ways:
      29             :  * <ul>
      30             :  *   <li>The most frequent operations are handled after every read or write
      31             :  *    event, at the end of connection_handle_read() and
      32             :  *    connection_handle_write().
      33             :  *
      34             :  *   <li>The next most frequent operations happen after each invocation of the
      35             :  *     main loop, in run_main_loop_once().
      36             :  *
      37             :  *   <li>Once per second, we run all of the operations listed in
      38             :  *     second_elapsed_callback(), and in its child, run_scheduled_events().
      39             :  *
      42             :  *   <li>More infrequent operations take place based on the periodic event
       43             :  *     driver in periodic.c.  These are stored in the periodic_events[]
      44             :  *     table.
      45             :  * </ul>
      46             :  *
      47             :  **/
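                      :
                      : /* Illustrative sketch, not part of mainloop.c itself: the "manually
                      :  * activated" events mentioned above are typically built with the
                      :  * compat_libevent wrappers seen later in this file.  Assuming the
                      :  * mainloop_event_new()/mainloop_event_activate() helpers, and using
                      :  * the made-up names example_ev and example_cb, a minimal event looks
                      :  * roughly like this:
                      :  *
                      :  *   static mainloop_event_t *example_ev = NULL;
                      :  *
                      :  *   static void
                      :  *   example_cb(mainloop_event_t *event, void *arg)
                      :  *   {
                      :  *     (void) event;
                      :  *     (void) arg;
                      :  *     // Runs from inside the main loop once activated.
                      :  *   }
                      :  *
                      :  *   // At startup, once the event base exists:
                      :  *   //   example_ev = mainloop_event_new(example_cb, NULL);
                      :  *   // Later, from the main thread, to schedule the callback:
                      :  *   //   mainloop_event_activate(example_ev);
                      :  */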
      48             : 
      49             : #define MAINLOOP_PRIVATE
      50             : #include "core/or/or.h"
      51             : 
      52             : #include "app/config/config.h"
      53             : #include "app/config/statefile.h"
      54             : #include "app/main/ntmain.h"
      55             : #include "core/mainloop/connection.h"
      56             : #include "core/mainloop/cpuworker.h"
      57             : #include "core/mainloop/mainloop.h"
      58             : #include "core/mainloop/netstatus.h"
      59             : #include "core/mainloop/periodic.h"
      60             : #include "core/or/channel.h"
      61             : #include "core/or/channelpadding.h"
      62             : #include "core/or/channeltls.h"
      63             : #include "core/or/circuitbuild.h"
      64             : #include "core/or/circuitlist.h"
      65             : #include "core/or/circuituse.h"
      66             : #include "core/or/connection_edge.h"
      67             : #include "core/or/connection_or.h"
      68             : #include "core/or/dos.h"
      69             : #include "core/or/status.h"
      70             : #include "feature/client/addressmap.h"
      71             : #include "feature/client/bridges.h"
      72             : #include "feature/client/dnsserv.h"
      73             : #include "feature/client/entrynodes.h"
      74             : #include "feature/client/proxymode.h"
      75             : #include "feature/client/transports.h"
      76             : #include "feature/control/control.h"
      77             : #include "feature/control/control_events.h"
      78             : #include "feature/dirauth/authmode.h"
      79             : #include "feature/dircache/consdiffmgr.h"
      80             : #include "feature/dirclient/dirclient_modes.h"
      81             : #include "feature/dircommon/directory.h"
      82             : #include "feature/hibernate/hibernate.h"
      83             : #include "feature/hs/hs_cache.h"
      84             : #include "feature/hs/hs_client.h"
      85             : #include "feature/hs/hs_service.h"
      86             : #include "feature/nodelist/microdesc.h"
      87             : #include "feature/nodelist/networkstatus.h"
      88             : #include "feature/nodelist/nodelist.h"
      89             : #include "feature/nodelist/routerlist.h"
      90             : #include "feature/relay/dns.h"
      91             : #include "feature/relay/routerkeys.h"
      92             : #include "feature/relay/routermode.h"
      93             : #include "feature/relay/selftest.h"
      94             : #include "feature/stats/geoip_stats.h"
      95             : #include "feature/stats/predict_ports.h"
      96             : #include "feature/stats/connstats.h"
      97             : #include "feature/stats/rephist.h"
      98             : #include "lib/buf/buffers.h"
      99             : #include "lib/crypt_ops/crypto_rand.h"
     100             : #include "lib/err/backtrace.h"
     101             : #include "lib/tls/buffers_tls.h"
     102             : 
     103             : #include "lib/net/buffers_net.h"
     104             : #include "lib/evloop/compat_libevent.h"
     105             : 
     106             : #include <event2/event.h>
     107             : 
     108             : #include "core/or/cell_st.h"
     109             : #include "core/or/entry_connection_st.h"
     110             : #include "feature/nodelist/networkstatus_st.h"
     111             : #include "core/or/or_connection_st.h"
     112             : #include "app/config/or_state_st.h"
     113             : #include "feature/nodelist/routerinfo_st.h"
     114             : #include "core/or/socks_request_st.h"
     115             : 
     116             : #ifdef HAVE_UNISTD_H
     117             : #include <unistd.h>
     118             : #endif
     119             : 
     120             : #ifdef HAVE_SYSTEMD
     121             : #   if defined(__COVERITY__) && !defined(__INCLUDE_LEVEL__)
     122             : /* Systemd's use of gcc's __INCLUDE_LEVEL__ extension macro appears to confuse
     123             :  * Coverity. Here's a kludge to unconfuse it.
     124             :  */
     125             : #   define __INCLUDE_LEVEL__ 2
      126             : #   endif /* defined(__COVERITY__) && !defined(__INCLUDE_LEVEL__) */
     127             : #include <systemd/sd-daemon.h>
     128             : #endif /* defined(HAVE_SYSTEMD) */
     129             : 
     130             : /* Token bucket for all traffic. */
     131             : token_bucket_rw_t global_bucket;
     132             : 
     133             : /* Token bucket for relayed traffic. */
     134             : token_bucket_rw_t global_relayed_bucket;
     135             : 
     136             : /* XXX we might want to keep stats about global_relayed_*_bucket too. Or not.*/
     137             : /** How many bytes have we read since we started the process? */
     138             : static uint64_t stats_n_bytes_read = 0;
     139             : /** How many bytes have we written since we started the process? */
     140             : static uint64_t stats_n_bytes_written = 0;
     141             : /** What time did this process start up? */
     142             : time_t time_of_process_start = 0;
     143             : /** How many seconds have we been running? */
     144             : static long stats_n_seconds_working = 0;
     145             : /** How many times have we returned from the main loop successfully? */
     146             : static uint64_t stats_n_main_loop_successes = 0;
     147             : /** How many times have we received an error from the main loop? */
     148             : static uint64_t stats_n_main_loop_errors = 0;
      149             : /** How many times have we returned from the main loop with no events? */
     150             : static uint64_t stats_n_main_loop_idle = 0;
     151             : 
      152             : /** How often will we honor SIGNEWNYM requests? (At most one per this many
      153             :  * seconds.) */
     153             : #define MAX_SIGNEWNYM_RATE 10
     154             : /** When did we last process a SIGNEWNYM request? */
     155             : static time_t time_of_last_signewnym = 0;
     156             : /** Is there a signewnym request we're currently waiting to handle? */
     157             : static int signewnym_is_pending = 0;
     158             : /** Mainloop event for the deferred signewnym call. */
     159             : static mainloop_event_t *handle_deferred_signewnym_ev = NULL;
     160             : /** How many times have we called newnym? */
     161             : static unsigned newnym_epoch = 0;
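                      :
                      : /* Rough sketch, not the literal implementation: the variables above
                      :  * support rate-limiting of NEWNYM roughly as follows (the real logic
                      :  * lives in the signewnym handling later in this file):
                      :  *
                      :  *   if (time_of_last_signewnym + MAX_SIGNEWNYM_RATE > now) {
                      :  *     // Too soon: remember the request and let the deferred
                      :  *     // handle_deferred_signewnym_ev mainloop event handle it later.
                      :  *     signewnym_is_pending = 1;
                      :  *   } else {
                      :  *     // Act now: rotate client-side state, then record when we did.
                      :  *     ++newnym_epoch;
                      :  *     time_of_last_signewnym = now;
                      :  *     signewnym_is_pending = 0;
                      :  *   }
                      :  */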
     162             : 
     163             : /** Smartlist of all open connections. */
     164             : STATIC smartlist_t *connection_array = NULL;
     165             : /** List of connections that have been marked for close and need to be freed
     166             :  * and removed from connection_array. */
     167             : static smartlist_t *closeable_connection_lst = NULL;
     168             : /** List of linked connections that are currently reading data into their
     169             :  * inbuf from their partner's outbuf. */
     170             : static smartlist_t *active_linked_connection_lst = NULL;
     171             : /** Flag: Set to true iff we entered the current libevent main loop via
     172             :  * <b>loop_once</b>. If so, there's no need to trigger a loopexit in order
     173             :  * to handle linked connections. */
     174             : static int called_loop_once = 0;
     175             : /** Flag: if true, it's time to shut down, so the main loop should exit as
     176             :  * soon as possible.
     177             :  */
     178             : static int main_loop_should_exit = 0;
     179             : /** The return value that the main loop should yield when it exits, if
     180             :  * main_loop_should_exit is true.
     181             :  */
     182             : static int main_loop_exit_value = 0;
     183             : 
     184             : /** We set this to 1 when we've opened a circuit, so we can print a log
     185             :  * entry to inform the user that Tor is working.  We set it to 0 when
     186             :  * we think the fact that we once opened a circuit doesn't mean we can do so
      187             :  * any longer (for example, a big time jump happened, or we notice that our
      188             :  * directory is heinously out-of-date).
     189             :  */
     190             : static int can_complete_circuits = 0;
     191             : 
     192             : /** How often do we check for router descriptors that we should download
     193             :  * when we have too little directory info? */
     194             : #define GREEDY_DESCRIPTOR_RETRY_INTERVAL (10)
     195             : /** How often do we check for router descriptors that we should download
     196             :  * when we have enough directory info? */
     197             : #define LAZY_DESCRIPTOR_RETRY_INTERVAL (60)
     198             : 
     199             : static int conn_close_if_marked(int i);
     200             : static void connection_start_reading_from_linked_conn(connection_t *conn);
     201             : static int connection_should_read_from_linked_conn(connection_t *conn);
     202             : static void conn_read_callback(evutil_socket_t fd, short event, void *_conn);
     203             : static void conn_write_callback(evutil_socket_t fd, short event, void *_conn);
     204             : static void shutdown_did_not_work_callback(evutil_socket_t fd, short event,
     205             :                                            void *arg) ATTR_NORETURN;
     206             : 
     207             : /****************************************************************************
     208             :  *
     209             :  * This section contains accessors and other methods on the connection_array
     210             :  * variables (which are global within this file and unavailable outside it).
     211             :  *
     212             :  ****************************************************************************/
     213             : 
     214             : /** Return 1 if we have successfully built a circuit, and nothing has changed
     215             :  * to make us think that maybe we can't.
     216             :  */
     217             : int
     218           0 : have_completed_a_circuit(void)
     219             : {
     220           0 :   return can_complete_circuits;
     221             : }
     222             : 
     223             : /** Note that we have successfully built a circuit, so that reachability
      224             :  * testing, introduction point establishment, and so on may be attempted. */
     225             : void
     226           0 : note_that_we_completed_a_circuit(void)
     227             : {
     228           0 :   can_complete_circuits = 1;
     229           0 : }
     230             : 
     231             : /** Note that something has happened (like a clock jump, or DisableNetwork) to
     232             :  * make us think that maybe we can't complete circuits. */
     233             : void
     234           7 : note_that_we_maybe_cant_complete_circuits(void)
     235             : {
     236           7 :   can_complete_circuits = 0;
     237           7 : }
     238             : 
     239             : /** Add <b>conn</b> to the array of connections that we can poll on.  The
     240             :  * connection's socket must be set; the connection starts out
     241             :  * non-reading and non-writing.
     242             :  */
     243             : int
     244          21 : connection_add_impl(connection_t *conn, int is_connecting)
     245             : {
     246          21 :   tor_assert(conn);
     247          21 :   tor_assert(SOCKET_OK(conn->s) ||
     248             :              conn->linked ||
     249             :              (conn->type == CONN_TYPE_AP &&
     250             :               TO_EDGE_CONN(conn)->is_dns_request));
     251             : 
     252          21 :   tor_assert(conn->conn_array_index == -1); /* can only connection_add once */
     253          21 :   conn->conn_array_index = smartlist_len(connection_array);
     254          21 :   smartlist_add(connection_array, conn);
     255             : 
     256          21 :   (void) is_connecting;
     257             : 
     258          21 :   if (SOCKET_OK(conn->s) || conn->linked) {
     259          21 :     conn->read_event = tor_event_new(tor_libevent_get_base(),
     260             :          conn->s, EV_READ|EV_PERSIST, conn_read_callback, conn);
     261          21 :     conn->write_event = tor_event_new(tor_libevent_get_base(),
     262             :          conn->s, EV_WRITE|EV_PERSIST, conn_write_callback, conn);
     263             :     /* XXXX CHECK FOR NULL RETURN! */
     264             :   }
     265             : 
     266          21 :   log_debug(LD_NET,"new conn type %s, socket %d, address %s, n_conns %d.",
     267             :             conn_type_to_string(conn->type), (int)conn->s, conn->address,
     268             :             smartlist_len(connection_array));
     269             : 
     270          21 :   return 0;
     271             : }
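                      :
                      : /* Hedged usage note: callers in connection.c conventionally register a
                      :  * connection through the connection_add() wrapper around the function
                      :  * above and then opt in to the events they want, since new connections
                      :  * start out non-reading and non-writing.  Roughly:
                      :  *
                      :  *   if (connection_add(conn) < 0) {
                      :  *     connection_free(conn);
                      :  *     return -1;
                      :  *   }
                      :  *   connection_start_reading(conn);  // libevent may now invoke
                      :  *                                    // conn_read_callback() for it
                      :  */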
     272             : 
     273             : /** Tell libevent that we don't care about <b>conn</b> any more. */
     274             : void
     275          52 : connection_unregister_events(connection_t *conn)
     276             : {
     277          52 :   if (conn->read_event) {
     278           0 :     if (event_del(conn->read_event))
     279           0 :       log_warn(LD_BUG, "Error removing read event for %d", (int)conn->s);
     280           0 :     tor_free(conn->read_event);
     281             :   }
     282          52 :   if (conn->write_event) {
     283           0 :     if (event_del(conn->write_event))
     284           0 :       log_warn(LD_BUG, "Error removing write event for %d", (int)conn->s);
     285           0 :     tor_free(conn->write_event);
     286             :   }
     287          52 :   if (conn->type == CONN_TYPE_AP_DNS_LISTENER) {
     288           0 :     dnsserv_close_listener(conn);
     289             :   }
     290          52 : }
     291             : 
     292             : /** Remove the connection from the global list, and remove the
     293             :  * corresponding poll entry.  Calling this function will shift the last
     294             :  * connection (if any) into the position occupied by conn.
     295             :  */
     296             : int
     297          14 : connection_remove(connection_t *conn)
     298             : {
     299          14 :   int current_index;
     300          14 :   connection_t *tmp;
     301             : 
     302          14 :   tor_assert(conn);
     303             : 
     304          14 :   log_debug(LD_NET,"removing socket %d (type %s), n_conns now %d",
     305             :             (int)conn->s, conn_type_to_string(conn->type),
     306             :             smartlist_len(connection_array));
     307             : 
     308          14 :   if (conn->type == CONN_TYPE_AP && conn->socket_family == AF_UNIX) {
     309           0 :     log_info(LD_NET, "Closing SOCKS Unix socket connection");
     310             :   }
     311             : 
     312          14 :   control_event_conn_bandwidth(conn);
     313             : 
     314          14 :   tor_assert(conn->conn_array_index >= 0);
     315          14 :   current_index = conn->conn_array_index;
     316          14 :   connection_unregister_events(conn); /* This is redundant, but cheap. */
     317          14 :   if (current_index == smartlist_len(connection_array)-1) { /* at the end */
     318           2 :     smartlist_del(connection_array, current_index);
     319           2 :     return 0;
     320             :   }
     321             : 
     322             :   /* replace this one with the one at the end */
     323          12 :   smartlist_del(connection_array, current_index);
     324          12 :   tmp = smartlist_get(connection_array, current_index);
     325          12 :   tmp->conn_array_index = current_index;
     326             : 
     327          12 :   return 0;
     328             : }
     329             : 
     330             : /** If <b>conn</b> is an edge conn, remove it from the list
      331             :  * of conns on its circuit. If it's not an edge conn,
     332             :  * flush and send destroys for all circuits on this conn.
     333             :  *
     334             :  * Remove it from connection_array (if applicable) and
     335             :  * from closeable_connection_list.
     336             :  *
     337             :  * Then free it.
     338             :  */
     339             : static void
     340          23 : connection_unlink(connection_t *conn)
     341             : {
     342          23 :   connection_about_to_close_connection(conn);
     343          23 :   if (conn->conn_array_index >= 0) {
     344          14 :     connection_remove(conn);
     345             :   }
     346          23 :   if (conn->linked_conn) {
     347           6 :     conn->linked_conn->linked_conn = NULL;
     348           6 :     if (! conn->linked_conn->marked_for_close &&
     349             :         conn->linked_conn->reading_from_linked_conn)
     350           0 :       connection_start_reading(conn->linked_conn);
     351           6 :     conn->linked_conn = NULL;
     352             :   }
     353          23 :   smartlist_remove(closeable_connection_lst, conn);
     354          23 :   smartlist_remove(active_linked_connection_lst, conn);
     355          23 :   if (conn->type == CONN_TYPE_EXIT) {
     356           0 :     assert_connection_edge_not_dns_pending(TO_EDGE_CONN(conn));
     357             :   }
     358          23 :   if (conn->type == CONN_TYPE_OR) {
     359           0 :     if (!tor_digest_is_zero(TO_OR_CONN(conn)->identity_digest))
     360           0 :       connection_or_clear_identity(TO_OR_CONN(conn));
     361             :     /* connection_unlink() can only get called if the connection
     362             :      * was already on the closeable list, and it got there by
     363             :      * connection_mark_for_close(), which was called from
     364             :      * connection_or_close_normally() or
     365             :      * connection_or_close_for_error(), so the channel should
     366             :      * already be in CHANNEL_STATE_CLOSING, and then the
     367             :      * connection_about_to_close_connection() goes to
     368             :      * connection_or_about_to_close(), which calls channel_closed()
     369             :      * to notify the channel_t layer, and closed the channel, so
     370             :      * nothing more to do here to deal with the channel associated
     371             :      * with an orconn.
     372             :      */
     373             :   }
     374          23 :   connection_free(conn);
     375          23 : }
     376             : 
     377             : /** Event that invokes schedule_active_linked_connections_cb. */
     378             : static mainloop_event_t *schedule_active_linked_connections_event = NULL;
     379             : 
     380             : /**
     381             :  * Callback: used to activate read events for all linked connections, so
      382             :  * libevent knows to call their read callbacks.  This callback runs as a
     383             :  * postloop event, so that the events _it_ activates don't happen until
     384             :  * Libevent has a chance to check for other events.
     385             :  */
     386             : static void
     387           0 : schedule_active_linked_connections_cb(mainloop_event_t *event, void *arg)
     388             : {
     389           0 :   (void)event;
     390           0 :   (void)arg;
     391             : 
     392             :   /* All active linked conns should get their read events activated,
     393             :    * so that libevent knows to run their callbacks. */
     394           0 :   SMARTLIST_FOREACH(active_linked_connection_lst, connection_t *, conn,
     395             :                     event_active(conn->read_event, EV_READ, 1));
     396             : 
     397             :   /* Reactivate the event if we still have connections in the active list.
     398             :    *
     399             :    * A linked connection doesn't get woken up by I/O but rather artificially
     400             :    * by this event callback. It has directory data spooled in it and it is
     401             :    * sent incrementally by small chunks unless spool_eagerly is true. For that
     402             :    * to happen, we need to induce the activation of the read event so it can
     403             :    * be flushed. */
     404           0 :   if (smartlist_len(active_linked_connection_lst)) {
     405           0 :     mainloop_event_activate(schedule_active_linked_connections_event);
     406             :   }
     407           0 : }
     408             : 
     409             : /** Initialize the global connection list, closeable connection list,
     410             :  * and active connection list. */
     411             : void
     412         258 : tor_init_connection_lists(void)
     413             : {
     414         258 :   if (!connection_array)
     415         235 :     connection_array = smartlist_new();
     416         258 :   if (!closeable_connection_lst)
     417         246 :     closeable_connection_lst = smartlist_new();
     418         258 :   if (!active_linked_connection_lst)
     419         246 :     active_linked_connection_lst = smartlist_new();
     420         258 : }
     421             : 
     422             : /** Schedule <b>conn</b> to be closed. **/
     423             : void
     424          23 : add_connection_to_closeable_list(connection_t *conn)
     425             : {
     426          23 :   tor_assert(!smartlist_contains(closeable_connection_lst, conn));
     427          23 :   tor_assert(conn->marked_for_close);
     428          23 :   assert_connection_ok(conn, time(NULL));
     429          23 :   smartlist_add(closeable_connection_lst, conn);
     430          23 :   mainloop_schedule_postloop_cleanup();
     431          23 : }
     432             : 
     433             : /** Return 1 if conn is on the closeable list, else return 0. */
     434             : int
     435          24 : connection_is_on_closeable_list(connection_t *conn)
     436             : {
     437          24 :   return smartlist_contains(closeable_connection_lst, conn);
     438             : }
     439             : 
     440             : /** Return true iff conn is in the current poll array. */
     441             : int
     442          24 : connection_in_array(connection_t *conn)
     443             : {
     444          24 :   return smartlist_contains(connection_array, conn);
     445             : }
     446             : 
      447             : /** Return the global smartlist of all connections.  The returned list must
      448             :  * not be modified.
     449             :  */
     450       46133 : MOCK_IMPL(smartlist_t *,
     451             : get_connection_array, (void))
     452             : {
     453       46133 :   if (!connection_array)
     454           9 :     connection_array = smartlist_new();
     455       46133 :   return connection_array;
     456             : }
     457             : 
     458             : /**
     459             :  * Return the amount of network traffic read, in bytes, over the life of this
     460             :  * process.
     461             :  */
     462           1 : MOCK_IMPL(uint64_t,
     463             : get_bytes_read,(void))
     464             : {
     465           1 :   return stats_n_bytes_read;
     466             : }
     467             : 
     468             : /**
      469             :  * Return the amount of network traffic written, in bytes, over the life of
      470             :  * this process.
     471             :  */
     472           1 : MOCK_IMPL(uint64_t,
     473             : get_bytes_written,(void))
     474             : {
     475           1 :   return stats_n_bytes_written;
     476             : }
     477             : 
     478             : /**
     479             :  * Increment the amount of network traffic read and written, over the life of
     480             :  * this process.
     481             :  */
     482             : void
     483           0 : stats_increment_bytes_read_and_written(uint64_t r, uint64_t w)
     484             : {
     485           0 :   stats_n_bytes_read += r;
     486           0 :   stats_n_bytes_written += w;
     487           0 : }
     488             : 
     489             : /** Set the event mask on <b>conn</b> to <b>events</b>.  (The event
     490             :  * mask is a bitmask whose bits are READ_EVENT and WRITE_EVENT)
     491             :  */
     492             : void
     493           0 : connection_watch_events(connection_t *conn, watchable_events_t events)
     494             : {
     495           0 :   if (events & READ_EVENT)
     496           0 :     connection_start_reading(conn);
     497             :   else
     498           0 :     connection_stop_reading(conn);
     499             : 
     500           0 :   if (events & WRITE_EVENT)
     501           0 :     connection_start_writing(conn);
     502             :   else
     503           0 :     connection_stop_writing(conn);
     504           0 : }
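                      :
                      : /* For example (illustrative only), a caller that wants read events but
                      :  * not write events can call:
                      :  *
                      :  *   connection_watch_events(conn, READ_EVENT);
                      :  *
                      :  * which is equivalent to connection_start_reading(conn) followed by
                      :  * connection_stop_writing(conn). */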
     505             : 
     506             : /** Return true iff <b>conn</b> is listening for read events. */
     507             : int
     508           0 : connection_is_reading(connection_t *conn)
     509             : {
     510           0 :   tor_assert(conn);
     511             : 
     512           0 :   return conn->reading_from_linked_conn ||
     513           0 :     (conn->read_event && event_pending(conn->read_event, EV_READ, NULL));
     514             : }
     515             : 
     516             : /** Reset our main loop counters. */
     517             : void
     518           0 : reset_main_loop_counters(void)
     519             : {
     520           0 :   stats_n_main_loop_successes = 0;
     521           0 :   stats_n_main_loop_errors = 0;
     522           0 :   stats_n_main_loop_idle = 0;
     523           0 : }
     524             : 
     525             : /** Increment the main loop success counter. */
     526             : static void
     527           0 : increment_main_loop_success_count(void)
     528             : {
     529           0 :   ++stats_n_main_loop_successes;
     530           0 : }
     531             : 
     532             : /** Get the main loop success counter. */
     533             : uint64_t
     534           0 : get_main_loop_success_count(void)
     535             : {
     536           0 :   return stats_n_main_loop_successes;
     537             : }
     538             : 
     539             : /** Increment the main loop error counter. */
     540             : static void
     541           0 : increment_main_loop_error_count(void)
     542             : {
     543           0 :   ++stats_n_main_loop_errors;
     544           0 : }
     545             : 
     546             : /** Get the main loop error counter. */
     547             : uint64_t
     548           0 : get_main_loop_error_count(void)
     549             : {
     550           0 :   return stats_n_main_loop_errors;
     551             : }
     552             : 
     553             : /** Increment the main loop idle counter. */
     554             : static void
     555           0 : increment_main_loop_idle_count(void)
     556             : {
     557           0 :   ++stats_n_main_loop_idle;
     558           0 : }
     559             : 
     560             : /** Get the main loop idle counter. */
     561             : uint64_t
     562           0 : get_main_loop_idle_count(void)
     563             : {
     564           0 :   return stats_n_main_loop_idle;
     565             : }
     566             : 
     567             : /** Check whether <b>conn</b> is correct in having (or not having) a
     568             :  * read/write event (passed in <b>ev</b>). On success, return 0. On failure,
     569             :  * log a warning and return -1. */
     570             : static int
     571           2 : connection_check_event(connection_t *conn, struct event *ev)
     572             : {
     573           2 :   int bad;
     574             : 
     575           2 :   if (conn->type == CONN_TYPE_AP && TO_EDGE_CONN(conn)->is_dns_request) {
     576             :     /* DNS requests which we launch through the dnsserv.c module do not have
     577             :      * any underlying socket or any underlying linked connection, so they
     578             :      * shouldn't have any attached events either.
     579             :      */
     580           0 :     bad = ev != NULL;
     581             :   } else {
     582             :     /* Everything else should have an underlying socket, or a linked
     583             :      * connection (which is also tracked with a read_event/write_event pair).
     584             :      */
     585           2 :     bad = ev == NULL;
     586             :   }
     587             : 
     588           2 :   if (bad) {
     589           0 :     log_warn(LD_BUG, "Event missing on connection %p [%s;%s]. "
     590             :              "socket=%d. linked=%d. "
     591             :              "is_dns_request=%d. Marked_for_close=%s:%d",
     592             :              conn,
     593             :              conn_type_to_string(conn->type),
     594             :              conn_state_to_string(conn->type, conn->state),
     595             :              (int)conn->s, (int)conn->linked,
     596             :              (conn->type == CONN_TYPE_AP &&
     597             :                                TO_EDGE_CONN(conn)->is_dns_request),
     598             :              conn->marked_for_close_file ? conn->marked_for_close_file : "-",
     599             :              conn->marked_for_close
     600             :              );
     601           0 :     log_backtrace(LOG_WARN, LD_BUG, "Backtrace attached.");
     602           0 :     return -1;
     603             :   }
     604             :   return 0;
     605             : }
     606             : 
     607             : /** Tell the main loop to stop notifying <b>conn</b> of any read events. */
     608           0 : MOCK_IMPL(void,
     609             : connection_stop_reading,(connection_t *conn))
     610             : {
     611           0 :   tor_assert(conn);
     612             : 
     613           0 :   if (connection_check_event(conn, conn->read_event) < 0) {
     614             :     return;
     615             :   }
     616             : 
     617           0 :   if (conn->linked) {
     618           0 :     conn->reading_from_linked_conn = 0;
     619           0 :     connection_stop_reading_from_linked_conn(conn);
     620             :   } else {
     621           0 :     if (event_del(conn->read_event))
     622           0 :       log_warn(LD_NET, "Error from libevent setting read event state for %d "
     623             :                "to unwatched: %s",
     624             :                (int)conn->s,
     625             :                tor_socket_strerror(tor_socket_errno(conn->s)));
     626             :   }
     627             : }
     628             : 
     629             : /** Tell the main loop to start notifying <b>conn</b> of any read events. */
     630           2 : MOCK_IMPL(void,
     631             : connection_start_reading,(connection_t *conn))
     632             : {
     633           2 :   tor_assert(conn);
     634             : 
     635           2 :   if (connection_check_event(conn, conn->read_event) < 0) {
     636             :     return;
     637             :   }
     638             : 
     639           2 :   if (conn->linked) {
     640           0 :     conn->reading_from_linked_conn = 1;
     641           0 :     if (connection_should_read_from_linked_conn(conn))
     642           0 :       connection_start_reading_from_linked_conn(conn);
     643             :   } else {
     644           2 :     if (event_add(conn->read_event, NULL))
     645           0 :       log_warn(LD_NET, "Error from libevent setting read event state for %d "
     646             :                "to watched: %s",
     647             :                (int)conn->s,
     648             :                tor_socket_strerror(tor_socket_errno(conn->s)));
     649             :   }
     650             : }
     651             : 
     652             : /** Return true iff <b>conn</b> is listening for write events. */
     653             : int
     654           0 : connection_is_writing(connection_t *conn)
     655             : {
     656           0 :   tor_assert(conn);
     657             : 
     658           0 :   return conn->writing_to_linked_conn ||
     659           0 :     (conn->write_event && event_pending(conn->write_event, EV_WRITE, NULL));
     660             : }
     661             : 
     662             : /** Tell the main loop to stop notifying <b>conn</b> of any write events. */
     663           0 : MOCK_IMPL(void,
     664             : connection_stop_writing,(connection_t *conn))
     665             : {
     666           0 :   tor_assert(conn);
     667             : 
     668           0 :   if (connection_check_event(conn, conn->write_event) < 0) {
     669             :     return;
     670             :   }
     671             : 
     672           0 :   if (conn->linked) {
     673           0 :     conn->writing_to_linked_conn = 0;
     674           0 :     if (conn->linked_conn)
     675           0 :       connection_stop_reading_from_linked_conn(conn->linked_conn);
     676             :   } else {
     677           0 :     if (event_del(conn->write_event))
     678           0 :       log_warn(LD_NET, "Error from libevent setting write event state for %d "
     679             :                "to unwatched: %s",
     680             :                (int)conn->s,
     681             :                tor_socket_strerror(tor_socket_errno(conn->s)));
     682             :   }
     683             : }
     684             : 
     685             : /** Tell the main loop to start notifying <b>conn</b> of any write events. */
     686           0 : MOCK_IMPL(void,
     687             : connection_start_writing,(connection_t *conn))
     688             : {
     689           0 :   tor_assert(conn);
     690             : 
     691           0 :   if (connection_check_event(conn, conn->write_event) < 0) {
     692             :     return;
     693             :   }
     694             : 
     695           0 :   if (conn->linked) {
     696           0 :     conn->writing_to_linked_conn = 1;
     697           0 :     if (conn->linked_conn &&
     698           0 :         connection_should_read_from_linked_conn(conn->linked_conn))
     699           0 :       connection_start_reading_from_linked_conn(conn->linked_conn);
     700             :   } else {
     701           0 :     if (event_add(conn->write_event, NULL))
     702           0 :       log_warn(LD_NET, "Error from libevent setting write event state for %d "
     703             :                "to watched: %s",
     704             :                (int)conn->s,
     705             :                tor_socket_strerror(tor_socket_errno(conn->s)));
     706             :   }
     707             : }
     708             : 
      709             : /** Return true iff <b>conn</b> is a linked conn, and reading from the conn
     710             :  * linked to it would be good and feasible.  (Reading is "feasible" if the
     711             :  * other conn exists and has data in its outbuf, and is "good" if we have our
     712             :  * reading_from_linked_conn flag set and the other conn has its
     713             :  * writing_to_linked_conn flag set.)*/
     714             : static int
     715           0 : connection_should_read_from_linked_conn(connection_t *conn)
     716             : {
     717           0 :   if (conn->linked && conn->reading_from_linked_conn) {
     718           0 :     if (! conn->linked_conn ||
     719           0 :         (conn->linked_conn->writing_to_linked_conn &&
     720           0 :          buf_datalen(conn->linked_conn->outbuf)))
     721           0 :       return 1;
     722             :   }
     723             :   return 0;
     724             : }
     725             : 
     726             : /** Event to run 'shutdown did not work callback'. */
     727             : static struct event *shutdown_did_not_work_event = NULL;
     728             : 
     729             : /** Failsafe measure that should never actually be necessary: If
     730             :  * tor_shutdown_event_loop_and_exit() somehow doesn't successfully exit the
     731             :  * event loop, then this callback will kill Tor with an assertion failure
      732             :  * ten seconds later.
     733             :  */
     734             : static void
     735           0 : shutdown_did_not_work_callback(evutil_socket_t fd, short event, void *arg)
     736             : {
     737             :   // LCOV_EXCL_START
     738             :   (void) fd;
     739             :   (void) event;
     740             :   (void) arg;
     741             :   tor_assert_unreached();
     742             :   // LCOV_EXCL_STOP
     743             : }
     744             : 
     745             : #ifdef ENABLE_RESTART_DEBUGGING
     746             : static struct event *tor_shutdown_event_loop_for_restart_event = NULL;
     747             : static void
     748             : tor_shutdown_event_loop_for_restart_cb(
     749             :                       evutil_socket_t fd, short event, void *arg)
     750             : {
     751             :   (void)fd;
     752             :   (void)event;
     753             :   (void)arg;
     754             :   tor_event_free(tor_shutdown_event_loop_for_restart_event);
     755             :   tor_shutdown_event_loop_and_exit(0);
     756             : }
     757             : #endif /* defined(ENABLE_RESTART_DEBUGGING) */
     758             : 
     759             : /**
     760             :  * After finishing the current callback (if any), shut down the main loop,
     761             :  * clean up the process, and exit with <b>exitcode</b>.
     762             :  */
     763             : void
     764           4 : tor_shutdown_event_loop_and_exit(int exitcode)
     765             : {
     766           4 :   if (main_loop_should_exit)
     767           1 :     return; /* Ignore multiple calls to this function. */
     768             : 
     769           4 :   main_loop_should_exit = 1;
     770           4 :   main_loop_exit_value = exitcode;
     771             : 
     772           4 :   if (! tor_libevent_is_initialized()) {
     773             :     return; /* No event loop to shut down. */
     774             :   }
     775             : 
     776             :   /* Die with an assertion failure in ten seconds, if for some reason we don't
     777             :    * exit normally. */
      778             :   /* XXXX We should consider removing this code if it's never used. */
     779           3 :   struct timeval ten_seconds = { 10, 0 };
     780           3 :   shutdown_did_not_work_event = tor_evtimer_new(
     781             :                   tor_libevent_get_base(),
     782             :                   shutdown_did_not_work_callback, NULL);
     783           3 :   event_add(shutdown_did_not_work_event, &ten_seconds);
     784             : 
     785             :   /* Unlike exit_loop_after_delay(), exit_loop_after_callback
     786             :    * prevents other callbacks from running. */
     787           3 :   tor_libevent_exit_loop_after_callback(tor_libevent_get_base());
     788             : }
     789             : 
     790             : /** Return true iff tor_shutdown_event_loop_and_exit() has been called. */
     791             : int
     792           1 : tor_event_loop_shutdown_is_pending(void)
     793             : {
     794           1 :   return main_loop_should_exit;
     795             : }
     796             : 
     797             : /** Helper: Tell the main loop to begin reading bytes into <b>conn</b> from
     798             :  * its linked connection, if it is not doing so already.  Called by
     799             :  * connection_start_reading and connection_start_writing as appropriate. */
     800             : static void
     801           0 : connection_start_reading_from_linked_conn(connection_t *conn)
     802             : {
     803           0 :   tor_assert(conn);
     804           0 :   tor_assert(conn->linked == 1);
     805             : 
     806           0 :   if (!conn->active_on_link) {
     807           0 :     conn->active_on_link = 1;
     808           0 :     smartlist_add(active_linked_connection_lst, conn);
     809           0 :     mainloop_event_activate(schedule_active_linked_connections_event);
     810             :   } else {
     811           0 :     tor_assert(smartlist_contains(active_linked_connection_lst, conn));
     812             :   }
     813           0 : }
     814             : 
     815             : /** Tell the main loop to stop reading bytes into <b>conn</b> from its linked
      816             :  * connection, if it is currently doing so.  Called by connection_stop_reading,
     817             :  * connection_stop_writing, and connection_read. */
     818             : void
     819           0 : connection_stop_reading_from_linked_conn(connection_t *conn)
     820             : {
     821           0 :   tor_assert(conn);
     822           0 :   tor_assert(conn->linked == 1);
     823             : 
     824           0 :   if (conn->active_on_link) {
     825           0 :     conn->active_on_link = 0;
     826             :     /* FFFF We could keep an index here so we can smartlist_del
     827             :      * cleanly.  On the other hand, this doesn't show up on profiles,
     828             :      * so let's leave it alone for now. */
     829           0 :     smartlist_remove(active_linked_connection_lst, conn);
     830             :   } else {
     831           0 :     tor_assert(!smartlist_contains(active_linked_connection_lst, conn));
     832             :   }
     833           0 : }
     834             : 
     835             : /** Close all connections that have been scheduled to get closed. */
     836             : STATIC void
     837          25 : close_closeable_connections(void)
     838             : {
     839          25 :   int i;
     840          48 :   for (i = 0; i < smartlist_len(closeable_connection_lst); ) {
     841          23 :     connection_t *conn = smartlist_get(closeable_connection_lst, i);
     842          23 :     if (conn->conn_array_index < 0) {
     843           9 :       connection_unlink(conn); /* blow it away right now */
     844             :     } else {
     845          14 :       if (!conn_close_if_marked(conn->conn_array_index))
     846           0 :         ++i;
     847             :     }
     848             :   }
     849          25 : }
     850             : 
     851             : /** Count moribund connections for the OOS handler */
     852           0 : MOCK_IMPL(int,
     853             : connection_count_moribund, (void))
     854             : {
     855           0 :   int moribund = 0;
     856             : 
     857             :   /*
     858             :    * Count things we'll try to kill when close_closeable_connections()
     859             :    * runs next.
     860             :    */
     861           0 :   SMARTLIST_FOREACH_BEGIN(closeable_connection_lst, connection_t *, conn) {
     862           0 :     if (SOCKET_OK(conn->s) && connection_is_moribund(conn)) ++moribund;
     863           0 :   } SMARTLIST_FOREACH_END(conn);
     864             : 
     865           0 :   return moribund;
     866             : }
     867             : 
     868             : /** Libevent callback: this gets invoked when (connection_t*)<b>conn</b> has
     869             :  * some data to read. */
     870             : static void
     871           0 : conn_read_callback(evutil_socket_t fd, short event, void *_conn)
     872             : {
     873           0 :   connection_t *conn = _conn;
     874           0 :   (void)fd;
     875           0 :   (void)event;
     876             : 
     877           0 :   log_debug(LD_NET,"socket %d wants to read.",(int)conn->s);
     878             : 
     879             :   /* assert_connection_ok(conn, time(NULL)); */
     880             : 
     881             :   /* Handle marked for close connections early */
     882           0 :   if (conn->marked_for_close && connection_is_reading(conn)) {
     883             :     /* Libevent says we can read, but we are marked for close so we will never
     884             :      * try to read again. We will try to close the connection below inside of
     885             :      * close_closeable_connections(), but let's make sure not to cause Libevent
     886             :      * to spin on conn_read_callback() while we wait for the socket to let us
     887             :      * flush to it.*/
     888           0 :     connection_stop_reading(conn);
     889             :   }
     890             : 
     891           0 :   if (connection_handle_read(conn) < 0) {
     892           0 :     if (!conn->marked_for_close) {
     893             : #ifndef _WIN32
     894           0 :       log_warn(LD_BUG,"Unhandled error on read for %s connection "
     895             :                "(fd %d); removing",
     896             :                conn_type_to_string(conn->type), (int)conn->s);
     897           0 :       tor_fragile_assert();
     898             : #endif /* !defined(_WIN32) */
     899           0 :       if (CONN_IS_EDGE(conn))
     900           0 :         connection_edge_end_errno(TO_EDGE_CONN(conn));
     901           0 :       connection_mark_for_close(conn);
     902             :     }
     903             :   }
     904           0 :   assert_connection_ok(conn, time(NULL));
     905             : 
     906           0 :   if (smartlist_len(closeable_connection_lst))
     907           0 :     close_closeable_connections();
     908           0 : }
     909             : 
     910             : /** Libevent callback: this gets invoked when (connection_t*)<b>conn</b> has
     911             :  * some data to write. */
     912             : static void
     913           0 : conn_write_callback(evutil_socket_t fd, short events, void *_conn)
     914             : {
     915           0 :   connection_t *conn = _conn;
     916           0 :   (void)fd;
     917           0 :   (void)events;
     918             : 
     919           0 :   LOG_FN_CONN(conn, (LOG_DEBUG, LD_NET, "socket %d wants to write.",
     920             :                      (int)conn->s));
     921             : 
     922             :   /* assert_connection_ok(conn, time(NULL)); */
     923             : 
     924           0 :   if (connection_handle_write(conn, 0) < 0) {
     925           0 :     if (!conn->marked_for_close) {
     926             :       /* this connection is broken. remove it. */
     927           0 :       log_fn(LOG_WARN,LD_BUG,
     928             :              "unhandled error on write for %s connection (fd %d); removing",
     929             :              conn_type_to_string(conn->type), (int)conn->s);
     930           0 :       tor_fragile_assert();
     931           0 :       if (CONN_IS_EDGE(conn)) {
     932             :         /* otherwise we cry wolf about duplicate close */
     933           0 :         edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
     934           0 :         if (!edge_conn->end_reason)
     935           0 :           edge_conn->end_reason = END_STREAM_REASON_INTERNAL;
     936           0 :         edge_conn->edge_has_sent_end = 1;
     937             :       }
     938           0 :       connection_close_immediate(conn); /* So we don't try to flush. */
     939           0 :       connection_mark_for_close(conn);
     940             :     }
     941             :   }
     942           0 :   assert_connection_ok(conn, time(NULL));
     943             : 
     944           0 :   if (smartlist_len(closeable_connection_lst))
     945           0 :     close_closeable_connections();
     946           0 : }
     947             : 
     948             : /** If the connection at connection_array[i] is marked for close, then:
     949             :  *    - If it has data that it wants to flush, try to flush it.
     950             :  *    - If it _still_ has data to flush, and conn->hold_open_until_flushed is
     951             :  *      true, then leave the connection open and return.
     952             :  *    - Otherwise, remove the connection from connection_array and from
     953             :  *      all other lists, close it, and free it.
     954             :  * Returns 1 if the connection was closed, 0 otherwise.
     955             :  */
     956             : static int
     957          14 : conn_close_if_marked(int i)
     958             : {
     959          14 :   connection_t *conn;
     960          14 :   int retval;
     961          14 :   time_t now;
     962             : 
     963          14 :   conn = smartlist_get(connection_array, i);
     964          14 :   if (!conn->marked_for_close)
     965             :     return 0; /* nothing to see here, move along */
     966          14 :   now = time(NULL);
     967          14 :   assert_connection_ok(conn, now);
     968             : 
     969          14 :   log_debug(LD_NET,"Cleaning up connection (fd "TOR_SOCKET_T_FORMAT").",
     970             :             conn->s);
     971             : 
     972             :   /* If the connection we are about to close was trying to connect to
     973             :   a proxy server and failed, the client won't be able to use that
     974             :   proxy. We should warn the user about this. */
     975          14 :   if (conn->proxy_state == PROXY_INFANT)
     976           0 :     log_failed_proxy_connection(conn);
     977             : 
     978          20 :   if ((SOCKET_OK(conn->s) || conn->linked_conn) &&
     979           6 :       connection_wants_to_flush(conn)) {
     980             :     /* s == -1 means it's an incomplete edge connection, or that the socket
     981             :      * has already been closed as unflushable. */
     982           0 :     ssize_t sz = connection_bucket_write_limit(conn, now);
     983           0 :     if (!conn->hold_open_until_flushed)
     984           0 :       log_info(LD_NET,
     985             :                "Conn (addr %s, fd %d, type %s, state %d) marked, but wants "
     986             :                "to flush %"TOR_PRIuSZ" bytes. (Marked at %s:%d)",
     987             :                escaped_safe_str_client(conn->address),
     988             :                (int)conn->s, conn_type_to_string(conn->type), conn->state,
     989             :                connection_get_outbuf_len(conn),
     990             :                conn->marked_for_close_file, conn->marked_for_close);
     991           0 :     if (conn->linked_conn) {
     992           0 :       retval = (int) buf_move_all(conn->linked_conn->inbuf, conn->outbuf);
     993           0 :       if (retval >= 0) {
     994             :         /* The linked conn will notice that it has data when it notices that
     995             :          * we're gone. */
     996           0 :         connection_start_reading_from_linked_conn(conn->linked_conn);
     997             :       }
     998           0 :       log_debug(LD_GENERAL, "Flushed last %d bytes from a linked conn; "
     999             :                "%d left; wants-to-flush==%d", retval,
    1000             :                 (int)connection_get_outbuf_len(conn),
    1001             :                 connection_wants_to_flush(conn));
    1002           0 :     } else if (connection_speaks_cells(conn)) {
    1003           0 :       if (conn->state == OR_CONN_STATE_OPEN) {
    1004           0 :         retval = buf_flush_to_tls(conn->outbuf, TO_OR_CONN(conn)->tls, sz);
    1005             :       } else
    1006             :         retval = -1; /* never flush non-open broken tls connections */
    1007             :     } else {
    1008           0 :       retval = buf_flush_to_socket(conn->outbuf, conn->s, sz);
    1009             :     }
    1010           0 :     if (retval >= 0 && /* Technically, we could survive things like
    1011             :                           TLS_WANT_WRITE here. But don't bother for now. */
    1012           0 :         conn->hold_open_until_flushed && connection_wants_to_flush(conn)) {
    1013           0 :       if (retval > 0) {
    1014           0 :         LOG_FN_CONN(conn, (LOG_INFO,LD_NET,
    1015             :                            "Holding conn (fd %d) open for more flushing.",
    1016             :                            (int)conn->s));
    1017           0 :         conn->timestamp_last_write_allowed = now; /* reset so we can flush
    1018             :                                                    * more */
    1019           0 :       } else if (sz == 0) {
    1020             :         /* Also, retval==0.  If we get here, we didn't want to write anything
    1021             :          * (because of rate-limiting) and we didn't. */
    1022             : 
    1023             :         /* The connection must flush before closing, but it's being
    1024             :          * rate-limited.  Remove it from Libevent and mark it as blocked on
    1025             :          * bandwidth so it will be re-added on the next token bucket refill.
    1026             :          * This prevents busy Libevent loops where we keep ending up here
    1027             :          * and returning 0 until we are no longer blocked on bandwidth.
    1028             :          */
    1029           0 :         connection_consider_empty_write_buckets(conn);
    1030             :         /* Make sure that connection_consider_empty_write_buckets() really
    1031             :          * disabled the connection: */
    1032           0 :         if (BUG(connection_is_writing(conn))) {
    1033           0 :           connection_write_bw_exhausted(conn, true);
    1034             :         }
    1035             : 
    1036             :         /* The connection is being held open due to the write rate limit, so
    1037             :          * it will flush its data later. Stop reading, because this
    1038             :          * connection is about to be closed once it has flushed; it should
    1039             :          * not process anything more coming in at this stage. */
    1040           0 :         connection_stop_reading(conn);
    1041             :       }
    1042           0 :       return 0;
    1043             :     }
    1044           0 :     if (connection_wants_to_flush(conn)) {
    1045           0 :       log_fn(LOG_INFO, LD_NET, "We stalled too much while trying to write %d "
    1046             :              "bytes to address %s.  If this happens a lot, either "
    1047             :              "something is wrong with your network connection, or "
    1048             :              "something is wrong with theirs. "
    1049             :              "(fd %d, type %s, state %d, marked at %s:%d).",
    1050             :              (int)connection_get_outbuf_len(conn),
    1051             :              escaped_safe_str_client(conn->address),
    1052             :              (int)conn->s, conn_type_to_string(conn->type), conn->state,
    1053             :              conn->marked_for_close_file,
    1054             :              conn->marked_for_close);
    1055             :     }
    1056             :   }
    1057             : 
    1058          14 :   connection_unlink(conn); /* unlink, remove, free */
    1059          14 :   return 1;
    1060             : }
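
A minimal sketch of how the return value above is meant to be consumed. The loop below is illustrative only (the real caller is close_closeable_connections(), which is not part of this excerpt and iterates a dedicated closeable list); the point is that a return of 1 means the element at that index was unlinked and the array shifted, so the same index must be re-examined:

/* Illustrative sketch, not the function Tor actually uses: walk the
 * connection array and close anything marked for close.  When
 * conn_close_if_marked() returns 1, the connection has been unlinked and
 * the array has shifted, so we stay on the same index. */
static void
close_marked_connections_sketch(void)
{
  for (int i = 0; i < smartlist_len(connection_array); ) {
    if (!conn_close_if_marked(i))
      ++i;
  }
}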
    1061             : 
    1062             : /** Implementation for directory_all_unreachable.  This is done in a callback,
    1063             :  * since otherwise it would complicate Tor's control-flow graph beyond all
    1064             :  * reason.
    1065             :  */
    1066             : static void
    1067           0 : directory_all_unreachable_cb(mainloop_event_t *event, void *arg)
    1068             : {
    1069           0 :   (void)event;
    1070           0 :   (void)arg;
    1071             : 
    1072           0 :   connection_t *conn;
    1073             : 
    1074           0 :   while ((conn = connection_get_by_type_state(CONN_TYPE_AP,
    1075             :                                               AP_CONN_STATE_CIRCUIT_WAIT))) {
    1076           0 :     entry_connection_t *entry_conn = TO_ENTRY_CONN(conn);
    1077           0 :     log_notice(LD_NET,
    1078             :                "Is your network connection down? "
    1079             :                "Failing connection to '%s:%d'.",
    1080             :                safe_str_client(entry_conn->socks_request->address),
    1081             :                entry_conn->socks_request->port);
    1082           0 :     connection_mark_unattached_ap(entry_conn,
    1083             :                                   END_STREAM_REASON_NET_UNREACHABLE);
    1084             :   }
    1085           0 :   control_event_general_error("DIR_ALL_UNREACHABLE");
    1086           0 : }
    1087             : 
    1088             : static mainloop_event_t *directory_all_unreachable_cb_event = NULL;
    1089             : 
    1090             : /** We've just tried every dirserver we know about, and none of
    1091             :  * them were reachable. Assume the network is down. Change state
    1092             :  * so next time an application connection arrives we'll delay it
    1093             :  * and try another directory fetch. Kill off all the circuit_wait
    1094             :  * streams that are waiting now, since they will all time out anyway.
    1095             :  */
    1096             : void
    1097           0 : directory_all_unreachable(time_t now)
    1098             : {
    1099           0 :   (void)now;
    1100             : 
    1101           0 :   reset_uptime(); /* reset it */
    1102             : 
    1103           0 :   if (!directory_all_unreachable_cb_event) {
    1104           0 :     directory_all_unreachable_cb_event =
    1105           0 :       mainloop_event_new(directory_all_unreachable_cb, NULL);
    1106           0 :     tor_assert(directory_all_unreachable_cb_event);
    1107             :   }
    1108             : 
    1109           0 :   mainloop_event_activate(directory_all_unreachable_cb_event);
    1110           0 : }
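
The create-once-then-activate idiom above recurs throughout this file (schedule_rescan_periodic_events() and mainloop_schedule_postloop_cleanup() below follow the same shape). Reduced to its essentials, and with purely illustrative names, it looks like this:

/* Illustrative pattern only: defer some work out of the current call
 * stack by activating a lazily-created mainloop event. */
static mainloop_event_t *example_event = NULL;

static void
example_cb(mainloop_event_t *event, void *arg)
{
  (void)event;
  (void)arg;
  /* ... the deferred work happens here, from the event loop ... */
}

static void
schedule_example_work(void)
{
  if (!example_event)
    example_event = mainloop_event_new(example_cb, NULL);
  mainloop_event_activate(example_event);
}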
    1111             : 
    1112             : /** This function is called whenever we successfully pull down some new
    1113             :  * network statuses or server descriptors. */
    1114             : void
    1115           1 : directory_info_has_arrived(time_t now, int from_cache, int suppress_logs)
    1116             : {
    1117           1 :   const or_options_t *options = get_options();
    1118             : 
    1119             :   /* if we have enough dir info, then update our guard status with
    1120             :    * whatever we just learned. */
    1121           1 :   int invalidate_circs = guards_update_all();
    1122             : 
    1123           1 :   if (invalidate_circs) {
    1124           0 :     circuit_mark_all_unused_circs();
    1125           0 :     circuit_mark_all_dirty_circs_as_unusable();
    1126             :   }
    1127             : 
    1128           1 :   if (!router_have_minimum_dir_info()) {
    1129           2 :     int quiet = suppress_logs || from_cache ||
    1130           1 :                 dirclient_too_idle_to_fetch_descriptors(options, now);
    1131           2 :     tor_log(quiet ? LOG_INFO : LOG_NOTICE, LD_DIR,
    1132             :         "I learned some more directory information, but not enough to "
    1133             :         "build a circuit: %s", get_dir_info_status_string());
    1134           1 :     update_all_descriptor_downloads(now);
    1135           1 :     return;
    1136             :   } else {
    1137           0 :     if (dirclient_fetches_from_authorities(options)) {
    1138           0 :       update_all_descriptor_downloads(now);
    1139             :     }
    1140             : 
    1141             :     /* Don't even bother trying to get extrainfo until the rest of our
    1142             :      * directory info is up-to-date */
    1143           0 :     if (options->DownloadExtraInfo)
    1144           0 :       update_extrainfo_downloads(now);
    1145             :   }
    1146             : 
    1147           0 :   if (server_mode(options) && !net_is_disabled() && !from_cache &&
    1148           0 :       (have_completed_a_circuit() || !any_predicted_circuits(now)))
    1149           0 :    router_do_reachability_checks();
    1150             : }
    1151             : 
    1152             : /** Perform regular maintenance tasks for a single connection.  This
    1153             :  * function gets run once per second per connection by run_scheduled_events.
    1154             :  */
    1155             : static void
    1156           0 : run_connection_housekeeping(int i, time_t now)
    1157             : {
    1158           0 :   cell_t cell;
    1159           0 :   connection_t *conn = smartlist_get(connection_array, i);
    1160           0 :   const or_options_t *options = get_options();
    1161           0 :   or_connection_t *or_conn;
    1162           0 :   channel_t *chan = NULL;
    1163           0 :   int have_any_circuits;
    1164           0 :   int past_keepalive =
    1165           0 :     now >= conn->timestamp_last_write_allowed + options->KeepalivePeriod;
    1166             : 
    1167           0 :   if (conn->outbuf && !connection_get_outbuf_len(conn) &&
    1168           0 :       conn->type == CONN_TYPE_OR)
    1169           0 :     TO_OR_CONN(conn)->timestamp_lastempty = now;
    1170             : 
    1171           0 :   if (conn->marked_for_close) {
    1172             :     /* nothing to do here */
    1173           0 :     return;
    1174             :   }
    1175             : 
    1176             :   /* Expire any directory connections that haven't been active (sent if a
    1177             :    * server or received if a client) for TestingDirConnectionMaxStall (5 min). */
    1178           0 :   if (conn->type == CONN_TYPE_DIR &&
    1179           0 :       ((DIR_CONN_IS_SERVER(conn) &&
    1180           0 :         conn->timestamp_last_write_allowed
    1181           0 :             + options->TestingDirConnectionMaxStall < now) ||
    1182           0 :        (!DIR_CONN_IS_SERVER(conn) &&
    1183           0 :         conn->timestamp_last_read_allowed
    1184           0 :             + options->TestingDirConnectionMaxStall < now))) {
    1185           0 :     log_info(LD_DIR,"Expiring wedged directory conn (fd %d, purpose %d)",
    1186             :              (int)conn->s, conn->purpose);
    1187             :     /* This check is temporary; it's to let us know whether we should consider
    1188             :      * parsing partial serverdesc responses. */
    1189           0 :     if (conn->purpose == DIR_PURPOSE_FETCH_SERVERDESC &&
    1190           0 :         connection_get_inbuf_len(conn) >= 1024) {
    1191           0 :       log_info(LD_DIR,"Trying to extract information from wedged server desc "
    1192             :                "download.");
    1193           0 :       connection_dir_reached_eof(TO_DIR_CONN(conn));
    1194             :     } else {
    1195           0 :       connection_mark_for_close(conn);
    1196             :     }
    1197           0 :     return;
    1198             :   }
    1199             : 
    1200           0 :   if (!connection_speaks_cells(conn))
    1201             :     return; /* we're all done here, the rest is just for OR conns */
    1202             : 
    1203             :   /* If we haven't flushed to an OR connection for a while, then either nuke
    1204             :    * the connection or send a keepalive, depending on its state. */
    1205             : 
    1206           0 :   or_conn = TO_OR_CONN(conn);
    1207           0 :   tor_assert(conn->outbuf);
    1208             : 
    1209           0 :   chan = TLS_CHAN_TO_BASE(or_conn->chan);
    1210           0 :   tor_assert(chan);
    1211             : 
    1212           0 :   if (channel_num_circuits(chan) != 0) {
    1213           0 :     have_any_circuits = 1;
    1214           0 :     chan->timestamp_last_had_circuits = now;
    1215             :   } else {
    1216             :     have_any_circuits = 0;
    1217             :   }
    1218             : 
    1219           0 :   if (channel_is_bad_for_new_circs(TLS_CHAN_TO_BASE(or_conn->chan)) &&
    1220             :       ! have_any_circuits) {
    1221             :     /* It's bad for new circuits, and has no unmarked circuits on it:
    1222             :      * mark it now. */
    1223           0 :     log_info(LD_OR,
    1224             :              "Expiring non-used OR connection to fd %d (%s:%d) [Too old].",
    1225             :              (int)conn->s, fmt_and_decorate_addr(&conn->addr), conn->port);
    1226           0 :     if (conn->state == OR_CONN_STATE_CONNECTING)
    1227           0 :       connection_or_connect_failed(TO_OR_CONN(conn),
    1228             :                                    END_OR_CONN_REASON_TIMEOUT,
    1229             :                                    "Tor gave up on the connection");
    1230           0 :     connection_or_close_normally(TO_OR_CONN(conn), 1);
    1231           0 :   } else if (!connection_state_is_open(conn)) {
    1232           0 :     if (past_keepalive) {
    1233             :       /* We never managed to actually get this connection open and happy. */
    1234           0 :       log_info(LD_OR,"Expiring non-open OR connection to fd %d (%s:%d).",
    1235             :                (int)conn->s, fmt_and_decorate_addr(&conn->addr), conn->port);
    1236           0 :       connection_or_close_normally(TO_OR_CONN(conn), 0);
    1237             :     }
    1238           0 :   } else if (we_are_hibernating() &&
    1239           0 :              ! have_any_circuits &&
    1240           0 :              !connection_get_outbuf_len(conn)) {
    1241             :     /* We're hibernating or shutting down, there are no circuits, and
    1242             :      * nothing to flush. */
    1243           0 :     log_info(LD_OR,"Expiring non-used OR connection to fd %d (%s:%d) "
    1244             :              "[Hibernating or exiting].",
    1245             :              (int)conn->s, fmt_and_decorate_addr(&conn->addr), conn->port);
    1246           0 :     connection_or_close_normally(TO_OR_CONN(conn), 1);
    1247           0 :   } else if (!have_any_circuits &&
    1248           0 :              now - or_conn->idle_timeout >=
    1249           0 :                                          chan->timestamp_last_had_circuits) {
    1250           0 :     log_info(LD_OR,"Expiring non-used OR connection %"PRIu64" to fd %d "
    1251             :              "(%s:%d) [no circuits for %d; timeout %d; %scanonical].",
    1252             :              (chan->global_identifier),
    1253             :              (int)conn->s, fmt_and_decorate_addr(&conn->addr), conn->port,
    1254             :              (int)(now - chan->timestamp_last_had_circuits),
    1255             :              or_conn->idle_timeout,
    1256             :              or_conn->is_canonical ? "" : "non");
    1257           0 :     connection_or_close_normally(TO_OR_CONN(conn), 0);
    1258           0 :   } else if (
    1259           0 :       now >= or_conn->timestamp_lastempty + options->KeepalivePeriod*10 &&
    1260             :       now >=
    1261           0 :           conn->timestamp_last_write_allowed + options->KeepalivePeriod*10) {
    1262           0 :     log_fn(LOG_PROTOCOL_WARN,LD_PROTOCOL,
    1263             :            "Expiring stuck OR connection to fd %d (%s:%d). (%d bytes to "
    1264             :            "flush; %d seconds since last write)",
    1265             :            (int)conn->s, fmt_and_decorate_addr(&conn->addr), conn->port,
    1266             :            (int)connection_get_outbuf_len(conn),
    1267             :            (int)(now-conn->timestamp_last_write_allowed));
    1268           0 :     connection_or_close_normally(TO_OR_CONN(conn), 0);
    1269           0 :   } else if (past_keepalive && !connection_get_outbuf_len(conn)) {
    1270             :     /* send a padding cell */
    1271           0 :     log_fn(LOG_DEBUG,LD_OR,"Sending keepalive to (%s:%d)",
    1272             :            fmt_and_decorate_addr(&conn->addr), conn->port);
    1273           0 :     memset(&cell,0,sizeof(cell_t));
    1274           0 :     cell.command = CELL_PADDING;
    1275           0 :     connection_or_write_cell_to_buf(&cell, or_conn);
    1276             :   } else {
    1277           0 :     channelpadding_decide_to_pad_channel(chan);
    1278             :   }
    1279             : }
    1280             : 
    1281             : /** Honor a NEWNYM request: make future requests unlinkable to past
    1282             :  * requests. */
    1283             : static void
    1284           0 : signewnym_impl(time_t now)
    1285             : {
    1286           0 :   const or_options_t *options = get_options();
    1287           0 :   if (!proxy_mode(options)) {
    1288           0 :     log_info(LD_CONTROL, "Ignoring SIGNAL NEWNYM because client functionality "
    1289             :              "is disabled.");
    1290           0 :     return;
    1291             :   }
    1292             : 
    1293           0 :   circuit_mark_all_dirty_circs_as_unusable();
    1294           0 :   addressmap_clear_transient();
    1295           0 :   hs_client_purge_state();
    1296           0 :   time_of_last_signewnym = now;
    1297           0 :   signewnym_is_pending = 0;
    1298             : 
    1299           0 :   ++newnym_epoch;
    1300             : 
    1301           0 :   control_event_signal(SIGNEWNYM);
    1302             : }
    1303             : 
    1304             : /** Callback: run a deferred signewnym. */
    1305             : static void
    1306           0 : handle_deferred_signewnym_cb(mainloop_event_t *event, void *arg)
    1307             : {
    1308           0 :   (void)event;
    1309           0 :   (void)arg;
    1310           0 :   log_info(LD_CONTROL, "Honoring delayed NEWNYM request");
    1311           0 :   do_signewnym(time(NULL));
    1312           0 : }
    1313             : 
    1314             : /** Either perform a signewnym or schedule one, depending on rate limiting. */
    1315             : void
    1316           0 : do_signewnym(time_t now)
    1317             : {
    1318           0 :   if (time_of_last_signewnym + MAX_SIGNEWNYM_RATE > now) {
    1319           0 :     const time_t delay_sec =
    1320           0 :       time_of_last_signewnym + MAX_SIGNEWNYM_RATE - now;
    1321           0 :     if (! signewnym_is_pending) {
    1322           0 :       signewnym_is_pending = 1;
    1323           0 :       if (!handle_deferred_signewnym_ev) {
    1324           0 :         handle_deferred_signewnym_ev =
    1325           0 :           mainloop_event_postloop_new(handle_deferred_signewnym_cb, NULL);
    1326             :       }
    1327           0 :       const struct timeval delay_tv = { delay_sec, 0 };
    1328           0 :       mainloop_event_schedule(handle_deferred_signewnym_ev, &delay_tv);
    1329             :     }
    1330           0 :     log_notice(LD_CONTROL,
    1331             :                "Rate limiting NEWNYM request: delaying by %d second(s)",
    1332             :                (int)(delay_sec));
    1333             :   } else {
    1334           0 :     signewnym_impl(now);
    1335             :   }
    1336           0 : }
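
To make the rate limiting above concrete, here is a hypothetical timeline; it assumes MAX_SIGNEWNYM_RATE is 10 seconds and that time_of_last_signewnym starts at 0 (both are defined elsewhere, so treat those values as assumptions):

/* Hypothetical timeline, assuming MAX_SIGNEWNYM_RATE == 10:
 *
 *   t=100  do_signewnym(100): 0 + 10 > 100 is false, so
 *          signewnym_impl(100) runs now; time_of_last_signewnym = 100.
 *   t=103  do_signewnym(103): 100 + 10 > 103, so delay_sec = 7 and
 *          handle_deferred_signewnym_cb() is scheduled 7 seconds ahead;
 *          the request is logged as rate limited.
 *   t=110  the deferred callback calls do_signewnym(110): 100 + 10 > 110
 *          is false, so the delayed NEWNYM finally runs.
 */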
    1337             : 
    1338             : /** Return the number of times that signewnym has been called. */
    1339             : unsigned
    1340           0 : get_signewnym_epoch(void)
    1341             : {
    1342           0 :   return newnym_epoch;
    1343             : }
    1344             : 
    1345             : /** True iff we have initialized all the members of <b>periodic_events</b>.
    1346             :  * Used to prevent double-initialization. */
    1347             : static int periodic_events_initialized = 0;
    1348             : 
    1349             : /* Declare all the timer callback functions... */
    1350             : #ifndef COCCI
    1351             : #undef CALLBACK
    1352             : #define CALLBACK(name) \
    1353             :   static int name ## _callback(time_t, const or_options_t *)
    1354             : 
    1355             : CALLBACK(add_entropy);
    1356             : CALLBACK(check_expired_networkstatus);
    1357             : CALLBACK(clean_caches);
    1358             : CALLBACK(clean_consdiffmgr);
    1359             : CALLBACK(fetch_networkstatus);
    1360             : CALLBACK(heartbeat);
    1361             : CALLBACK(hs_service);
    1362             : CALLBACK(launch_descriptor_fetches);
    1363             : CALLBACK(prune_old_routers);
    1364             : CALLBACK(record_bridge_stats);
    1365             : CALLBACK(rend_cache_failure_clean);
    1366             : CALLBACK(reset_padding_counts);
    1367             : CALLBACK(retry_listeners);
    1368             : CALLBACK(rotate_x509_certificate);
    1369             : CALLBACK(save_state);
    1370             : CALLBACK(write_stats_file);
    1371             : CALLBACK(control_per_second_events);
    1372             : CALLBACK(second_elapsed);
    1373             : 
    1374             : #undef CALLBACK
    1375             : 
    1376             : /* Now we declare an array of periodic_event_item_t for each periodic event */
    1377             : #define CALLBACK(name, r, f)                            \
    1378             :   PERIODIC_EVENT(name, PERIODIC_EVENT_ROLE_ ## r, f)
    1379             : #define FL(name) (PERIODIC_EVENT_FLAG_ ## name)
    1380             : #endif /* !defined(COCCI) */
    1381             : 
    1382             : STATIC periodic_event_item_t mainloop_periodic_events[] = {
    1383             : 
    1384             :   /* Everyone needs to run these. They need to have very long timeouts for
    1385             :    * that to be safe. */
    1386             :   CALLBACK(add_entropy, ALL, 0),
    1387             :   CALLBACK(heartbeat, ALL, 0),
    1388             :   CALLBACK(reset_padding_counts, ALL, 0),
    1389             : 
    1390             :   /* This is a legacy catch-all callback that runs once per second if
    1391             :    * we are online and active. */
    1392             :   CALLBACK(second_elapsed, NET_PARTICIPANT,
    1393             :            FL(RUN_ON_DISABLE)),
    1394             : 
    1395             :   /* XXXX Do we have a reason to do this on a callback? Does it do any good at
    1396             :    * all?  For now, if we're dormant, we can let our listeners decay. */
    1397             :   CALLBACK(retry_listeners, NET_PARTICIPANT, FL(NEED_NET)),
    1398             : 
    1399             :   /* We need to do these if we're participating in the Tor network. */
    1400             :   CALLBACK(check_expired_networkstatus, NET_PARTICIPANT, 0),
    1401             :   CALLBACK(fetch_networkstatus, NET_PARTICIPANT, 0),
    1402             :   CALLBACK(launch_descriptor_fetches, NET_PARTICIPANT, FL(NEED_NET)),
    1403             :   CALLBACK(rotate_x509_certificate, NET_PARTICIPANT, 0),
    1404             :   CALLBACK(check_network_participation, NET_PARTICIPANT, 0),
    1405             : 
    1406             :   /* We need to do these if we're participating in the Tor network, and
    1407             :    * immediately before we stop. */
    1408             :   CALLBACK(clean_caches, NET_PARTICIPANT, FL(RUN_ON_DISABLE)),
    1409             :   CALLBACK(save_state, NET_PARTICIPANT, FL(RUN_ON_DISABLE)),
    1410             :   CALLBACK(write_stats_file, NET_PARTICIPANT, FL(RUN_ON_DISABLE)),
    1411             :   CALLBACK(prune_old_routers, NET_PARTICIPANT, FL(RUN_ON_DISABLE)),
    1412             : 
    1413             :   /* Hidden Service service only. */
    1414             :   CALLBACK(hs_service, HS_SERVICE, FL(NEED_NET)), // XXXX break this down more
    1415             : 
    1416             :   /* Bridge only. */
    1417             :   CALLBACK(record_bridge_stats, BRIDGE, 0),
    1418             : 
    1419             :   /* Client only. */
    1420             :   /* XXXX this could be restricted to CLIENT+NET_PARTICIPANT */
    1421             :   CALLBACK(rend_cache_failure_clean, NET_PARTICIPANT, FL(RUN_ON_DISABLE)),
    1422             : 
    1423             :   /* Directory server only. */
    1424             :   CALLBACK(clean_consdiffmgr, DIRSERVER, 0),
    1425             : 
    1426             :   /* Controller with per-second events only. */
    1427             :   CALLBACK(control_per_second_events, CONTROLEV, 0),
    1428             : 
    1429             :   END_OF_PERIODIC_EVENTS
    1430             : };
    1431             : #ifndef COCCI
    1432             : #undef CALLBACK
    1433             : #undef FL
    1434             : #endif
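
For readers following the macro indirection above, a single table entry expands as shown below (PERIODIC_EVENT itself is defined in periodic.h, outside this file, so only the two local macros are expanded here):

/* Expansion of one entry of mainloop_periodic_events[]:
 *
 *   CALLBACK(launch_descriptor_fetches, NET_PARTICIPANT, FL(NEED_NET))
 *
 * becomes, after the CALLBACK and FL macros above are applied,
 *
 *   PERIODIC_EVENT(launch_descriptor_fetches,
 *                  PERIODIC_EVENT_ROLE_NET_PARTICIPANT,
 *                  (PERIODIC_EVENT_FLAG_NEED_NET))
 */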
    1435             : 
    1436             : /* These are pointers to members of periodic_events[] that are used to
    1437             :  * implement particular callbacks.  We keep them separate here so that we
    1438             :  * can access them by name.  We also keep them inside periodic_events[]
    1439             :  * so that we can implement "reset all timers" in a reasonable way. */
    1440             : static periodic_event_item_t *fetch_networkstatus_event=NULL;
    1441             : static periodic_event_item_t *launch_descriptor_fetches_event=NULL;
    1442             : static periodic_event_item_t *check_dns_honesty_event=NULL;
    1443             : static periodic_event_item_t *save_state_event=NULL;
    1444             : static periodic_event_item_t *prune_old_routers_event=NULL;
    1445             : 
    1446             : /** Reset all the periodic events so we'll do all our actions again as if we
    1447             :  * just started up.
    1448             :  * Useful if our clock just moved back a long time from the future,
    1449             :  * so we don't wait until that future arrives again before acting.
    1450             :  */
    1451             : void
    1452           1 : reset_all_main_loop_timers(void)
    1453             : {
    1454           1 :   periodic_events_reset_all();
    1455           1 : }
    1456             : 
    1457             : /** Return a bitmask of the roles this tor instance is configured for using
    1458             :  * the given options. */
    1459             : STATIC int
    1460         308 : get_my_roles(const or_options_t *options)
    1461             : {
    1462         308 :   tor_assert(options);
    1463             : 
    1464         308 :   int roles = PERIODIC_EVENT_ROLE_ALL;
    1465         308 :   int is_bridge = options->BridgeRelay;
    1466         308 :   int is_relay = server_mode(options);
    1467         308 :   int is_dirauth = authdir_mode_v3(options);
    1468         308 :   int is_bridgeauth = authdir_mode_bridge(options);
    1469         308 :   int is_hidden_service = !!hs_service_get_num_services();
    1470         308 :   int is_dirserver = dir_server_mode(options);
    1471         308 :   int sending_control_events = control_any_per_second_event_enabled();
    1472             : 
    1473             :   /* We also consider tor to have the role of a client if the ControlPort is
    1474             :    * set, because a lot of things can be done over the control port that
    1475             :    * require tor to have basic client functionality. */
    1476         398 :   int is_client = options_any_client_port_set(options) ||
    1477         308 :                   options->ControlPort_set ||
    1478          90 :                   options->OwningControllerFD != UINT64_MAX;
    1479             : 
    1480         308 :   int is_net_participant = is_participating_on_network() ||
    1481         308 :     is_relay || is_hidden_service;
    1482             : 
    1483         308 :   if (is_bridge) roles |= PERIODIC_EVENT_ROLE_BRIDGE;
    1484         308 :   if (is_client) roles |= PERIODIC_EVENT_ROLE_CLIENT;
    1485         308 :   if (is_relay) roles |= PERIODIC_EVENT_ROLE_RELAY;
    1486         308 :   if (is_dirauth) roles |= PERIODIC_EVENT_ROLE_DIRAUTH;
    1487         308 :   if (is_bridgeauth) roles |= PERIODIC_EVENT_ROLE_BRIDGEAUTH;
    1488         308 :   if (is_hidden_service) roles |= PERIODIC_EVENT_ROLE_HS_SERVICE;
    1489         308 :   if (is_dirserver) roles |= PERIODIC_EVENT_ROLE_DIRSERVER;
    1490         308 :   if (is_net_participant) roles |= PERIODIC_EVENT_ROLE_NET_PARTICIPANT;
    1491         308 :   if (sending_control_events) roles |= PERIODIC_EVENT_ROLE_CONTROLEV;
    1492             : 
    1493         308 :   return roles;
    1494             : }
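
The bitmask returned above is consumed by periodic_events_rescan_by_roles() (see rescan_periodic_events() below). A minimal sketch of how a single role bit would be tested, with an illustrative helper name:

/* Illustrative only: answer "should relay-role periodic events run on
 * this instance?" by testing the role bitmask. */
static int
instance_has_relay_role_sketch(void)
{
  return (get_my_roles(get_options()) & PERIODIC_EVENT_ROLE_RELAY) != 0;
}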
    1495             : 
    1496             : /** Event to run initialize_periodic_events_cb */
    1497             : static struct event *initialize_periodic_events_event = NULL;
    1498             : 
    1499             : /** Helper, run one second after setup:
    1500             :  * Initializes all members of periodic_events and starts them running.
    1501             :  *
    1502             :  * (We do this one second after setup for backward-compatibility reasons;
    1503             :  * it might not actually be necessary.) */
    1504             : static void
    1505           0 : initialize_periodic_events_cb(evutil_socket_t fd, short events, void *data)
    1506             : {
    1507           0 :   (void) fd;
    1508           0 :   (void) events;
    1509           0 :   (void) data;
    1510             : 
    1511           0 :   tor_event_free(initialize_periodic_events_event);
    1512             : 
    1513           0 :   rescan_periodic_events(get_options());
    1514           0 : }
    1515             : 
    1516             : /** Set up all the members of mainloop_periodic_events[], and configure them
    1517             :  * all to be launched from a callback. */
    1518             : void
    1519         248 : initialize_periodic_events(void)
    1520             : {
    1521         248 :   if (periodic_events_initialized)
    1522             :     return;
    1523             : 
    1524         244 :   periodic_events_initialized = 1;
    1525             : 
    1526        4880 :   for (int i = 0; mainloop_periodic_events[i].name; ++i) {
    1527        4636 :     periodic_events_register(&mainloop_periodic_events[i]);
    1528             :   }
    1529             : 
    1530             :   /* Set up all periodic events. We'll launch them by roles. */
    1531             : 
    1532             : #ifndef COCCI
    1533             : #define NAMED_CALLBACK(name) \
    1534             :   STMT_BEGIN name ## _event = periodic_events_find( #name ); STMT_END
    1535             : #endif
    1536             : 
    1537         244 :   NAMED_CALLBACK(prune_old_routers);
    1538         244 :   NAMED_CALLBACK(fetch_networkstatus);
    1539         244 :   NAMED_CALLBACK(launch_descriptor_fetches);
    1540         244 :   NAMED_CALLBACK(check_dns_honesty);
    1541         244 :   NAMED_CALLBACK(save_state);
    1542             : }
    1543             : 
    1544             : STATIC void
    1545         237 : teardown_periodic_events(void)
    1546             : {
    1547         237 :   periodic_events_disconnect_all();
    1548         237 :   fetch_networkstatus_event = NULL;
    1549         237 :   launch_descriptor_fetches_event = NULL;
    1550         237 :   check_dns_honesty_event = NULL;
    1551         237 :   save_state_event = NULL;
    1552         237 :   prune_old_routers_event = NULL;
    1553         237 :   periodic_events_initialized = 0;
    1554         237 : }
    1555             : 
    1556             : static mainloop_event_t *rescan_periodic_events_ev = NULL;
    1557             : 
    1558             : /** Callback: rescan the periodic event list. */
    1559             : static void
    1560           0 : rescan_periodic_events_cb(mainloop_event_t *event, void *arg)
    1561             : {
    1562           0 :   (void)event;
    1563           0 :   (void)arg;
    1564           0 :   rescan_periodic_events(get_options());
    1565           0 : }
    1566             : 
    1567             : /**
    1568             :  * Schedule an event that will rescan which periodic events should run.
    1569             :  **/
    1570           5 : MOCK_IMPL(void,
    1571             : schedule_rescan_periodic_events,(void))
    1572             : {
    1573           5 :   if (!rescan_periodic_events_ev) {
    1574           5 :     rescan_periodic_events_ev =
    1575           5 :       mainloop_event_new(rescan_periodic_events_cb, NULL);
    1576             :   }
    1577           5 :   mainloop_event_activate(rescan_periodic_events_ev);
    1578           5 : }
    1579             : 
    1580             : /** Do a pass over all our periodic events: disable those we no longer need,
    1581             :  * and enable those we need now, using the given options. */
    1582             : void
    1583         297 : rescan_periodic_events(const or_options_t *options)
    1584             : {
    1585         297 :   tor_assert(options);
    1586             : 
    1587         297 :   periodic_events_rescan_by_roles(get_my_roles(options), net_is_disabled());
    1588         297 : }
    1589             : 
    1590             : /* We just got new options set globally; see if we need to enable or disable
    1591             :  * any periodic events. */
    1592             : void
    1593         220 : periodic_events_on_new_options(const or_options_t *options)
    1594             : {
    1595         220 :   rescan_periodic_events(options);
    1596         220 : }
    1597             : 
    1598             : /**
    1599             :  * Update our schedule so that we'll check whether we need to fetch directory
    1600             :  * info immediately.
    1601             :  */
    1602             : void
    1603           0 : reschedule_directory_downloads(void)
    1604             : {
    1605           0 :   tor_assert(fetch_networkstatus_event);
    1606           0 :   tor_assert(launch_descriptor_fetches_event);
    1607             : 
    1608           0 :   periodic_event_reschedule(fetch_networkstatus_event);
    1609           0 :   periodic_event_reschedule(launch_descriptor_fetches_event);
    1610           0 : }
    1611             : 
    1612             : /** Mainloop callback: clean up circuits, channels, and connections
    1613             :  * that are pending close. */
    1614             : static void
    1615           0 : postloop_cleanup_cb(mainloop_event_t *ev, void *arg)
    1616             : {
    1617           0 :   (void)ev;
    1618           0 :   (void)arg;
    1619           0 :   circuit_close_all_marked();
    1620           0 :   close_closeable_connections();
    1621           0 :   channel_run_cleanup();
    1622           0 :   channel_listener_run_cleanup();
    1623           0 : }
    1624             : 
    1625             : /** Event to run postloop_cleanup_cb */
    1626             : static mainloop_event_t *postloop_cleanup_ev=NULL;
    1627             : 
    1628             : /** Schedule a post-loop event to clean up marked channels, connections, and
    1629             :  * circuits. */
    1630             : void
    1631          68 : mainloop_schedule_postloop_cleanup(void)
    1632             : {
    1633          68 :   if (PREDICT_UNLIKELY(postloop_cleanup_ev == NULL)) {
    1634             :     // (It's possible that we can get here if we decide to close a connection
    1635             :     // in the earliest stages of our configuration, before we create events.)
    1636             :     return;
    1637             :   }
    1638          68 :   mainloop_event_activate(postloop_cleanup_ev);
    1639             : }
    1640             : 
    1641             : /** Event to run 'scheduled_shutdown_cb' */
    1642             : static mainloop_event_t *scheduled_shutdown_ev=NULL;
    1643             : 
    1644             : /** Callback: run a scheduled shutdown */
    1645             : static void
    1646           0 : scheduled_shutdown_cb(mainloop_event_t *ev, void *arg)
    1647             : {
    1648           0 :   (void)ev;
    1649           0 :   (void)arg;
    1650           0 :   log_notice(LD_GENERAL, "Clean shutdown finished. Exiting.");
    1651           0 :   tor_shutdown_event_loop_and_exit(0);
    1652           0 : }
    1653             : 
    1654             : /** Schedule the mainloop to exit after <b>delay_sec</b> seconds. */
    1655             : void
    1656           0 : mainloop_schedule_shutdown(int delay_sec)
    1657             : {
    1658           0 :   const struct timeval delay_tv = { delay_sec, 0 };
    1659           0 :   if (! scheduled_shutdown_ev) {
    1660           0 :     scheduled_shutdown_ev = mainloop_event_new(scheduled_shutdown_cb, NULL);
    1661             :   }
    1662           0 :   mainloop_event_schedule(scheduled_shutdown_ev, &delay_tv);
    1663           0 : }
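
A hedged usage sketch of the function above; the 30-second grace period and the calling context are invented for illustration, and the real callers live outside this file:

/* Hypothetical caller: begin a clean shutdown with a 30-second grace
 * period for existing connections. */
static void
begin_clean_shutdown_sketch(void)
{
  log_notice(LD_GENERAL, "Interrupt received: exiting cleanly in 30 seconds.");
  mainloop_schedule_shutdown(30);
}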
    1664             : 
    1665             : /** Perform regular maintenance tasks.  This function gets run once per
    1666             :  * second.
    1667             :  */
    1668             : static int
    1669           0 : second_elapsed_callback(time_t now, const or_options_t *options)
    1670             : {
    1671             :   /* 0. See if our bandwidth limits are exhausted and we should hibernate.
    1672             :    *
    1673             :    * Note: we have redundant mechanisms to handle the case where it's time
    1674             :    * to wake up from hibernation, or where we have a scheduled shutdown and
    1675             :    * it's time to run it; but this will also handle those cases.
    1676             :    */
    1677           0 :   consider_hibernation(now);
    1678             : 
    1679             :   /* Maybe enough time elapsed for us to reconsider a circuit. */
    1680           0 :   circuit_upgrade_circuits_from_guard_wait();
    1681             : 
    1682           0 :   if (options->UseBridges && !net_is_disabled()) {
    1683             :     /* Note: this check uses net_is_disabled(), not should_delay_dir_fetches()
    1684             :      * -- the latter is only for fetching consensus-derived directory info. */
    1685             :     // TODO: client
    1686             :     //     Also, schedule this rather than probing 1x / sec
    1687           0 :     fetch_bridge_descriptors(options, now);
    1688             :   }
    1689             : 
    1690           0 :   if (accounting_is_enabled(options)) {
    1691             :     // TODO: refactor or rewrite?
    1692           0 :     accounting_run_housekeeping(now);
    1693             :   }
    1694             : 
    1695             :   /* 3a. Every second, we examine pending circuits and prune the
    1696             :    *    ones which have been pending for more than a few seconds.
    1697             :    *    We do this before step 4, so it can try building more if
    1698             :    *    it's not comfortable with the number of available circuits.
    1699             :    */
    1700             :   /* (If our circuit build timeout can ever become lower than a second (which
    1701             :    * it can't, currently), we should do this more often.) */
    1702             :   // TODO: All expire stuff can become NET_PARTICIPANT, RUN_ON_DISABLE
    1703           0 :   circuit_expire_building();
    1704           0 :   circuit_expire_waiting_for_better_guard();
    1705             : 
    1706             :   /* 3b. Also look at pending streams and prune the ones that 'began'
    1707             :    *     a long time ago but haven't gotten a 'connected' yet.
    1708             :    *     Do this before step 4, so we can put them back into pending
    1709             :    *     state to be picked up by the new circuit.
    1710             :    */
    1711           0 :   connection_ap_expire_beginning();
    1712             : 
    1713             :   /* 3c. And expire connections that we've held open for too long.
    1714             :    */
    1715           0 :   connection_expire_held_open();
    1716             : 
    1717             :   /* 4. Every second, we try a new circuit if there are no valid
    1718             :    *    circuits. Every NewCircuitPeriod seconds, we expire circuits
    1719             :    *    that became dirty more than MaxCircuitDirtiness seconds ago,
    1720             :    *    and we make a new circ if there are no clean circuits.
    1721             :    */
    1722           0 :   const int have_dir_info = router_have_minimum_dir_info();
    1723           0 :   if (have_dir_info && !net_is_disabled()) {
    1724           0 :     circuit_build_needed_circs(now);
    1725             :   } else {
    1726           0 :     circuit_expire_old_circs_as_needed(now);
    1727             :   }
    1728             : 
    1729             :   /* 5. We do housekeeping for each connection... */
    1730           0 :   channel_update_bad_for_new_circs(NULL, 0);
    1731           0 :   int i;
    1732           0 :   for (i=0;i<smartlist_len(connection_array);i++) {
    1733           0 :     run_connection_housekeeping(i, now);
    1734             :   }
    1735             : 
    1736             :   /* Run again in a second. */
    1737           0 :   return 1;
    1738             : }
    1739             : 
    1740             : /**
    1741             :  * Periodic callback: Every {LAZY,GREEDY}_DESCRIPTOR_RETRY_INTERVAL,
    1742             :  * see about fetching descriptors, microdescriptors, and extrainfo
    1743             :  * documents.
    1744             :  */
    1745             : static int
    1746           0 : launch_descriptor_fetches_callback(time_t now, const or_options_t *options)
    1747             : {
    1748           0 :   if (should_delay_dir_fetches(options, NULL))
    1749             :       return PERIODIC_EVENT_NO_UPDATE;
    1750             : 
    1751           0 :   update_all_descriptor_downloads(now);
    1752           0 :   update_extrainfo_downloads(now);
    1753           0 :   if (router_have_minimum_dir_info())
    1754             :     return LAZY_DESCRIPTOR_RETRY_INTERVAL;
    1755             :   else
    1756           0 :     return GREEDY_DESCRIPTOR_RETRY_INTERVAL;
    1757             : }
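
All of the *_callback() functions that follow obey the same contract illustrated above: they receive the current time and the options, do their work, and return either the number of seconds until they next want to run or PERIODIC_EVENT_NO_UPDATE to leave the existing schedule alone. A hypothetical new callback (name and interval invented here, shown only to make that contract explicit) would look like:

/* Hypothetical periodic callback, illustrating the shared
 * return-value contract. */
static int
example_tidy_callback(time_t now, const or_options_t *options)
{
  (void)now;
  (void)options;
  if (net_is_disabled())
    return PERIODIC_EVENT_NO_UPDATE; /* keep whatever schedule we had */
  /* ... do the periodic work here ... */
#define EXAMPLE_TIDY_INTERVAL (10*60)
  return EXAMPLE_TIDY_INTERVAL;     /* run again in ten minutes */
}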
    1758             : 
    1759             : /**
    1760             :  * Periodic event: Rotate our X.509 certificates and TLS keys once every
    1761             :  * MAX_SSL_KEY_LIFETIME_INTERNAL.
    1762             :  */
    1763             : static int
    1764           0 : rotate_x509_certificate_callback(time_t now, const or_options_t *options)
    1765             : {
    1766           0 :   static int first = 1;
    1767           0 :   (void)now;
    1768           0 :   (void)options;
    1769           0 :   if (first) {
    1770           0 :     first = 0;
    1771           0 :     return MAX_SSL_KEY_LIFETIME_INTERNAL;
    1772             :   }
    1773             : 
    1774             :   /* 1b. Every MAX_SSL_KEY_LIFETIME_INTERNAL seconds, we change our
    1775             :    * TLS context. */
    1776           0 :   log_info(LD_GENERAL,"Rotating tls context.");
    1777           0 :   if (router_initialize_tls_context() < 0) {
    1778           0 :     log_err(LD_BUG, "Error reinitializing TLS context");
    1779           0 :     tor_assert_unreached();
    1780             :   }
    1781           0 :   if (generate_ed_link_cert(options, now, 1)) {
    1782           0 :     log_err(LD_OR, "Unable to update Ed25519->TLS link certificate for "
    1783             :             "new TLS context.");
    1784           0 :     tor_assert_unreached();
    1785             :   }
    1786             : 
    1787             :   /* We also make sure to rotate the TLS connections themselves if they've
    1788             :    * been up for too long -- but that's done via is_bad_for_new_circs in
    1789             :    * run_connection_housekeeping() above. */
    1790             :   return MAX_SSL_KEY_LIFETIME_INTERNAL;
    1791             : }
    1792             : 
    1793             : /**
    1794             :  * Periodic callback: once an hour, grab some more entropy from the
    1795             :  * kernel and feed it to our CSPRNG.
    1796             :  **/
    1797             : static int
    1798           0 : add_entropy_callback(time_t now, const or_options_t *options)
    1799             : {
    1800           0 :   (void)now;
    1801           0 :   (void)options;
    1802             :   /* We already seeded once, so don't die on failure. */
    1803           0 :   if (crypto_seed_rng() < 0) {
    1804           0 :     log_warn(LD_GENERAL, "Tried to re-seed RNG, but failed. We already "
    1805             :              "seeded once, though, so we won't exit here.");
    1806             :   }
    1807             : 
    1808             :   /** How often do we add more entropy to OpenSSL's RNG pool? */
    1809             : #define ENTROPY_INTERVAL (60*60)
    1810           0 :   return ENTROPY_INTERVAL;
    1811             : }
    1812             : 
    1813             : /** Periodic callback: if there has been no network usage in a while,
    1814             :  * enter a dormant state. */
    1815             : STATIC int
    1816           6 : check_network_participation_callback(time_t now, const or_options_t *options)
    1817             : {
    1818             :   /* If we're a server, we can't become dormant. */
    1819           6 :   if (server_mode(options)) {
    1820           1 :     goto found_activity;
    1821             :   }
    1822             : 
    1823             :   /* If we aren't allowed to become dormant, then participation doesn't
    1824             :    * matter. */
    1825           5 :   if (! options->DormantTimeoutEnabled) {
    1826           0 :     goto found_activity;
    1827             :   }
    1828             : 
    1829             :   /* If we're running an onion service, we can't become dormant. */
    1830             :   /* XXXX this would be nice to change, so that we can be dormant with a
    1831             :    * service. */
    1832           5 :   if (hs_service_get_num_services()) {
    1833           1 :     goto found_activity;
    1834             :   }
    1835             : 
    1836             :   /* If we have any currently open entry streams other than "linked"
    1837             :    * connections used for directory requests, those count as user activity.
    1838             :    */
    1839           4 :   if (options->DormantTimeoutDisabledByIdleStreams) {
    1840           2 :     if (connection_get_by_type_nonlinked(CONN_TYPE_AP) != NULL) {
    1841           1 :       goto found_activity;
    1842             :     }
    1843             :   }
    1844             : 
    1845             :   /* XXXX Make this configurable? */
    1846             : /** How often do we check whether we have had network activity? */
    1847             : #define CHECK_PARTICIPATION_INTERVAL (5*60)
    1848             : 
    1849             :   /* Become dormant if there has been no user activity in a long time.
    1850             :    * (The funny checks below are in order to prevent overflow.) */
    1851           3 :   time_t time_since_last_activity = 0;
    1852           3 :   if (get_last_user_activity_time() < now)
    1853           3 :     time_since_last_activity = now - get_last_user_activity_time();
    1854           3 :   if (time_since_last_activity >= options->DormantClientTimeout) {
    1855           2 :     log_notice(LD_GENERAL, "No user activity in a long time: becoming"
    1856             :                " dormant.");
    1857           2 :     set_network_participation(false);
    1858           2 :     rescan_periodic_events(options);
    1859             :   }
    1860             : 
    1861             :   return CHECK_PARTICIPATION_INTERVAL;
    1862             : 
    1863           3 :  found_activity:
    1864           3 :   note_user_activity(now);
    1865           3 :   return CHECK_PARTICIPATION_INTERVAL;
    1866             : }
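
A worked example of the dormancy arithmetic above, assuming DormantClientTimeout is 24 hours (its default is set elsewhere, so treat that number as an assumption):

/* Example, assuming DormantClientTimeout == 24*60*60 (86400 seconds):
 *
 *   get_last_user_activity_time() == 1000000
 *   now                           == 1090000   (25 hours later)
 *   time_since_last_activity      == 90000, which is >= 86400,
 *   so set_network_participation(false) runs and Tor goes dormant.
 *
 * Since the callback re-runs every CHECK_PARTICIPATION_INTERVAL
 * (5 minutes), dormancy starts within about five minutes of the
 * timeout being exceeded. */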
    1867             : 
    1868             : /**
    1869             :  * Periodic callback: If our consensus is too old, recalculate whether
    1870             :  * we can actually use it.
    1871             :  */
    1872             : static int
    1873           0 : check_expired_networkstatus_callback(time_t now, const or_options_t *options)
    1874             : {
    1875           0 :   (void)options;
    1876             :   /* Check whether our networkstatus has expired. */
    1877           0 :   networkstatus_t *ns = networkstatus_get_latest_consensus();
    1878             :   /* Use reasonably live consensuses until they are no longer reasonably live.
    1879             :    */
    1880           0 :   if (ns && !networkstatus_consensus_reasonably_live(ns, now) &&
    1881           0 :       router_have_minimum_dir_info()) {
    1882           0 :     router_dir_info_changed();
    1883             :   }
    1884             : #define CHECK_EXPIRED_NS_INTERVAL (2*60)
    1885           0 :   return CHECK_EXPIRED_NS_INTERVAL;
    1886             : }
    1887             : 
    1888             : /**
    1889             :  * Scheduled callback: Save the state file to disk if appropriate.
    1890             :  */
    1891             : static int
    1892           0 : save_state_callback(time_t now, const or_options_t *options)
    1893             : {
    1894           0 :   (void) options;
    1895           0 :   (void) or_state_save(now); // only saves if appropriate
    1896           0 :   const time_t next_write = get_or_state()->next_write;
    1897           0 :   if (next_write == TIME_MAX) {
    1898             :     return 86400;
    1899             :   }
    1900           0 :   return safe_timer_diff(now, next_write);
    1901             : }
    1902             : 
    1903             : /** Reschedule the event for saving the state file.
    1904             :  *
    1905             :  * Run this when the state becomes dirty. */
    1906             : void
    1907           0 : reschedule_or_state_save(void)
    1908             : {
    1909           0 :   if (save_state_event == NULL) {
    1910             :     /* This can happen early on during startup. */
    1911             :     return;
    1912             :   }
    1913           0 :   periodic_event_reschedule(save_state_event);
    1914             : }
    1915             : 
    1916             : /**
    1917             :  * Periodic callback: Write statistics to disk if appropriate.
    1918             :  */
    1919             : static int
    1920           0 : write_stats_file_callback(time_t now, const or_options_t *options)
    1921             : {
    1922             :   /* 1g. Check whether we should write statistics to disk.
    1923             :    */
    1924             : #define CHECK_WRITE_STATS_INTERVAL (60*60)
    1925           0 :   time_t next_time_to_write_stats_files = now + CHECK_WRITE_STATS_INTERVAL;
    1926           0 :   if (options->CellStatistics) {
    1927           0 :     time_t next_write =
    1928           0 :       rep_hist_buffer_stats_write(now);
    1929           0 :     if (next_write && next_write < next_time_to_write_stats_files)
    1930           0 :       next_time_to_write_stats_files = next_write;
    1931             :   }
    1932           0 :   if (options->DirReqStatistics) {
    1933           0 :     time_t next_write = geoip_dirreq_stats_write(now);
    1934           0 :     if (next_write && next_write < next_time_to_write_stats_files)
    1935           0 :       next_time_to_write_stats_files = next_write;
    1936             :   }
    1937           0 :   if (options->EntryStatistics) {
    1938           0 :     time_t next_write = geoip_entry_stats_write(now);
    1939           0 :     if (next_write && next_write < next_time_to_write_stats_files)
    1940           0 :       next_time_to_write_stats_files = next_write;
    1941             :   }
    1942           0 :   if (options->HiddenServiceStatistics) {
    1943           0 :     time_t next_write = rep_hist_hs_stats_write(now, false);
    1944           0 :     if (next_write && next_write < next_time_to_write_stats_files)
    1945           0 :       next_time_to_write_stats_files = next_write;
    1946             : 
    1947           0 :     next_write = rep_hist_hs_stats_write(now, true);
    1948           0 :     if (next_write && next_write < next_time_to_write_stats_files)
    1949           0 :       next_time_to_write_stats_files = next_write;
    1950             :   }
    1951           0 :   if (options->ExitPortStatistics) {
    1952           0 :     time_t next_write = rep_hist_exit_stats_write(now);
    1953           0 :     if (next_write && next_write < next_time_to_write_stats_files)
    1954           0 :       next_time_to_write_stats_files = next_write;
    1955             :   }
    1956           0 :   if (options->ConnDirectionStatistics) {
    1957           0 :     time_t next_write = conn_stats_save(now);
    1958           0 :     if (next_write && next_write < next_time_to_write_stats_files)
    1959           0 :       next_time_to_write_stats_files = next_write;
    1960             :   }
    1961           0 :   if (options->BridgeAuthoritativeDir) {
    1962           0 :     time_t next_write = rep_hist_desc_stats_write(now);
    1963           0 :     if (next_write && next_write < next_time_to_write_stats_files)
    1964           0 :       next_time_to_write_stats_files = next_write;
    1965             :   }
    1966             : 
    1967           0 :   return safe_timer_diff(now, next_time_to_write_stats_files);
    1968             : }
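
Every branch above repeats the same "keep the earliest pending write time" step. A small hypothetical helper, shown only to make that repeated idiom explicit (it is not part of mainloop.c):

/* Hypothetical helper: fold one subsystem's requested next-write time
 * into the running minimum; a value of 0 means "nothing scheduled" and
 * is ignored. */
static time_t
earliest_stats_write_sketch(time_t current_min, time_t candidate)
{
  if (candidate && candidate < current_min)
    return candidate;
  return current_min;
}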
    1969             : 
    1970             : static int
    1971           0 : reset_padding_counts_callback(time_t now, const or_options_t *options)
    1972             : {
    1973           0 :   if (options->PaddingStatistics) {
    1974           0 :     rep_hist_prep_published_padding_counts(now);
    1975             :   }
    1976             : 
    1977           0 :   rep_hist_reset_padding_counts();
    1978           0 :   return REPHIST_CELL_PADDING_COUNTS_INTERVAL;
    1979             : }
    1980             : 
    1981             : static int should_init_bridge_stats = 1;
    1982             : 
    1983             : /**
    1984             :  * Periodic callback: Write bridge statistics to disk if appropriate.
    1985             :  */
    1986             : static int
    1987           0 : record_bridge_stats_callback(time_t now, const or_options_t *options)
    1988             : {
    1989             :   /* 1h. Check whether we should write bridge statistics to disk.
    1990             :    */
    1991           0 :   if (should_record_bridge_info(options)) {
    1992           0 :     if (should_init_bridge_stats) {
    1993             :       /* (Re-)initialize bridge statistics. */
    1994           0 :       geoip_bridge_stats_init(now);
    1995           0 :       should_init_bridge_stats = 0;
    1996           0 :       return WRITE_STATS_INTERVAL;
    1997             :     } else {
    1998             :       /* Possibly write bridge statistics to disk and ask when to write
    1999             :        * them next time. */
    2000           0 :       time_t next = geoip_bridge_stats_write(now);
    2001           0 :       return safe_timer_diff(now, next);
    2002             :     }
    2003           0 :   } else if (!should_init_bridge_stats) {
    2004             :     /* Bridge mode was turned off. Ensure that stats are re-initialized
    2005             :      * next time bridge mode is turned on. */
    2006           0 :     should_init_bridge_stats = 1;
    2007             :   }
    2008             :   return PERIODIC_EVENT_NO_UPDATE;
    2009             : }
    2010             : 
    2011             : /**
    2012             :  * Periodic callback: Clean in-memory caches every once in a while
    2013             :  */
    2014             : static int
    2015           0 : clean_caches_callback(time_t now, const or_options_t *options)
    2016             : {
    2017             :   /* Remove old information from rephist and the rend cache. */
    2018           0 :   rep_history_clean(now - options->RephistTrackTime);
    2019           0 :   hs_cache_clean_as_client(now);
    2020           0 :   hs_cache_clean_as_dir(now);
    2021           0 :   microdesc_cache_rebuild(NULL, 0);
    2022             : #define CLEAN_CACHES_INTERVAL (30*60)
    2023           0 :   return CLEAN_CACHES_INTERVAL;
    2024             : }
    2025             : 
    2026             : /**
    2027             :  * Periodic callback: Clean the cache of failed hidden service lookups
    2028             :  * frequently.
    2029             :  */
    2030             : static int
    2031           0 : rend_cache_failure_clean_callback(time_t now, const or_options_t *options)
    2032             : {
    2033           0 :   (void)options;
    2034             :   /* We don't keep entries that are more than five minutes old, so we try to
    2035             :    * clean this cache as soon as we can: we want the client to wait as little
    2036             :    * as possible, for reachability reasons. */
    2037           0 :   hs_cache_client_intro_state_clean(now);
    2038           0 :   return 30;
    2039             : }
    2040             : 
    2041             : /**
    2042             :  * Periodic callback: prune routerlist of old information about Tor network.
    2043             :  */
    2044             : static int
    2045           0 : prune_old_routers_callback(time_t now, const or_options_t *options)
    2046             : {
    2047             : #define ROUTERLIST_PRUNING_INTERVAL (60*60) // 1 hour.
    2048           0 :   (void)now;
    2049           0 :   (void)options;
    2050             : 
    2051           0 :   if (!net_is_disabled()) {
    2052             :     /* If any networkstatus documents are no longer recent, we need to
    2053             :      * update all the descriptors' running status. */
    2054             :     /* Remove dead routers. */
    2055           0 :     log_debug(LD_GENERAL, "Pruning routerlist...");
    2056           0 :     routerlist_remove_old_routers();
    2057             :   }
    2058             : 
    2059           0 :   return ROUTERLIST_PRUNING_INTERVAL;
    2060             : }
    2061             : 
    2062             : /**
    2063             :  * Periodic event: once a minute (or every second if TestingTorNetwork is
    2064             :  * set, or while a client is bootstrapping), check whether we want to
    2065             :  * download any networkstatus documents. */
    2066             : static int
    2067           0 : fetch_networkstatus_callback(time_t now, const or_options_t *options)
    2068             : {
    2069             :   /* How often do we check whether we should download network status
    2070             :    * documents? */
    2071           0 :   const int we_are_bootstrapping = networkstatus_consensus_is_bootstrapping(
    2072             :                                                                         now);
    2073           0 :   const int prefer_mirrors = !dirclient_fetches_from_authorities(
    2074           0 :                                                               get_options());
    2075           0 :   int networkstatus_dl_check_interval = 60;
    2076             :   /* check more often when testing, or when bootstrapping from mirrors
    2077             :    * (connection limits prevent too many connections being made) */
    2078           0 :   if (options->TestingTorNetwork
    2079           0 :       || (we_are_bootstrapping && prefer_mirrors)) {
    2080           0 :     networkstatus_dl_check_interval = 1;
    2081             :   }
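                     :   /* Illustrative values: a steady-state node checks every 60 seconds; a
                     :    * bootstrapping client that prefers directory mirrors, or any node on a
                     :    * testing network, checks every second. */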
    2082             : 
    2083           0 :   if (should_delay_dir_fetches(options, NULL))
    2084             :     return PERIODIC_EVENT_NO_UPDATE;
    2085             : 
    2086           0 :   update_networkstatus_downloads(now);
    2087           0 :   return networkstatus_dl_check_interval;
    2088             : }
    2089             : 
    2090             : /**
    2091             :  * Periodic callback: Every 60 seconds, we relaunch listeners if any died. */
    2092             : static int
    2093           0 : retry_listeners_callback(time_t now, const or_options_t *options)
    2094             : {
    2095           0 :   (void)now;
    2096           0 :   (void)options;
    2097           0 :   if (!net_is_disabled()) {
    2098           0 :     retry_all_listeners(NULL, 0);
    2099           0 :     return 60;
    2100             :   }
    2101             :   return PERIODIC_EVENT_NO_UPDATE;
    2102             : }
    2103             : 
    2104             : static int heartbeat_callback_first_time = 1;
    2105             : 
    2106             : /**
    2107             :  * Periodic callback: write the heartbeat message in the logs.
    2108             :  *
    2109             :  * If writing the heartbeat message to the logs fails for some reason, retry
    2110             :  * after <b>MIN_HEARTBEAT_PERIOD</b> seconds.
    2111             :  */
    2112             : static int
    2113           0 : heartbeat_callback(time_t now, const or_options_t *options)
    2114             : {
    2115             :   /* Check if heartbeat is disabled */
    2116           0 :   if (!options->HeartbeatPeriod) {
    2117             :     return PERIODIC_EVENT_NO_UPDATE;
    2118             :   }
    2119             : 
    2120             :   /* Skip the first one. */
    2121           0 :   if (heartbeat_callback_first_time) {
    2122           0 :     heartbeat_callback_first_time = 0;
    2123           0 :     return options->HeartbeatPeriod;
    2124             :   }
    2125             : 
    2126             :   /* Write the heartbeat message */
    2127           0 :   if (log_heartbeat(now) == 0) {
    2128           0 :     return options->HeartbeatPeriod;
    2129             :   } else {
    2130             :     /* If we couldn't write the heartbeat log message, try again in the minimum
    2131             :      * interval of time. */
    2132             :     return MIN_HEARTBEAT_PERIOD;
    2133             :   }
    2134             : }
    2135             : 
    2136             : #define CDM_CLEAN_CALLBACK_INTERVAL 600
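                     : /** Periodic callback: if we are serving directory information, clean up
                     :  * the consensus diff manager's storage. */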
    2137             : static int
    2138           0 : clean_consdiffmgr_callback(time_t now, const or_options_t *options)
    2139             : {
    2140           0 :   (void)now;
    2141           0 :   if (dir_server_mode(options)) {
    2142           0 :     consdiffmgr_cleanup();
    2143             :   }
    2144           0 :   return CDM_CLEAN_CALLBACK_INTERVAL;
    2145             : }
    2146             : 
    2147             : /**
    2148             :  * Periodic callback: Run scheduled events for HS service. This is called
    2149             :  * every second.
    2150             :  */
    2151             : static int
    2152           0 : hs_service_callback(time_t now, const or_options_t *options)
    2153             : {
    2154           0 :   (void) options;
    2155             : 
    2156             :   /* We need to at least be able to build circuits, and we need to actually
    2157             :    * have a working network. */
    2158           0 :   if (!have_completed_a_circuit() || net_is_disabled() ||
    2159           0 :       !networkstatus_get_reasonably_live_consensus(now,
    2160             :                                          usable_consensus_flavor())) {
    2161           0 :     goto end;
    2162             :   }
    2163             : 
    2164           0 :   hs_service_run_scheduled_events(now);
    2165             : 
    2166           0 :  end:
    2167             :   /* Every 1 second. */
    2168           0 :   return 1;
    2169             : }
    2170             : 
    2171             : /**
    2172             :  * Periodic callback: Send once-per-second events to the controller(s).
    2173             :  * This is called every second.
    2174             :  */
    2175             : static int
    2176           0 : control_per_second_events_callback(time_t now, const or_options_t *options)
    2177             : {
    2178           0 :   (void) options;
    2179           0 :   (void) now;
    2180             : 
    2181           0 :   control_per_second_events();
    2182             : 
    2183           0 :   return 1;
    2184             : }
    2185             : 
    2186             : /** Last time that update_current_time was called. */
    2187             : static time_t current_second = 0;
    2188             : /** Last time that update_current_time updated current_second. */
    2189             : static monotime_coarse_t current_second_last_changed;
    2190             : 
    2191             : /**
    2192             :  * Set the current time to "now", which should be the value returned by
    2193             :  * time().  Check for clock jumps and track the total number of seconds we
    2194             :  * have been running.
    2195             :  */
    2196             : void
    2197          12 : update_current_time(time_t now)
    2198             : {
    2199          12 :   if (PREDICT_LIKELY(now == current_second)) {
    2200             :     /* We call this function a lot.  Most frequently, the current second
    2201             :      * will not have changed, so we just return. */
    2202           1 :     return;
    2203             :   }
    2204             : 
    2205          11 :   const time_t seconds_elapsed = current_second ? (now - current_second) : 0;
    2206             : 
    2207             :   /* Check the wall clock against the monotonic clock, so we can
    2208             :    * better tell idleness from clock jumps and/or other shenanigans. */
    2209          11 :   monotime_coarse_t last_updated;
    2210          11 :   memcpy(&last_updated, &current_second_last_changed, sizeof(last_updated));
    2211          11 :   monotime_coarse_get(&current_second_last_changed);
    2212             : 
    2213             :   /** How many seconds of clock jump should make us adjust our idea of when
    2214             :    * to go dormant? */
    2215             : #define NUM_JUMPED_SECONDS_BEFORE_NETSTATUS_UPDATE 20
    2216             : 
    2217             :   /* Don't go dormant early or late just because we jumped in time. */
    2218          11 :   if (ABS(seconds_elapsed) >= NUM_JUMPED_SECONDS_BEFORE_NETSTATUS_UPDATE) {
    2219           4 :     if (is_participating_on_network()) {
    2220           0 :       netstatus_note_clock_jumped(seconds_elapsed);
    2221             :     }
    2222             :   }
    2223             : 
    2224             :   /** How much clock jumping do we tolerate? */
    2225             : #define NUM_JUMPED_SECONDS_BEFORE_WARN 100
    2226             : 
    2227             :   /** How much idleness do we tolerate? */
    2228             : #define NUM_IDLE_SECONDS_BEFORE_WARN 3600
    2229             : 
    2230          11 :   if (seconds_elapsed < -NUM_JUMPED_SECONDS_BEFORE_WARN) {
    2231             :     // moving back in time is always a bad sign.
    2232           1 :     circuit_note_clock_jumped(seconds_elapsed, false);
    2233             : 
    2234          10 :   } else if (seconds_elapsed >= NUM_JUMPED_SECONDS_BEFORE_WARN) {
    2235             :     /* Compare the monotonic clock to the result of time(). */
    2236           3 :     const int32_t monotime_msec_passed =
    2237           3 :       monotime_coarse_diff_msec32(&last_updated,
    2238             :                                   &current_second_last_changed);
    2239           3 :     const int monotime_sec_passed = monotime_msec_passed / 1000;
    2240           3 :     const int discrepancy = monotime_sec_passed - (int)seconds_elapsed;
    2241             :     /* If the monotonic clock deviates from time(NULL), we have a couple of
    2242             :      * possibilities.  On some systems, this means we have been suspended or
    2243             :      * sleeping.  Everywhere, it can mean that the wall-clock time has
    2244             :      * been changed -- for example, with settimeofday().
    2245             :      *
    2246             :      * On the other hand, if the monotonic time matches the wall-clock
    2247             :      * time, we've probably just been idle for a while, with no events firing.
    2248             :      * We tolerate much more of that.
    2249             :      */
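                     :     /* Worked example (illustrative numbers): if time() says 300 seconds
                     :      * elapsed and the monotonic clock measured about the same, the
                     :      * discrepancy is within the 2-second slop and we were probably just
                     :      * idle (and 300 < 3600, so we don't warn).  If the monotonic clock
                     :      * only measured a few seconds, the discrepancy is large and negative,
                     :      * so we treat it as a wall-clock jump. */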
    2250           3 :     const bool clock_jumped = abs(discrepancy) > 2;
    2251             : 
    2252           3 :     if (clock_jumped || seconds_elapsed >= NUM_IDLE_SECONDS_BEFORE_WARN) {
    2253           2 :       circuit_note_clock_jumped(seconds_elapsed, ! clock_jumped);
    2254             :     }
    2255           7 :   } else if (seconds_elapsed > 0) {
    2256           4 :     stats_n_seconds_working += seconds_elapsed;
    2257             :   }
    2258             : 
    2259          11 :   update_approx_time(now);
    2260          11 :   current_second = now;
    2261             : }
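                     : /* Usage sketch (illustrative, not a specific call site in this file): a
                     :  * callback that wants Tor's notion of "now" to be fresh would do
                     :  *   update_current_time(time(NULL));
                     :  * before consulting approx_time() or scheduling time-based work. */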
    2262             : 
    2263             : #ifdef HAVE_SYSTEMD_209
    2264             : static periodic_timer_t *systemd_watchdog_timer = NULL;
    2265             : 
    2266             : /** Libevent callback: invoked to reset systemd watchdog. */
    2267             : static void
    2268           0 : systemd_watchdog_callback(periodic_timer_t *timer, void *arg)
    2269             : {
    2270           0 :   (void)timer;
    2271           0 :   (void)arg;
    2272           0 :   sd_notify(0, "WATCHDOG=1");
    2273           0 : }
    2274             : #endif /* defined(HAVE_SYSTEMD_209) */
    2275             : 
    2276             : #define UPTIME_CUTOFF_FOR_NEW_BANDWIDTH_TEST (6*60*60)
    2277             : 
    2278             : /** Called when our IP address seems to have changed. <b>on_client_conn</b>
    2279             :  * should be true if:
    2280             :  *   - we detected a change in our interface address, using an outbound
    2281             :  *     connection, and therefore
    2282             :  *   - our client TLS keys need to be rotated.
    2283             :  * Otherwise, it should be false, and:
    2284             :  *   - we detected a change in our published address
    2285             :  *     (using some other method), and therefore
    2286             :  *   - the published addresses in our descriptor need to change.
    2287             :  */
    2288             : void
    2289          18 : ip_address_changed(int on_client_conn)
    2290             : {
    2291          18 :   const or_options_t *options = get_options();
    2292          18 :   int server = server_mode(options);
    2293             : 
    2294          18 :   if (on_client_conn) {
    2295           0 :     if (! server) {
    2296             :       /* Okay, change our keys. */
    2297           0 :       if (init_keys_client() < 0)
    2298           0 :         log_warn(LD_GENERAL, "Unable to rotate keys after IP change!");
    2299             :     }
    2300             :   } else {
    2301          18 :     if (server) {
    2302           0 :       if (get_uptime() > UPTIME_CUTOFF_FOR_NEW_BANDWIDTH_TEST)
    2303           0 :         reset_bandwidth_test();
    2304           0 :       reset_uptime();
    2305           0 :       router_reset_reachability();
    2306             :       /* All relays include their IP addresses as their ORPort addresses in
    2307             :        * their descriptor.
    2308             :        * Exit relays also incorporate interface addresses in their exit
    2309             :        * policies, when ExitPolicyRejectLocalInterfaces is set. */
    2310           0 :       mark_my_descriptor_dirty("IP address changed");
    2311             :     }
    2312             :   }
    2313             : 
    2314          18 :   dns_servers_relaunch_checks();
    2315          18 : }
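                     : /* Illustrative (hypothetical) call sites for the two cases documented
                     :  * above: code that notices a new interface address on an outbound
                     :  * connection would call ip_address_changed(1); code that learns that our
                     :  * published address changed by some other means would call
                     :  * ip_address_changed(0). */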
    2316             : 
    2317             : /** Forget what we've learned about the correctness of our DNS servers, and
    2318             :  * start learning again. */
    2319             : void
    2320          20 : dns_servers_relaunch_checks(void)
    2321             : {
    2322          20 :   if (server_mode(get_options())) {
    2323           2 :     dns_reset_correctness_checks();
    2324           2 :     if (check_dns_honesty_event) {
    2325           0 :       periodic_event_reschedule(check_dns_honesty_event);
    2326             :     }
    2327             :   }
    2328          20 : }
    2329             : 
    2330             : /** Initialize some mainloop_event_t objects that we require. */
    2331             : void
    2332           9 : initialize_mainloop_events(void)
    2333             : {
    2334           9 :   if (!schedule_active_linked_connections_event) {
    2335           9 :     schedule_active_linked_connections_event =
    2336           9 :       mainloop_event_postloop_new(schedule_active_linked_connections_cb, NULL);
    2337             :   }
    2338           9 :   if (!postloop_cleanup_ev) {
    2339           9 :     postloop_cleanup_ev =
    2340           9 :       mainloop_event_postloop_new(postloop_cleanup_cb, NULL);
    2341             :   }
    2342           9 : }
    2343             : 
    2344             : /** Tor main loop. */
    2345             : int
    2346           0 : do_main_loop(void)
    2347             : {
    2348             :   /* initialize the periodic events first, so that code that depends on the
    2349             :    * events being present does not assert.
    2350             :    */
    2351           0 :   tor_assert(periodic_events_initialized);
    2352           0 :   initialize_mainloop_events();
    2353             : 
    2354           0 :   periodic_events_connect_all();
    2355             : 
    2356           0 :   struct timeval one_second = { 1, 0 };
    2357           0 :   initialize_periodic_events_event = tor_evtimer_new(
    2358             :                   tor_libevent_get_base(),
    2359             :                   initialize_periodic_events_cb, NULL);
    2360           0 :   event_add(initialize_periodic_events_event, &one_second);
    2361             : 
    2362             : #ifdef HAVE_SYSTEMD_209
    2363           0 :   uint64_t watchdog_delay;
    2364             :   /* set up systemd watchdog notification. */
    2365           0 :   if (sd_watchdog_enabled(1, &watchdog_delay) > 0) {
    2366           0 :     if (! systemd_watchdog_timer) {
    2367           0 :       struct timeval watchdog;
    2368             :       /* The manager will "act on" us if we don't send them a notification
    2369             :        * every 'watchdog_delay' microseconds.  So, send notifications twice
    2370             :        * that often.  */
    2371           0 :       watchdog_delay /= 2;
    2372           0 :       watchdog.tv_sec = watchdog_delay  / 1000000;
    2373           0 :       watchdog.tv_usec = watchdog_delay % 1000000;
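                     :       /* Worked example (illustrative unit setting): WatchdogSec=30 makes
                     :        * sd_watchdog_enabled() report 30000000 usec; halved, that gives
                     :        * tv_sec = 15 and tv_usec = 0, so we notify twice per watchdog
                     :        * period. */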
    2374             : 
    2375           0 :       systemd_watchdog_timer = periodic_timer_new(tor_libevent_get_base(),
    2376             :                                                   &watchdog,
    2377             :                                                   systemd_watchdog_callback,
    2378             :                                                   NULL);
    2379           0 :       tor_assert(systemd_watchdog_timer);
    2380             :     }
    2381             :   }
    2382             : #endif /* defined(HAVE_SYSTEMD_209) */
    2383             : #ifdef ENABLE_RESTART_DEBUGGING
    2384             :   {
    2385             :     static int first_time = 1;
    2386             : 
    2387             :     if (first_time && getenv("TOR_DEBUG_RESTART")) {
    2388             :       first_time = 0;
    2389             :       const char *sec_str = getenv("TOR_DEBUG_RESTART_AFTER_SECONDS");
    2390             :       long sec;
    2391             :       int sec_ok=0;
    2392             :       if (sec_str &&
    2393             :           (sec = tor_parse_long(sec_str, 10, 0, INT_MAX, &sec_ok, NULL)) &&
    2394             :           sec_ok) {
    2395             :         /* Okay, we parsed the seconds. */
    2396             :       } else {
    2397             :         sec = 5;
    2398             :       }
    2399             :       struct timeval restart_after = { (time_t) sec, 0 };
    2400             :       tor_shutdown_event_loop_for_restart_event =
    2401             :         tor_evtimer_new(tor_libevent_get_base(),
    2402             :                         tor_shutdown_event_loop_for_restart_cb, NULL);
    2403             :       event_add(tor_shutdown_event_loop_for_restart_event, &restart_after);
    2404             :     }
    2405             :   }
    2406             : #endif /* defined(ENABLE_RESTART_DEBUGGING) */
    2407             : 
    2408           0 :   return run_main_loop_until_done();
    2409             : }
    2410             : 
    2411             : #ifndef _WIN32
    2412             : /** Rate-limiter for EINVAL-type libevent warnings. */
    2413             : static ratelim_t libevent_error_ratelim = RATELIM_INIT(10);
    2414             : #endif
    2415             : 
    2416             : /**
    2417             :  * Run the main loop a single time. Return 0 for "exit", -1 for "exit with
    2418             :  * error", and 1 for "run this again".
    2419             :  */
    2420             : static int
    2421           3 : run_main_loop_once(void)
    2422             : {
    2423           3 :   int loop_result;
    2424             : 
    2425           3 :   if (nt_service_is_stopping())
    2426             :     return 0;
    2427             : 
    2428           3 :   if (main_loop_should_exit)
    2429             :     return 0;
    2430             : 
    2431             : #ifndef _WIN32
    2432             :   /* Make it easier to tell whether libevent failure is our fault or not. */
    2433           3 :   errno = 0;
    2434             : #endif
    2435             : 
    2436           3 :   if (get_options()->MainloopStats) {
    2437             :     /* We always enforce that EVLOOP_ONCE is passed to event_base_loop() if we
    2438             :      * are collecting main loop statistics. */
    2439           0 :     called_loop_once = 1;
    2440             :   } else {
    2441           3 :     called_loop_once = 0;
    2442             :   }
    2443             : 
    2444             :   /* Make sure we know (about) what time it is. */
    2445           3 :   update_approx_time(time(NULL));
    2446             : 
    2447             :   /* Here it is: the main loop.  Here we tell Libevent to poll until we have
    2448             :    * an event, or the second ends, or until we have some active linked
    2449             :    * connections to trigger events for.  Libevent will wait till one
    2450             :    * of these happens, then run all the appropriate callbacks. */
    2451           3 :   loop_result = tor_libevent_run_event_loop(tor_libevent_get_base(),
    2452             :                                             called_loop_once);
    2453             : 
    2454           3 :   if (get_options()->MainloopStats) {
    2455             :     /* Update our main loop counters. */
    2456           0 :     if (loop_result == 0) {
    2457             :       // The call was successful.
    2458           0 :       increment_main_loop_success_count();
    2459           0 :     } else if (loop_result == -1) {
    2460             :       // The call was erroneous.
    2461           0 :       increment_main_loop_error_count();
    2462           0 :     } else if (loop_result == 1) {
    2463             :       // The call didn't have any active or pending events
    2464             :       // to handle.
    2465           0 :       increment_main_loop_idle_count();
    2466             :     }
    2467             :   }
    2468             : 
    2469             :   /* Oh, the loop failed.  That might be an error that we need to
    2470             :    * catch, but more likely, it's just an interrupted poll() call or something,
    2471             :    * and we should try again. */
    2472           3 :   if (loop_result < 0) {
    2473           0 :     int e = tor_socket_errno(-1);
    2474             :     /* let the program survive things like ^z */
    2475           0 :     if (e != EINTR && !ERRNO_IS_EINPROGRESS(e)) {
    2476           0 :       log_err(LD_NET,"libevent call with %s failed: %s [%d]",
    2477             :               tor_libevent_get_method(), tor_socket_strerror(e), e);
    2478           0 :       return -1;
    2479             : #ifndef _WIN32
    2480           0 :     } else if (e == EINVAL) {
    2481             :       log_fn_ratelim(&libevent_error_ratelim, LOG_WARN, LD_NET,
    2482             :                      "EINVAL from libevent: should you upgrade libevent?");
    2483             :       if (libevent_error_ratelim.n_calls_since_last_time > 8) {
    2484             :         log_err(LD_NET, "Too many libevent errors, too fast: dying");
    2485             :         return -1;
    2486             :       }
    2487             : #endif /* !defined(_WIN32) */
    2488             :     } else {
    2489           0 :       tor_assert_nonfatal_once(! ERRNO_IS_EINPROGRESS(e));
    2490           0 :       log_debug(LD_NET,"libevent call interrupted.");
    2491             :       /* You can't trust the results of this poll(). Go back to the
    2492             :        * top of the big for loop. */
    2493           0 :       return 1;
    2494             :     }
    2495             :   }
    2496             : 
    2497           3 :   if (main_loop_should_exit)
    2498           3 :     return 0;
    2499             : 
    2500             :   return 1;
    2501             : }
    2502             : 
    2503             : /** Run the run_main_loop_once() function until it declares itself done,
    2504             :  * and return its final return value.
    2505             :  *
    2506             :  * Shadow won't invoke this function, so don't put anything Shadow needs here.
    2507             :  */
    2508             : STATIC int
    2509           3 : run_main_loop_until_done(void)
    2510             : {
    2511           3 :   int loop_result = 1;
    2512             : 
    2513           3 :   main_loop_should_exit = 0;
    2514           3 :   main_loop_exit_value = 0;
    2515             : 
    2516           3 :   do {
    2517           3 :     loop_result = run_main_loop_once();
    2518           3 :   } while (loop_result == 1);
    2519             : 
    2520           3 :   if (main_loop_should_exit)
    2521           3 :     return main_loop_exit_value;
    2522             :   else
    2523             :     return loop_result;
    2524             : }
    2525             : 
    2526             : /** Return Tor's uptime, in seconds. */
    2527          38 : MOCK_IMPL(long,
    2528             : get_uptime,(void))
    2529             : {
    2530          38 :   return stats_n_seconds_working;
    2531             : }
    2532             : 
    2533             : /** Reset Tor's uptime. */
    2534          12 : MOCK_IMPL(void,
    2535             : reset_uptime,(void))
    2536             : {
    2537          12 :   stats_n_seconds_working = 0;
    2538          12 : }
    2539             : 
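                     : /** Free all storage held by the main loop subsystem, and reset its
                     :  * static state to defaults. */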
    2540             : void
    2541         235 : tor_mainloop_free_all(void)
    2542             : {
    2543         235 :   smartlist_free(connection_array);
    2544         235 :   smartlist_free(closeable_connection_lst);
    2545         235 :   smartlist_free(active_linked_connection_lst);
    2546         235 :   teardown_periodic_events();
    2547         235 :   tor_event_free(shutdown_did_not_work_event);
    2548         235 :   tor_event_free(initialize_periodic_events_event);
    2549         235 :   mainloop_event_free(directory_all_unreachable_cb_event);
    2550         235 :   mainloop_event_free(schedule_active_linked_connections_event);
    2551         235 :   mainloop_event_free(postloop_cleanup_ev);
    2552         235 :   mainloop_event_free(handle_deferred_signewnym_ev);
    2553         235 :   mainloop_event_free(scheduled_shutdown_ev);
    2554         235 :   mainloop_event_free(rescan_periodic_events_ev);
    2555             : 
    2556             : #ifdef HAVE_SYSTEMD_209
    2557         235 :   periodic_timer_free(systemd_watchdog_timer);
    2558             : #endif
    2559             : 
    2560         235 :   stats_n_bytes_read = stats_n_bytes_written = 0;
    2561             : 
    2562         235 :   memset(&global_bucket, 0, sizeof(global_bucket));
    2563         235 :   memset(&global_relayed_bucket, 0, sizeof(global_relayed_bucket));
    2564         235 :   time_of_process_start = 0;
    2565         235 :   time_of_last_signewnym = 0;
    2566         235 :   signewnym_is_pending = 0;
    2567         235 :   newnym_epoch = 0;
    2568         235 :   called_loop_once = 0;
    2569         235 :   main_loop_should_exit = 0;
    2570         235 :   main_loop_exit_value = 0;
    2571         235 :   can_complete_circuits = 0;
    2572         235 :   quiet_level = 0;
    2573         235 :   should_init_bridge_stats = 1;
    2574         235 :   heartbeat_callback_first_time = 1;
    2575         235 :   current_second = 0;
    2576         235 :   memset(&current_second_last_changed, 0,
    2577             :          sizeof(current_second_last_changed));
    2578         235 : }

Generated by: LCOV version 1.14