Tor  0.4.4.0-alpha-dev
scheduler_kist.c
/* Copyright (c) 2017-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * @file scheduler_kist.c
 * @brief Implements the KIST cell scheduler.
 **/

#define SCHEDULER_KIST_PRIVATE

#include "core/or/or.h"
#include "lib/buf/buffers.h"
#include "app/config/config.h"
#include "core/mainloop/connection.h"
#include "feature/nodelist/networkstatus.h"
#define CHANNEL_OBJECT_PRIVATE
#include "core/or/channel.h"
#include "core/or/channeltls.h"
#define SCHEDULER_PRIVATE
#include "core/or/scheduler.h"
#include "lib/math/fp.h"

#include "core/or/or_connection_st.h"

#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif

#ifdef HAVE_KIST_SUPPORT
/* Kernel interface needed for KIST. */
#include <netinet/tcp.h>
#include <linux/sockios.h>
#endif /* HAVE_KIST_SUPPORT */

/*****************************************************************************
 * Data structures and supporting functions
 *****************************************************************************/

/* Socket_table hash table stuff. The socket_table keeps track of per-socket
 * limit information imposed by kist and used by kist. */

static uint32_t
socket_table_ent_hash(const socket_table_ent_t *ent)
{
  return (uint32_t)ent->chan->global_identifier;
}

static unsigned
socket_table_ent_eq(const socket_table_ent_t *a, const socket_table_ent_t *b)
{
  return a->chan == b->chan;
}

typedef HT_HEAD(socket_table_s, socket_table_ent_t) socket_table_t;

static socket_table_t socket_table = HT_INITIALIZER();

HT_PROTOTYPE(socket_table_s, socket_table_ent_t, node, socket_table_ent_hash,
             socket_table_ent_eq)
HT_GENERATE2(socket_table_s, socket_table_ent_t, node, socket_table_ent_hash,
             socket_table_ent_eq, 0.6, tor_reallocarray, tor_free_)

/* outbuf_table hash table stuff. The outbuf_table keeps track of which
 * channels have data sitting in their outbuf so the kist scheduler can force
 * a write from outbuf to kernel periodically during a run and at the end of a
 * run. */

typedef struct outbuf_table_ent_t {
  HT_ENTRY(outbuf_table_ent_t) node;
  channel_t *chan;
} outbuf_table_ent_t;

static uint32_t
outbuf_table_ent_hash(const outbuf_table_ent_t *ent)
{
  return (uint32_t)ent->chan->global_identifier;
}

static unsigned
outbuf_table_ent_eq(const outbuf_table_ent_t *a, const outbuf_table_ent_t *b)
{
  return a->chan->global_identifier == b->chan->global_identifier;
}

HT_PROTOTYPE(outbuf_table_s, outbuf_table_ent_t, node, outbuf_table_ent_hash,
             outbuf_table_ent_eq)
HT_GENERATE2(outbuf_table_s, outbuf_table_ent_t, node, outbuf_table_ent_hash,
             outbuf_table_ent_eq, 0.6, tor_reallocarray, tor_free_)

/*****************************************************************************
 * Other internal data
 *****************************************************************************/

/* Store the last time the scheduler was run so we can decide when to next run
 * the scheduler based on it. */
static monotime_t scheduler_last_run;
/* This is a factor for the extra_space calculation in kist per-socket limits.
 * It is the number of extra congestion windows we want to write to the kernel.
 */
static double sock_buf_size_factor = 1.0;
/* How often the scheduler runs. */
STATIC int sched_run_interval = KIST_SCHED_RUN_INTERVAL_DEFAULT;

#ifdef HAVE_KIST_SUPPORT
/* Indicate if KIST lite mode is on or off. We can disable it at runtime.
 * Important to have because of the possible KISTLite -> KIST transition. */
static unsigned int kist_lite_mode = 0;
/* Indicate if we don't have the kernel support. This can happen if the kernel
 * changed and it doesn't recognize the values passed to the syscalls needed
 * by KIST. In that case, fall back to the naive approach. */
static unsigned int kist_no_kernel_support = 0;
#else /* !defined(HAVE_KIST_SUPPORT) */
static unsigned int kist_lite_mode = 1;
#endif /* defined(HAVE_KIST_SUPPORT) */

/*****************************************************************************
 * Internally called function implementations
 *****************************************************************************/

/* Little helper function to get the length of a channel's output buffer. */
static inline size_t
channel_outbuf_length(channel_t *chan)
{
  tor_assert(chan);
  /* In theory, this cannot happen because we cannot schedule a channel
   * without a connection that has its outbuf initialized. Just in case, bug
   * on this so we can understand a bit more why it happened. */
  if (SCHED_BUG(BASE_CHAN_TO_TLS(chan)->conn == NULL, chan)) {
    return 0;
  }
  return buf_datalen(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn)->outbuf);
}

/* Little helper function for HT_FOREACH_FN. */
static int
each_channel_write_to_kernel(outbuf_table_ent_t *ent, void *data)
{
  (void) data; /* Make compiler happy. */
  channel_write_to_kernel(ent->chan);
  return 0; /* Returning non-zero removes the element from the table. */
}

/* Free the given outbuf table entry ent. */
static int
free_outbuf_info_by_ent(outbuf_table_ent_t *ent, void *data)
{
  (void) data; /* Make compiler happy. */
  log_debug(LD_SCHED, "Freeing outbuf table entry from chan=%" PRIu64,
            ent->chan->global_identifier);
  tor_free(ent);
  return 1; /* So HT_FOREACH_FN will remove the element */
}

/* Free the given socket table entry ent. */
static int
free_socket_info_by_ent(socket_table_ent_t *ent, void *data)
{
  (void) data; /* Make compiler happy. */
  log_debug(LD_SCHED, "Freeing socket table entry from chan=%" PRIu64,
            ent->chan->global_identifier);
  tor_free(ent);
  return 1; /* So HT_FOREACH_FN will remove the element */
}

/* Clean up socket_table. Probably because the KIST sched impl is going away */
static void
free_all_socket_info(void)
{
  HT_FOREACH_FN(socket_table_s, &socket_table, free_socket_info_by_ent, NULL);
  HT_CLEAR(socket_table_s, &socket_table);
}

static socket_table_ent_t *
socket_table_search(socket_table_t *table, const channel_t *chan)
{
  socket_table_ent_t search, *ent = NULL;
  search.chan = chan;
  ent = HT_FIND(socket_table_s, table, &search);
  return ent;
}

/* Free a socket entry in table for the given chan. */
static void
free_socket_info_by_chan(socket_table_t *table, const channel_t *chan)
{
  socket_table_ent_t *ent = NULL;
  ent = socket_table_search(table, chan);
  if (!ent)
    return;
  log_debug(LD_SCHED, "scheduler free socket info for chan=%" PRIu64,
            chan->global_identifier);
  HT_REMOVE(socket_table_s, table, ent);
  free_socket_info_by_ent(ent, NULL);
}

/* Perform system calls for the given socket in order to calculate kist's
 * per-socket limit as documented in the function body. */
MOCK_IMPL(void,
update_socket_info_impl, (socket_table_ent_t *ent))
{
#ifdef HAVE_KIST_SUPPORT
  int64_t tcp_space, extra_space;
  tor_assert(ent);
  tor_assert(ent->chan);
  const tor_socket_t sock =
    TO_CONN(BASE_CHAN_TO_TLS((channel_t *) ent->chan)->conn)->s;
  struct tcp_info tcp;
  socklen_t tcp_info_len = sizeof(tcp);

  if (kist_no_kernel_support || kist_lite_mode) {
    goto fallback;
  }

  /* Gather information */
  if (getsockopt(sock, SOL_TCP, TCP_INFO, (void *)&(tcp), &tcp_info_len) < 0) {
    if (errno == EINVAL) {
      /* Oops, this option is not provided by the kernel, we'll have to
       * disable KIST entirely. This can happen if tor was built on a machine
       * with the support previously or if the kernel was updated and lost the
       * support. */
      log_notice(LD_SCHED, "Looks like our kernel doesn't have the support "
                           "for KIST anymore. We will fallback to the naive "
                           "approach. Remove KIST from the Schedulers list "
                           "to disable.");
      kist_no_kernel_support = 1;
    }
    goto fallback;
  }
  if (ioctl(sock, SIOCOUTQNSD, &(ent->notsent)) < 0) {
    if (errno == EINVAL) {
      log_notice(LD_SCHED, "Looks like our kernel doesn't have the support "
                           "for KIST anymore. We will fallback to the naive "
                           "approach. Remove KIST from the Schedulers list "
                           "to disable.");
      /* Same reason as the above. */
      kist_no_kernel_support = 1;
    }
    goto fallback;
  }
  ent->cwnd = tcp.tcpi_snd_cwnd;
  ent->unacked = tcp.tcpi_unacked;
  ent->mss = tcp.tcpi_snd_mss;

  /* In order to reduce outbound kernel queuing delays and thus improve Tor's
   * ability to prioritize circuits, KIST wants to set a socket write limit
   * that is near the amount that the socket would be able to immediately send
   * into the Internet.
   *
   * We first calculate how much the socket could send immediately (assuming
   * completely full packets) according to the congestion window and the number
   * of unacked packets.
   *
   * Then we add a little extra space in a controlled way. We do this so that
   * when the kernel gets ACKs back for data currently sitting in the "TCP
   * space", it will already have some more data to send immediately. It will
   * not have to wait for the scheduler to run again. The amount of extra space
   * is a factor of the current congestion window. With the suggested
   * sock_buf_size_factor value of 1.0, we allow at most 2*cwnd bytes to sit in
   * the kernel: 1 cwnd on the wire waiting for ACKs and 1 cwnd ready and
   * waiting to be sent when those ACKs finally come.
   *
   * In the below diagram, we see some bytes in the TCP-space (denoted by '*')
   * that have been sent onto the wire and are waiting for ACKs. We have a
   * little more room in "TCP space" that we can fill with data that will be
   * immediately sent. We also see the "extra space" KIST calculates. The sum
   * of the empty "TCP space" and the "extra space" is the kist-imposed write
   * limit for this socket.
   *
   * <----------------kernel-outbound-socket-queue----------------|
   * <*********---------------------------------------------------|
   * |----TCP-space-----|----extra-space-----|
   * |------------------|
   *                    ^ ((cwnd - unacked) * mss) bytes
   * |--------------------|
   *                      ^ ((cwnd * mss) * factor) bytes
   */
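
  /* Worked example (illustrative numbers only): with cwnd = 10 packets,
   * unacked = 4, mss = 1448 bytes, sock_buf_size_factor = 1.0, and an empty
   * notsent queue and outbuf, the code below computes
   *   tcp_space   = (10 - 4) * 1448       = 8688 bytes
   *   extra_space = (10 * 1448) * 1.0 - 0 = 14480 bytes
   * for a kist-imposed write limit of 8688 + 14480 = 23168 bytes. */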

  /* These values from the kernel are uint32_t, they will always fit into an
   * int64_t tcp_space variable but if the congestion window cwnd is smaller
   * than the unacked packets, the remaining TCP space is set to 0. */
  if (ent->cwnd >= ent->unacked) {
    tcp_space = (ent->cwnd - ent->unacked) * (int64_t)(ent->mss);
  } else {
    tcp_space = 0;
  }

  /* The clamp_double_to_int64 makes sure the first part fits into an int64_t.
   * In fact, if sock_buf_size_factor is still forced to be >= 0 in config.c,
   * then it will be positive for sure. Then we subtract a uint32_t. Getting a
   * negative value is OK, see after how it is being handled. */
  extra_space =
    clamp_double_to_int64(
                 (ent->cwnd * (int64_t)ent->mss) * sock_buf_size_factor) -
    ent->notsent - (int64_t)channel_outbuf_length((channel_t *) ent->chan);
  if ((tcp_space + extra_space) < 0) {
    /* This means that the "notsent" queue is just too big so we shouldn't put
     * more in the kernel for now. */
    ent->limit = 0;
  } else {
    /* The positive sum of two int64_t will always fit into a uint64_t.
     * And we know this will always be positive, since we checked above. */
    ent->limit = (uint64_t)tcp_space + (uint64_t)extra_space;
  }
  return;

#else /* !defined(HAVE_KIST_SUPPORT) */
  goto fallback;
#endif /* defined(HAVE_KIST_SUPPORT) */

 fallback:
  /* If all of a sudden we don't have kist support, we just zero out all the
   * variables for this socket since we don't know what they should be. We
   * also allow the socket to write as much as it can from the estimated
   * number of cells the lower layer can accept, effectively returning it to
   * Vanilla scheduler behavior. */
  ent->cwnd = ent->unacked = ent->mss = ent->notsent = 0;
  /* This function calls the specialized channel object (currently channeltls)
   * and asks how many cells it can write on the outbuf, which we then multiply
   * by the size of the cells for this channel. The cast is because this
   * function requires a non-const channel object, meh. */
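  /* For instance (illustrative numbers; assuming the usual 514-byte cell
   * network size with wide circuit IDs and a 29-byte TLS_PER_CELL_OVERHEAD):
   * a channel with room for 32 cells would get a fallback limit of
   * 32 * (514 + 29) = 17376 bytes. */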
  ent->limit = channel_num_cells_writeable((channel_t *) ent->chan) *
               (get_cell_network_size(ent->chan->wide_circ_ids) +
                TLS_PER_CELL_OVERHEAD);
}

/* Given a socket that isn't in the table, add it.
 * Given a socket that is in the table, re-init values that need init-ing
 * every scheduling run.
 */
static void
init_socket_info(socket_table_t *table, const channel_t *chan)
{
  socket_table_ent_t *ent = NULL;
  ent = socket_table_search(table, chan);
  if (!ent) {
    log_debug(LD_SCHED, "scheduler init socket info for chan=%" PRIu64,
              chan->global_identifier);
    ent = tor_malloc_zero(sizeof(*ent));
    ent->chan = chan;
    HT_INSERT(socket_table_s, table, ent);
  }
  ent->written = 0;
}

/* Add chan to the outbuf table if it isn't already in it. If it is, then don't
 * do anything. */
static void
outbuf_table_add(outbuf_table_t *table, channel_t *chan)
{
  outbuf_table_ent_t search, *ent;
  search.chan = chan;
  ent = HT_FIND(outbuf_table_s, table, &search);
  if (!ent) {
    log_debug(LD_SCHED, "scheduler init outbuf info for chan=%" PRIu64,
              chan->global_identifier);
    ent = tor_malloc_zero(sizeof(*ent));
    ent->chan = chan;
    HT_INSERT(outbuf_table_s, table, ent);
  }
}

static void
outbuf_table_remove(outbuf_table_t *table, channel_t *chan)
{
  outbuf_table_ent_t search, *ent;
  search.chan = chan;
  ent = HT_FIND(outbuf_table_s, table, &search);
  if (ent) {
    HT_REMOVE(outbuf_table_s, table, ent);
    free_outbuf_info_by_ent(ent, NULL);
  }
}

/* Set the scheduler running interval. */
static void
set_scheduler_run_interval(void)
{
  int old_sched_run_interval = sched_run_interval;
  sched_run_interval = kist_scheduler_run_interval();
  if (old_sched_run_interval != sched_run_interval) {
    log_info(LD_SCHED, "Scheduler KIST changing its running interval "
                       "from %" PRId32 " to %" PRId32,
             old_sched_run_interval, sched_run_interval);
  }
}

/* Return true iff the channel hasn't hit its kist-imposed write limit yet. */
static int
socket_can_write(socket_table_t *table, const channel_t *chan)
{
  socket_table_ent_t *ent = NULL;
  ent = socket_table_search(table, chan);
  if (SCHED_BUG(!ent, chan)) {
    return 1; // Just return true, saying that kist wouldn't limit the socket
  }

  /* We previously calculated a write limit for this socket. In the below
   * calculation, first determine how much room is left in bytes. Then divide
   * that by the amount of space a cell takes. If there's room for at least 1
   * cell, then KIST will allow the socket to write. */
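  /* For example (illustrative numbers; assuming CELL_MAX_NETWORK_SIZE is 514
   * and TLS_PER_CELL_OVERHEAD is 29): with limit = 10000 and written = 8000,
   * there are 2000 bytes of room, so kist_limit_space = 2000 / 543 = 3 and
   * the socket may still write. */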
  int64_t kist_limit_space =
    (int64_t) (ent->limit - ent->written) /
    (CELL_MAX_NETWORK_SIZE + TLS_PER_CELL_OVERHEAD);
  return kist_limit_space > 0;
}

/* Update the channel's socket kernel information. */
static void
update_socket_info(socket_table_t *table, const channel_t *chan)
{
  socket_table_ent_t *ent = NULL;
  ent = socket_table_search(table, chan);
  if (SCHED_BUG(!ent, chan)) {
    return; // Whelp. Entry didn't exist for some reason so nothing to do.
  }
  update_socket_info_impl(ent);
  log_debug(LD_SCHED, "chan=%" PRIu64 " updated socket info, limit: %" PRIu64
                      ", cwnd: %" PRIu32 ", unacked: %" PRIu32
                      ", notsent: %" PRIu32 ", mss: %" PRIu32,
            ent->chan->global_identifier, ent->limit, ent->cwnd, ent->unacked,
            ent->notsent, ent->mss);
}

/* Increment the channel's socket written value by the number of bytes. */
static void
update_socket_written(socket_table_t *table, channel_t *chan, size_t bytes)
{
  socket_table_ent_t *ent = NULL;
  ent = socket_table_search(table, chan);
  if (SCHED_BUG(!ent, chan)) {
    return; // Whelp. Entry didn't exist so nothing to do.
  }

  log_debug(LD_SCHED, "chan=%" PRIu64 " wrote %lu bytes, old was %" PRIi64,
            chan->global_identifier, (unsigned long) bytes, ent->written);

  ent->written += bytes;
}

/*
 * A naive KIST impl would write every single cell all the way to the kernel.
 * That would take a lot of system calls. A less bad KIST impl would write a
 * channel's outbuf to the kernel only when we are switching to a different
 * channel. But if we have two channels with equal priority, we end up writing
 * one cell for each and bouncing back and forth. This KIST impl avoids that
 * by only writing a channel's outbuf to the kernel if it has 8 cells or more
 * in it.
 */
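/* (With the usual 514-byte CELL_MAX_NETWORK_SIZE, that 8-cell threshold
 * works out to batches of a bit over 4 KiB per write to the kernel.) */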
MOCK_IMPL(int, channel_should_write_to_kernel,
          (outbuf_table_t *table, channel_t *chan))
{
  outbuf_table_add(table, chan);
  /* CELL_MAX_NETWORK_SIZE * 8 because we only want to write the outbuf to the
   * kernel if there's 8 or more cells waiting */
  return channel_outbuf_length(chan) > (CELL_MAX_NETWORK_SIZE * 8);
}

/* Little helper function to write a channel's outbuf all the way to the
 * kernel. */
MOCK_IMPL(void, channel_write_to_kernel, (channel_t *chan))
{
  tor_assert(chan);
  log_debug(LD_SCHED, "Writing %lu bytes to kernel for chan %" PRIu64,
            (unsigned long)channel_outbuf_length(chan),
            chan->global_identifier);
  /* Note that 'connection_handle_write()' may change the scheduler state of
   * the channel during the scheduling loop with
   * 'connection_or_flushed_some()' -> 'scheduler_channel_wants_writes()'.
   * This side-effect will only occur if the channel is currently in the
   * 'SCHED_CHAN_WAITING_TO_WRITE' or 'SCHED_CHAN_IDLE' states, which KIST
   * rarely uses, so it should be fine unless KIST begins using these states
   * in the future. */
  connection_handle_write(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn), 0);
}

/* Return true iff the scheduler has work to perform. */
static int
have_work(void)
{
  smartlist_t *cp = get_channels_pending();
  IF_BUG_ONCE(!cp) {
    return 0; // channels_pending doesn't exist so... no work?
  }
  return smartlist_len(cp) > 0;
}

/* Function of the scheduler interface: free_all() */
static void
kist_free_all(void)
{
  free_all_socket_info();
}

/* Function of the scheduler interface: on_channel_free() */
static void
kist_on_channel_free_fn(const channel_t *chan)
{
  free_socket_info_by_chan(&socket_table, chan);
}

/* Function of the scheduler interface: on_new_consensus() */
static void
kist_scheduler_on_new_consensus(void)
{
  set_scheduler_run_interval();
}

/* Function of the scheduler interface: on_new_options() */
static void
kist_scheduler_on_new_options(void)
{
  sock_buf_size_factor = get_options()->KISTSockBufSizeFactor;

  /* Calls kist_scheduler_run_interval which calls get_options(). */
  set_scheduler_run_interval();
}

/* Function of the scheduler interface: init() */
static void
kist_scheduler_init(void)
{
  /* When initializing the scheduler, the last run could be 0 because it is
   * declared static or a value in the past that was set when it was last
   * used. In both cases, we want to initialize it to now so we don't risk
   * using the value 0 which doesn't play well with our monotonic time
   * interface.
   *
   * One side effect is that the first scheduler run will be at the next tick
   * that is in now + 10 msec (KIST_SCHED_RUN_INTERVAL_DEFAULT) by default. */
  monotime_get(&scheduler_last_run);

  kist_scheduler_on_new_options();
  IF_BUG_ONCE(sched_run_interval == 0) {
    log_warn(LD_SCHED, "We are initing the KIST scheduler and noticed the "
                       "KISTSchedRunInterval is telling us to not use KIST. "
                       "That's weird! We'll continue using KIST, but at "
                       "%" PRId32 "ms.",
             KIST_SCHED_RUN_INTERVAL_DEFAULT);
    sched_run_interval = KIST_SCHED_RUN_INTERVAL_DEFAULT;
  }
}

/* Function of the scheduler interface: schedule() */
static void
kist_scheduler_schedule(void)
{
  struct monotime_t now;
  struct timeval next_run;
  int64_t diff;

  if (!have_work()) {
    return;
  }
  monotime_get(&now);

  /* If time is really monotonic, we can never have now being smaller than the
   * last scheduler run. The scheduler_last_run at first is set to 0.
   * Unfortunately, not all platforms guarantee monotonic time so we log at
   * info level but don't make it more noisy. */
  diff = monotime_diff_msec(&scheduler_last_run, &now);
  if (diff < 0) {
    log_info(LD_SCHED, "Monotonic time between now and last run of scheduler "
                       "is negative: %" PRId64 ". Setting diff to 0.", diff);
    diff = 0;
  }
  if (diff < sched_run_interval) {
    next_run.tv_sec = 0;
    /* Takes 1000 ms -> us. This will always be valid because diff can NOT be
     * negative and can NOT be bigger than sched_run_interval so values can
     * only go from 1000 usec (diff set to interval - 1) to 100000 usec (diff
     * set to 0) for the maximum allowed run interval (100ms). */
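    /* For example, with the default sched_run_interval of 10 msec and a diff
     * of 4 msec since the last run, the next run is scheduled
     * (10 - 4) * 1000 = 6000 usec from now. */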
    next_run.tv_usec = (int) ((sched_run_interval - diff) * 1000);
    /* Re-adding an event reschedules it. It does not duplicate it. */
    scheduler_ev_add(&next_run);
  } else {
    scheduler_ev_active();
  }
}

/* Function of the scheduler interface: run() */
static void
kist_scheduler_run(void)
{
  /* Define variables */
  channel_t *chan = NULL; // current working channel
  /* The last distinct chan served in a sched loop. */
  channel_t *prev_chan = NULL;
  int flush_result; // temporarily store results from flush calls
  /* Channels to be re-added to pending at the end */
  smartlist_t *to_readd = NULL;
  smartlist_t *cp = get_channels_pending();

  outbuf_table_t outbuf_table = HT_INITIALIZER();

  /* For each pending channel, collect new kernel information */
  SMARTLIST_FOREACH_BEGIN(cp, const channel_t *, pchan) {
    init_socket_info(&socket_table, pchan);
    update_socket_info(&socket_table, pchan);
  } SMARTLIST_FOREACH_END(pchan);

  log_debug(LD_SCHED, "Running the scheduler. %d channels pending",
            smartlist_len(cp));

  /* The main scheduling loop. Loop until there are no more pending channels */
  while (smartlist_len(cp) > 0) {
    /* get best channel */
    chan = smartlist_pqueue_pop(cp, scheduler_compare_channels,
                                offsetof(channel_t, sched_heap_idx));
    if (SCHED_BUG(!chan, NULL)) {
      /* Some-freaking-how a NULL got into the channels_pending. That should
       * never happen, but it should be harmless to ignore it and keep looping.
       */
      continue;
    }
    outbuf_table_add(&outbuf_table, chan);

    /* If we have switched to a new channel, consider writing the previous
     * channel's outbuf to the kernel. */
    if (!prev_chan) {
      prev_chan = chan;
    }
    if (prev_chan != chan) {
      if (channel_should_write_to_kernel(&outbuf_table, prev_chan)) {
        channel_write_to_kernel(prev_chan);
        outbuf_table_remove(&outbuf_table, prev_chan);
      }
      prev_chan = chan;
    }

    /* Only flush and write if the per-socket limit hasn't been hit */
    if (socket_can_write(&socket_table, chan)) {
      /* flush to channel queue/outbuf */
      flush_result = (int)channel_flush_some_cells(chan, 1); // 1 for num cells
      /* XXX: While flushing cells, it is possible that the connection write
       * fails, leading the channel to be closed, which triggers a release
       * and frees its entry in the socket table. And because of an
       * engineering design issue, the error is not propagated back so we
       * don't get an error at this point. So before we continue, make sure
       * the channel is open and if not just ignore it. See #23751. */
      if (!CHANNEL_IS_OPEN(chan)) {
        /* Channel isn't open so we put it back in IDLE mode. It is either
         * renegotiating its TLS session or about to be released. */
        scheduler_set_channel_state(chan, SCHED_CHAN_IDLE);
        continue;
      }
      /* flush_result has the # cells flushed */
      if (flush_result > 0) {
        update_socket_written(&socket_table, chan, flush_result *
                              (CELL_MAX_NETWORK_SIZE + TLS_PER_CELL_OVERHEAD));
      } else {
        /* XXX: This can happen because tor sometimes opportunistically
         * flushes cells from the circuit to the outbuf, so the channel can
         * end up here without having anything to flush nor needing to write
         * to the kernel. Hopefully we'll fix that soon but for now we have
         * to handle this case which happens kind of often. */
        log_debug(LD_SCHED,
                  "We didn't flush anything on a chan that we think "
                  "can write and wants to write. The channel's state is '%s' "
                  "and in scheduler state '%s'. We're going to mark it as "
                  "waiting_for_cells (as that's most likely the issue) and "
                  "stop scheduling it this round.",
                  channel_state_to_string(chan->state),
                  get_scheduler_state_string(chan->scheduler_state));
        scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
        continue;
      }
    }

    /* Decide what to do with the channel now */

    if (!channel_more_to_flush(chan) &&
        !socket_can_write(&socket_table, chan)) {

      /* Case 1: no more cells to send, and cannot write */

      /*
       * You might think we should put the channel in SCHED_CHAN_IDLE. And
       * you're probably correct. While implementing KIST, we found that the
       * scheduling system would sometimes lose track of channels when we did
       * that. We suspect it has to do with the difference between "can't
       * write because socket/outbuf is full" and KIST's "can't write because
       * we've arbitrarily decided that that's enough for now." Sometimes
       * channels run out of cells at the same time they hit their
       * kist-imposed write limit and maybe the rest of Tor doesn't put the
       * channel back in pending when it is supposed to.
       *
       * This should be investigated again. It is as simple as changing
       * SCHED_CHAN_WAITING_FOR_CELLS to SCHED_CHAN_IDLE and seeing if Tor
       * starts having serious throughput issues. Best done in shadow/chutney.
       */
      scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
    } else if (!channel_more_to_flush(chan)) {

      /* Case 2: no more cells to send, but still open for writes */

      scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
    } else if (!socket_can_write(&socket_table, chan)) {

      /* Case 3: cells to send, but cannot write */

      /*
       * We want to write, but can't. If we left the channel in
       * channels_pending, we would never exit the scheduling loop. We need to
       * add it to a temporary list of channels to be added to channels_pending
       * after the scheduling loop is over. They can hopefully be taken care of
       * in the next scheduling round.
       */
      if (!to_readd) {
        to_readd = smartlist_new();
      }
      smartlist_add(to_readd, chan);
    } else {

      /* Case 4: cells to send, and still open for writes */

      scheduler_set_channel_state(chan, SCHED_CHAN_PENDING);
      if (!SCHED_BUG(chan->sched_heap_idx != -1, chan)) {
        smartlist_pqueue_add(cp, scheduler_compare_channels,
                             offsetof(channel_t, sched_heap_idx), chan);
      }
    }
  } /* End of main scheduling loop */

  /* Write the outbuf of any channels that still have data */
  HT_FOREACH_FN(outbuf_table_s, &outbuf_table, each_channel_write_to_kernel,
                NULL);
  /* We are done with it. */
  HT_FOREACH_FN(outbuf_table_s, &outbuf_table, free_outbuf_info_by_ent, NULL);
  HT_CLEAR(outbuf_table_s, &outbuf_table);

  log_debug(LD_SCHED, "len pending=%d, len to_readd=%d",
            smartlist_len(cp),
            (to_readd ? smartlist_len(to_readd) : -1));

  /* Re-add any channels we need to */
  if (to_readd) {
    SMARTLIST_FOREACH_BEGIN(to_readd, channel_t *, readd_chan) {
      scheduler_set_channel_state(readd_chan, SCHED_CHAN_PENDING);
      if (!smartlist_contains(cp, readd_chan)) {
        if (!SCHED_BUG(readd_chan->sched_heap_idx != -1, readd_chan)) {
          /* XXXX Note that the check above is in theory redundant with
           * the smartlist_contains check. But let's make sure we're
           * not messing anything up, and leave them both for now. */
          smartlist_pqueue_add(cp, scheduler_compare_channels,
                               offsetof(channel_t, sched_heap_idx),
                               readd_chan);
        }
      }
    } SMARTLIST_FOREACH_END(readd_chan);
    smartlist_free(to_readd);
  }

  monotime_get(&scheduler_last_run);
}

/*****************************************************************************
 * Externally called function implementations not called through scheduler_t
 *****************************************************************************/

/* Stores the kist scheduler function pointers. */
static scheduler_t kist_scheduler = {
  .type = SCHEDULER_KIST,
  .free_all = kist_free_all,
  .on_channel_free = kist_on_channel_free_fn,
  .init = kist_scheduler_init,
  .on_new_consensus = kist_scheduler_on_new_consensus,
  .schedule = kist_scheduler_schedule,
  .run = kist_scheduler_run,
  .on_new_options = kist_scheduler_on_new_options,
};

/* Return the KIST scheduler object. If it didn't exist, return a newly
 * allocated one, but note that init() is not called on it. */
scheduler_t *
get_kist_scheduler(void)
{
  return &kist_scheduler;
}
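
/* Minimal usage sketch (illustrative only; the real scheduler selection
 * happens in scheduler.c, not here):
 *
 *   scheduler_t *sched = get_kist_scheduler();
 *   if (sched->init)
 *     sched->init();
 */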

/* Check the torrc (and maybe consensus) for the configured KIST scheduler run
 * interval.
 * - If torrc > 0, then return the positive torrc value (should use KIST, and
 *   should use the set value).
 * - If torrc == 0, then look in the consensus for what the value should be.
 *   - If == 0, then return 0 (don't use KIST).
 *   - If > 0, then return the positive consensus value.
 *   - If the consensus doesn't say anything, return the default of 10
 *     milliseconds.
 */
int
kist_scheduler_run_interval(void)
{
  int run_interval = get_options()->KISTSchedRunInterval;

  if (run_interval != 0) {
    log_debug(LD_SCHED, "Found KISTSchedRunInterval=%" PRId32 " in torrc. "
                        "Using that.", run_interval);
    return run_interval;
  }

  log_debug(LD_SCHED, "KISTSchedRunInterval=0, turning to the consensus.");

  /* Will either be the consensus value or the default. Note that 0 can be
   * returned which means the consensus wants us to NOT use KIST. */
  return networkstatus_get_param(NULL, "KISTSchedRunInterval",
                                 KIST_SCHED_RUN_INTERVAL_DEFAULT,
                                 KIST_SCHED_RUN_INTERVAL_MIN,
                                 KIST_SCHED_RUN_INTERVAL_MAX);
}
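
/* As a hypothetical torrc sketch (not part of this file), explicitly
 * enabling KIST with the default interval might look like:
 *
 *   Schedulers KIST,KISTLite,Vanilla
 *   KISTSchedRunInterval 10 msec
 *
 * Leaving KISTSchedRunInterval at 0 defers to the consensus parameter of the
 * same name, as described above. */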

/* Set KISTLite mode that is KIST without kernel support. */
void
scheduler_kist_set_lite_mode(void)
{
  kist_lite_mode = 1;
  kist_scheduler.type = SCHEDULER_KIST_LITE;
  log_info(LD_SCHED,
           "Setting KIST scheduler without kernel support (KISTLite mode)");
}

/* Set KIST mode that is KIST with kernel support. */
void
scheduler_kist_set_full_mode(void)
{
  kist_lite_mode = 0;
  kist_scheduler.type = SCHEDULER_KIST;
  log_info(LD_SCHED,
           "Setting KIST scheduler with kernel support (KIST mode)");
}

#ifdef HAVE_KIST_SUPPORT

/* Return true iff the scheduler subsystem should use KIST. */
int
scheduler_can_use_kist(void)
{
  if (kist_no_kernel_support) {
    /* We have no kernel support so we can't use KIST. */
    return 0;
  }

  /* We do have the support. Now check the run interval, which the consensus
   * (or torrc) can set to 0 in order to disable KIST. */
  int run_interval = kist_scheduler_run_interval();
  log_debug(LD_SCHED, "Determined KIST sched_run_interval should be "
                      "%" PRId32 ". Can%s use KIST.",
            run_interval, (run_interval > 0 ? "" : " not"));
  return run_interval > 0;
}

#else /* !defined(HAVE_KIST_SUPPORT) */

int
scheduler_can_use_kist(void)
{
  return 0;
}

#endif /* defined(HAVE_KIST_SUPPORT) */