circuitpadding.c (tor 0.4.2.0-alpha-dev)
1 /* Copyright (c) 2017 The Tor Project, Inc. */
2 /* See LICENSE for licensing information */
3 
49 #define CIRCUITPADDING_PRIVATE
50 
51 #include <math.h>
52 #include "lib/math/fp.h"
53 #include "lib/math/prob_distr.h"
54 #include "core/or/or.h"
55 #include "core/or/circuitpadding.h"
57 #include "core/or/circuitlist.h"
58 #include "core/or/circuituse.h"
59 #include "core/mainloop/netstatus.h"
60 #include "core/or/relay.h"
61 #include "feature/stats/rephist.h"
63 
64 #include "core/or/channel.h"
65 
66 #include "lib/time/compat_time.h"
67 #include "lib/defs/time.h"
69 
70 #include "core/or/crypt_path_st.h"
71 #include "core/or/circuit_st.h"
72 #include "core/or/origin_circuit_st.h"
73 #include "core/or/or_circuit_st.h"
74 #include "feature/nodelist/routerstatus_st.h"
75 #include "feature/nodelist/node_st.h"
76 #include "core/or/cell_st.h"
77 #include "core/or/extend_info_st.h"
78 #include "core/crypto/relay_crypto.h"
80 
81 #include "app/config/config.h"
82 
83 static circpad_circuit_state_t circpad_circuit_state(
84  origin_circuit_t *circ);
85 static void circpad_setup_machine_on_circ(circuit_t *on_circ,
86  const circpad_machine_spec_t *machine);
88 
91 
93 static uint8_t circpad_padding_disabled;
94 static uint8_t circpad_padding_reduced;
95 static uint8_t circpad_global_max_padding_percent;
96 static uint16_t circpad_global_allowed_cells;
97 static uint16_t circpad_max_circ_queued_cells;
98 
101 static uint64_t circpad_global_nonpadding_sent;
102 
110 
118 
121 #define FOR_EACH_CIRCUIT_MACHINE_BEGIN(loop_var) \
122  STMT_BEGIN \
123  for (int loop_var = 0; loop_var < CIRCPAD_MAX_MACHINES; loop_var++) {
124 #define FOR_EACH_CIRCUIT_MACHINE_END } STMT_END ;
125 
128 #define FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(loop_var, circ) \
129  FOR_EACH_CIRCUIT_MACHINE_BEGIN(loop_var) \
130  if (!(circ)->padding_info[loop_var]) \
131  continue;
132 #define FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END } STMT_END ;
133 
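/* Usage sketch (hypothetical caller, not a call site from this section): the
 * macro pairs above expand to a bounded for-loop over the machine slots on a
 * circuit, with the "ACTIVE" variant skipping slots that have no runtime
 * info allocated:
 *
 *   FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, circ) {
 *     circpad_machine_runtime_t *mi = circ->padding_info[i];
 *     // ... inspect or update machine i here ...
 *   } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
 */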
137 static void
138 circpad_circuit_machineinfo_free_idx(circuit_t *circ, int idx)
139 {
140  if (circ->padding_info[idx]) {
141  log_fn(LOG_INFO,LD_CIRC, "Freeing padding info idx %d on circuit %u (%d)",
142  idx, CIRCUIT_IS_ORIGIN(circ) ?
143  TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0,
144  circ->purpose);
145 
146  tor_free(circ->padding_info[idx]->histogram);
147  timer_free(circ->padding_info[idx]->padding_timer);
148  tor_free(circ->padding_info[idx]);
149  }
150 }
151 
171 int
172 circpad_marked_circuit_for_padding(circuit_t *circ, int reason)
173 {
174  /* If the circuit purpose is measurement or path bias, don't
175  * hold it open */
176  if (circ->purpose == CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT ||
177  circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING) {
178  return 0;
179  }
180 
181  /* If the circuit is closed for any reason other than these three valid,
182  * client-side close reasons, do not try to keep it open. It is probably
183  * damaged or unusable. Note this is OK with vanguards because
184  * controller-closed circuits have REASON=REQUESTED, so vanguards-closed
185  * circuits will not be held open (we want them to close ASAP). */
186  if (!(reason == END_CIRC_REASON_NONE ||
187  reason == END_CIRC_REASON_FINISHED ||
188  reason == END_CIRC_REASON_IP_NOW_REDUNDANT)) {
189  return 0;
190  }
191 
194  if (!mi) {
195  continue; // No padding runtime info; check next machine
196  }
197 
199 
200  /* If we're in END state (NULL here), then check next machine */
201  if (!state) {
202  continue; // check next machine
203  }
204 
205  /* If the machine does not want to control the circuit close itself, then
206  * check the next machine */
207  if (!circ->padding_machine[i]->manage_circ_lifetime) {
208  continue; // check next machine
209  }
210 
211  /* If the machine has reached the END state, we can close. Check next
212  * machine. */
213  if (mi->current_state == CIRCPAD_STATE_END) {
214  continue; // check next machine
215  }
216 
217  log_info(LD_CIRC, "Circuit %d is not marked for close because of a "
218  "pending padding machine in index %d.",
219  CIRCUIT_IS_ORIGIN(circ) ?
220  TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0, i);
221 
222  /* If the machine has had no network events at all within the
223  * last circpad_delay_t timespan, it's in some deadlock state.
224  * Tell circuit_mark_for_close() that we don't own it anymore.
225  * This will allow circuit_expire_old_circuits_clientside() to
226  * close it.
227  */
228  if (circ->padding_info[i]->last_cell_time_sec +
229  (time_t)CIRCPAD_DELAY_MAX_SECS < approx_time()) {
230  log_notice(LD_BUG, "Circuit %d was not marked for close because of a "
231  "pending padding machine in index %d for over an hour. "
232  "Circuit is a %s",
233  CIRCUIT_IS_ORIGIN(circ) ?
234  TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0,
236 
237  return 0; // abort timer reached; mark the circuit for close now
238  }
239 
240  /* If we weren't marked dirty yet, let's pretend we're dirty now.
241  * ("Dirty" means that a circuit has been used for application traffic
242  * by Tor.) Dirty circuits have different expiry times, and are not
243  * considered in counts of built circuits, etc. By claiming that we're
244  * dirty, the rest of Tor will make decisions as if we were actually
245  * used by application data.
246  *
247  * This is most important for circuit_expire_old_circuits_clientside(),
248  * where we want that function to expire us after the padding machine
249  * has shut down, but using the MaxCircuitDirtiness timer instead of
250  * the idle circuit timer (again, we want this because we're not
251  * supposed to look idle to Guard nodes that can see our lifespan). */
252  if (!circ->timestamp_dirty)
253  circ->timestamp_dirty = approx_time();
254 
255  /* Take ownership of the circuit */
257 
258  return 1;
259  } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
260 
261  return 0; // No machine wanted to keep the circuit open; mark for close
262 }
263 
269 static int
271 {
272  int found = 0;
274  if (circ->padding_machine[i] &&
275  circ->padding_machine[i]->machine_num == machine_num) {
277  circ->padding_machine[i] = NULL;
278  found = 1;
279  }
280  } FOR_EACH_CIRCUIT_MACHINE_END;
281 
282  return found;
283 }
284 
288 void
290 {
293  } FOR_EACH_CIRCUIT_MACHINE_END;
294 }
295 
300 circpad_circuit_machineinfo_new(circuit_t *on_circ, int machine_index)
301 {
303  tor_malloc_zero(sizeof(circpad_machine_runtime_t));
304  mi->machine_index = machine_index;
305  mi->on_circ = on_circ;
307 
308  return mi;
309 }
310 
318 STATIC const circpad_state_t *
320 {
321  const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);
322 
323  if (mi->current_state == CIRCPAD_STATE_END) {
324  return NULL;
325  } else if (BUG(mi->current_state >= machine->num_states)) {
327  "Invalid circuit padding state %d",
328  mi->current_state);
329 
330  return NULL;
331  }
332 
333  return &machine->states[mi->current_state];
334 }
335 
345 STATIC circpad_delay_t
348 {
350  circpad_delay_t rtt_add_usec = 0;
351 
352  /* Our state should have been checked to be non-null by the caller
353  * (circpad_machine_remove_token()) */
354  if (BUG(state == NULL)) {
355  return CIRCPAD_DELAY_INFINITE;
356  }
357 
358  /* The infinity bin has an upper bound of infinity, so make sure we return
359  * that if they ask for it. */
360  if (bin > CIRCPAD_INFINITY_BIN(state)) {
361  return CIRCPAD_DELAY_INFINITE;
362  }
363 
364  /* If we are using an RTT estimate, consider it as well. */
365  if (state->use_rtt_estimate) {
366  rtt_add_usec = mi->rtt_estimate_usec;
367  }
368 
369  return state->histogram_edges[bin] + rtt_add_usec;
370 }
371 
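/* Worked example (hypothetical edges): with histogram_edges = {0, 1000,
 * 10000} and no RTT estimate, circpad_histogram_bin_to_usec() returns 0,
 * 1000 and 10000 for bins 0..2, so bin 1 covers the delay range
 * [1000, 9999]: its upper bound is the next bin's edge minus one, as
 * computed by the helper below. With use_rtt_estimate set, the current
 * rtt_estimate_usec is added to each returned edge. */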
376 STATIC circpad_delay_t
379 {
380  return circpad_histogram_bin_to_usec(mi, bin+1) - 1;
381 }
382 
384 static circpad_delay_t
386  int bin_index)
387 {
388  circpad_delay_t left_bound = circpad_histogram_bin_to_usec(mi, bin_index);
389  circpad_delay_t right_bound = histogram_get_bin_upper_bound(mi, bin_index);
390 
391  return left_bound + (right_bound - left_bound)/2;
392 }
393 
404  circpad_delay_t usec)
405 {
407  circpad_delay_t rtt_add_usec = 0;
409 
410  /* Our state should have been checked to be non-null by the caller
411  * (circpad_machine_remove_token()) */
412  if (BUG(state == NULL)) {
413  return 0;
414  }
415 
416  /* If we are using an RTT estimate, consider it as well. */
417  if (state->use_rtt_estimate) {
418  rtt_add_usec = mi->rtt_estimate_usec;
419  }
420 
421  /* Walk through the bins and check the upper bound of each bin, if 'usec' is
422  * less-or-equal to that, return that bin. If rtt_estimate is enabled then
423  * add that to the upper bound of each bin.
424  *
425  * We don't want to return the infinity bin here, so don't go there. */
426  for (bin = 0 ; bin < CIRCPAD_INFINITY_BIN(state) ; bin++) {
427  if (usec <= histogram_get_bin_upper_bound(mi, bin) + rtt_add_usec) {
428  return bin;
429  }
430  }
431 
432  /* We don't want to return the infinity bin here, so if we still didn't find
433  * the right bin, return the highest non-infinity bin */
434  return CIRCPAD_INFINITY_BIN(state)-1;
435 }
436 
445 static inline int
447 {
448  /* No runtime histogram == no token removal */
449  if (mi->histogram == NULL) {
450  /* Machines that don't want token removal are trying to avoid
451  * potentially expensive mallocs, extra memory accesses, and/or
452  * potentially expensive monotime calls. Let's minimize checks
453  * and keep this path fast. */
454  tor_assert_nonfatal(mi->histogram_len == 0);
455  return 0;
456  } else {
457  /* Machines that do want token removal are less sensitive to performance.
458  * Let's spend some time to check that our state is consistent and sane */
460  if (BUG(!state)) {
461  return 1;
462  }
463  tor_assert_nonfatal(state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE);
464  tor_assert_nonfatal(state->histogram_len == mi->histogram_len);
465  tor_assert_nonfatal(mi->histogram_len != 0);
466  return 1;
467  }
468 
469  tor_assert_nonfatal_unreached();
470  return 0;
471 }
472 
478 STATIC void
480 {
482 
483  /* If this state doesn't exist, or doesn't have token removal,
484  * free any previous state's runtime histogram, and bail.
485  *
486  * If we don't have a token removal strategy, we also don't need a runtime
487  * histogram and we rely on the immutable one in machine_spec_t. */
488  if (!state || state->token_removal == CIRCPAD_TOKEN_REMOVAL_NONE) {
489  if (mi->histogram) {
490  tor_free(mi->histogram);
491  mi->histogram = NULL;
492  mi->histogram_len = 0;
493  }
494  return;
495  }
496 
497  /* Try to avoid re-mallocing if we don't really need to */
498  if (!mi->histogram || (mi->histogram
499  && mi->histogram_len != state->histogram_len)) {
500  tor_free(mi->histogram); // null ok
501  mi->histogram = tor_malloc_zero(sizeof(circpad_hist_token_t)
502  *state->histogram_len);
503  }
504  mi->histogram_len = state->histogram_len;
505 
506  memcpy(mi->histogram, state->histogram,
507  sizeof(circpad_hist_token_t)*state->histogram_len);
508 }
509 
513 static void
515 {
517  double length;
518 
519  if (!state || state->length_dist.type == CIRCPAD_DIST_NONE) {
520  mi->state_length = CIRCPAD_STATE_LENGTH_INFINITE;
521  return;
522  }
523 
524  length = circpad_distribution_sample(state->length_dist);
525  length = MAX(0, length);
526  length += state->start_length;
527 
528  if (state->max_length) {
529  length = MIN(length, state->max_length);
530  }
531 
532  mi->state_length = clamp_double_to_int64(length);
533 
534  log_info(LD_CIRC, "State length sampled to %"PRIu64" for circuit %u",
537 }
538 
545 static circpad_delay_t
547  circpad_delay_t delay_shift)
548 {
549  double val = circpad_distribution_sample(state->iat_dist);
550  /* These comparisons are safe, because the output is in the range
551  * [0, 2**32), and double has a precision of 53 bits. */
552  /* We want a positive sample value */
553  val = MAX(0, val);
554  /* Respect the maximum sample setting */
555  val = MIN(val, state->dist_max_sample_usec);
556 
557  /* Now apply the shift:
558  * This addition is exact: val is at most 2**32-1, delay_shift is at most
559  * 2**32-1, and doubles have a precision of 53 bits. */
560  val += delay_shift;
561 
562  /* Clamp the distribution at infinite delay val */
563  return (circpad_delay_t)MIN(tor_llround(val), CIRCPAD_DELAY_INFINITE);
564 }
565 
574 STATIC circpad_delay_t
576 {
578  const circpad_hist_token_t *histogram = NULL;
579  circpad_hist_index_t curr_bin = 0;
580  circpad_delay_t bin_start, bin_end;
581  /* These three must all be larger than circpad_hist_token_t, because
582  * we sum several circpad_hist_token_t values across the histogram */
583  uint64_t curr_weight = 0;
584  uint64_t histogram_total_tokens = 0;
585  uint64_t bin_choice;
586 
587  tor_assert(state);
588 
589  if (state->iat_dist.type != CIRCPAD_DIST_NONE) {
590  /* Sample from a fixed IAT distribution and return */
591  circpad_delay_t iat_delay_shift = state->use_rtt_estimate ?
592  mi->rtt_estimate_usec + state->dist_added_shift_usec :
593  state->dist_added_shift_usec;
594  return circpad_distribution_sample_iat_delay(state, iat_delay_shift);
595  } else if (circpad_is_token_removal_supported(mi)) {
596  histogram = mi->histogram;
597  for (circpad_hist_index_t b = 0; b < state->histogram_len; b++)
598  histogram_total_tokens += histogram[b];
599  } else {
600  /* We have a histogram, but it's immutable */
601  histogram = state->histogram;
602  histogram_total_tokens = state->histogram_total_tokens;
603  }
604 
605  /* If we are out of tokens, don't schedule padding. */
606  if (!histogram_total_tokens) {
607  return CIRCPAD_DELAY_INFINITE;
608  }
609 
611  histogram_total_tokens);
612 
613  /* Skip all the initial zero bins */
614  while (!histogram[curr_bin]) {
615  curr_bin++;
616  }
617  curr_weight = histogram[curr_bin];
618 
619  // TODO: This is not constant-time. Pretty sure we don't
620  // really need it to be, though.
621  while (curr_weight < bin_choice) {
622  curr_bin++;
623  /* It should be impossible to run past the end of the histogram */
624  if (BUG(curr_bin >= state->histogram_len)) {
625  return CIRCPAD_DELAY_INFINITE;
626  }
627  curr_weight += histogram[curr_bin];
628  }
629 
630  /* Do some basic checking of the current bin we are in */
631  if (BUG(curr_bin >= state->histogram_len) ||
632  BUG(histogram[curr_bin] == 0)) {
633  return CIRCPAD_DELAY_INFINITE;
634  }
635 
636  // Store this index to remove the token upon callback.
638  mi->chosen_bin = curr_bin;
639  }
640 
641  if (curr_bin >= CIRCPAD_INFINITY_BIN(state)) {
643  mi->histogram[curr_bin] > 0) {
644  mi->histogram[curr_bin]--;
645  }
646 
647  // Infinity: Don't send a padding packet. Wait for a real packet
648  // and then see if our bins are empty or what else we should do.
649  return CIRCPAD_DELAY_INFINITE;
650  }
651 
652  tor_assert(curr_bin < CIRCPAD_INFINITY_BIN(state));
653 
654  bin_start = circpad_histogram_bin_to_usec(mi, curr_bin);
655  /* We don't need to subtract 1 from the upper bound because the random range
656  * function below samples from [bin_start, bin_end) */
657  bin_end = circpad_histogram_bin_to_usec(mi, curr_bin+1);
658 
659  /* Bin edges are monotonically increasing so this is a bug. Handle it. */
660  if (BUG(bin_start >= bin_end)) {
661  return bin_start;
662  }
663 
665  bin_start, bin_end);
666 }
667 
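/* Sampling sketch (hypothetical numbers): suppose the current state's
 * mutable histogram holds {2, 0, 3, 1} tokens, 6 tokens total, and the
 * chosen bin_choice drawn uniformly from [0, 6) is 4. The walk below passes
 * bin 0 (running weight 2) and bin 1 (still 2) and stops at bin 2 (running
 * weight 5), so the padding delay is then drawn uniformly from
 * [bin_2_start, bin_3_start) microseconds. A walk that ends in the infinity
 * bin instead returns CIRCPAD_DELAY_INFINITE and no padding is scheduled. */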
673 static double
675 {
676  log_fn(LOG_DEBUG,LD_CIRC, "Sampling delay with distribution %d",
677  dist.type);
678 
679  switch (dist.type) {
680  case CIRCPAD_DIST_NONE:
681  {
682  /* We should not get in here like this */
683  tor_assert_nonfatal_unreached();
684  return 0;
685  }
686  case CIRCPAD_DIST_UNIFORM:
687  {
688  // param2 is upper bound, param1 is lower
689  const struct uniform my_uniform = {
690  .base = UNIFORM(my_uniform),
691  .a = dist.param1,
692  .b = dist.param2,
693  };
694  return dist_sample(&my_uniform.base);
695  }
696  case CIRCPAD_DIST_LOGISTIC:
697  {
698  /* param1 is Mu, param2 is sigma. */
699  const struct logistic my_logistic = {
700  .base = LOGISTIC(my_logistic),
701  .mu = dist.param1,
702  .sigma = dist.param2,
703  };
704  return dist_sample(&my_logistic.base);
705  }
706  case CIRCPAD_DIST_LOG_LOGISTIC:
707  {
708  /* param1 is Alpha, param2 is 1.0/Beta */
709  const struct log_logistic my_log_logistic = {
710  .base = LOG_LOGISTIC(my_log_logistic),
711  .alpha = dist.param1,
712  .beta = dist.param2,
713  };
714  return dist_sample(&my_log_logistic.base);
715  }
716  case CIRCPAD_DIST_GEOMETRIC:
717  {
718  /* param1 is 'p' (success probability) */
719  const struct geometric my_geometric = {
720  .base = GEOMETRIC(my_geometric),
721  .p = dist.param1,
722  };
723  return dist_sample(&my_geometric.base);
724  }
725  case CIRCPAD_DIST_WEIBULL:
726  {
727  /* param1 is k, param2 is Lambda */
728  const struct weibull my_weibull = {
729  .base = WEIBULL(my_weibull),
730  .k = dist.param1,
731  .lambda = dist.param2,
732  };
733  return dist_sample(&my_weibull.base);
734  }
735  case CIRCPAD_DIST_PARETO:
736  {
737  /* param1 is sigma, param2 is xi, no more params for mu so we use 0 */
738  const struct genpareto my_genpareto = {
739  .base = GENPARETO(my_genpareto),
740  .mu = 0,
741  .sigma = dist.param1,
742  .xi = dist.param2,
743  };
744  return dist_sample(&my_genpareto.base);
745  }
746  }
747 
748  tor_assert_nonfatal_unreached();
749  return 0;
750 }
751 
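/* Specification sketch (hypothetical machine-author code): a state that
 * wants its inter-arrival times drawn from a uniform distribution between
 * 1000 and 10000 microseconds, instead of from a histogram, could set:
 *
 *   state->iat_dist.type = CIRCPAD_DIST_UNIFORM;
 *   state->iat_dist.param1 = 1000;   // lower bound (usec)
 *   state->iat_dist.param2 = 10000;  // upper bound (usec)
 *   state->dist_max_sample_usec = 10000;
 *
 * circpad_machine_sample_delay() then uses
 * circpad_distribution_sample_iat_delay() rather than walking a histogram. */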
760  circpad_delay_t target_bin_usec)
761 {
763  target_bin_usec);
764 
765  /* Don't remove from the infinity bin */
766  for (; bin < CIRCPAD_INFINITY_BIN(mi); bin++) {
767  if (mi->histogram[bin] &&
768  histogram_get_bin_upper_bound(mi, bin) >= target_bin_usec) {
769  return bin;
770  }
771  }
772 
773  return mi->histogram_len;
774 }
775 
784  circpad_delay_t target_bin_usec)
785 {
787  target_bin_usec);
788 
789  for (; bin >= 0; bin--) {
790  if (mi->histogram[bin] &&
791  circpad_histogram_bin_to_usec(mi, bin) <= target_bin_usec) {
792  return bin;
793  }
794  }
795 
796  return -1;
797 }
798 
805 STATIC void
807  circpad_delay_t target_bin_usec)
808 {
809  /* We need to remove the token from the first bin
810  * whose upper bound is greater than the target, and that
811  * has tokens remaining. */
813  target_bin_usec);
814 
815  if (bin >= 0 && bin < CIRCPAD_INFINITY_BIN(mi)) {
816  if (!BUG(mi->histogram[bin] == 0)) {
817  mi->histogram[bin]--;
818  }
819  }
820 }
821 
828 STATIC void
830  circpad_delay_t target_bin_usec)
831 {
833  target_bin_usec);
834 
835  if (bin >= 0 && bin < CIRCPAD_INFINITY_BIN(mi)) {
836  if (!BUG(mi->histogram[bin] == 0)) {
837  mi->histogram[bin]--;
838  }
839  }
840 }
841 
842 /* Helper macro: Ensure that the bin has tokens available, and BUG out of the
843  * function if it's not the case. */
844 #define ENSURE_BIN_CAPACITY(bin_index) \
845  if (BUG(mi->histogram[bin_index] == 0)) { \
846  return; \
847  }
848 
859 STATIC void
861  circpad_delay_t target_bin_usec,
862  bool use_usec)
863 {
864  circpad_hist_index_t lower, higher, current;
865  circpad_hist_index_t bin_to_remove = -1;
866 
867  lower = circpad_machine_first_lower_index(mi, target_bin_usec);
868  higher = circpad_machine_first_higher_index(mi, target_bin_usec);
869  current = circpad_histogram_usec_to_bin(mi, target_bin_usec);
870 
871  /* Sanity check the results */
872  if (BUG(lower > current) || BUG(higher < current)) {
873  return;
874  }
875 
876  /* Take care of edge cases first */
877  if (higher == mi->histogram_len && lower == -1) {
878  /* All bins are empty */
879  return;
880  } else if (higher == mi->histogram_len) {
881  /* All higher bins are empty */
882  ENSURE_BIN_CAPACITY(lower);
883  mi->histogram[lower]--;
884  return;
885  } else if (lower == -1) {
886  /* All lower bins are empty */
887  ENSURE_BIN_CAPACITY(higher);
888  mi->histogram[higher]--;
889  return;
890  }
891 
892  /* Now handle the intermediate cases */
893  if (use_usec) {
894  /* Find the closest bin midpoint to the target */
895  circpad_delay_t lower_usec = circpad_get_histogram_bin_midpoint(mi, lower);
896  circpad_delay_t higher_usec =
898 
899  if (target_bin_usec < lower_usec) {
900  // Lower bin is closer
901  ENSURE_BIN_CAPACITY(lower);
902  bin_to_remove = lower;
903  } else if (target_bin_usec > higher_usec) {
904  // Higher bin is closer
905  ENSURE_BIN_CAPACITY(higher);
906  bin_to_remove = higher;
907  } else if (target_bin_usec-lower_usec > higher_usec-target_bin_usec) {
908  // Higher bin is closer
909  ENSURE_BIN_CAPACITY(higher);
910  bin_to_remove = higher;
911  } else {
912  // Lower bin is closer
913  ENSURE_BIN_CAPACITY(lower);
914  bin_to_remove = lower;
915  }
916  mi->histogram[bin_to_remove]--;
917  log_debug(LD_CIRC, "Removing token from bin %d", bin_to_remove);
918  return;
919  } else {
920  if (current - lower > higher - current) {
921  // Higher bin is closer
922  ENSURE_BIN_CAPACITY(higher);
923  mi->histogram[higher]--;
924  return;
925  } else {
926  // Lower bin is closer
927  ENSURE_BIN_CAPACITY(lower);
928  mi->histogram[lower]--;
929  return;
930  }
931  }
932 }
933 
934 #undef ENSURE_BIN_CAPACITY
935 
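/* Removal example (hypothetical values): suppose a non-padding cell is sent
 * 4000 usec after padding was scheduled, and the nearest bins that still
 * hold tokens are a lower bin with midpoint 1500 usec and a higher bin with
 * midpoint 12000 usec. With use_usec set, 4000 - 1500 < 12000 - 4000, so the
 * lower bin is closer and one token is removed from it. With use_usec unset,
 * the same comparison is made on bin indices rather than bin midpoints. */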
943 static void
945  circpad_delay_t target_bin_usec)
946 {
948  target_bin_usec);
949 
950  if (mi->histogram[bin] > 0)
951  mi->histogram[bin]--;
952 }
953 
960 static circpad_decision_t
962 {
963  uint32_t histogram_total_tokens = 0;
964 
965  /* Check if bins empty. This requires summing up the current mutable
966  * machineinfo histogram token total and checking if it is zero.
967  * Machineinfo does not keep a running token count. We're assuming the
968  * extra space is not worth this short loop iteration.
969  *
970  * We also do not count infinity bin in histogram totals.
971  */
973  for (circpad_hist_index_t b = 0; b < CIRCPAD_INFINITY_BIN(mi); b++)
974  histogram_total_tokens += mi->histogram[b];
975 
976  /* If we change state, we're done */
977  if (histogram_total_tokens == 0) {
978  if (circpad_internal_event_bins_empty(mi) == CIRCPAD_STATE_CHANGED)
979  return CIRCPAD_STATE_CHANGED;
980  }
981  }
982 
983  if (mi->state_length == 0) {
985  }
986 
987  return CIRCPAD_STATE_UNCHANGED;
988 }
989 
996 static inline void
998 {
999  /* If we have a valid state length bound, consider it */
1000  if (mi->state_length != CIRCPAD_STATE_LENGTH_INFINITE &&
1001  !BUG(mi->state_length <= 0)) {
1002  mi->state_length--;
1003  }
1004 
1005  /*
1006  * Update padding counts for rate limiting: We scale at UINT16_MAX
1007  * because we only use this for a percentile limit of 2 sig figs, and
1008  * space is scarce in the machineinfo struct.
1009  */
1010  mi->padding_sent++;
1011  if (mi->padding_sent == UINT16_MAX) {
1012  mi->padding_sent /= 2;
1013  mi->nonpadding_sent /= 2;
1014  }
1015 
1017 
1018  /* If we have a mutable histogram, reduce the token count from
1019  * the chosen padding bin (this assumes we always send padding
1020  * when we intended to). */
1022  /* Check array bounds and token count before removing */
1023  if (!BUG(mi->chosen_bin >= mi->histogram_len) &&
1024  !BUG(mi->histogram[mi->chosen_bin] == 0)) {
1025  mi->histogram[mi->chosen_bin]--;
1026  }
1027  }
1028 }
1029 
1037 static inline void
1039 {
1040  /* Update non-padding counts for rate limiting: We scale at UINT16_MAX
1041  * because we only use this for a percentile limit of 2 sig figs, and
1042  * space is scarce in the machineinfo struct. */
1043  mi->nonpadding_sent++;
1044  if (mi->nonpadding_sent == UINT16_MAX) {
1045  mi->padding_sent /= 2;
1046  mi->nonpadding_sent /= 2;
1047  }
1048 
1049  /* Update any state packet length limits that apply */
1051 
1052  /* Remove a token from the histogram, if applicable */
1054 }
1055 
1063 static inline void
1066 {
1067  const circpad_state_t *state = NULL;
1068 
1069  if (mi->state_length == CIRCPAD_STATE_LENGTH_INFINITE)
1070  return;
1071 
1072  state = circpad_machine_current_state(mi);
1073 
1074  /* If we are not in a padding state (like start or end), we're done */
1075  if (!state)
1076  return;
1077 
1078  /* If we're enforcing a state length on non-padding packets,
1079  * decrement it */
1080  if (state->length_includes_nonpadding &&
1081  mi->state_length > 0) {
1082  mi->state_length--;
1083  }
1084 }
1085 
1092 STATIC void
1094 {
1095  const circpad_state_t *state = NULL;
1096  circpad_time_t current_time;
1097  circpad_delay_t target_bin_usec;
1098 
1099  /* Don't remove any tokens if there was no padding scheduled */
1100  if (!mi->padding_scheduled_at_usec) {
1101  return;
1102  }
1103 
1104  state = circpad_machine_current_state(mi);
1105 
1106  /* If we are not in a padding state (like start or end), we're done */
1107  if (!state)
1108  return;
1109  /* Don't remove any tokens if we're not doing token removal */
1111  return;
1112 
1113  current_time = monotime_absolute_usec();
1114 
1115  /* If we have scheduled padding some time in the future, we want to see what
1116  bin we are in at the current time */
1117  target_bin_usec = (circpad_delay_t)
1118  MIN((current_time - mi->padding_scheduled_at_usec),
1120 
1121  /* We are treating this non-padding cell as a padding cell, so we cancel
1122  padding timer, if present. */
1123  mi->padding_scheduled_at_usec = 0;
1124  if (mi->is_padding_timer_scheduled) {
1127  }
1128 
1129  /* Perform the specified token removal strategy */
1130  switch (state->token_removal) {
1132  circpad_machine_remove_closest_token(mi, target_bin_usec, 1);
1133  break;
1135  circpad_machine_remove_closest_token(mi, target_bin_usec, 0);
1136  break;
1138  circpad_machine_remove_lower_token(mi, target_bin_usec);
1139  break;
1141  circpad_machine_remove_higher_token(mi, target_bin_usec);
1142  break;
1144  circpad_machine_remove_exact(mi, target_bin_usec);
1145  break;
1147  default:
1148  tor_assert_nonfatal_unreached();
1149  log_warn(LD_BUG, "Circpad: Unknown token removal strategy %d",
1150  state->token_removal);
1151  break;
1152  }
1153 }
1154 
1166 circpad_send_command_to_hop,(origin_circuit_t *circ, uint8_t hopnum,
1167  uint8_t relay_command, const uint8_t *payload,
1168  ssize_t payload_len))
1169 {
1170  crypt_path_t *target_hop = circuit_get_cpath_hop(circ, hopnum);
1171  signed_error_t ret;
1172 
1173  /* Check that the cpath has the target hop */
1174  if (!target_hop) {
1175  log_fn(LOG_WARN, LD_BUG, "Padding circuit %u has %d hops, not %d",
1176  circ->global_identifier, circuit_get_cpath_len(circ), hopnum);
1177  return -1;
1178  }
1179 
1180  /* Check that the target hop is opened */
1181  if (target_hop->state != CPATH_STATE_OPEN) {
1183  "Padding circuit %u has %d hops, not %d",
1184  circ->global_identifier,
1185  circuit_get_cpath_opened_len(circ), hopnum);
1186  return -1;
1187  }
1188 
1189  /* Send the drop command to the second hop */
1190  ret = relay_send_command_from_edge(0, TO_CIRCUIT(circ), relay_command,
1191  (const char*)payload, payload_len,
1192  target_hop);
1193  return ret;
1194 }
1195 
1209 {
1210  circuit_t *circ = mi->on_circ;
1211  int machine_idx = mi->machine_index;
1212  mi->padding_scheduled_at_usec = 0;
1213  circpad_statenum_t state = mi->current_state;
1214 
1215  /* Make sure circuit didn't close on us */
1216  if (mi->on_circ->marked_for_close) {
1218  "Padding callback on circuit marked for close (%u). Ignoring.",
1219  CIRCUIT_IS_ORIGIN(mi->on_circ) ?
1221  return CIRCPAD_STATE_CHANGED;
1222  }
1223 
1225 
1226  if (CIRCUIT_IS_ORIGIN(mi->on_circ)) {
1227  circpad_send_command_to_hop(TO_ORIGIN_CIRCUIT(mi->on_circ),
1228  CIRCPAD_GET_MACHINE(mi)->target_hopnum,
1229  RELAY_COMMAND_DROP, NULL, 0);
1230  log_info(LD_CIRC, "Callback: Sending padding to origin circuit %u"
1231  " (%d) [length: %"PRIu64"]",
1233  mi->on_circ->purpose, mi->state_length);
1234  } else {
1235  // If we're a non-origin circ, we can just send from here as if we're the
1236  // edge.
1237  if (TO_OR_CIRCUIT(circ)->p_chan_cells.n <= circpad_max_circ_queued_cells) {
1238  log_info(LD_CIRC, "Callback: Sending padding to circuit (%d)"
1239  " [length: %"PRIu64"]", mi->on_circ->purpose, mi->state_length);
1240  relay_send_command_from_edge(0, mi->on_circ, RELAY_COMMAND_DROP, NULL,
1241  0, NULL);
1243  } else {
1244  static ratelim_t cell_lim = RATELIM_INIT(600);
1245  log_fn_ratelim(&cell_lim,LOG_NOTICE,LD_CIRC,
1246  "Too many cells (%d) in circ queue to send padding.",
1247  TO_OR_CIRCUIT(circ)->p_chan_cells.n);
1248  }
1249  }
1250 
1251  /* This is a padding cell sent from the client or from the middle node,
1252  * (because it's invoked from circuitpadding.c) */
1254 
1255  /* The circpad_cell_event_padding_sent() could cause us to transition.
1256  * Check that we still have a padding machineinfo, and then check our token
1257  * supply. */
1258  if (circ->padding_info[machine_idx] != NULL) {
1259  if (state != circ->padding_info[machine_idx]->current_state)
1260  return CIRCPAD_STATE_CHANGED;
1261  else
1262  return check_machine_token_supply(circ->padding_info[machine_idx]);
1263  } else {
1264  return CIRCPAD_STATE_CHANGED;
1265  }
1266 }
1267 
1276 static void
1277 circpad_send_padding_callback(tor_timer_t *timer, void *args,
1278  const struct monotime_t *time)
1279 {
1281  (void)timer; (void)time;
1282 
1283  if (mi && mi->on_circ) {
1284  assert_circuit_ok(mi->on_circ);
1286  } else {
1287  // This shouldn't happen (represents a timer leak)
1289  "Circuit closed while waiting for padding timer.");
1291  }
1292 
1293  // TODO-MP-AP: Unify this counter with channelpadding for rephist stats
1294  //total_timers_pending--;
1295 }
1296 
1300 void
1302 {
1304  networkstatus_get_param(ns, "circpad_padding_disabled",
1305  0, 0, 1);
1306 
1307  circpad_padding_reduced =
1308  networkstatus_get_param(ns, "circpad_padding_reduced",
1309  0, 0, 1);
1310 
1311  circpad_global_allowed_cells =
1312  networkstatus_get_param(ns, "circpad_global_allowed_cells",
1313  0, 0, UINT16_MAX-1);
1314 
1315  circpad_global_max_padding_percent =
1316  networkstatus_get_param(ns, "circpad_global_max_padding_pct",
1317  0, 0, 100);
1318 
1319  circpad_max_circ_queued_cells =
1320  networkstatus_get_param(ns, "circpad_max_circ_queued_cells",
1321  CIRCWINDOW_START_MAX, 0, 50*CIRCWINDOW_START_MAX);
1322 }
1323 
1327 static bool
1329 {
1330  /* If padding has been disabled in the consensus, don't send any more
1331  * padding. Technically the machine should be shut down when the next
1332  * machine condition check happens, but machine checks only happen on
1333  * certain circuit events, and if padding is disabled due to some
1334  * network overload or DoS condition, we really want to stop ASAP. */
1335  if (circpad_padding_disabled || !get_options()->CircuitPadding) {
1336  return 0;
1337  }
1338 
1339  return 1;
1340 }
1341 
1355 STATIC bool
1357 {
1358  const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);
1359 
1360  /* If machine_padding_pct is non-zero, and we've sent more
1361  * than the allowed count of padding cells, then check our
1362  * percent limits for this machine. */
1363  if (machine->max_padding_percent &&
1364  mi->padding_sent >= machine->allowed_padding_count) {
1365  uint32_t total_cells = mi->padding_sent + mi->nonpadding_sent;
1366 
1367  /* Check the percent */
1368  if ((100*(uint32_t)mi->padding_sent) / total_cells >
1369  machine->max_padding_percent) {
1370  return 1; // limit is reached. Stop.
1371  }
1372  }
1373 
1374  /* If circpad_max_global_padding_pct is non-zero, and we've
1375  * sent more than the global padding cell limit, then check our
1376  * global tor process percentage limit on padding. */
1377  if (circpad_global_max_padding_percent &&
1378  circpad_global_padding_sent >= circpad_global_allowed_cells) {
1379  uint64_t total_cells = circpad_global_padding_sent +
1380  circpad_global_nonpadding_sent;
1381 
1382  /* Check the percent */
1383  if ((100*circpad_global_padding_sent) / total_cells >
1384  circpad_global_max_padding_percent) {
1385  return 1; // global limit reached. Stop.
1386  }
1387  }
1388 
1389  return 0; // All good!
1390 }
1391 
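/* Rate-limit arithmetic (hypothetical counts): with max_padding_percent = 50
 * and allowed_padding_count = 100, a machine that has sent 120 padding cells
 * and 80 non-padding cells is checked as (100*120)/200 = 60 > 50, so it has
 * reached its limit and further padding is suppressed until the ratio
 * recovers. The same computation is applied process-wide against
 * circpad_global_max_padding_percent and circpad_global_allowed_cells. */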
1403 MOCK_IMPL(circpad_decision_t,
1404 circpad_machine_schedule_padding,(circpad_machine_runtime_t *mi))
1405 {
1406  circpad_delay_t in_usec = 0;
1407  struct timeval timeout;
1408  tor_assert(mi);
1409 
1410  /* Don't schedule padding if it is disabled */
1411  if (!circpad_is_padding_allowed()) {
1412  static ratelim_t padding_lim = RATELIM_INIT(600);
1413  log_fn_ratelim(&padding_lim,LOG_INFO,LD_CIRC,
1414  "Padding has been disabled, but machine still on circuit %"PRIu64
1415  ", %d",
1416  mi->on_circ->n_chan ? mi->on_circ->n_chan->global_identifier : 0,
1417  mi->on_circ->n_circ_id);
1418 
1419  return CIRCPAD_STATE_UNCHANGED;
1420  }
1421 
1422  /* Don't schedule padding if we are currently in dormant mode. */
1423  if (!is_participating_on_network()) {
1424  log_info(LD_CIRC, "Not scheduling padding because we are dormant.");
1425  return CIRCPAD_STATE_UNCHANGED;
1426  }
1427 
1428  // Don't pad in end (but also don't cancel any previously
1429  // scheduled padding either).
1430  if (mi->current_state == CIRCPAD_STATE_END) {
1431  log_fn(LOG_INFO, LD_CIRC, "Padding end state on circuit %u",
1432  CIRCUIT_IS_ORIGIN(mi->on_circ) ?
1434  return CIRCPAD_STATE_UNCHANGED;
1435  }
1436 
1437  /* Check our padding limits */
1439  if (CIRCUIT_IS_ORIGIN(mi->on_circ)) {
1441  "Padding machine has reached padding limit on circuit %u",
1443  } else {
1444  static ratelim_t padding_lim = RATELIM_INIT(600);
1445  log_fn_ratelim(&padding_lim,LOG_INFO,LD_CIRC,
1446  "Padding machine has reached padding limit on circuit %"PRIu64
1447  ", %d",
1448  mi->on_circ->n_chan ? mi->on_circ->n_chan->global_identifier : 0,
1449  mi->on_circ->n_circ_id);
1450  }
1451  return CIRCPAD_STATE_UNCHANGED;
1452  }
1453 
1454  if (mi->is_padding_timer_scheduled) {
1455  /* Cancel current timer (if any) */
1458  }
1459 
1460  /* in_usec = in microseconds */
1461  in_usec = circpad_machine_sample_delay(mi);
1462  /* If we're using token removal, we need to know when the padding
1463  * was scheduled at, so we can remove the appropriate token if
1464  * a non-padding cell is sent before the padding timer expires.
1465  *
1466  * However, since monotime is unpredictably expensive, let's avoid
1467  * using it for machines that don't need token removal. */
1469  mi->padding_scheduled_at_usec = monotime_absolute_usec();
1470  } else {
1471  mi->padding_scheduled_at_usec = 1;
1472  }
1473  log_fn(LOG_INFO,LD_CIRC,"\tPadding in %u usec on circuit %u", in_usec,
1474  CIRCUIT_IS_ORIGIN(mi->on_circ) ?
1476 
1477  // Don't schedule if we have infinite delay.
1478  if (in_usec == CIRCPAD_DELAY_INFINITE) {
1480  }
1481 
1482  if (mi->state_length == 0) {
1483  /* If we're at length 0, that means we hit 0 after sending
1484  * a cell earlier, and emitted an event for it, but
1485  * for whatever reason we did not decide to change states then.
1486  * So maybe the machine is waiting for bins empty, or for an
1487  * infinity event later? That would be a strange machine,
1488  * but there's no reason to make it impossible. */
1489  return CIRCPAD_STATE_UNCHANGED;
1490  }
1491 
1492  if (in_usec <= 0) {
1494  }
1495 
1496  timeout.tv_sec = in_usec/TOR_USEC_PER_SEC;
1497  timeout.tv_usec = (in_usec%TOR_USEC_PER_SEC);
1498 
1499  log_fn(LOG_INFO, LD_CIRC, "\tPadding circuit %u in %u sec, %u usec",
1500  CIRCUIT_IS_ORIGIN(mi->on_circ) ?
1502  (unsigned)timeout.tv_sec, (unsigned)timeout.tv_usec);
1503 
1504  if (mi->padding_timer) {
1506  } else {
1507  mi->padding_timer =
1509  }
1510  timer_schedule(mi->padding_timer, &timeout);
1512 
1513  // TODO-MP-AP: Unify with channelpadding counter
1514  //rep_hist_padding_count_timers(++total_timers_pending);
1515 
1516  return CIRCPAD_STATE_UNCHANGED;
1517 }
1518 
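/* Timer arithmetic (hypothetical delay): a sampled delay of 2500000 usec is
 * split as timeout.tv_sec = 2500000/1000000 = 2 and timeout.tv_usec =
 * 2500000%1000000 = 500000, i.e. padding fires in 2.5 seconds unless a
 * non-padding cell or a cancel event clears the timer first. */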
1528 static void
1530 {
1531  const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);
1532  circuit_t *on_circ = mi->on_circ;
1533 
1534  log_fn(LOG_INFO,LD_CIRC, "Padding machine in end state on circuit %u (%d)",
1535  CIRCUIT_IS_ORIGIN(on_circ) ?
1536  TO_ORIGIN_CIRCUIT(on_circ)->global_identifier : 0,
1537  on_circ->purpose);
1538 
1539  /*
1540  * We allow machines to shut down and delete themselves as opposed
1541  * to just going back to START or waiting forever in END so that
1542  * we can handle the case where this machine started while it was
1543  * the only machine that matched conditions, but *since* then more
1544  * "higher ranking" machines now match the conditions, and would
1545  * be given a chance to take precedence over this one in
1546  * circpad_add_matching_machines().
1547  *
1548  * Returning to START or waiting forever in END would not give those
1549  * other machines a chance to be launched, whereas shutting down
1550  * here does.
1551  */
1552  if (machine->should_negotiate_end) {
1553  if (machine->is_origin_side) {
1554  /* We free the machine info here so that we can be replaced
1555  * by a different machine. But we must leave the padding_machine
1556  * in place to wait for the negotiated response */
1558  machine->machine_index);
1560  machine->machine_num,
1561  machine->target_hopnum,
1562  CIRCPAD_COMMAND_STOP);
1563  } else {
1565  machine->machine_index);
1567  machine->machine_num,
1568  CIRCPAD_COMMAND_STOP,
1569  CIRCPAD_RESPONSE_OK);
1570  on_circ->padding_machine[machine->machine_index] = NULL;
1571  }
1572  }
1573 }
1574 
1583 MOCK_IMPL(circpad_decision_t,
1584 circpad_machine_spec_transition,(circpad_machine_runtime_t *mi,
1585  circpad_event_t event))
1586 {
1587  const circpad_state_t *state =
1589 
1590  /* If state is null we are in the end state. */
1591  if (!state) {
1592  /* If we are in the end state, we don't pad no matter what. */
1593  return CIRCPAD_STATE_UNCHANGED;
1594  }
1595 
1596  /* Check if this event is ignored or causes a cancel */
1597  if (state->next_state[event] == CIRCPAD_STATE_IGNORE) {
1598  return CIRCPAD_STATE_UNCHANGED;
1599  } else if (state->next_state[event] == CIRCPAD_STATE_CANCEL) {
1600  /* Check cancel events and cancel any pending padding */
1601  mi->padding_scheduled_at_usec = 0;
1602  if (mi->is_padding_timer_scheduled) {
1604  /* Cancel current timer (if any) */
1606  }
1607  return CIRCPAD_STATE_UNCHANGED;
1608  } else {
1609  circpad_statenum_t s = state->next_state[event];
1610  /* See if we need to transition to any other states based on this event.
1611  * Whenever a transition happens, even to our own state, we schedule
1612  * padding.
1613  *
1614  * So if a state only wants to schedule padding for an event, it specifies
1615  * a transition to itself. All non-specified events are ignored.
1616  */
1618  "Circuit %u circpad machine %d transitioning from %u to %u",
1619  CIRCUIT_IS_ORIGIN(mi->on_circ) ?
1621  mi->machine_index, mi->current_state, s);
1622 
1623  /* If this is not the same state, switch and init tokens,
1624  * otherwise just reschedule padding. */
1625  if (mi->current_state != s) {
1626  mi->current_state = s;
1629 
1630  /* If we transition to the end state, check to see
1631  * if this machine wants to be shut down at end */
1632  if (s == CIRCPAD_STATE_END) {
1634  /* We transitioned but we don't pad in end. Also, mi
1635  * may be freed. Returning STATE_CHANGED prevents us
1636  * from accessing it in any callers of this function. */
1637  return CIRCPAD_STATE_CHANGED;
1638  }
1639 
1640  /* We transitioned to a new state, schedule padding */
1641  circpad_machine_schedule_padding(mi);
1642  return CIRCPAD_STATE_CHANGED;
1643  }
1644 
1645  /* We transitioned back to the same state. Schedule padding,
1646  * and inform if that causes a state transition. */
1647  return circpad_machine_schedule_padding(mi);
1648  }
1649 
1650  return CIRCPAD_STATE_UNCHANGED;
1651 }
1652 
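/* Transition table sketch (hypothetical machine; CIRCPAD_STATE_BURST comes
 * from circuitpadding.h): a state that reschedules padding whenever a
 * non-padding cell is received, and shuts down once its sampled length is
 * exhausted, could be wired up as:
 *
 *   spec->states[CIRCPAD_STATE_BURST].next_state[CIRCPAD_EVENT_NONPADDING_RECV]
 *     = CIRCPAD_STATE_BURST;   // self-transition: just reschedule padding
 *   spec->states[CIRCPAD_STATE_BURST].next_state[CIRCPAD_EVENT_LENGTH_COUNT]
 *     = CIRCPAD_STATE_END;     // stop padding when the length runs out
 *
 * Events left at CIRCPAD_STATE_IGNORE (the default set by
 * circpad_machine_states_init() below) are simply ignored. */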
1664 static void
1667 {
1668  /* Origin circuits don't estimate RTT. They could do it easily enough,
1669  * but they have no reason to use it in any delay calculations. */
1670  if (CIRCUIT_IS_ORIGIN(circ) || mi->stop_rtt_update)
1671  return;
1672 
1673  /* If we already have a last received packet time, that means we
1674  * did not get a response before this packet. The RTT estimate
1675  * only makes sense if we do not have multiple packets on the
1676  * wire, so stop estimating if this is the second packet
1677  * back to back. However, for the first set of back-to-back
1678  * packets, we can wait until the very first response comes back
1679  * to us, to measure that RTT (for the response to optimistic
1680  * data, for example). Hence stop_rtt_update is only checked
1681  * in this received side function, and not in send side below.
1682  */
1683  if (mi->last_received_time_usec) {
1684  /* We also allow multiple back-to-back packets if the circuit is not
1685  * opened, to handle var cells.
1686  * XXX: Will this work with our var cell plans? Maybe not,
1687  * since we're opened at the middle hop as soon as we process
1688  * one var extend2 :/ */
1689  if (circ->state == CIRCUIT_STATE_OPEN) {
1691  "Stopping padding RTT estimation on circuit (%"PRIu64
1692  ", %d) after two back to back packets. Current RTT: %d",
1693  circ->n_chan ? circ->n_chan->global_identifier : 0,
1694  circ->n_circ_id, mi->rtt_estimate_usec);
1695  mi->stop_rtt_update = 1;
1696 
1697  if (!mi->rtt_estimate_usec) {
1698  static ratelim_t rtt_lim = RATELIM_INIT(600);
1699  log_fn_ratelim(&rtt_lim,LOG_NOTICE,LD_BUG,
1700  "Circuit got two cells back to back before estimating RTT.");
1701  }
1702  }
1703  } else {
1705  if (BUG(!state)) {
1706  return;
1707  }
1708 
1709  /* Since monotime is unpredictably expensive, only update this field
1710  * if rtt estimates are needed. Otherwise, stop the rtt update. */
1711  if (state->use_rtt_estimate) {
1712  mi->last_received_time_usec = monotime_absolute_usec();
1713  } else {
1714  /* Let's fast-path future decisions not to update rtt if the
1715  * feature is not in use. */
1716  mi->stop_rtt_update = 1;
1717  }
1718  }
1719 }
1720 
1730 static void
1733 {
1734  /* Origin circuits don't estimate RTT. They could do it easily enough,
1735  * but they have no reason to use it in any delay calculations. */
1736  if (CIRCUIT_IS_ORIGIN(circ))
1737  return;
1738 
1739  /* If last_received_time_usec is non-zero, we are waiting for a response
1740  * from the exit side. Calculate the time delta and use it as RTT. */
1741  if (mi->last_received_time_usec) {
1742  circpad_time_t rtt_time = monotime_absolute_usec() -
1744 
1745  /* Reset the last RTT packet time, so we can tell if two cells
1746  * arrive back to back */
1747  mi->last_received_time_usec = 0;
1748 
1749  /* Use INT32_MAX to ensure the addition doesn't overflow */
1750  if (rtt_time >= INT32_MAX) {
1752  "Circuit padding RTT estimate overflowed: %"PRIu64
1753  " vs %"PRIu64, monotime_absolute_usec(),
1755  return;
1756  }
1757 
1758  /* If the old RTT estimate is lower than this one, use this one, because
1759  * the circuit is getting longer. If this estimate is somehow
1760  * faster than the previous, then maybe that was network jitter, or a
1761  * bad monotonic clock source (so our ratchet returned a zero delta).
1762  * In that case, average them. */
1763  if (mi->rtt_estimate_usec < (circpad_delay_t)rtt_time) {
1764  mi->rtt_estimate_usec = (circpad_delay_t)rtt_time;
1765  } else {
1766  mi->rtt_estimate_usec += (circpad_delay_t)rtt_time;
1767  mi->rtt_estimate_usec /= 2;
1768  }
1769  } else if (circ->state == CIRCUIT_STATE_OPEN) {
1770  /* If last_received_time_usec is zero, then we have gotten two cells back
1771  * to back. Stop estimating RTT in this case. Note that we only
1772  * stop RTT update if the circuit is opened, to allow for RTT estimates
1773  * of var cells during circ setup. */
1774  if (!mi->rtt_estimate_usec && !mi->stop_rtt_update) {
1775  static ratelim_t rtt_lim = RATELIM_INIT(600);
1776  log_fn_ratelim(&rtt_lim,LOG_NOTICE,LD_BUG,
1777  "Circuit sent two cells back to back before estimating RTT.");
1778  }
1779  mi->stop_rtt_update = 1;
1780  }
1781 }
1782 
1791 void
1793 {
1794  /* Update global cell count */
1795  circpad_global_nonpadding_sent++;
1796 
1797  /* If there are no machines then this loop should not iterate */
1799  /* First, update any timestamps */
1800  on_circ->padding_info[i]->last_cell_time_sec = approx_time();
1801  circpad_estimate_circ_rtt_on_send(on_circ, on_circ->padding_info[i]);
1802 
1803  /* Then, do accounting */
1805 
1806  /* Check to see if we've run out of tokens for this state already,
1807  * and if not, check for other state transitions */
1808  if (check_machine_token_supply(on_circ->padding_info[i])
1809  == CIRCPAD_STATE_UNCHANGED) {
1810  /* If removing a token did not cause a transition, check if
1811  * the non-padding sent event should cause one. */
1812  circpad_machine_spec_transition(on_circ->padding_info[i],
1813  CIRCPAD_EVENT_NONPADDING_SENT);
1814  }
1815  } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
1816 }
1817 
1822 int
1824  crypt_path_t *layer_hint,
1825  const relay_header_t *rh)
1826 {
1827  /* First handle the padding commands, since we want to ignore any other
1828  * commands if this circuit is padding-specific. */
1829  switch (rh->command) {
1830  case RELAY_COMMAND_DROP:
1831  /* Already examined in circpad_deliver_recognized_relay_cell_events */
1832  return 0;
1833  case RELAY_COMMAND_PADDING_NEGOTIATE:
1835  return 0;
1836  case RELAY_COMMAND_PADDING_NEGOTIATED:
1837  if (circpad_handle_padding_negotiated(circ, cell, layer_hint) == 0)
1839  return 0;
1840  }
1841 
1842  /* If this is a padding circuit we don't need to parse any other commands
1843  * than the padding ones. Just drop them to the floor.
1844  *
1845  * Note: we deliberately do not call circuit_read_valid_data() here. The
1846  * vanguards addon (specifically the 'bandguards' component's dropped cell
1847  * detection) will thus close this circuit, as it would for any other
1848  * unexpected cell. However, default tor will *not* close the circuit.
1849  *
1850  * This is intentional. We are not yet certain that it is optimal to keep
1851  * padding circuits open in cases like these, rather than closing them.
1852  * We suspect that continuing to pad is optimal against a passive classifier,
1853  * but as soon as the adversary is active (even as a client adversary) this
1854  * might change.
1855  *
1856  * So as a way forward, we log the cell command and circuit number, to
1857  * help us enumerate the most common instances of this in testing with
1858  * vanguards, to see which are common enough to verify and handle
1859  * properly.
1860  * - Mike
1861  */
1863  log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
1864  "Ignored cell (%d) that arrived in padding circuit "
1865  "%u.", rh->command, CIRCUIT_IS_ORIGIN(circ) ?
1866  TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
1867  return 0;
1868  }
1869 
1870  return 1;
1871 }
1872 
1881 void
1883 {
1885  /* First, update any timestamps */
1886  on_circ->padding_info[i]->last_cell_time_sec = approx_time();
1888 
1889  circpad_machine_spec_transition(on_circ->padding_info[i],
1890  CIRCPAD_EVENT_NONPADDING_RECV);
1891  } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
1892 }
1893 
1902 void
1904 {
1906  /* Check to see if we've run out of tokens for this state already,
1907  * and if not, check for other state transitions */
1908  if (check_machine_token_supply(on_circ->padding_info[i])
1909  == CIRCPAD_STATE_UNCHANGED) {
1910  /* If removing a token did not cause a transition, check if
1911  * the padding sent event should cause one. */
1912 
1913  on_circ->padding_info[i]->last_cell_time_sec = approx_time();
1914  circpad_machine_spec_transition(on_circ->padding_info[i],
1915  CIRCPAD_EVENT_PADDING_SENT);
1916  }
1917  } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
1918 }
1919 
1928 void
1930 {
1931  /* identical to padding sent */
1933  on_circ->padding_info[i]->last_cell_time_sec = approx_time();
1934  circpad_machine_spec_transition(on_circ->padding_info[i],
1935  CIRCPAD_EVENT_PADDING_RECV);
1936  } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
1937 }
1938 
1950 {
1951  return circpad_machine_spec_transition(mi, CIRCPAD_EVENT_INFINITY);
1952 }
1953 
1964 {
1965  if (circpad_machine_spec_transition(mi, CIRCPAD_EVENT_BINS_EMPTY)
1966  == CIRCPAD_STATE_CHANGED) {
1967  return CIRCPAD_STATE_CHANGED;
1968  } else {
1969  /* If we dont transition, then we refill the tokens */
1971  return CIRCPAD_STATE_UNCHANGED;
1972  }
1973 }
1974 
1983 {
1984  return circpad_machine_spec_transition(mi, CIRCPAD_EVENT_LENGTH_COUNT);
1985 }
1986 
1990 static inline bool
1992  const circpad_machine_spec_t *machine)
1993 {
1994  /* If padding is disabled, no machines should match/apply. This has
1995  * the effect of shutting down all machines, and not adding any more. */
1996  if (circpad_padding_disabled || !get_options()->CircuitPadding)
1997  return 0;
1998 
1999  /* If the consensus or our torrc has selected reduced connection padding,
2000  * then only allow this machine if it is flagged as acceptable under
2001  * reduced padding conditions */
2002  if (circpad_padding_reduced || get_options()->ReducedCircuitPadding) {
2003  if (!machine->conditions.reduced_padding_ok)
2004  return 0;
2005  }
2006 
2007  if (!(circpad_circ_purpose_to_mask(TO_CIRCUIT(circ)->purpose)
2008  & machine->conditions.purpose_mask))
2009  return 0;
2010 
2011  if (machine->conditions.requires_vanguards) {
2012  const or_options_t *options = get_options();
2013 
2014  /* Pinned middles are effectively vanguards */
2015  if (!(options->HSLayer2Nodes || options->HSLayer3Nodes))
2016  return 0;
2017  }
2018 
2019  /* We check for any bits set in the circuit state mask so that machines
2020  * can say any of the following through their state bitmask:
2021  * "I want to apply to circuits with either streams or no streams"; OR
2022  * "I only want to apply to circuits with streams"; OR
2023  * "I only want to apply to circuits without streams". */
2024  if (!(circpad_circuit_state(circ) & machine->conditions.state_mask))
2025  return 0;
2026 
2027  if (circuit_get_cpath_opened_len(circ) < machine->conditions.min_hops)
2028  return 0;
2029 
2030  return 1;
2031 }
2032 
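/* Conditions sketch (hypothetical machine-author code): a client-side
 * machine that should only run on opened circuits of any client purpose,
 * with at least two open hops, and that is acceptable under reduced
 * padding, might set:
 *
 *   machine->conditions.min_hops = 2;
 *   machine->conditions.state_mask = CIRCPAD_CIRC_OPENED;
 *   machine->conditions.purpose_mask = CIRCPAD_PURPOSE_ALL;
 *   machine->conditions.reduced_padding_ok = 1;
 *
 * These checks are re-evaluated as circuit state changes, which is how
 * machines get shut down once their conditions stop holding. */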
2041 static inline
2044 {
2045  circpad_circuit_state_t retmask = 0;
2046 
2047  if (circ->p_streams)
2048  retmask |= CIRCPAD_CIRC_STREAMS;
2049  else
2050  retmask |= CIRCPAD_CIRC_NO_STREAMS;
2051 
2052  /* We use has_opened to prevent cannibalized circs from flapping. */
2053  if (circ->has_opened)
2054  retmask |= CIRCPAD_CIRC_OPENED;
2055  else
2056  retmask |= CIRCPAD_CIRC_BUILDING;
2057 
2058  if (circ->remaining_relay_early_cells > 0)
2059  retmask |= CIRCPAD_CIRC_HAS_RELAY_EARLY;
2060  else
2061  retmask |= CIRCPAD_CIRC_HAS_NO_RELAY_EARLY;
2062 
2063  return retmask;
2064 }
2065 
2071 circpad_circ_purpose_to_mask(uint8_t circ_purpose)
2072 {
2073  /* Treat OR circ purposes as ignored. They should not be passed here. */
2074  if (BUG(circ_purpose <= CIRCUIT_PURPOSE_OR_MAX_)) {
2075  return 0;
2076  }
2077 
2078  /* Treat new client circuit purposes as "OMG ITS EVERYTHING".
2079  * This also should not happen */
2080  if (BUG(circ_purpose - CIRCUIT_PURPOSE_OR_MAX_ - 1 > 32)) {
2081  return CIRCPAD_PURPOSE_ALL;
2082  }
2083 
2084  /* Convert the purpose to a bit position */
2085  return 1 << (circ_purpose - CIRCUIT_PURPOSE_OR_MAX_ - 1);
2086 }
2087 
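/* Mask arithmetic (worked example): CIRCUIT_PURPOSE_OR_MAX_ is the last
 * relay-side purpose, so the first client purpose maps to bit 0. A circuit
 * whose purpose is, say, CIRCUIT_PURPOSE_OR_MAX_ + 3 yields the mask
 * 1 << (3 - 1) = 0x4, which is then tested against a machine's
 * conditions.purpose_mask in circpad_machine_conditions_met(). */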
2092 static void
2094 {
2095  circuit_t *circ = TO_CIRCUIT(on_circ);
2096 
2098  if (!circpad_machine_conditions_met(on_circ,
2099  circ->padding_machine[i])) {
2100  // Clear machineinfo (frees timers)
2102  // Send padding negotiate stop
2103  circpad_negotiate_padding(on_circ,
2104  circ->padding_machine[i]->machine_num,
2105  circ->padding_machine[i]->target_hopnum,
2106  CIRCPAD_COMMAND_STOP);
2107  }
2108  } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
2109 }
2110 
2121 STATIC void
2123  smartlist_t *machines_sl)
2124 {
2125  circuit_t *circ = TO_CIRCUIT(on_circ);
2126 
2127 #ifdef TOR_UNIT_TESTS
2128  /* Tests don't have to init our padding machines */
2129  if (!machines_sl)
2130  return;
2131 #endif
2132 
2133  /* If padding negotiation failed before, do not try again */
2134  if (on_circ->padding_negotiation_failed)
2135  return;
2136 
2138  /* If there is a padding machine info, this index is occupied.
2139  * No need to check conditions for this index. */
2140  if (circ->padding_info[i])
2141  continue;
2142 
2143  /* We have a free machine index. Check the origin padding
2144  * machines in reverse order, so that more recently added
2145  * machines take priority over older ones. */
2146  SMARTLIST_FOREACH_REVERSE_BEGIN(machines_sl,
2148  machine) {
2149  /* Machine definitions have a specific target machine index.
2150  * This is so event ordering is deterministic with respect
2151  * to which machine gets events first when there are two
2152  * machines installed on a circuit. Make sure we only
2153  * add this machine if its target machine index is free. */
2154  if (machine->machine_index == i &&
2155  circpad_machine_conditions_met(on_circ, machine)) {
2156 
2157  // We can only replace this machine if the target hopnum
2158  // is the same, otherwise we'll get invalid data
2159  if (circ->padding_machine[i]) {
2160  if (circ->padding_machine[i]->target_hopnum !=
2161  machine->target_hopnum)
2162  continue;
2163  /* Replace it. (Don't free - is global). */
2164  circ->padding_machine[i] = NULL;
2165  }
2166 
2167  /* Set up the machine immediately so that the slot is occupied.
2168  * We will tear it down on error return, or if there is an error
2169  * response from the relay. */
2170  circpad_setup_machine_on_circ(circ, machine);
2171  if (circpad_negotiate_padding(on_circ, machine->machine_num,
2172  machine->target_hopnum,
2173  CIRCPAD_COMMAND_START) < 0) {
2174  log_info(LD_CIRC,
2175  "Padding not negotiated. Cleaning machine from circuit %u",
2176  CIRCUIT_IS_ORIGIN(circ) ?
2177  TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
2179  circ->padding_machine[i] = NULL;
2180  on_circ->padding_negotiation_failed = 1;
2181  } else {
2182  /* Success. Don't try any more machines */
2183  return;
2184  }
2185  }
2186  } SMARTLIST_FOREACH_END(machine);
2187  } FOR_EACH_CIRCUIT_MACHINE_END;
2188 }
2189 
2196 void
2198 {
2199  /* Since our padding conditions do not specify a max_hops,
2200  * all we can do is add machines here */
2202 }
2203 
2210 void
2212 {
2215 }
2216 
2223 void
2225 {
2228 }
2229 
2237 void
2239 {
2242 }
2243 
2252 void
2254 {
2257 }
2258 
2267 void
2269 {
2272 }
2273 
2283 bool
2285  crypt_path_t *from_hop)
2286 {
2287  crypt_path_t *target_hop = NULL;
2288  if (!CIRCUIT_IS_ORIGIN(circ))
2289  return 0;
2290 
2292  /* We have to check padding_machine and not padding_info/active
2293  * machines here because padding may arrive after we shut down a
2294  * machine. The info is gone, but the padding_machine waits
2295  * for the padding_negotiated response to come back. */
2296  if (!circ->padding_machine[i])
2297  continue;
2298 
2299  target_hop = circuit_get_cpath_hop(TO_ORIGIN_CIRCUIT(circ),
2300  circ->padding_machine[i]->target_hopnum);
2301 
2302  if (target_hop == from_hop)
2303  return 1;
2304  } FOR_EACH_CIRCUIT_MACHINE_END;
2305 
2306  return 0;
2307 }
2308 
2319 void
2321  cell_direction_t dir)
2322 {
2323  // We should never see unrecognized cells at origin.
2324  // Our caller emits a warn when this happens.
2325  if (CIRCUIT_IS_ORIGIN(circ)) {
2326  return;
2327  }
2328 
2329  if (dir == CELL_DIRECTION_OUT) {
2330  /* When direction is out (away from origin), then we received non-padding
2331  cell coming from the origin to us. */
2333  } else if (dir == CELL_DIRECTION_IN) {
2334  /* It's in and not origin, so the cell is going away from us.
2335  * So we are relaying a non-padding cell towards the origin. */
2337  }
2338 }
2339 
2347 void
2349  uint8_t relay_command,
2350  crypt_path_t *layer_hint)
2351 {
2352  if (relay_command == RELAY_COMMAND_DROP) {
2354 
2355  if (CIRCUIT_IS_ORIGIN(circ)) {
2356  if (circpad_padding_is_from_expected_hop(circ, layer_hint)) {
2358  } else {
2359  /* This is unexpected padding. Ignore it for now. */
2360  return;
2361  }
2362  }
2363 
2364  /* The cell should be recognized by now, which means that we are on the
2365  destination, which means that we received a padding cell. We might be
2366  the client or the Middle node, still, because leaky-pipe. */
2368  log_fn(LOG_INFO, LD_CIRC, "Got padding cell on %s circuit %u.",
2369  CIRCUIT_IS_ORIGIN(circ) ? "origin" : "non-origin",
2370  CIRCUIT_IS_ORIGIN(circ) ?
2371  TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
2372  } else {
2373  /* We received a non-padding cell on the edge */
2375  }
2376 }
2377 
2384 void
2386  uint8_t relay_command)
2387 {
2388  /* RELAY_COMMAND_DROP is the multi-hop (aka circuit-level) padding cell in
2389  * tor. (CELL_PADDING is a channel-level padding cell, which is not relayed
2390  * or processed here).
2391  *
2392  * We do generate events for PADDING_NEGOTIATE and PADDING_NEGOTIATED cells.
2393  */
2394  if (relay_command == RELAY_COMMAND_DROP) {
2395  /* Optimization: The event for RELAY_COMMAND_DROP is sent directly
2396  * from circpad_send_padding_cell_for_callback(). This is to avoid
2397  * putting a cell_t and a relay_header_t on the stack repeatedly
2398  * if we decide to send a long train of padding cells back-to-back
2399  * with 0 delay. So we do nothing here. */
2400  return;
2401  } else {
2402  /* This is a non-padding cell sent from the client or from
2403  * this node. */
2404  circpad_cell_event_nonpadding_sent(circ);
2405  }
2406 }
2407 
2411 void
2412 circpad_machine_states_init(circpad_machine_spec_t *machine,
2413  circpad_statenum_t num_states)
2414 {
2415  if (BUG(num_states > CIRCPAD_MAX_MACHINE_STATES)) {
2416  num_states = CIRCPAD_MAX_MACHINE_STATES;
2417  }
2418 
2419  machine->num_states = num_states;
2420  machine->states = tor_malloc_zero(sizeof(circpad_state_t)*num_states);
2421 
2422  /* Initialize the default next state for all events to
2423  * "ignore" -- if events aren't specified, they are ignored. */
2424  for (circpad_statenum_t s = 0; s < num_states; s++) {
2425  for (int e = 0; e < CIRCPAD_NUM_EVENTS; e++) {
2426  machine->states[s].next_state[e] = CIRCPAD_STATE_IGNORE;
2427  }
2428  }
2429 }
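The helper above only allocates the state array and points every event at CIRCPAD_STATE_IGNORE; callers then overwrite just the transitions they need. The fragment below is an illustrative sketch, not part of this file (the name example_minimal_machine_init is invented), showing that pattern with the same constants the unit-test machines further down use:

/* Illustrative sketch only: wire up a two-state machine skeleton.
 * Assumes the circuitpadding.h declarations used throughout this file. */
static void
example_minimal_machine_init(circpad_machine_spec_t *m)
{
  m->is_origin_side = 1;
  m->target_hopnum = 2;

  /* Allocate START and BURST; every unspecified event keeps the
   * CIRCPAD_STATE_IGNORE default installed above. */
  circpad_machine_states_init(m, 2);

  m->states[CIRCPAD_STATE_START].
    next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_BURST;
  m->states[CIRCPAD_STATE_BURST].
    next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;

  /* A real machine would also fill in a histogram or length distribution
   * before being registered; see circpad_register_padding_machine() below. */
}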
2430 
2431 static void
2432 circpad_setup_machine_on_circ(circuit_t *on_circ,
2433  const circpad_machine_spec_t *machine)
2434 {
2435  if (CIRCUIT_IS_ORIGIN(on_circ) && !machine->is_origin_side) {
2436  log_fn(LOG_WARN, LD_BUG,
2437  "Can't set up non-origin machine on origin circuit!");
2438  return;
2439  }
2440 
2441  if (!CIRCUIT_IS_ORIGIN(on_circ) && machine->is_origin_side) {
2442  log_fn(LOG_WARN, LD_BUG,
2443  "Can't set up origin machine on non-origin circuit!");
2444  return;
2445  }
2446 
2447  tor_assert_nonfatal(on_circ->padding_machine[machine->machine_index]
2448  == NULL);
2449  tor_assert_nonfatal(on_circ->padding_info[machine->machine_index] == NULL);
2450 
2451  /* Log message */
2452  if (CIRCUIT_IS_ORIGIN(on_circ)) {
2453  log_info(LD_CIRC, "Registering machine %s to origin circ %u (%d)",
2454  machine->name,
2455  TO_ORIGIN_CIRCUIT(on_circ)->global_identifier, on_circ->purpose);
2456  } else {
2457  log_info(LD_CIRC, "Registering machine %s to non-origin circ (%d)",
2458  machine->name, on_circ->purpose);
2459  }
2460 
2461  on_circ->padding_info[machine->machine_index] =
2462  circpad_circuit_machineinfo_new(on_circ, machine->machine_index);
2463  on_circ->padding_machine[machine->machine_index] = machine;
2464 }
2465 
2467 static bool
2468 padding_machine_state_is_valid(const circpad_state_t *state)
2469 {
2470  int b;
2471  uint32_t tokens_count = 0;
2472  circpad_delay_t prev_bin_edge = 0;
2473 
2474  /* We only validate histograms */
2475  if (!state->histogram_len) {
2476  return true;
2477  }
2478 
2479  /* We need at least two bins in a histogram */
2480  if (state->histogram_len < 2) {
2481  log_warn(LD_CIRC, "You can't have a histogram with less than 2 bins");
2482  return false;
2483  }
2484 
2485  /* For each machine state, if it's a histogram, make sure all the
2486  * histogram edges are well defined (i.e. are strictly monotonic). */
2487  for (b = 0 ; b < state->histogram_len ; b++) {
2488  /* Check that histogram edges are strictly increasing. Ignore the first
2489  * edge since it can be zero. */
2490  if (prev_bin_edge >= state->histogram_edges[b] && b > 0) {
2491  log_warn(LD_CIRC, "Histogram edges are not increasing [%u/%u]",
2492  prev_bin_edge, state->histogram_edges[b]);
2493  return false;
2494  }
2495 
2496  prev_bin_edge = state->histogram_edges[b];
2497 
2498  /* Also count the number of tokens as we go through the histogram states */
2499  tokens_count += state->histogram[b];
2500  }
2501  /* Verify that the total number of tokens is correct */
2502  if (tokens_count != state->histogram_total_tokens) {
2503  log_warn(LD_CIRC, "Histogram token count is wrong [%u/%u]",
2504  tokens_count, state->histogram_total_tokens);
2505  return false;
2506  }
2507 
2508  return true;
2509 }
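Concretely, a state passes the checks above when its bin edges increase strictly and its per-bin tokens sum to histogram_total_tokens. The values below are a hypothetical passing configuration, not one shipped in tor (the helper name example_fill_valid_histogram is invented):

/* Hypothetical example of a histogram the validator above accepts:
 * strictly increasing edges, and 2+3+1 == 6 total tokens. */
static void
example_fill_valid_histogram(circpad_state_t *state)
{
  state->histogram_len = 3;
  state->histogram_edges[0] = 500;      /* usec */
  state->histogram_edges[1] = 10000;
  state->histogram_edges[2] = 1000000;

  state->histogram[0] = 2;
  state->histogram[1] = 3;
  state->histogram[2] = 1;
  state->histogram_total_tokens = 6;    /* must equal the sum of the bins */
}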
2510 
2512 static bool
2513 padding_machine_is_valid(const circpad_machine_spec_t *machine)
2514 {
2515  int i;
2516 
2517  /* Validate the histograms of the padding machine */
2518  for (i = 0 ; i < machine->num_states ; i++) {
2519  if (!padding_machine_state_is_valid(&machine->states[i])) {
2520  return false;
2521  }
2522  }
2523 
2524  return true;
2525 }
2526 
2527 /* Validate and register <b>machine</b> into <b>machine_list</b>. If
2528  * <b>machine_list</b> is NULL, then just validate. */
2529 void
2530 circpad_register_padding_machine(circpad_machine_spec_t *machine,
2531  smartlist_t *machine_list)
2532 {
2533  if (!padding_machine_is_valid(machine)) {
2534  log_warn(LD_CIRC, "Machine #%u is invalid. Ignoring.",
2535  machine->machine_num);
2536  return;
2537  }
2538 
2539  if (machine_list) {
2540  smartlist_add(machine_list, machine);
2541  }
2542 }
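As a usage sketch (the caller below is hypothetical): passing NULL as the list gives a validate-only dry run, while passing one of the global lists both validates and registers, which is how the unit-test machines below use it:

/* Hypothetical caller of the registration helper above. */
static void
example_register(circpad_machine_spec_t *spec)
{
  /* Validate-only: with a NULL list, an invalid spec is logged and dropped. */
  circpad_register_padding_machine(spec, NULL);

  /* Register for real: number the machine after its position in the list,
   * as the machines in this file do, then add it. */
  spec->machine_num = smartlist_len(origin_padding_machines);
  circpad_register_padding_machine(spec, origin_padding_machines);
}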
2543 
2544 #ifdef TOR_UNIT_TESTS
2545 /* These padding machines are only used for tests pending #28634. */
2546 static void
2547 circpad_circ_client_machine_init(void)
2548 {
2549  circpad_machine_spec_t *circ_client_machine
2550  = tor_malloc_zero(sizeof(circpad_machine_spec_t));
2551 
2552  circ_client_machine->conditions.min_hops = 2;
2553  circ_client_machine->conditions.state_mask =
2554  CIRCPAD_CIRC_BUILDING|CIRCPAD_CIRC_OPENED|CIRCPAD_CIRC_HAS_RELAY_EARLY;
2555  circ_client_machine->conditions.purpose_mask = CIRCPAD_PURPOSE_ALL;
2556  circ_client_machine->conditions.reduced_padding_ok = 1;
2557 
2558  circ_client_machine->target_hopnum = 2;
2559  circ_client_machine->is_origin_side = 1;
2560 
2561  /* Start, gap, burst */
2562  circpad_machine_states_init(circ_client_machine, 3);
2563 
2564  circ_client_machine->states[CIRCPAD_STATE_START].
2565  next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
2566 
2567  circ_client_machine->states[CIRCPAD_STATE_BURST].
2568  next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
2569  circ_client_machine->states[CIRCPAD_STATE_BURST].
2570  next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_BURST;
2571 
2572  /* If we are in burst state, and we send a non-padding cell, then we cancel
2573  the timer for the next padding cell:
2574  We don't want to send fake extends when actual extends are going on */
2575  circ_client_machine->states[CIRCPAD_STATE_BURST].
2576  next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_CANCEL;
2577 
2578  circ_client_machine->states[CIRCPAD_STATE_BURST].
2579  next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;
2580 
2581  circ_client_machine->states[CIRCPAD_STATE_BURST].token_removal =
2582  CIRCPAD_TOKEN_REMOVAL_CLOSEST_USEC;
2583 
2584  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_len = 2;
2585  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_edges[0]= 500;
2586  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_edges[1]= 1000000;
2587 
2588  /* We have 5 tokens in the histogram, which means that all circuits will look
2589  * like they have 7 hops (since we start this machine after the second hop,
2590  * and tokens are decremented for any valid hops, and fake extends are
2591  * used after that -- 2+5==7). */
2592  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram[0] = 5;
2593 
2594  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_total_tokens = 5;
2595 
2596  circ_client_machine->machine_num = smartlist_len(origin_padding_machines);
2597  circpad_register_padding_machine(circ_client_machine,
2598  origin_padding_machines);
2599 }
2600 
2601 static void
2602 circpad_circ_responder_machine_init(void)
2603 {
2604  circpad_machine_spec_t *circ_responder_machine
2605  = tor_malloc_zero(sizeof(circpad_machine_spec_t));
2606 
2607  /* Shut down the machine after we've sent enough packets */
2608  circ_responder_machine->should_negotiate_end = 1;
2609 
2610  /* The relay-side doesn't care what hopnum it is, but for consistency,
2611  * let's match the client */
2612  circ_responder_machine->target_hopnum = 2;
2613  circ_responder_machine->is_origin_side = 0;
2614 
2615  /* Start, gap, burst */
2616  circpad_machine_states_init(circ_responder_machine, 3);
2617 
2618  /* These are the settings of the state machine. In the future we are going
2619  to serialize this into the consensus or the torrc */
2620 
2621  /* We transition to the burst state on padding receive and on non-padding
2622  * receive */
2623  circ_responder_machine->states[CIRCPAD_STATE_START].
2624  next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_BURST;
2625  circ_responder_machine->states[CIRCPAD_STATE_START].
2626  next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
2627 
2628  /* Inside the burst state we _stay_ in the burst state when a non-padding
2629  * cell is sent */
2630  circ_responder_machine->states[CIRCPAD_STATE_BURST].
2631  next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_BURST;
2632 
2633  /* Inside the burst state we transition to the gap state when we receive a
2634  * padding cell */
2635  circ_responder_machine->states[CIRCPAD_STATE_BURST].
2636  next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_GAP;
2637 
2638  /* These describe the padding characteristics when in burst state */
2639 
2640  /* use_rtt_estimate tries to estimate how long padding cells take to go from
2641  C->M, and uses that as the base of the histogram */
2642  circ_responder_machine->states[CIRCPAD_STATE_BURST].use_rtt_estimate = 1;
2643  /* The histogram is 2 bins: an empty one, and infinity */
2644  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_len = 2;
2645  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_edges[0]= 500;
2646  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_edges[1] =
2647  1000000;
2648  /* During burst state we wait forever for padding to arrive.
2649 
2650  We are waiting for a padding cell from the client to come in, so that we
2651  can respond and imitate what an extend looks like */
2652  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram[0] = 0;
2653  // Only infinity bin:
2654  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram[1] = 1;
2655  circ_responder_machine->states[CIRCPAD_STATE_BURST].
2656  histogram_total_tokens = 1;
2657 
2658  /* From the gap state, we _stay_ in the gap state, when we receive padding
2659  * or non-padding */
2660  circ_responder_machine->states[CIRCPAD_STATE_GAP].
2661  next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_GAP;
2662  circ_responder_machine->states[CIRCPAD_STATE_GAP].
2663  next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_GAP;
2664 
2665  /* And from the gap state, we go to the end, when the bins are empty or a
2666  * non-padding cell is sent */
2667  circ_responder_machine->states[CIRCPAD_STATE_GAP].
2668  next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;
2669  circ_responder_machine->states[CIRCPAD_STATE_GAP].
2670  next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_END;
2671 
2672  // FIXME: Tune this histogram
2673 
2674  /* The gap state is the delay you wait after you receive a padding cell
2675  before you send a padding response */
2676  circ_responder_machine->states[CIRCPAD_STATE_GAP].use_rtt_estimate = 1;
2677  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_len = 6;
2678  /* Specify histogram bins */
2679  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[0]= 500;
2680  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[1]= 1000;
2681  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[2]= 2000;
2682  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[3]= 4000;
2683  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[4]= 8000;
2684  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[5]= 16000;
2685  /* Specify histogram tokens */
2686  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[0] = 0;
2687  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[1] = 1;
2688  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[2] = 2;
2689  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[3] = 2;
2690  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[4] = 1;
2691  /* Total number of tokens */
2692  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_total_tokens = 6;
2693 
2694  circ_responder_machine->states[CIRCPAD_STATE_GAP].token_removal =
2695  CIRCPAD_TOKEN_REMOVAL_CLOSEST_USEC;
2696 
2697  circ_responder_machine->machine_num = smartlist_len(relay_padding_machines);
2698  circpad_register_padding_machine(circ_responder_machine,
2699  relay_padding_machines);
2700 }
2701 #endif /* defined(TOR_UNIT_TESTS) */
2702 
2709 void
2710 circpad_machines_init(void)
2711 {
2712  tor_assert_nonfatal(origin_padding_machines == NULL);
2713  tor_assert_nonfatal(relay_padding_machines == NULL);
2714 
2715  origin_padding_machines = smartlist_new();
2716  relay_padding_machines = smartlist_new();
2717 
2718  /* Register machines for hiding client-side intro circuits */
2719  circpad_machine_client_hide_intro_circuits(origin_padding_machines);
2720  circpad_machine_relay_hide_intro_circuits(relay_padding_machines);
2721 
2722  /* Register machines for hiding client-side rendezvous circuits */
2723  circpad_machine_client_hide_rend_circuits(origin_padding_machines);
2724  circpad_machine_relay_hide_rend_circuits(relay_padding_machines);
2725 
2726  // TODO: Parse machines from consensus and torrc
2727 #ifdef TOR_UNIT_TESTS
2728  circpad_circ_client_machine_init();
2729  circpad_circ_responder_machine_init();
2730 #endif
2731 }
2732 
2736 void
2737 circpad_machines_free(void)
2738 {
2739  if (origin_padding_machines) {
2740  SMARTLIST_FOREACH(origin_padding_machines,
2741  circpad_machine_spec_t *,
2742  m, tor_free(m->states); tor_free(m));
2743  smartlist_free(origin_padding_machines);
2744  }
2745 
2746  if (relay_padding_machines) {
2747  SMARTLIST_FOREACH(relay_padding_machines,
2748  circpad_machine_spec_t *,
2749  m, tor_free(m->states); tor_free(m));
2750  smartlist_free(relay_padding_machines);
2751  }
2752 }
2753 
2757 static bool
2758 circpad_node_supports_padding(const node_t *node)
2759 {
2760  if (node->rs) {
2761  log_fn(LOG_INFO, LD_CIRC, "Checking padding: %s",
2762  node->rs->pv.supports_hs_setup_padding ?
2763  "supported" : "unsupported");
2764  return node->rs->pv.supports_hs_setup_padding;
2765  }
2766 
2767  log_fn(LOG_INFO, LD_CIRC, "Empty routerstatus in padding check");
2768  return 0;
2769 }
2770 
2777 MOCK_IMPL(STATIC const node_t *,
2778 circuit_get_nth_node,(origin_circuit_t *circ, int hop))
2779 {
2780  crypt_path_t *iter = circuit_get_cpath_hop(circ, hop);
2781 
2782  if (!iter || iter->state != CPATH_STATE_OPEN)
2783  return NULL;
2784 
2785  return node_get_by_id(iter->extend_info->identity_digest);
2786 }
2787 
2792 static bool
2793 circpad_circuit_supports_padding(origin_circuit_t *circ,
2794  int target_hopnum)
2795 {
2796  const node_t *hop;
2797 
2798  if (!(hop = circuit_get_nth_node(circ, target_hopnum))) {
2799  return 0;
2800  }
2801 
2802  return circpad_node_supports_padding(hop);
2803 }
2804 
2810 signed_error_t
2811 circpad_negotiate_padding(origin_circuit_t *circ,
2812  circpad_machine_num_t machine,
2813  uint8_t target_hopnum,
2814  uint8_t command)
2815 {
2816  circpad_negotiate_t type;
2817  cell_t cell;
2818  ssize_t len;
2819 
2820  /* Check that the target hop lists support for padding in
2821  * its ProtoVer fields */
2822  if (!circpad_circuit_supports_padding(circ, target_hopnum)) {
2823  return -1;
2824  }
2825 
2826  memset(&cell, 0, sizeof(cell_t));
2827  memset(&type, 0, sizeof(circpad_negotiate_t));
2828  // This gets reset to RELAY_EARLY appropriately by
2829  // relay_send_command_from_edge_. At least, it looks that way.
2830  // QQQ-MP-AP: Verify that.
2831  cell.command = CELL_RELAY;
2832 
2833  circpad_negotiate_set_command(&type, command);
2834  circpad_negotiate_set_version(&type, 0);
2835  circpad_negotiate_set_machine_type(&type, machine);
2836 
2837  if ((len = circpad_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
2838  &type)) < 0)
2839  return -1;
2840 
2841  log_fn(LOG_INFO, LD_CIRC,
2842  "Negotiating padding on circuit %u (%d), command %d",
2843  circ->global_identifier, TO_CIRCUIT(circ)->purpose, command);
2844 
2845  return circpad_send_command_to_hop(circ, target_hopnum,
2846  RELAY_COMMAND_PADDING_NEGOTIATE,
2847  cell.payload, len);
2848 }
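For reference, here is a hedged sketch of the client-side call pattern (the wrapper name example_start_machine is invented; the real call site is circpad_add_matching_machines() earlier in this file): once a machine spec has been set up on an origin circuit, the origin asks the machine's target hop to start it, and a negative return means negotiation could not even be attempted:

/* Hypothetical wrapper around the negotiation helper above. */
static int
example_start_machine(origin_circuit_t *circ,
                      const circpad_machine_spec_t *machine)
{
  if (circpad_negotiate_padding(circ, machine->machine_num,
                                machine->target_hopnum,
                                CIRCPAD_COMMAND_START) < 0) {
    /* Either the target hop does not advertise padding support, or the
     * negotiate cell could not be encoded; the caller should tear the
     * machine back down. */
    return -1;
  }
  return 0;
}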
2849 
2855 bool
2856 circpad_padding_negotiated(circuit_t *circ,
2857  circpad_machine_num_t machine,
2858  uint8_t command,
2859  uint8_t response)
2860 {
2861  circpad_negotiated_t type;
2862  cell_t cell;
2863  ssize_t len;
2864 
2865  memset(&cell, 0, sizeof(cell_t));
2866  memset(&type, 0, sizeof(circpad_negotiated_t));
2867  // This gets reset to RELAY_EARLY appropriately by
2868  // relay_send_command_from_edge_. At least, it looks that way.
2869  // QQQ-MP-AP: Verify that.
2870  cell.command = CELL_RELAY;
2871 
2872  circpad_negotiated_set_command(&type, command);
2873  circpad_negotiated_set_response(&type, response);
2874  circpad_negotiated_set_version(&type, 0);
2875  circpad_negotiated_set_machine_type(&type, machine);
2876 
2877  if ((len = circpad_negotiated_encode(cell.payload, CELL_PAYLOAD_SIZE,
2878  &type)) < 0)
2879  return 0;
2880 
2881  /* Use relay_send because we're from the middle to the origin. We don't
2882  * need to specify a target hop or layer_hint. */
2883  return relay_send_command_from_edge(0, circ,
2884  RELAY_COMMAND_PADDING_NEGOTIATED,
2885  (void*)cell.payload,
2886  (size_t)len, NULL) == 0;
2887 }
2888 
2900 {
2901  int retval = 0;
2902  circpad_negotiate_t *negotiate;
2903 
2904  if (CIRCUIT_IS_ORIGIN(circ)) {
2905  log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
2906  "Padding negotiate cell unsupported at origin (circuit %u)",
2907  TO_ORIGIN_CIRCUIT(circ)->global_identifier);
2908  return -1;
2909  }
2910 
2911  if (circpad_negotiate_parse(&negotiate, cell->payload+RELAY_HEADER_SIZE,
2912  CELL_PAYLOAD_SIZE-RELAY_HEADER_SIZE) < 0) {
2913  log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
2914  "Received malformed PADDING_NEGOTIATE cell; dropping.");
2915  return -1;
2916  }
2917 
2918  if (negotiate->command == CIRCPAD_COMMAND_STOP) {
2919  /* Free the machine corresponding to this machine type */
2920  if (free_circ_machineinfos_with_machine_num(circ,
2921  negotiate->machine_type)) {
2922  log_info(LD_CIRC, "Received STOP command for machine %u",
2923  negotiate->machine_type);
2924  goto done;
2925  }
2926  log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
2927  "Received circuit padding stop command for unknown machine.");
2928  goto err;
2929  } else if (negotiate->command == CIRCPAD_COMMAND_START) {
2930  SMARTLIST_FOREACH_BEGIN(relay_padding_machines,
2931  const circpad_machine_spec_t *, m) {
2932  if (m->machine_num == negotiate->machine_type) {
2933  circpad_setup_machine_on_circ(circ, m);
2934  circpad_cell_event_nonpadding_received(circ);
2935  goto done;
2936  }
2937  } SMARTLIST_FOREACH_END(m);
2938  }
2939 
2940  err:
2941  retval = -1;
2942 
2943  done:
2944  circpad_padding_negotiated(circ, negotiate->machine_type,
2945  negotiate->command,
2946  (retval == 0) ? CIRCPAD_RESPONSE_OK : CIRCPAD_RESPONSE_ERR);
2947  circpad_negotiate_free(negotiate);
2948 
2949  return retval;
2950 }
2951 
2960 signed_error_t
2961 circpad_handle_padding_negotiated(circuit_t *circ, cell_t *cell,
2962  crypt_path_t *layer_hint)
2963 {
2964  circpad_negotiated_t *negotiated;
2965 
2966  if (!CIRCUIT_IS_ORIGIN(circ)) {
2967  log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
2968  "Padding negotiated cell unsupported at non-origin.");
2969  return -1;
2970  }
2971 
2972  /* Verify this came from the expected hop */
2973  if (!circpad_padding_is_from_expected_hop(circ, layer_hint)) {
2974  log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
2975  "Padding negotiated cell from wrong hop on circuit %u",
2976  TO_ORIGIN_CIRCUIT(circ)->global_identifier);
2977  return -1;
2978  }
2979 
2980  if (circpad_negotiated_parse(&negotiated, cell->payload+RELAY_HEADER_SIZE,
2981  CELL_PAYLOAD_SIZE-RELAY_HEADER_SIZE) < 0) {
2982  log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
2983  "Received malformed PADDING_NEGOTIATED cell on circuit %u; "
2984  "dropping.", TO_ORIGIN_CIRCUIT(circ)->global_identifier);
2985  return -1;
2986  }
2987 
2988  if (negotiated->command == CIRCPAD_COMMAND_STOP) {
2989  log_info(LD_CIRC,
2990  "Received STOP command on PADDING_NEGOTIATED for circuit %u",
2991  TO_ORIGIN_CIRCUIT(circ)->global_identifier);
2992  /* There may not be a padding_info here if we shut down the
2993  * machine in circpad_shutdown_old_machines(). Or, if
2994  * circpad_add_matching_machines() added a new machine,
2995  * there may be a padding_machine for a different machine num
2996  * than this response. */
2997  free_circ_machineinfos_with_machine_num(circ, negotiated->machine_type);
2998  } else if (negotiated->command == CIRCPAD_COMMAND_START &&
2999  negotiated->response == CIRCPAD_RESPONSE_ERR) {
3000  // This can happen due to consensus drift; free the machines
3001  // and be sad.
3002  free_circ_machineinfos_with_machine_num(circ, negotiated->machine_type);
3003  TO_ORIGIN_CIRCUIT(circ)->padding_negotiation_failed = 1;
3004  log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
3005  "Middle node did not accept our padding request on circuit %u (%d)",
3006  TO_ORIGIN_CIRCUIT(circ)->global_identifier,
3007  circ->purpose);
3008  }
3009 
3010  circpad_negotiated_free(negotiated);
3011  return 0;
3012 }
3013 
3015 STATIC void
3016 machine_spec_free_(circpad_machine_spec_t *m)
3017 {
3018  if (!m) return;
3019 
3020  tor_free(m->states);
3021  tor_free(m);
3022 }
3023 
3025 void
3026 circpad_free_all(void)
3027 {
3028  if (origin_padding_machines) {
3029  SMARTLIST_FOREACH_BEGIN(origin_padding_machines,
3030  circpad_machine_spec_t *, m) {
3031  machine_spec_free(m);
3032  } SMARTLIST_FOREACH_END(m);
3033  smartlist_free(origin_padding_machines);
3034  }
3035  if (relay_padding_machines) {
3036  SMARTLIST_FOREACH_BEGIN(relay_padding_machines,
3037  circpad_machine_spec_t *, m) {
3038  machine_spec_free(m);
3039  } SMARTLIST_FOREACH_END(m);
3040  smartlist_free(relay_padding_machines);
3041  }
3042 }
3043 
3044 /* Serialization */
3045 // TODO: Should we use keyword=value here? Are there helpers for that?
3046 #if 0
3047 static void
3048 circpad_state_serialize(const circpad_state_t *state,
3049  smartlist_t *chunks)
3050 {
3051  smartlist_add_asprintf(chunks, " %u", state->histogram[0]);
3052  for (int i = 1; i < state->histogram_len; i++) {
3053  smartlist_add_asprintf(chunks, ",%u",
3054  state->histogram[i]);
3055  }
3056 
3057  smartlist_add_asprintf(chunks, " 0x%x",
3058  state->transition_cancel_events);
3059 
3060  for (int i = 0; i < CIRCPAD_NUM_STATES; i++) {
3061  smartlist_add_asprintf(chunks, ",0x%x",
3062  state->transition_events[i]);
3063  }
3064 
3065  smartlist_add_asprintf(chunks, " %u %u",
3066  state->use_rtt_estimate,
3067  state->token_removal);
3068 }
3069 
3070 char *
3072 {
3073  smartlist_t *chunks = smartlist_new();
3074  char *out;
3075  (void)machine;
3076 
3077  circpad_state_serialize(&machine->start, chunks);
3078  circpad_state_serialize(&machine->gap, chunks);
3079  circpad_state_serialize(&machine->burst, chunks);
3080 
3081  out = smartlist_join_strings(chunks, "", 0, NULL);
3082 
3083  SMARTLIST_FOREACH(chunks, char *, cp, tor_free(cp));
3084  smartlist_free(chunks);
3085  return out;
3086 }
3087 
3088 // XXX: Writeme
3089 const circpad_machine_spec_t *
3090 circpad_string_to_machine(const char *str)
3091 {
3092  (void)str;
3093  return NULL;
3094 }
3095 
3096 #endif /* 0 */