Line data Source code
1 : /* Copyright (c) 2001-2004, Roger Dingledine.
2 : * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
3 : * Copyright (c) 2007-2021, The Tor Project, Inc. */
4 : /* See LICENSE for licensing information */
5 :
6 : #include "core/or/or.h"
7 :
8 : #include "app/config/config.h"
9 : #include "core/mainloop/connection.h"
10 : #include "feature/dircache/conscache.h"
11 : #include "feature/dircache/consdiffmgr.h"
12 : #include "feature/dircommon/directory.h"
13 : #include "feature/dircache/dirserv.h"
14 : #include "feature/nodelist/microdesc.h"
15 : #include "feature/nodelist/routerlist.h"
16 : #include "feature/relay/router.h"
17 : #include "feature/relay/routermode.h"
18 : #include "feature/stats/predict_ports.h"
19 :
20 : #include "feature/dircache/cached_dir_st.h"
21 : #include "feature/dircommon/dir_connection_st.h"
22 : #include "feature/nodelist/extrainfo_st.h"
23 : #include "feature/nodelist/microdesc_st.h"
24 : #include "feature/nodelist/routerinfo_st.h"
25 : #include "feature/nodelist/routerlist_st.h"
26 :
27 : #include "lib/compress/compress.h"
28 :
29 : /**
30 : * \file dirserv.c
31 : * \brief Directory server core implementation. Manages directory
32 : * contents and generates directory documents.
33 : *
34 : * This module implements most of the directory cache functionality, and some of
35 : * the directory authority functionality. The directory.c module delegates
36 : * here in order to handle incoming requests from clients, via
37 : * connection_dirserv_flushed_some() and its kin. In order to save RAM, this
38 : * module is responsible for spooling directory objects (in whole or in part)
39 : * onto buf_t instances, and then closing the dir_connection_t once the
40 : * objects are totally flushed.
41 : *
42 : * The directory.c module also delegates here for handling descriptor uploads
43 : * via dirserv_add_multiple_descriptors().
44 : *
45 : * Additionally, this module handles some aspects of voting, including:
46 : * deciding how to vote on individual flags (based on decisions reached in
47 : * rephist.c), formatting routerstatus lines, and deciding which relays to
48 : * include in an authority's vote. (TODO: Those functions could profitably be
49 : * split off. They only live in this file because historically they were
50 : * shared among the v1, v2, and v3 directory code.)
51 : */
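For orientation, here is a rough sketch (not part of the original file) of how a directory cache might drive these entry points when answering a descriptor request. The helper name and the choice of spool source are illustrative assumptions; the dirserv_*, dir_conn_*, and connection_* calls are the ones declared in this module and the headers it includes.

/* Hypothetical caller, for illustration only. Assumes <conn> is already in
 * DIR_CONN_STATE_SERVER_WRITING. */
static int
example_answer_descriptor_request(dir_connection_t *conn, const char *key)
{
  const char *msg = NULL;
  conn->spool = smartlist_new();

  /* Translate the request key ("all", "authority", "d/...", "fp/...") into
   * a list of spooled_resource_t objects. */
  if (dirserv_get_routerdesc_spool(conn->spool, key,
                                   DIR_SPOOL_SERVER_BY_FP,
                                   connection_dir_is_encrypted(conn),
                                   &msg) < 0) {
    dir_conn_clear_spool(conn);
    return -1; /* a real caller would send a 404 along with <msg> */
  }

  /* Drop absent or expired objects and estimate the reply size; a real
   * caller would use size_guess when writing the HTTP headers. */
  size_t size_guess = 0;
  int n_expired = 0;
  dirserv_spool_remove_missing_and_guess_size(conn, 0 /* no cutoff */,
                                              0 /* uncompressed */,
                                              &size_guess, &n_expired);
  (void) size_guess;
  (void) n_expired;

  /* Start filling the outbuf; the main loop keeps calling
   * connection_dirserv_flushed_some() as the buffer drains. */
  return connection_dirserv_flushed_some(conn);
}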
52 :
53 : static void clear_cached_dir(cached_dir_t *d);
54 : static const signed_descriptor_t *get_signed_descriptor_by_fp(
55 : const uint8_t *fp,
56 : int extrainfo);
57 :
58 : static int spooled_resource_lookup_body(const spooled_resource_t *spooled,
59 : int conn_is_encrypted,
60 : const uint8_t **body_out,
61 : size_t *size_out,
62 : time_t *published_out);
63 : static cached_dir_t *spooled_resource_lookup_cached_dir(
64 : const spooled_resource_t *spooled,
65 : time_t *published_out);
66 : static cached_dir_t *lookup_cached_dir_by_fp(const uint8_t *fp);
67 :
68 : /********************************************************************/
69 :
70 : /* A set of functions to answer questions about how we'd like to behave
71 : * as a directory mirror */
72 :
73 : /** Return true iff we want to serve certificates for authorities
74 : * that we don't acknowledge as authorities ourselves.
75 : * Use we_want_to_fetch_unknown_auth_certs to check if we want to fetch
76 : * and keep these certificates.
77 : */
78 : int
79 1 : directory_caches_unknown_auth_certs(const or_options_t *options)
80 : {
81 1 : return dir_server_mode(options) || options->BridgeRelay;
82 : }
83 :
84 : /** Return 1 if we want to fetch and serve descriptors, networkstatuses, etc.
85 : * Else return 0.
86 : * Check options->DirPort_set and directory_permits_begindir_requests()
87 : * to see if we are willing to serve these directory documents to others via
88 : * the DirPort and begindir-over-ORPort, respectively.
89 : *
90 : * To check if we should fetch documents, use we_want_to_fetch_flavor and
91 : * we_want_to_fetch_unknown_auth_certs instead of this function.
92 : */
93 : int
94 124 : directory_caches_dir_info(const or_options_t *options)
95 : {
96 124 : if (options->BridgeRelay || dir_server_mode(options))
97 0 : return 1;
98 124 : if (!server_mode(options) || !advertised_server_mode())
99 124 : return 0;
100 : /* We need an up-to-date view of network info if we're going to try to
101 : * block exit attempts from unknown relays. */
102 0 : return ! router_my_exit_policy_is_reject_star() &&
103 0 : should_refuse_unknown_exits(options);
104 : }
105 :
106 : /** Return 1 if we want to allow remote clients to send us directory
107 : * requests via the "begin_dir" interface, which doesn't require
108 : * having any separate port open. */
109 : int
110 0 : directory_permits_begindir_requests(const or_options_t *options)
111 : {
112 0 : return options->BridgeRelay != 0 || dir_server_mode(options);
113 : }
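Taken together, these three predicates separate "what we serve" from "what we fetch". A minimal sketch, assuming a caller that already has the current options, of how they might be combined to decide whether to answer a tunneled (begindir) directory request:

/* Illustration only: not a function from the original file. */
static int
example_may_answer_begindir(const or_options_t *options)
{
  return directory_caches_dir_info(options) &&
         directory_permits_begindir_requests(options);
}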
114 :
115 : /********************************************************************/
116 :
117 : /** Map from flavor name to the cached_dir_t for the v3 consensuses that we're
118 : * currently serving. */
119 : static strmap_t *cached_consensuses = NULL;
120 :
121 : /** Decrement the reference count on <b>d</b>, and free it if it no longer has
122 : * any references. */
123 : void
124 19 : cached_dir_decref(cached_dir_t *d)
125 : {
126 19 : if (!d || --d->refcnt > 0)
127 0 : return;
128 19 : clear_cached_dir(d);
129 19 : tor_free(d);
130 : }
131 :
132 : /** Allocate and return a new cached_dir_t containing the string <b>s</b>,
133 : * published at <b>published</b>. Takes ownership of <b>s</b>. */
134 : cached_dir_t *
135 19 : new_cached_dir(char *s, time_t published)
136 : {
137 19 : cached_dir_t *d = tor_malloc_zero(sizeof(cached_dir_t));
138 19 : d->refcnt = 1;
139 19 : d->dir = s;
140 19 : d->dir_len = strlen(s);
141 19 : d->published = published;
142 19 : if (tor_compress(&(d->dir_compressed), &(d->dir_compressed_len),
143 : d->dir, d->dir_len, ZLIB_METHOD)) {
144 0 : log_warn(LD_BUG, "Error compressing directory");
145 : }
146 19 : return d;
147 : }
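A brief usage sketch, assuming a caller that has just rendered a document into a string: new_cached_dir() takes ownership of the heap-allocated string it is given and also stores a zlib-compressed copy, and cached_dir_decref() clears and frees everything once the last reference is gone.

/* Illustration only. */
static void
example_cache_document_lifetime(void)
{
  /* new_cached_dir() takes ownership of the string we pass in. */
  cached_dir_t *d = new_cached_dir(tor_strdup("example document\n"),
                                   time(NULL));
  /* ... serve d->dir (or d->dir_compressed) to clients ... */
  cached_dir_decref(d); /* last reference: clears and frees d */
}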
148 :
149 : /** Remove all storage held in <b>d</b>, but do not free <b>d</b> itself. */
150 : static void
151 19 : clear_cached_dir(cached_dir_t *d)
152 : {
153 19 : tor_free(d->dir);
154 19 : tor_free(d->dir_compressed);
155 19 : memset(d, 0, sizeof(cached_dir_t));
156 19 : }
157 :
158 : /** Free all storage held by the cached_dir_t in <b>d</b>. */
159 : static void
160 0 : free_cached_dir_(void *_d)
161 : {
162 0 : cached_dir_t *d;
163 0 : if (!_d)
164 : return;
165 :
166 0 : d = (cached_dir_t *)_d;
167 0 : cached_dir_decref(d);
168 : }
169 :
170 : /** Replace the v3 consensus networkstatus of type <b>flavor_name</b> that
171 : * we're serving with <b>networkstatus</b>, published at <b>published</b>. No
172 : * validation is performed. */
173 : void
174 0 : dirserv_set_cached_consensus_networkstatus(const char *networkstatus,
175 : size_t networkstatus_len,
176 : const char *flavor_name,
177 : const common_digests_t *digests,
178 : const uint8_t *sha3_as_signed,
179 : time_t published)
180 : {
181 0 : cached_dir_t *new_networkstatus;
182 0 : cached_dir_t *old_networkstatus;
183 0 : if (!cached_consensuses)
184 0 : cached_consensuses = strmap_new();
185 :
186 0 : new_networkstatus =
187 0 : new_cached_dir(tor_memdup_nulterm(networkstatus, networkstatus_len),
188 : published);
189 0 : memcpy(&new_networkstatus->digests, digests, sizeof(common_digests_t));
190 0 : memcpy(&new_networkstatus->digest_sha3_as_signed, sha3_as_signed,
191 : DIGEST256_LEN);
192 0 : old_networkstatus = strmap_set(cached_consensuses, flavor_name,
193 : new_networkstatus);
194 0 : if (old_networkstatus)
195 0 : cached_dir_decref(old_networkstatus);
196 0 : }
197 :
198 : /** Return the latest downloaded consensus networkstatus in encoded, signed,
199 : * optionally compressed format, suitable for sending to clients. */
200 1 : MOCK_IMPL(cached_dir_t *,
201 : dirserv_get_consensus,(const char *flavor_name))
202 : {
203 1 : if (!cached_consensuses)
204 : return NULL;
205 0 : return strmap_get(cached_consensuses, flavor_name);
206 : }
207 :
208 : /** As dir_split_resource_into_fingerprints, but instead fills
209 : * <b>spool_out</b> with a list of spooled_resource_t objects (with spool
210 : * source <b>source</b>), one for each fingerprint in <b>resource</b>. */
211 : int
212 6 : dir_split_resource_into_spoolable(const char *resource,
213 : dir_spool_source_t source,
214 : smartlist_t *spool_out,
215 : int *compressed_out,
216 : int flags)
217 : {
218 6 : smartlist_t *fingerprints = smartlist_new();
219 :
220 6 : tor_assert(flags & (DSR_HEX|DSR_BASE64));
221 12 : const size_t digest_len =
222 6 : (flags & DSR_DIGEST256) ? DIGEST256_LEN : DIGEST_LEN;
223 :
224 6 : int r = dir_split_resource_into_fingerprints(resource, fingerprints,
225 : compressed_out, flags);
226 : /* This is not a very efficient implementation XXXX */
227 19 : SMARTLIST_FOREACH_BEGIN(fingerprints, uint8_t *, digest) {
228 13 : spooled_resource_t *spooled =
229 13 : spooled_resource_new(source, digest, digest_len);
230 13 : if (spooled)
231 13 : smartlist_add(spool_out, spooled);
232 13 : tor_free(digest);
233 13 : } SMARTLIST_FOREACH_END(digest);
234 :
235 6 : smartlist_free(fingerprints);
236 6 : return r;
237 : }
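A small usage sketch with made-up fingerprints; the two 40-character hex strings below are placeholders (written as four ten-character literals so the length is easy to check), not real relay identities.

/* Illustration only. */
static void
example_split_fingerprints(void)
{
  const char *resource =
    "AAAAAAAAAA" "AAAAAAAAAA" "AAAAAAAAAA" "AAAAAAAAAA" /* 40 hex chars */
    "+"
    "BBBBBBBBBB" "BBBBBBBBBB" "BBBBBBBBBB" "BBBBBBBBBB";
  smartlist_t *spool = smartlist_new();
  int compressed = 0;
  dir_split_resource_into_spoolable(resource, DIR_SPOOL_SERVER_BY_FP,
                                    spool, &compressed,
                                    DSR_HEX | DSR_SORT_UNIQ);
  /* spool now holds one spooled_resource_t per distinct fingerprint. */
  SMARTLIST_FOREACH(spool, spooled_resource_t *, s, spooled_resource_free(s));
  smartlist_free(spool);
}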
238 :
239 : /** As dirserv_get_routerdescs(), but instead of getting signed_descriptor_t
240 : * pointers, adds spooled_resource_t entries (holding copies of the digests)
241 : * to <b>spool_out</b>, and doesn't use the /tor/server/ prefix. For a /d/
242 : * request, it adds descriptor digests; for other requests, identity digests.
243 : */
244 : int
245 6 : dirserv_get_routerdesc_spool(smartlist_t *spool_out,
246 : const char *key,
247 : dir_spool_source_t source,
248 : int conn_is_encrypted,
249 : const char **msg_out)
250 : {
251 6 : *msg_out = NULL;
252 :
253 6 : if (!strcmp(key, "all")) {
254 1 : const routerlist_t *rl = router_get_routerlist();
255 9 : SMARTLIST_FOREACH_BEGIN(rl->routers, const routerinfo_t *, r) {
256 8 : spooled_resource_t *spooled;
257 16 : spooled = spooled_resource_new(source,
258 8 : (const uint8_t *)r->cache_info.identity_digest,
259 : DIGEST_LEN);
260 : /* Treat "all" requests as if they were unencrypted */
261 8 : conn_is_encrypted = 0;
262 8 : smartlist_add(spool_out, spooled);
263 8 : } SMARTLIST_FOREACH_END(r);
264 5 : } else if (!strcmp(key, "authority")) {
265 1 : const routerinfo_t *ri = router_get_my_routerinfo();
266 1 : if (ri)
267 1 : smartlist_add(spool_out,
268 1 : spooled_resource_new(source,
269 1 : (const uint8_t *)ri->cache_info.identity_digest,
270 : DIGEST_LEN));
271 4 : } else if (!strcmpstart(key, "d/")) {
272 2 : key += strlen("d/");
273 2 : dir_split_resource_into_spoolable(key, source, spool_out, NULL,
274 : DSR_HEX|DSR_SORT_UNIQ);
275 2 : } else if (!strcmpstart(key, "fp/")) {
276 1 : key += strlen("fp/");
277 1 : dir_split_resource_into_spoolable(key, source, spool_out, NULL,
278 : DSR_HEX|DSR_SORT_UNIQ);
279 : } else {
280 1 : *msg_out = "Not found";
281 1 : return -1;
282 : }
283 :
284 5 : if (! conn_is_encrypted) {
285 : /* Remove anything that insists it not be sent unencrypted. */
286 23 : SMARTLIST_FOREACH_BEGIN(spool_out, spooled_resource_t *, spooled) {
287 18 : const uint8_t *body = NULL;
288 18 : size_t bodylen = 0;
289 18 : int r = spooled_resource_lookup_body(spooled, conn_is_encrypted,
290 : &body, &bodylen, NULL);
291 18 : if (r < 0 || body == NULL || bodylen == 0) {
292 6 : SMARTLIST_DEL_CURRENT(spool_out, spooled);
293 6 : spooled_resource_free(spooled);
294 : }
295 18 : } SMARTLIST_FOREACH_END(spooled);
296 : }
297 :
298 5 : if (!smartlist_len(spool_out)) {
299 0 : *msg_out = "Servers unavailable";
300 0 : return -1;
301 : }
302 : return 0;
303 : }
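For reference, a hedged sketch of how the <b>key</b> values above correspond to the request URLs a cache typically sees (the /tor/server/ prefix is stripped before this function is called, as noted above):

/* Illustration only:
 *   "/tor/server/all"            -> key "all"           (every cached descriptor)
 *   "/tor/server/authority"      -> key "authority"     (our own descriptor)
 *   "/tor/server/d/<hex>+<hex>"  -> key "d/<hex>+<hex>"  (descriptor digests)
 *   "/tor/server/fp/<hex>+<hex>" -> key "fp/<hex>+<hex>" (identity digests)
 */
static void
example_spool_own_descriptor(void)
{
  const char *msg = NULL;
  smartlist_t *spool = smartlist_new();
  if (dirserv_get_routerdesc_spool(spool, "authority",
                                   DIR_SPOOL_SERVER_BY_FP,
                                   1 /* encrypted connection */, &msg) == 0) {
    /* On success, spool holds at most one entry: our own descriptor. */
  }
  SMARTLIST_FOREACH(spool, spooled_resource_t *, s, spooled_resource_free(s));
  smartlist_free(spool);
}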
304 :
305 : /* ==========
306 : * Spooling code.
307 : * ========== */
308 :
309 : spooled_resource_t *
310 22 : spooled_resource_new(dir_spool_source_t source,
311 : const uint8_t *digest, size_t digestlen)
312 : {
313 22 : spooled_resource_t *spooled = tor_malloc_zero(sizeof(spooled_resource_t));
314 22 : spooled->spool_source = source;
315 22 : switch (source) {
316 0 : case DIR_SPOOL_NETWORKSTATUS:
317 0 : spooled->spool_eagerly = 0;
318 0 : break;
319 22 : case DIR_SPOOL_SERVER_BY_DIGEST:
320 : case DIR_SPOOL_SERVER_BY_FP:
321 : case DIR_SPOOL_EXTRA_BY_DIGEST:
322 : case DIR_SPOOL_EXTRA_BY_FP:
323 : case DIR_SPOOL_MICRODESC:
324 : default:
325 22 : spooled->spool_eagerly = 1;
326 22 : break;
327 0 : case DIR_SPOOL_CONSENSUS_CACHE_ENTRY:
328 0 : tor_assert_unreached();
329 22 : break;
330 : }
331 22 : tor_assert(digestlen <= sizeof(spooled->digest));
332 22 : if (digest)
333 22 : memcpy(spooled->digest, digest, digestlen);
334 22 : return spooled;
335 : }
336 :
337 : /**
338 : * Create a new spooled_resource_t to spool the contents of <b>entry</b> to
339 : * the user. Return the spooled object on success, or NULL on failure (which
340 : * is probably caused by a failure to map the body of the item from disk).
341 : *
342 : * Adds a reference to entry's reference counter.
343 : */
344 : spooled_resource_t *
345 2 : spooled_resource_new_from_cache_entry(consensus_cache_entry_t *entry)
346 : {
347 2 : spooled_resource_t *spooled = tor_malloc_zero(sizeof(spooled_resource_t));
348 2 : spooled->spool_source = DIR_SPOOL_CONSENSUS_CACHE_ENTRY;
349 2 : spooled->spool_eagerly = 0;
350 2 : consensus_cache_entry_incref(entry);
351 2 : spooled->consensus_cache_entry = entry;
352 :
353 2 : int r = consensus_cache_entry_get_body(entry,
354 : &spooled->cce_body,
355 : &spooled->cce_len);
356 2 : if (r == 0) {
357 : return spooled;
358 : } else {
359 0 : spooled_resource_free(spooled);
360 0 : return NULL;
361 : }
362 : }
363 :
364 : /** Release all storage held by <b>spooled</b>. */
365 : void
366 24 : spooled_resource_free_(spooled_resource_t *spooled)
367 : {
368 24 : if (spooled == NULL)
369 : return;
370 :
371 24 : if (spooled->cached_dir_ref) {
372 0 : cached_dir_decref(spooled->cached_dir_ref);
373 : }
374 :
375 24 : if (spooled->consensus_cache_entry) {
376 2 : consensus_cache_entry_decref(spooled->consensus_cache_entry);
377 : }
378 :
379 24 : tor_free(spooled);
380 : }
381 :
382 : /** When spooling data from a cached_dir_t object, we add at most this
383 : * many bytes per chunk. */
384 : #define DIRSERV_CACHED_DIR_CHUNK_SIZE 8192
385 :
386 : /** Return a rough compression ratio for compressing objects from <b>source</b>.
387 : */
388 : static double
389 0 : estimate_compression_ratio(dir_spool_source_t source)
390 : {
391 : /* We should put in better estimates here, depending on the number of
392 : objects and their type */
393 0 : (void) source;
394 0 : return 0.5;
395 : }
396 :
397 : /** Return an estimated number of bytes needed for transmitting the
398 : * resource in <b>spooled</b> on <b>conn</b>.
399 : *
400 : * As a convenient side-effect, set *<b>published_out</b> to the resource's
401 : * publication time.
402 : */
403 : static size_t
404 18 : spooled_resource_estimate_size(const spooled_resource_t *spooled,
405 : dir_connection_t *conn,
406 : int compressed,
407 : time_t *published_out)
408 : {
409 18 : if (spooled->spool_eagerly) {
410 16 : const uint8_t *body = NULL;
411 16 : size_t bodylen = 0;
412 16 : int r = spooled_resource_lookup_body(spooled,
413 : connection_dir_is_encrypted(conn),
414 : &body, &bodylen,
415 : published_out);
416 16 : if (r == -1 || body == NULL || bodylen == 0)
417 : return 0;
418 14 : if (compressed) {
419 0 : double ratio = estimate_compression_ratio(spooled->spool_source);
420 0 : bodylen = (size_t)(bodylen * ratio);
421 : }
422 14 : return bodylen;
423 : } else {
424 2 : cached_dir_t *cached;
425 2 : if (spooled->consensus_cache_entry) {
426 2 : if (published_out) {
427 2 : consensus_cache_entry_get_valid_after(
428 : spooled->consensus_cache_entry, published_out);
429 : }
430 :
431 2 : return spooled->cce_len;
432 : }
433 0 : if (spooled->cached_dir_ref) {
434 : cached = spooled->cached_dir_ref;
435 : } else {
436 0 : cached = spooled_resource_lookup_cached_dir(spooled,
437 : published_out);
438 : }
439 0 : if (cached == NULL) {
440 : return 0;
441 : }
442 0 : size_t result = compressed ? cached->dir_compressed_len : cached->dir_len;
443 0 : return result;
444 : }
445 : }
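A worked example with invented sizes: an eagerly-spooled descriptor of 4000 bytes requested over a compressed connection is estimated at 4000 * 0.5 = 2000 bytes using the placeholder ratio above, while a lazily-spooled cached_dir_t simply reports the length of the copy it would send.

/* Illustration only (invented numbers). */
static size_t
example_estimate_compressed(size_t bodylen)
{
  double ratio = estimate_compression_ratio(DIR_SPOOL_SERVER_BY_FP); /* 0.5 */
  return (size_t)(bodylen * ratio); /* e.g. 4000 -> 2000 */
}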
446 :
447 : /** Return code for spooled_resource_flush_some */
448 : typedef enum {
449 : SRFS_ERR = -1,
450 : SRFS_MORE = 0,
451 : SRFS_DONE
452 : } spooled_resource_flush_status_t;
453 :
454 : /** Flush some or all of the bytes from <b>spooled</b> onto <b>conn</b>.
455 : * Return SRFS_ERR on error, SRFS_MORE if there are more bytes to flush from
456 : * this spooled resource, or SRFS_DONE if we are done flushing this spooled
457 : * resource.
458 : */
459 : static spooled_resource_flush_status_t
460 6 : spooled_resource_flush_some(spooled_resource_t *spooled,
461 : dir_connection_t *conn)
462 : {
463 6 : if (spooled->spool_eagerly) {
464 : /* Spool_eagerly resources are sent all-at-once. */
465 5 : const uint8_t *body = NULL;
466 5 : size_t bodylen = 0;
467 5 : int r = spooled_resource_lookup_body(spooled,
468 : connection_dir_is_encrypted(conn),
469 : &body, &bodylen, NULL);
470 5 : if (r == -1 || body == NULL || bodylen == 0) {
471 : /* Absent objects count as "done". */
472 : return SRFS_DONE;
473 : }
474 :
475 5 : connection_dir_buf_add((const char*)body, bodylen, conn, 0);
476 :
477 5 : return SRFS_DONE;
478 : } else {
479 1 : cached_dir_t *cached = spooled->cached_dir_ref;
480 1 : consensus_cache_entry_t *cce = spooled->consensus_cache_entry;
481 1 : if (cached == NULL && cce == NULL) {
482 : /* The cached_dir_t hasn't been materialized yet. So let's look it up. */
483 0 : cached = spooled->cached_dir_ref =
484 0 : spooled_resource_lookup_cached_dir(spooled, NULL);
485 0 : if (!cached) {
486 : /* Absent objects count as done. */
487 : return SRFS_DONE;
488 : }
489 0 : ++cached->refcnt;
490 0 : tor_assert_nonfatal(spooled->cached_dir_offset == 0);
491 : }
492 :
493 1 : if (BUG(!cached && !cce))
494 0 : return SRFS_DONE;
495 :
496 1 : int64_t total_len;
497 1 : const char *ptr;
498 1 : if (cached) {
499 0 : total_len = cached->dir_compressed_len;
500 0 : ptr = cached->dir_compressed;
501 : } else {
502 1 : total_len = spooled->cce_len;
503 1 : ptr = (const char *)spooled->cce_body;
504 : }
505 : /* How many bytes left to flush? */
506 1 : int64_t remaining;
507 1 : remaining = total_len - spooled->cached_dir_offset;
508 1 : if (BUG(remaining < 0))
509 0 : return SRFS_ERR;
510 1 : ssize_t bytes = (ssize_t) MIN(DIRSERV_CACHED_DIR_CHUNK_SIZE, remaining);
511 :
512 1 : connection_dir_buf_add(ptr + spooled->cached_dir_offset,
513 : bytes, conn, 0);
514 :
515 1 : spooled->cached_dir_offset += bytes;
516 1 : if (spooled->cached_dir_offset >= (off_t)total_len) {
517 : return SRFS_DONE;
518 : } else {
519 0 : return SRFS_MORE;
520 : }
521 : }
522 : }
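Worked numbers for the lazy path, invented for illustration: a 20000-byte compressed object goes out in DIRSERV_CACHED_DIR_CHUNK_SIZE pieces of 8192, 8192, and 3616 bytes, so this function returns SRFS_MORE, SRFS_MORE, and finally SRFS_DONE. A caller that ignored buffer backpressure (the real caller, connection_dirserv_flushed_some() below, also checks the outbuf level) could drain one resource like this:

/* Illustration only. */
static int
example_drain_spooled(spooled_resource_t *spooled, dir_connection_t *conn)
{
  spooled_resource_flush_status_t st;
  do {
    st = spooled_resource_flush_some(spooled, conn); /* adds <= 8192 bytes */
  } while (st == SRFS_MORE);
  return (st == SRFS_DONE) ? 0 : -1;
}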
523 :
524 : /** Helper: find the cached_dir_t for a spooled_resource_t.
525 : * Set *<b>published_out</b>, if provided,
526 : * to the published time of the cached_dir_t.
527 : *
528 : * DOES NOT increase the reference count on the result. Callers must do that
529 : * themselves if they mean to hang on to it.
530 : */
531 : static cached_dir_t *
532 0 : spooled_resource_lookup_cached_dir(const spooled_resource_t *spooled,
533 : time_t *published_out)
534 : {
535 0 : tor_assert(spooled->spool_eagerly == 0);
536 0 : cached_dir_t *d = lookup_cached_dir_by_fp(spooled->digest);
537 0 : if (d != NULL) {
538 0 : if (published_out)
539 0 : *published_out = d->published;
540 : }
541 0 : return d;
542 : }
543 :
544 : /** Helper: Look up the body for an eagerly-served spooled_resource. If
545 : * <b>conn_is_encrypted</b> is false, don't look up any resource that
546 : * shouldn't be sent over an unencrypted connection. On success, set
547 : * <b>body_out</b>, <b>size_out</b>, and <b>published_out</b> to refer
548 : * to the resource's body, size, and publication date, and return 0.
549 : * On failure return -1. */
550 : static int
551 39 : spooled_resource_lookup_body(const spooled_resource_t *spooled,
552 : int conn_is_encrypted,
553 : const uint8_t **body_out,
554 : size_t *size_out,
555 : time_t *published_out)
556 : {
557 39 : tor_assert(spooled->spool_eagerly == 1);
558 :
559 39 : const signed_descriptor_t *sd = NULL;
560 :
561 39 : switch (spooled->spool_source) {
562 0 : case DIR_SPOOL_EXTRA_BY_FP: {
563 0 : sd = get_signed_descriptor_by_fp(spooled->digest, 1);
564 0 : break;
565 : }
566 25 : case DIR_SPOOL_SERVER_BY_FP: {
567 25 : sd = get_signed_descriptor_by_fp(spooled->digest, 0);
568 25 : break;
569 : }
570 9 : case DIR_SPOOL_SERVER_BY_DIGEST: {
571 9 : sd = router_get_by_descriptor_digest((const char *)spooled->digest);
572 9 : break;
573 : }
574 0 : case DIR_SPOOL_EXTRA_BY_DIGEST: {
575 0 : sd = extrainfo_get_by_descriptor_digest((const char *)spooled->digest);
576 0 : break;
577 : }
578 5 : case DIR_SPOOL_MICRODESC: {
579 5 : microdesc_t *md = microdesc_cache_lookup_by_digest256(
580 : get_microdesc_cache(),
581 5 : (const char *)spooled->digest);
582 5 : if (! md || ! md->body) {
583 : return -1;
584 : }
585 3 : *body_out = (const uint8_t *)md->body;
586 3 : *size_out = md->bodylen;
587 3 : if (published_out)
588 2 : *published_out = TIME_MAX;
589 : return 0;
590 : }
591 0 : case DIR_SPOOL_NETWORKSTATUS:
592 : case DIR_SPOOL_CONSENSUS_CACHE_ENTRY:
593 : default:
594 : /* LCOV_EXCL_START */
595 : tor_assert_nonfatal_unreached();
596 : return -1;
597 : /* LCOV_EXCL_STOP */
598 : }
599 :
600 : /* If we get here, then we tried to set "sd" to a signed_descriptor_t. */
601 :
602 34 : if (sd == NULL) {
603 : return -1;
604 : }
605 28 : if (sd->send_unencrypted == 0 && ! conn_is_encrypted) {
606 : /* we did this check once before (so we could have an accurate size
607 : * estimate and maybe send a 404 if somebody asked for only bridges on
608 : * a connection), but we need to do it again in case a previously
609 : * unknown bridge descriptor has shown up between then and now. */
610 : return -1;
611 : }
612 28 : *body_out = (const uint8_t *) signed_descriptor_get_body(sd);
613 28 : *size_out = sd->signed_descriptor_len;
614 28 : if (published_out)
615 12 : *published_out = sd->published_on;
616 : return 0;
617 : }
618 :
619 : /** Given a fingerprint <b>fp</b> which is either all zeroes if we're looking
620 : * for the unflavored ("ns") v3 consensus, or a NUL-padded flavor name if we
621 : * want a flavored v3 consensus, return a pointer to the appropriate cached
622 : * dir object, or NULL if there isn't one available. */
623 : static cached_dir_t *
624 0 : lookup_cached_dir_by_fp(const uint8_t *fp)
625 : {
626 0 : cached_dir_t *d = NULL;
627 0 : if (tor_digest_is_zero((const char *)fp) && cached_consensuses) {
628 0 : d = strmap_get(cached_consensuses, "ns");
629 0 : } else if (memchr(fp, '\0', DIGEST_LEN) && cached_consensuses) {
630 : /* this here interface is a nasty hack: we're shoving a flavor into
631 : * a digest field. */
632 0 : d = strmap_get(cached_consensuses, (const char *)fp);
633 : }
634 0 : return d;
635 : }
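In other words: an all-zero digest selects the unflavored ("ns") consensus, and a NUL-padded flavor name selects a flavored one (a fully-set digest, as v2 statuses once used, now matches nothing). A minimal sketch, assuming the consensuses have already been cached:

/* Illustration only. */
static void
example_lookup_consensus_dirs(void)
{
  uint8_t fp[DIGEST_LEN];
  memset(fp, 0, sizeof(fp));
  cached_dir_t *ns_dir = lookup_cached_dir_by_fp(fp);  /* "ns" consensus */

  strlcpy((char *)fp, "microdesc", sizeof(fp));        /* NUL-padded flavor */
  cached_dir_t *md_dir = lookup_cached_dir_by_fp(fp);
  (void) ns_dir;
  (void) md_dir;
}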
636 :
637 : /** Try to guess the number of bytes that will be needed to send the
638 : * spooled objects for <b>conn</b>'s outgoing spool. In the process,
639 : * remove every element of the spool that refers to an absent object, or
640 : * which was published earlier than <b>cutoff</b>. Set *<b>size_out</b>
641 : * to the number of bytes, and *<b>n_expired_out</b> to the number of
642 : * objects removed for being too old. */
643 : void
644 12 : dirserv_spool_remove_missing_and_guess_size(dir_connection_t *conn,
645 : time_t cutoff,
646 : int compression,
647 : size_t *size_out,
648 : int *n_expired_out)
649 : {
650 12 : if (BUG(!conn))
651 0 : return;
652 :
653 12 : smartlist_t *spool = conn->spool;
654 12 : if (!spool) {
655 0 : if (size_out)
656 0 : *size_out = 0;
657 0 : if (n_expired_out)
658 0 : *n_expired_out = 0;
659 0 : return;
660 : }
661 12 : int n_expired = 0;
662 12 : uint64_t total = 0;
663 30 : SMARTLIST_FOREACH_BEGIN(spool, spooled_resource_t *, spooled) {
664 18 : time_t published = TIME_MAX;
665 18 : size_t sz = spooled_resource_estimate_size(spooled, conn,
666 : compression, &published);
667 18 : if (published < cutoff) {
668 7 : ++n_expired;
669 7 : SMARTLIST_DEL_CURRENT(spool, spooled);
670 7 : spooled_resource_free(spooled);
671 11 : } else if (sz == 0) {
672 2 : SMARTLIST_DEL_CURRENT(spool, spooled);
673 2 : spooled_resource_free(spooled);
674 : } else {
675 9 : total += sz;
676 : }
677 18 : } SMARTLIST_FOREACH_END(spooled);
678 :
679 12 : if (size_out) {
680 12 : *size_out = (total > SIZE_MAX) ? SIZE_MAX : (size_t)total;
681 : }
682 12 : if (n_expired_out)
683 9 : *n_expired_out = n_expired;
684 : }
685 :
686 : /** Helper: used to sort a connection's spool. */
687 : static int
688 0 : dirserv_spool_sort_comparison_(const void **a_, const void **b_)
689 : {
690 0 : const spooled_resource_t *a = *a_;
691 0 : const spooled_resource_t *b = *b_;
692 0 : return fast_memcmp(a->digest, b->digest, sizeof(a->digest));
693 : }
694 :
695 : /** Sort all the entries in <b>conn</b> by digest. */
696 : void
697 0 : dirserv_spool_sort(dir_connection_t *conn)
698 : {
699 0 : if (conn->spool == NULL)
700 : return;
701 0 : smartlist_sort(conn->spool, dirserv_spool_sort_comparison_);
702 : }
703 :
704 : /** Return the cache-info for identity fingerprint <b>fp</b>, or
705 : * its extra-info document if <b>extrainfo</b> is true. Return
706 : * NULL if not found. */
707 :
708 : static const signed_descriptor_t *
709 25 : get_signed_descriptor_by_fp(const uint8_t *fp, int extrainfo)
710 : {
711 25 : if (router_digest_is_me((const char *)fp)) {
712 9 : if (extrainfo)
713 0 : return &(router_get_my_extrainfo()->cache_info);
714 : else
715 9 : return &(router_get_my_routerinfo()->cache_info);
716 : } else {
717 16 : const routerinfo_t *ri = router_get_by_id_digest((const char *)fp);
718 16 : if (ri) {
719 14 : if (extrainfo)
720 0 : return extrainfo_get_by_descriptor_digest(
721 0 : ri->cache_info.extra_info_digest);
722 : else
723 14 : return &ri->cache_info;
724 : }
725 : }
726 : return NULL;
727 : }
728 :
729 : /** When we're spooling data onto our outbuf, add more whenever we dip
730 : * below this threshold. */
731 : #define DIRSERV_BUFFER_MIN 16384
732 :
733 : /**
734 : * Called whenever we have flushed some directory data in state
735 : * SERVER_WRITING, or whenever we want to fill the buffer with initial
736 : * directory data (so that subsequent writes will occur, and trigger this
737 : * function again.)
738 : *
739 : * Return 0 on success, and -1 on failure.
740 : */
741 : int
742 6 : connection_dirserv_flushed_some(dir_connection_t *conn)
743 : {
744 6 : tor_assert(conn->base_.state == DIR_CONN_STATE_SERVER_WRITING);
745 6 : if (conn->spool == NULL)
746 : return 0;
747 :
748 12 : while (connection_get_outbuf_len(TO_CONN(conn)) < DIRSERV_BUFFER_MIN &&
749 12 : smartlist_len(conn->spool)) {
750 6 : spooled_resource_t *spooled =
751 6 : smartlist_get(conn->spool, smartlist_len(conn->spool)-1);
752 6 : spooled_resource_flush_status_t status;
753 6 : status = spooled_resource_flush_some(spooled, conn);
754 6 : if (status == SRFS_ERR) {
755 6 : return -1;
756 6 : } else if (status == SRFS_MORE) {
757 : return 0;
758 : }
759 6 : tor_assert(status == SRFS_DONE);
760 :
761 : /* If we're here, we're done flushing this resource. */
762 6 : tor_assert(smartlist_pop_last(conn->spool) == spooled);
763 6 : spooled_resource_free(spooled);
764 : }
765 :
766 6 : if (smartlist_len(conn->spool) > 0) {
767 : /* We're still spooling something. */
768 : return 0;
769 : }
770 :
771 : /* If we get here, we're done. */
772 6 : smartlist_free(conn->spool);
773 6 : conn->spool = NULL;
774 6 : if (conn->compress_state) {
775 : /* Flush the compression state: there could be more bytes pending in there,
776 : * and we don't want to omit bytes. */
777 1 : connection_buf_add_compress("", 0, conn, 1);
778 1 : tor_compress_free(conn->compress_state);
779 1 : conn->compress_state = NULL;
780 : }
781 : return 0;
782 : }
783 :
784 : /** Remove every element from <b>conn</b>'s outgoing spool, and delete
785 : * the spool. */
786 : void
787 81 : dir_conn_clear_spool(dir_connection_t *conn)
788 : {
789 81 : if (!conn || ! conn->spool)
790 : return;
791 9 : SMARTLIST_FOREACH(conn->spool, spooled_resource_t *, s,
792 : spooled_resource_free(s));
793 6 : smartlist_free(conn->spool);
794 6 : conn->spool = NULL;
795 : }
796 :
797 : /** Release all storage used by the directory server. */
798 : void
799 239 : dirserv_free_all(void)
800 : {
801 239 : strmap_free(cached_consensuses, free_cached_dir_);
802 239 : cached_consensuses = NULL;
803 239 : }