1 : /* Copyright (c) 2017-2021, The Tor Project, Inc. */
2 : /* See LICENSE for licensing information */
3 :
4 : /**
5 : * \file test_hs_common.c
6 : * \brief Test hidden service common functionalities.
7 : */
8 :
9 : #define CONNECTION_EDGE_PRIVATE
10 : #define HS_COMMON_PRIVATE
11 : #define HS_CLIENT_PRIVATE
12 : #define HS_SERVICE_PRIVATE
13 : #define NODELIST_PRIVATE
14 :
15 : #include "test/test.h"
16 : #include "test/test_helpers.h"
17 : #include "test/log_test_helpers.h"
18 : #include "test/hs_test_helpers.h"
19 :
20 : #include "core/or/connection_edge.h"
21 : #include "lib/crypt_ops/crypto_format.h"
22 : #include "lib/crypt_ops/crypto_rand.h"
23 : #include "feature/hs/hs_common.h"
24 : #include "feature/hs/hs_client.h"
25 : #include "feature/hs/hs_service.h"
26 : #include "app/config/config.h"
27 : #include "feature/nodelist/networkstatus.h"
28 : #include "feature/dirclient/dirclient.h"
29 : #include "feature/dirauth/dirvote.h"
30 : #include "feature/nodelist/nodelist.h"
31 : #include "feature/nodelist/routerlist.h"
32 : #include "app/config/statefile.h"
33 : #include "core/or/circuitlist.h"
34 : #include "feature/dirauth/shared_random.h"
35 : #include "feature/dirauth/voting_schedule.h"
36 :
37 : #include "feature/nodelist/microdesc_st.h"
38 : #include "feature/nodelist/networkstatus_st.h"
39 : #include "feature/nodelist/node_st.h"
40 : #include "app/config/or_state_st.h"
41 : #include "feature/nodelist/routerinfo_st.h"
42 : #include "feature/nodelist/routerstatus_st.h"
43 :
44 : /** Test the validation of HS v3 addresses */
45 : static void
46 1 : test_validate_address(void *arg)
47 : {
48 1 : int ret;
49 :
50 1 : (void) arg;
51 :
52 : /* Address too short and too long. */
53 1 : setup_full_capture_of_logs(LOG_WARN);
54 1 : ret = hs_address_is_valid("blah");
55 1 : tt_int_op(ret, OP_EQ, 0);
56 1 : expect_log_msg_containing("Invalid length");
57 1 : teardown_capture_of_logs();
58 :
59 1 : setup_full_capture_of_logs(LOG_WARN);
60 1 : ret = hs_address_is_valid(
61 : "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnadb");
62 1 : tt_int_op(ret, OP_EQ, 0);
63 1 : expect_log_msg_containing("Invalid length");
64 1 : teardown_capture_of_logs();
65 :
66 : /* Invalid checksum (taken from prop224) */
67 1 : setup_full_capture_of_logs(LOG_WARN);
68 1 : ret = hs_address_is_valid(
69 : "l5satjgud6gucryazcyvyvhuxhr74u6ygigiuyixe3a6ysis67ororad");
70 1 : tt_int_op(ret, OP_EQ, 0);
71 1 : expect_log_msg_containing("invalid checksum");
72 1 : teardown_capture_of_logs();
73 :
74 1 : setup_full_capture_of_logs(LOG_WARN);
75 1 : ret = hs_address_is_valid(
76 : "btojiu7nu5y5iwut64eufevogqdw4wmqzugnoluw232r4t3ecsfv37ad");
77 1 : tt_int_op(ret, OP_EQ, 0);
78 1 : expect_log_msg_containing("invalid checksum");
79 1 : teardown_capture_of_logs();
80 :
81 : /* Non base32 decodable string. */
82 1 : setup_full_capture_of_logs(LOG_WARN);
83 1 : ret = hs_address_is_valid(
84 : "????????????????????????????????????????????????????????");
85 1 : tt_int_op(ret, OP_EQ, 0);
86 1 : expect_log_msg_containing("Unable to base32 decode");
87 1 : teardown_capture_of_logs();
88 :
89 : /* Valid address. */
90 1 : ret = hs_address_is_valid(
91 : "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
92 1 : tt_int_op(ret, OP_EQ, 1);
93 :
94 1 : done:
95 1 : ;
96 1 : }
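
/* Reference sketch (standalone, outside the Tor build): a v3 onion address
 * packs PUBKEY(32) | CHECKSUM(2) | VERSION(1) into base32, which is where
 * the 56-character length checked above comes from. */
#if 0
#include <stdio.h>

int
main(void)
{
  const int raw_len = 32 + 2 + 1;            /* pubkey + checksum + version */
  const int b32_len = (raw_len * 8 + 4) / 5; /* ceil(280/5) = 56 chars */
  printf("expected v3 address length: %d\n", b32_len); /* plus ".onion" */
  return 0;
}
#endif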
97 :
98 : static int
99 1 : mock_write_str_to_file(const char *path, const char *str, int bin)
100 : {
101 1 : (void)bin;
102 1 : tt_str_op(path, OP_EQ, "/double/five"PATH_SEPARATOR"squared");
103 1 : tt_str_op(str, OP_EQ,
104 : "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion\n");
105 :
106 1 : done:
107 1 : return 0;
108 : }
109 :
110 : /** Test building HS v3 onion addresses. Uses test vectors from the
111 : * ./hs_build_address.py script. */
112 : static void
113 1 : test_build_address(void *arg)
114 : {
115 1 : int ret;
116 1 : char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
117 1 : ed25519_public_key_t pubkey;
118 : /* hex-encoded ed25519 pubkey used in hs_build_address.py */
119 1 : char pubkey_hex[] =
120 : "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
121 1 : hs_service_t *service = NULL;
122 :
123 1 : (void) arg;
124 :
125 1 : MOCK(write_str_to_file, mock_write_str_to_file);
126 :
127 : /* The following has been created with the hs_build_address.py script, which
128 : * follows the proposal 224 specification to build an onion address. */
129 1 : static const char *test_addr =
130 : "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid";
131 :
132 : /* Let's try to build the same onion address as the script */
133 1 : base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
134 : pubkey_hex, strlen(pubkey_hex));
135 1 : hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
136 1 : tt_str_op(test_addr, OP_EQ, onion_addr);
137 : /* Validate that address. */
138 1 : ret = hs_address_is_valid(onion_addr);
139 1 : tt_int_op(ret, OP_EQ, 1);
140 :
141 1 : service = tor_malloc_zero(sizeof(hs_service_t));
142 1 : memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
143 1 : tor_asprintf(&service->config.directory_path, "/double/five");
144 1 : ret = write_address_to_file(service, "squared");
145 1 : tt_int_op(ret, OP_EQ, 0);
146 :
147 1 : done:
148 1 : hs_service_free(service);
149 1 : }
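
/* Standalone reference sketch (outside the Tor build; assumes OpenSSL 1.1.1+
 * for SHA3-256): per prop224, CHECKSUM = SHA3-256(".onion checksum" | PUBKEY
 * | VERSION)[:2] with VERSION = 0x03, and the address is
 * base32(PUBKEY | CHECKSUM | VERSION) + ".onion". */
#if 0
/* cc checksum.c -lcrypto */
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

int
main(void)
{
  /* The ed25519 pubkey from hs_build_address.py (see pubkey_hex above). */
  unsigned char pubkey[32] = {
    0xd7, 0x5a, 0x98, 0x01, 0x82, 0xb1, 0x0a, 0xb7,
    0xd5, 0x4b, 0xfe, 0xd3, 0xc9, 0x64, 0x07, 0x3a,
    0x0e, 0xe1, 0x72, 0xf3, 0xda, 0xa6, 0x23, 0x25,
    0xaf, 0x02, 0x1a, 0x68, 0xf7, 0x07, 0x51, 0x1a,
  };
  unsigned char input[15 + 32 + 1], digest[32];
  unsigned int digest_len = 0;

  memcpy(input, ".onion checksum", 15);
  memcpy(input + 15, pubkey, 32);
  input[47] = 0x03; /* version byte */
  EVP_Digest(input, sizeof(input), digest, &digest_len,
             EVP_sha3_256(), NULL);
  printf("checksum bytes: %02x%02x\n", digest[0], digest[1]);
  return 0;
}
#endif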
150 :
151 : /** Test that our HS time period calculation functions work properly */
152 : static void
153 1 : test_time_period(void *arg)
154 : {
155 1 : (void) arg;
156 1 : uint64_t tn;
157 1 : int retval;
158 1 : time_t fake_time, correct_time, start_time;
159 :
160 : /* Let's do the example in prop224 section [TIME-PERIODS] */
161 1 : retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
162 : &fake_time);
163 1 : tt_int_op(retval, OP_EQ, 0);
164 :
165 : /* Check that the time period number is right */
166 1 : tn = hs_get_time_period_num(fake_time);
167 1 : tt_u64_op(tn, OP_EQ, 16903);
168 :
169 : /* Increase current time to 11:59:59 UTC and check that the time period
170 : number is still the same */
171 1 : fake_time += 3599;
172 1 : tn = hs_get_time_period_num(fake_time);
173 1 : tt_u64_op(tn, OP_EQ, 16903);
174 :
175 : { /* Check start time of next time period */
176 1 : retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
177 : &correct_time);
178 1 : tt_int_op(retval, OP_EQ, 0);
179 :
180 1 : start_time = hs_get_start_time_of_next_time_period(fake_time);
181 1 : tt_int_op(start_time, OP_EQ, correct_time);
182 : }
183 :
184 : /* Now move time forward to 12:00:00 UTC and check that the time period rotated */
185 1 : fake_time += 1;
186 1 : tn = hs_get_time_period_num(fake_time);
187 1 : tt_u64_op(tn, OP_EQ, 16904);
188 :
189 : /* Now also check our hs_get_next_time_period_num() function */
190 1 : tn = hs_get_next_time_period_num(fake_time);
191 1 : tt_u64_op(tn, OP_EQ, 16905);
192 :
193 : { /* Check start time of next time period again */
194 1 : retval = parse_rfc1123_time("Wed, 14 Apr 2016 12:00:00 UTC",
195 : &correct_time);
196 1 : tt_int_op(retval, OP_EQ, 0);
197 :
198 1 : start_time = hs_get_start_time_of_next_time_period(fake_time);
199 1 : tt_int_op(start_time, OP_EQ, correct_time);
200 : }
201 :
202 : /* Now do another sanity check: The time period number at the start of the
203 : * next time period must be the same time period number as the one returned
204 : * from hs_get_next_time_period_num() */
205 : {
206 1 : time_t next_tp_start = hs_get_start_time_of_next_time_period(fake_time);
207 1 : tt_u64_op(hs_get_time_period_num(next_tp_start), OP_EQ,
208 : hs_get_next_time_period_num(fake_time));
209 : }
210 :
211 1 : done:
212 1 : ;
213 1 : }
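
/* Standalone reference sketch (outside the Tor build): the numbers asserted
 * above follow from the prop224 [TIME-PERIODS] arithmetic with mainnet
 * constants, 1440-minute periods and a 720-minute (12:00 UTC) rotation
 * offset. This reproduces 16903 and 16904: */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
  const uint64_t now = 1460545200;      /* 2016-04-13 11:00:00 UTC */
  const uint64_t period_min = 1440;     /* one day */
  const uint64_t offset_min = 12 * 60;  /* periods rotate at 12:00 UTC */

  uint64_t tn = (now / 60 - offset_min) / period_min;
  uint64_t tn2 = ((now + 3600) / 60 - offset_min) / period_min;
  printf("tp at 11:00 = %llu, tp at 12:00 = %llu\n",
         (unsigned long long)tn, (unsigned long long)tn2); /* 16903, 16904 */
  return 0;
}
#endif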
214 :
215 : /** Test that we can correctly find the start time of the next time period */
216 : static void
217 1 : test_start_time_of_next_time_period(void *arg)
218 : {
219 1 : (void) arg;
220 1 : int retval;
221 1 : time_t fake_time;
222 1 : char tbuf[ISO_TIME_LEN + 1];
223 1 : time_t next_tp_start_time;
224 :
225 : /* Do some basic tests */
226 1 : retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
227 : &fake_time);
228 1 : tt_int_op(retval, OP_EQ, 0);
229 1 : next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
230 : /* Compare it with the correct result */
231 1 : format_iso_time(tbuf, next_tp_start_time);
232 1 : tt_str_op("2016-04-13 12:00:00", OP_EQ, tbuf);
233 :
234 : /* Another test with an edge-case time (start of TP) */
235 1 : retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
236 : &fake_time);
237 1 : tt_int_op(retval, OP_EQ, 0);
238 1 : next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
239 1 : format_iso_time(tbuf, next_tp_start_time);
240 1 : tt_str_op("2016-04-14 12:00:00", OP_EQ, tbuf);
241 :
242 : {
243 : /* Now pretend we are on a testing network and alter the voting schedule to
244 : be every 10 seconds. This means that a time period has length 10*24
245 : seconds (4 minutes). It also means that we apply a rotational offset of
246 : 120 seconds to the time period, so that it starts at 00:02:00 instead of
247 : 00:00:00. */
248 1 : or_options_t *options = get_options_mutable();
249 1 : options->TestingTorNetwork = 1;
250 1 : options->V3AuthVotingInterval = 10;
251 1 : options->TestingV3AuthInitialVotingInterval = 10;
252 :
253 1 : retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:00:00 UTC",
254 : &fake_time);
255 1 : tt_int_op(retval, OP_EQ, 0);
256 1 : next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
257 : /* Compare it with the correct result */
258 1 : format_iso_time(tbuf, next_tp_start_time);
259 1 : tt_str_op("2016-04-13 00:02:00", OP_EQ, tbuf);
260 :
261 1 : retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:02:00 UTC",
262 : &fake_time);
263 1 : tt_int_op(retval, OP_EQ, 0);
264 1 : next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
265 : /* Compare it with the correct result */
266 1 : format_iso_time(tbuf, next_tp_start_time);
267 1 : tt_str_op("2016-04-13 00:06:00", OP_EQ, tbuf);
268 : }
269 :
270 1 : done:
271 1 : ;
272 1 : }
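
/* Standalone reference sketch (outside the Tor build): with the
 * testing-network schedule configured above (10s voting), a time period
 * lasts 10*24 = 240s and the rotation offset is 12 voting intervals = 120s,
 * so from midnight the next period starts at 00:02:00: */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
  const uint64_t now = 1460505600;   /* 2016-04-13 00:00:00 UTC */
  const uint64_t period_s = 10 * 24; /* 240 seconds */
  const uint64_t offset_s = 10 * 12; /* 120 seconds */

  uint64_t tn = (now - offset_s) / period_s;
  uint64_t next_start = (tn + 1) * period_s + offset_s;
  printf("next TP starts %llu s past midnight\n",
         (unsigned long long)(next_start - now)); /* 120 -> 00:02:00 */
  return 0;
}
#endif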
273 :
274 : /* Clean up the global nodelist. It also frees the "md" in the node_t because
275 : * we allocate the memory in helper_add_hsdir_to_networkstatus(). */
276 : static void
277 34 : cleanup_nodelist(void)
278 : {
279 34 : const smartlist_t *nodelist = nodelist_get_list();
280 6055 : SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
281 6021 : tor_free(node->md);
282 6021 : node->md = NULL;
283 6021 : } SMARTLIST_FOREACH_END(node);
284 34 : nodelist_free_all();
285 34 : }
286 :
287 : static void
288 6021 : helper_add_hsdir_to_networkstatus(networkstatus_t *ns,
289 : int identity_idx,
290 : const char *nickname,
291 : int is_hsdir)
292 : {
293 6021 : routerstatus_t *rs = tor_malloc_zero(sizeof(routerstatus_t));
294 6021 : routerinfo_t *ri = tor_malloc_zero(sizeof(routerinfo_t));
295 6021 : uint8_t identity[DIGEST_LEN];
296 6021 : node_t *node = NULL;
297 :
298 6021 : memset(identity, identity_idx, sizeof(identity));
299 :
300 6021 : memcpy(rs->identity_digest, identity, DIGEST_LEN);
301 6021 : rs->is_hs_dir = is_hsdir;
302 6021 : rs->pv.supports_v3_hsdir = 1;
303 6021 : strlcpy(rs->nickname, nickname, sizeof(rs->nickname));
304 6021 : tor_addr_parse(&ri->ipv4_addr, "1.2.3.4");
305 6021 : tor_addr_parse(&rs->ipv4_addr, "1.2.3.4");
306 6021 : ri->nickname = tor_strdup(nickname);
307 6021 : ri->protocol_list = tor_strdup("HSDir=1-2 LinkAuth=3");
308 6021 : memcpy(ri->cache_info.identity_digest, identity, DIGEST_LEN);
309 6021 : ri->cache_info.signing_key_cert = tor_malloc_zero(sizeof(tor_cert_t));
310 : /* Needed for the HSDir index computation. */
311 6021 : memset(&ri->cache_info.signing_key_cert->signing_key,
312 : identity_idx, ED25519_PUBKEY_LEN);
313 6021 : tt_assert(nodelist_set_routerinfo(ri, NULL));
314 :
315 6021 : node = node_get_mutable_by_id(ri->cache_info.identity_digest);
316 6021 : tt_assert(node);
317 6021 : node->rs = rs;
318 : /* We need this to exist for node_has_preferred_descriptor() to return
319 : * true. */
320 6021 : node->md = tor_malloc_zero(sizeof(microdesc_t));
321 : /* Do this now: setting the HSDir indexes requires the "rs", which the node
322 : * does not yet have when nodelist_set_routerinfo() is called. */
323 6021 : node_set_hsdir_index(node, ns);
324 6021 : node->ri = NULL;
325 6021 : smartlist_add(ns->routerstatus_list, rs);
326 :
327 6021 : done:
328 6021 : if (node == NULL)
329 0 : routerstatus_free(rs);
330 :
331 6021 : routerinfo_free(ri);
332 6021 : }
333 :
334 : static networkstatus_t *mock_ns = NULL;
335 :
336 : static networkstatus_t *
337 33246 : mock_networkstatus_get_latest_consensus(void)
338 : {
339 33246 : time_t now = approx_time();
340 :
341 : /* If initialized, return it */
342 33246 : if (mock_ns) {
343 : return mock_ns;
344 : }
345 :
346 : /* Initialize fake consensus */
347 3 : mock_ns = tor_malloc_zero(sizeof(networkstatus_t));
348 :
349 : /* This consensus is live */
350 3 : mock_ns->valid_after = now-1;
351 3 : mock_ns->fresh_until = now+1;
352 3 : mock_ns->valid_until = now+2;
353 : /* Create routerstatus list */
354 3 : mock_ns->routerstatus_list = smartlist_new();
355 3 : mock_ns->type = NS_TYPE_CONSENSUS;
356 :
357 3 : return mock_ns;
358 : }
359 :
360 : static networkstatus_t *
361 51528 : mock_networkstatus_get_reasonably_live_consensus(time_t now, int flavor)
362 : {
363 51528 : (void) now;
364 51528 : (void) flavor;
365 :
366 51528 : tt_assert(mock_ns);
367 :
368 51528 : done:
369 51528 : return mock_ns;
370 : }
371 :
372 : /** Test the responsible HSDirs calculation function */
373 : static void
374 1 : test_responsible_hsdirs(void *arg)
375 : {
376 1 : smartlist_t *responsible_dirs = smartlist_new();
377 1 : networkstatus_t *ns = NULL;
378 1 : (void) arg;
379 :
380 1 : hs_init();
381 :
382 1 : MOCK(networkstatus_get_latest_consensus,
383 : mock_networkstatus_get_latest_consensus);
384 1 : MOCK(networkstatus_get_reasonably_live_consensus,
385 : mock_networkstatus_get_reasonably_live_consensus);
386 :
387 1 : ns = networkstatus_get_latest_consensus();
388 :
389 : { /* First router: HSdir */
390 1 : helper_add_hsdir_to_networkstatus(ns, 1, "igor", 1);
391 : }
392 :
393 : { /* Second HSDir */
394 1 : helper_add_hsdir_to_networkstatus(ns, 2, "victor", 1);
395 : }
396 :
397 : { /* Third relay but not HSDir */
398 1 : helper_add_hsdir_to_networkstatus(ns, 3, "spyro", 0);
399 : }
400 :
401 : /* Use a fixed time period and pub key so we always take the same path */
402 1 : ed25519_public_key_t pubkey;
403 1 : uint64_t time_period_num = 17653; // 2 May, 2018, 14:00.
404 1 : memset(&pubkey, 42, sizeof(pubkey));
405 :
406 1 : hs_get_responsible_hsdirs(&pubkey, time_period_num,
407 : 0, 0, responsible_dirs);
408 :
409 : /* Make sure that we only found 2 responsible HSDirs.
410 : * The third relay was not an hsdir! */
411 1 : tt_int_op(smartlist_len(responsible_dirs), OP_EQ, 2);
412 :
413 : /** TODO: Build a bigger network and do more tests here */
414 :
415 1 : done:
416 4 : SMARTLIST_FOREACH(ns->routerstatus_list,
417 : routerstatus_t *, rs, routerstatus_free(rs));
418 1 : smartlist_free(responsible_dirs);
419 1 : smartlist_clear(ns->routerstatus_list);
420 1 : networkstatus_vote_free(mock_ns);
421 1 : cleanup_nodelist();
422 :
423 1 : UNMOCK(networkstatus_get_reasonably_live_consensus);
424 1 : }
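
/* Standalone reference sketch (outside the Tor build): per rend-spec-v3,
 * hs_get_responsible_hsdirs() derives one "store-at" index per replica from
 * the blinded key and time period, then walks the hash ring to take the next
 * hsdir_spread_fetch (3, clients) or hsdir_spread_store (4, services) relays
 * at or after that index; with hsdir_n_replicas = 2 this yields the 6 and 8
 * HSDir counts used elsewhere in this file. The ring walk below uses a
 * stand-in hash instead of the real SHA3-based indexes: */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define N_NODES 16
#define N_REPLICAS 2
#define SPREAD 3 /* fetch spread; a service would use 4 */

static uint64_t
toy_index(uint64_t seed) /* stand-in for the SHA3-256 ring indexes */
{
  seed ^= seed >> 33; seed *= 0xff51afd7ed558ccdULL; seed ^= seed >> 33;
  return seed;
}

static int
cmp_u64(const void *a, const void *b)
{
  uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;
  return x < y ? -1 : x > y;
}

int
main(void)
{
  uint64_t ring[N_NODES];
  for (int i = 0; i < N_NODES; i++)
    ring[i] = toy_index(0x1000 + i);  /* each relay's position on the ring */
  qsort(ring, N_NODES, sizeof(uint64_t), cmp_u64);

  for (int replica = 1; replica <= N_REPLICAS; replica++) {
    uint64_t hs_index = toy_index(0xbeef + replica); /* store-at position */
    int start = 0;
    while (start < N_NODES && ring[start] < hs_index)
      start++; /* first relay at or after hs_index, wrapping below */
    printf("replica %d:", replica);
    for (int k = 0; k < SPREAD; k++)
      printf(" %016llx", (unsigned long long)ring[(start + k) % N_NODES]);
    printf("\n");
  }
  return 0;
}
#endif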
425 :
426 : static void
427 66 : mock_directory_initiate_request(directory_request_t *req)
428 : {
429 66 : (void)req;
430 66 : return;
431 : }
432 :
433 : static int
434 78 : mock_hs_desc_encode_descriptor(const hs_descriptor_t *desc,
435 : const ed25519_keypair_t *signing_kp,
436 : const uint8_t *descriptor_cookie,
437 : char **encoded_out)
438 : {
439 78 : (void)desc;
440 78 : (void)signing_kp;
441 78 : (void)descriptor_cookie;
442 :
443 78 : tor_asprintf(encoded_out, "lulu");
444 78 : return 0;
445 : }
446 :
447 : static or_state_t dummy_state;
448 :
449 : /* Mock function to get a fake or_state_t (used for rev counters) */
450 : static or_state_t *
451 0 : get_or_state_replacement(void)
452 : {
453 0 : return &dummy_state;
454 : }
455 :
456 : static int
457 0 : mock_router_have_minimum_dir_info(void)
458 : {
459 0 : return 1;
460 : }
461 :
462 : /** Test that we correctly detect when the HSDir hash ring changes so that we
463 : * reupload our descriptor. */
464 : static void
465 1 : test_desc_reupload_logic(void *arg)
466 : {
467 1 : networkstatus_t *ns = NULL;
468 :
469 1 : (void) arg;
470 :
471 1 : hs_init();
472 :
473 1 : MOCK(networkstatus_get_reasonably_live_consensus,
474 : mock_networkstatus_get_reasonably_live_consensus);
475 1 : MOCK(router_have_minimum_dir_info,
476 : mock_router_have_minimum_dir_info);
477 1 : MOCK(get_or_state,
478 : get_or_state_replacement);
479 1 : MOCK(networkstatus_get_latest_consensus,
480 : mock_networkstatus_get_latest_consensus);
481 1 : MOCK(directory_initiate_request,
482 : mock_directory_initiate_request);
483 1 : MOCK(hs_desc_encode_descriptor,
484 : mock_hs_desc_encode_descriptor);
485 :
486 1 : ns = networkstatus_get_latest_consensus();
487 :
488 : /** Test logic:
489 : * 1) Upload descriptor to HSDirs
490 : * CHECK that previous_hsdirs list was populated.
491 : * 2) Then call router_dir_info_changed() without an HSDir set change.
492 : * CHECK that no reupload occurs.
493 : * 3) Now change the HSDir set, and call router_dir_info_changed() again.
494 : * CHECK that reupload occurs.
495 : * 4) Finally call service_desc_schedule_upload().
496 : * CHECK that previous_hsdirs list was cleared.
497 : **/
498 :
499 : /* Let's start by building our descriptor and service */
500 1 : hs_service_descriptor_t *desc = service_descriptor_new();
501 1 : hs_service_t *service = NULL;
502 : /* hex-encoded ed25519 pubkey used in hs_build_address.py */
503 1 : char pubkey_hex[] =
504 : "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
505 1 : char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
506 1 : ed25519_public_key_t pubkey;
507 1 : base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
508 : pubkey_hex, strlen(pubkey_hex));
509 1 : hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
510 1 : service = tor_malloc_zero(sizeof(hs_service_t));
511 1 : tt_assert(service);
512 1 : memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
513 1 : ed25519_secret_key_generate(&service->keys.identity_sk, 0);
514 1 : ed25519_public_key_generate(&service->keys.identity_pk,
515 : &service->keys.identity_sk);
516 1 : service->desc_current = desc;
517 : /* Also add service to service map */
518 1 : hs_service_ht *service_map = get_hs_service_map();
519 1 : tt_assert(service_map);
520 1 : tt_int_op(hs_service_get_num_services(), OP_EQ, 0);
521 1 : register_service(service_map, service);
522 1 : tt_int_op(hs_service_get_num_services(), OP_EQ, 1);
523 :
524 : /* Now let's create our hash ring: */
525 : {
526 1 : helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
527 1 : helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
528 1 : helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
529 1 : helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
530 1 : helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
531 1 : helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
532 : }
533 :
534 : /* Now let's upload our desc to all hsdirs */
535 1 : upload_descriptor_to_all(service, desc);
536 : /* Check that previous hsdirs were populated */
537 1 : tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
538 :
539 : /* Poison next upload time so that we can see if it was changed by
540 : * router_dir_info_changed(). No changes in hash ring so far, so the upload
541 : * time should stay as is. */
542 1 : desc->next_upload_time = 42;
543 1 : router_dir_info_changed();
544 1 : tt_int_op(desc->next_upload_time, OP_EQ, 42);
545 :
546 : /* Now change the HSDir hash ring by swapping nora for aaron.
547 : * Start by clearing the hash ring */
548 : {
549 7 : SMARTLIST_FOREACH(ns->routerstatus_list,
550 : routerstatus_t *, rs, routerstatus_free(rs));
551 1 : smartlist_clear(ns->routerstatus_list);
552 1 : cleanup_nodelist();
553 1 : routerlist_free_all();
554 : }
555 :
556 : { /* Now add back all the nodes */
557 1 : helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
558 1 : helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
559 1 : helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
560 1 : helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
561 1 : helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
562 1 : helper_add_hsdir_to_networkstatus(ns, 7, "nora", 1);
563 : }
564 :
565 : /* Now call service_desc_hsdirs_changed() and see that it detected the hash
566 : ring change */
567 1 : time_t now = approx_time();
568 1 : tt_assert(now);
569 1 : tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
570 1 : tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
571 :
572 : /* Now order another upload and see that we keep having 6 prev hsdirs */
573 1 : upload_descriptor_to_all(service, desc);
574 : /* Check that previous hsdirs were populated */
575 1 : tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
576 :
577 : /* Now restore the HSDir hash ring to its original state by swapping back
578 : aaron for nora */
579 : /* First clear up the hash ring */
580 : {
581 7 : SMARTLIST_FOREACH(ns->routerstatus_list,
582 : routerstatus_t *, rs, routerstatus_free(rs));
583 1 : smartlist_clear(ns->routerstatus_list);
584 1 : cleanup_nodelist();
585 1 : routerlist_free_all();
586 : }
587 :
588 : { /* Now populate the hash ring again */
589 1 : helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
590 1 : helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
591 1 : helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
592 1 : helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
593 1 : helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
594 1 : helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
595 : }
596 :
597 : /* Check that our algorithm catches this change of hsdirs */
598 1 : tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
599 :
600 : /* Now pretend that the descriptor changed, and order a reupload to all
601 : HSDirs. Make sure that the set of previous HSDirs was cleared. */
602 1 : service_desc_schedule_upload(desc, now, 1);
603 1 : tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 0);
604 :
605 : /* Now reupload again: see that the prev hsdir set got populated again. */
606 1 : upload_descriptor_to_all(service, desc);
607 1 : tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
608 :
609 1 : done:
610 7 : SMARTLIST_FOREACH(ns->routerstatus_list,
611 : routerstatus_t *, rs, routerstatus_free(rs));
612 1 : smartlist_clear(ns->routerstatus_list);
613 1 : if (service) {
614 1 : remove_service(get_hs_service_map(), service);
615 1 : hs_service_free(service);
616 : }
617 1 : networkstatus_vote_free(ns);
618 1 : cleanup_nodelist();
619 1 : hs_free_all();
620 1 : }
621 :
622 : /** Test disaster SRV computation and caching */
623 : static void
624 1 : test_disaster_srv(void *arg)
625 : {
626 1 : uint8_t *cached_disaster_srv_one = NULL;
627 1 : uint8_t *cached_disaster_srv_two = NULL;
628 1 : uint8_t srv_one[DIGEST256_LEN] = {0};
629 1 : uint8_t srv_two[DIGEST256_LEN] = {0};
630 1 : uint8_t srv_three[DIGEST256_LEN] = {0};
631 1 : uint8_t srv_four[DIGEST256_LEN] = {0};
632 1 : uint8_t srv_five[DIGEST256_LEN] = {0};
633 :
634 1 : (void) arg;
635 :
636 : /* Get the cached SRVs: we are going to use them later for verification */
637 1 : cached_disaster_srv_one = get_first_cached_disaster_srv();
638 1 : cached_disaster_srv_two = get_second_cached_disaster_srv();
639 :
640 : /* Compute some srvs */
641 1 : get_disaster_srv(1, srv_one);
642 1 : get_disaster_srv(2, srv_two);
643 :
644 : /* Check that the cached ones were updated */
645 1 : tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
646 1 : tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
647 :
648 : /* Ask for an SRV that has already been computed */
649 1 : get_disaster_srv(2, srv_two);
650 : /* and check that the cache entries have not changed */
651 1 : tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
652 1 : tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
653 :
654 : /* Ask for a new SRV */
655 1 : get_disaster_srv(3, srv_three);
656 1 : tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
657 1 : tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
658 :
659 : /* Ask for another SRV: none of the original SRVs should now be cached */
660 1 : get_disaster_srv(4, srv_four);
661 1 : tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
662 1 : tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);
663 :
664 : /* Ask for yet another SRV */
665 1 : get_disaster_srv(5, srv_five);
666 1 : tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_five, DIGEST256_LEN);
667 1 : tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);
668 :
669 1 : done:
670 1 : ;
671 1 : }
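
/* Standalone reference sketch (outside the Tor build; assumes OpenSSL 1.1.1+
 * for SHA3-256, and treat the exact input layout as an assumption to check
 * against rend-spec-v3): the disaster SRV is
 * SHA3-256("shared-random-disaster" | INT_8(period_length) |
 * INT_8(period_num)), with INT_8 as an 8-byte big-endian integer: */
#if 0
/* cc disaster_srv.c -lcrypto */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <openssl/evp.h>

static void
put_u64_be(unsigned char *out, uint64_t v) /* INT_8(): 8-byte big-endian */
{
  for (int i = 0; i < 8; i++)
    out[i] = (unsigned char)(v >> (56 - 8 * i));
}

int
main(void)
{
  unsigned char input[22 + 8 + 8], srv[32];
  unsigned int len = 0;

  memcpy(input, "shared-random-disaster", 22);
  put_u64_be(input + 22, 1440); /* period length in minutes, mainnet */
  put_u64_be(input + 30, 3);    /* period number, cf. get_disaster_srv(3) */
  EVP_Digest(input, sizeof(input), srv, &len, EVP_sha3_256(), NULL);

  for (unsigned i = 0; i < len; i++)
    printf("%02x", srv[i]);
  printf("\n");
  return 0;
}
#endif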
672 :
673 : /** Test our HS descriptor request tracker by making various requests and
674 : * checking whether they get tracked properly. */
675 : static void
676 1 : test_hid_serv_request_tracker(void *arg)
677 : {
678 1 : (void) arg;
679 1 : time_t retval;
680 1 : routerstatus_t *hsdir = NULL, *hsdir2 = NULL, *hsdir3 = NULL;
681 1 : time_t now = approx_time();
682 :
683 1 : const char *req_key_str_first =
684 : "vd4zb6zesaubtrjvdqcr2w7x7lhw2up4Xnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
685 1 : const char *req_key_str_second =
686 : "g53o7iavcd62oihswhr24u6czmqws5kpXnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
687 1 : const char *req_key_str_small = "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ";
688 :
689 : /*************************** basic test *******************************/
690 :
691 : /* Get request tracker and make sure it's empty */
692 1 : strmap_t *request_tracker = get_last_hid_serv_requests();
693 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 0);
694 :
695 : /* Let's register a hid serv request */
696 1 : hsdir = tor_malloc_zero(sizeof(routerstatus_t));
697 1 : memset(hsdir->identity_digest, 'Z', DIGEST_LEN);
698 1 : retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
699 : now, 1);
700 1 : tt_int_op(retval, OP_EQ, now);
701 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
702 :
703 : /* Let's lookup a non-existent hidserv request */
704 1 : retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_second,
705 : now+1, 0);
706 1 : tt_int_op(retval, OP_EQ, 0);
707 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
708 :
709 : /* Let's lookup a real hidserv request */
710 1 : retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
711 : now+2, 0);
712 1 : tt_int_op(retval, OP_EQ, now); /* we got it */
713 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
714 :
715 : /**********************************************************************/
716 :
717 : /* Let's add another request for the same HS but on a different HSDir. */
718 1 : hsdir2 = tor_malloc_zero(sizeof(routerstatus_t));
719 1 : memset(hsdir2->identity_digest, 2, DIGEST_LEN);
720 1 : retval = hs_lookup_last_hid_serv_request(hsdir2, req_key_str_first,
721 : now+3, 1);
722 1 : tt_int_op(retval, OP_EQ, now+3);
723 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 2);
724 :
725 : /* Check that we can clean the first request based on time */
726 1 : hs_clean_last_hid_serv_requests(now+3+REND_HID_SERV_DIR_REQUERY_PERIOD);
727 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
728 : /* Check that it doesn't exist anymore */
729 1 : retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
730 : now+2, 0);
731 1 : tt_int_op(retval, OP_EQ, 0);
732 :
733 : /* Now let's add a smaller req key str */
734 1 : hsdir3 = tor_malloc_zero(sizeof(routerstatus_t));
735 1 : memset(hsdir3->identity_digest, 3, DIGEST_LEN);
736 1 : retval = hs_lookup_last_hid_serv_request(hsdir3, req_key_str_small,
737 : now+4, 1);
738 1 : tt_int_op(retval, OP_EQ, now+4);
739 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 2);
740 :
741 : /*************************** deleting entries **************************/
742 :
743 : /* Add another request with a very short key */
744 1 : retval = hs_lookup_last_hid_serv_request(hsdir, "l", now, 1);
745 1 : tt_int_op(retval, OP_EQ, now);
746 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
747 :
748 : /* Try deleting entries with a dummy key. Check that our previous requests
749 : * are still there */
750 1 : tor_capture_bugs_(1);
751 1 : hs_purge_hid_serv_from_last_hid_serv_requests("a");
752 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
753 1 : tor_end_capture_bugs_();
754 :
755 : /* Try another dummy key. Check that requests are still there */
756 : {
757 1 : char dummy[2000];
758 1 : memset(dummy, 'Z', 2000);
759 1 : dummy[1999] = '\x00';
760 1 : hs_purge_hid_serv_from_last_hid_serv_requests(dummy);
761 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
762 : }
763 :
764 : /* Another dummy key! */
765 1 : hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_second);
766 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
767 :
768 : /* Now actually delete a request! */
769 1 : hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_first);
770 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 2);
771 :
772 : /* Purge it all! */
773 1 : hs_purge_last_hid_serv_requests();
774 1 : request_tracker = get_last_hid_serv_requests();
775 1 : tt_int_op(strmap_size(request_tracker),OP_EQ, 0);
776 :
777 1 : done:
778 1 : tor_free(hsdir);
779 1 : tor_free(hsdir2);
780 1 : tor_free(hsdir3);
781 1 : }
782 :
783 : static void
784 1 : test_parse_extended_hostname(void *arg)
785 : {
786 1 : (void) arg;
787 1 : hostname_type_t type;
788 :
789 1 : char address1[] = "fooaddress.onion";
790 1 : char address3[] = "fooaddress.exit";
791 1 : char address4[] = "www.torproject.org";
792 1 : char address5[] = "foo.abcdefghijklmnop.onion";
793 1 : char address6[] = "foo.bar.abcdefghijklmnop.onion";
794 1 : char address7[] = ".abcdefghijklmnop.onion";
795 1 : char address8[] =
796 : "www.25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
797 1 : char address9[] =
798 : "www.15njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
799 1 : char address10[] =
800 : "15njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid7jdl.onion";
801 :
802 1 : tt_assert(!parse_extended_hostname(address1, &type));
803 1 : tt_int_op(type, OP_EQ, BAD_HOSTNAME);
804 :
805 1 : tt_assert(parse_extended_hostname(address3, &type));
806 1 : tt_int_op(type, OP_EQ, EXIT_HOSTNAME);
807 :
808 1 : tt_assert(parse_extended_hostname(address4, &type));
809 1 : tt_int_op(type, OP_EQ, NORMAL_HOSTNAME);
810 :
811 1 : tt_assert(parse_extended_hostname(address5, &type));
812 1 : tt_int_op(type, OP_EQ, ONION_V2_HOSTNAME);
813 1 : tt_str_op(address5, OP_EQ, "abcdefghijklmnop");
814 :
815 1 : tt_assert(parse_extended_hostname(address6, &type));
816 1 : tt_int_op(type, OP_EQ, ONION_V2_HOSTNAME);
817 1 : tt_str_op(address6, OP_EQ, "abcdefghijklmnop");
818 :
819 1 : tt_assert(!parse_extended_hostname(address7, &type));
820 1 : tt_int_op(type, OP_EQ, BAD_HOSTNAME);
821 :
822 1 : tt_assert(parse_extended_hostname(address8, &type));
823 1 : tt_int_op(type, OP_EQ, ONION_V3_HOSTNAME);
824 1 : tt_str_op(address8, OP_EQ,
825 : "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
826 :
827 : /* Invalid v3 address. */
828 1 : tt_assert(!parse_extended_hostname(address9, &type));
829 1 : tt_int_op(type, OP_EQ, BAD_HOSTNAME);
830 :
831 : /* Invalid v3 address: too long */
832 1 : tt_assert(!parse_extended_hostname(address10, &type));
833 1 : tt_int_op(type, OP_EQ, BAD_HOSTNAME);
834 :
835 1 : done: ;
836 1 : }
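
/* Standalone reference sketch (outside the Tor build): the triage above
 * effectively routes on the suffix and on the length of the label right
 * before ".onion": 16 base32 chars for v2, 56 for v3, anything else is
 * rejected. The classify() helper below is hypothetical and checks lengths
 * only, not charset or checksum: */
#if 0
#include <stdio.h>
#include <string.h>

static const char *
classify(const char *hostname)
{
  const char *dot = strrchr(hostname, '.');
  if (!dot || strcmp(dot, ".onion") != 0)
    return (dot && strcmp(dot, ".exit") == 0) ? "exit" : "normal";
  const char *label = hostname;
  for (const char *p = hostname; p < dot; p++)
    if (*p == '.')
      label = p + 1; /* strip subdomains: keep the last label only */
  size_t label_len = (size_t)(dot - label);
  if (label_len == 16) return "onion v2 (by length)";
  if (label_len == 56) return "onion v3 (by length)";
  return "bad";
}

int
main(void)
{
  const char *tests[] = {
    "www.torproject.org", "fooaddress.exit", "fooaddress.onion",
    "foo.abcdefghijklmnop.onion",
    "www.25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion",
  };
  for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
    printf("%s -> %s\n", tests[i], classify(tests[i]));
  return 0;
}
#endif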
837 :
838 : static void
839 1 : test_time_between_tp_and_srv(void *arg)
840 : {
841 1 : int ret;
842 1 : networkstatus_t ns;
843 1 : (void) arg;
844 :
845 : /* This function should return true where the "^" marks are:
846 : *
847 : * +------------------------------------------------------------------+
848 : * | |
849 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
850 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
851 : * | |
852 : * | $==========|-----------$===========|-----------$===========| |
853 : * | ^^^^^^^^^^^^ ^^^^^^^^^^^^ |
854 : * | |
855 : * +------------------------------------------------------------------+
856 : */
857 :
858 1 : ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
859 1 : tt_int_op(ret, OP_EQ, 0);
860 1 : ret = parse_rfc1123_time("Sat, 26 Oct 1985 01:00:00 UTC", &ns.fresh_until);
861 1 : tt_int_op(ret, OP_EQ, 0);
862 1 : dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
863 1 : ret = hs_in_period_between_tp_and_srv(&ns, 0);
864 1 : tt_int_op(ret, OP_EQ, 0);
865 :
866 1 : ret = parse_rfc1123_time("Sat, 26 Oct 1985 11:00:00 UTC", &ns.valid_after);
867 1 : tt_int_op(ret, OP_EQ, 0);
868 1 : ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.fresh_until);
869 1 : tt_int_op(ret, OP_EQ, 0);
870 1 : dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
871 1 : ret = hs_in_period_between_tp_and_srv(&ns, 0);
872 1 : tt_int_op(ret, OP_EQ, 0);
873 :
874 1 : ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.valid_after);
875 1 : tt_int_op(ret, OP_EQ, 0);
876 1 : ret = parse_rfc1123_time("Sat, 26 Oct 1985 13:00:00 UTC", &ns.fresh_until);
877 1 : tt_int_op(ret, OP_EQ, 0);
878 1 : dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
879 1 : ret = hs_in_period_between_tp_and_srv(&ns, 0);
880 1 : tt_int_op(ret, OP_EQ, 1);
881 :
882 1 : ret = parse_rfc1123_time("Sat, 26 Oct 1985 23:00:00 UTC", &ns.valid_after);
883 1 : tt_int_op(ret, OP_EQ, 0);
884 1 : ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.fresh_until);
885 1 : tt_int_op(ret, OP_EQ, 0);
886 1 : dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
887 1 : ret = hs_in_period_between_tp_and_srv(&ns, 0);
888 1 : tt_int_op(ret, OP_EQ, 1);
889 :
890 1 : ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.valid_after);
891 1 : tt_int_op(ret, OP_EQ, 0);
892 1 : ret = parse_rfc1123_time("Sat, 27 Oct 1985 01:00:00 UTC", &ns.fresh_until);
893 1 : tt_int_op(ret, OP_EQ, 0);
894 1 : dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
895 1 : ret = hs_in_period_between_tp_and_srv(&ns, 0);
896 1 : tt_int_op(ret, OP_EQ, 0);
897 :
898 1 : done:
899 1 : ;
900 1 : }
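
/* Simplified standalone restatement (mainnet-only assumption; the real
 * function derives the boundaries from the voting schedule rather than
 * hardcoding them): the window where hs_in_period_between_tp_and_srv()
 * returns true runs from the TP rotation at 12:00 UTC until the next SRV
 * creation at 00:00 UTC, as the diagram above shows: */
#if 0
#include <stdio.h>
#include <stdint.h>

static int
between_tp_and_srv(uint64_t valid_after) /* mainnet-only simplification */
{
  return (valid_after % 86400) >= 12 * 3600; /* [12:00, 24:00) UTC */
}

int
main(void)
{
  const uint64_t base = 499132800; /* 1985-10-26 00:00:00 UTC */
  printf("11:00 -> %d\n", between_tp_and_srv(base + 11 * 3600)); /* 0 */
  printf("12:00 -> %d\n", between_tp_and_srv(base + 12 * 3600)); /* 1 */
  printf("23:00 -> %d\n", between_tp_and_srv(base + 23 * 3600)); /* 1 */
  printf("00:00 -> %d\n", between_tp_and_srv(base + 24 * 3600)); /* 0 */
  return 0;
}
#endif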
901 :
902 : /************ Reachability Test (it is huge) ****************/
903 :
904 : /* Simulate different consensuses for the client and the service. Used by the
905 : * reachability test. The SRVs and responsible HSDir lists are used by all
906 : * reachability tests, so make them common to simplify setup and teardown. */
907 : static networkstatus_t *mock_service_ns = NULL;
908 : static networkstatus_t *mock_client_ns = NULL;
909 : static sr_srv_t current_srv, previous_srv;
910 : static smartlist_t *service_responsible_hsdirs = NULL;
911 : static smartlist_t *client_responsible_hsdirs = NULL;
912 :
913 : static networkstatus_t *
914 41334 : mock_networkstatus_get_reasonably_live_consensus_service(time_t now,
915 : int flavor)
916 : {
917 41334 : (void) now;
918 41334 : (void) flavor;
919 :
920 41334 : if (mock_service_ns) {
921 : return mock_service_ns;
922 : }
923 :
924 6 : mock_service_ns = tor_malloc_zero(sizeof(networkstatus_t));
925 6 : mock_service_ns->routerstatus_list = smartlist_new();
926 6 : mock_service_ns->type = NS_TYPE_CONSENSUS;
927 :
928 6 : return mock_service_ns;
929 : }
930 :
931 : static networkstatus_t *
932 15816 : mock_networkstatus_get_latest_consensus_service(void)
933 : {
934 15816 : return mock_networkstatus_get_reasonably_live_consensus_service(0, 0);
935 : }
936 :
937 : static networkstatus_t *
938 41335 : mock_networkstatus_get_reasonably_live_consensus_client(time_t now, int flavor)
939 : {
940 41335 : (void) now;
941 41335 : (void) flavor;
942 :
943 41335 : if (mock_client_ns) {
944 : return mock_client_ns;
945 : }
946 :
947 6 : mock_client_ns = tor_malloc_zero(sizeof(networkstatus_t));
948 6 : mock_client_ns->routerstatus_list = smartlist_new();
949 6 : mock_client_ns->type = NS_TYPE_CONSENSUS;
950 :
951 6 : return mock_client_ns;
952 : }
953 :
954 : static networkstatus_t *
955 15817 : mock_networkstatus_get_latest_consensus_client(void)
956 : {
957 15817 : return mock_networkstatus_get_reasonably_live_consensus_client(0, 0);
958 : }
959 :
960 : /* Mock function because we are not trying to test circuit closing, which
961 : * does an awful lot of checks on the circuit object. */
962 : static void
963 0 : mock_circuit_mark_for_close(circuit_t *circ, int reason, int line,
964 : const char *file)
965 : {
966 0 : (void) circ;
967 0 : (void) reason;
968 0 : (void) line;
969 0 : (void) file;
970 0 : return;
971 : }
972 :
973 : /* Initialize a big HSDir V3 hash ring. */
974 : static void
975 24 : helper_initialize_big_hash_ring(networkstatus_t *ns)
976 : {
977 24 : int ret;
978 :
979 : /* Generate 250 hsdirs! :) */
980 6024 : for (int counter = 1 ; counter < 251 ; counter++) {
981 : /* Let's generate random nickname for each hsdir... */
982 6000 : char nickname_binary[8];
983 6000 : char nickname_str[13] = {0};
984 6000 : crypto_rand(nickname_binary, sizeof(nickname_binary));
985 6000 : ret = base64_encode(nickname_str, sizeof(nickname_str),
986 : nickname_binary, sizeof(nickname_binary), 0);
987 6000 : tt_int_op(ret, OP_EQ, 12);
988 6000 : helper_add_hsdir_to_networkstatus(ns, counter, nickname_str, 1);
989 : }
990 :
991 : /* Make sure we have 250 hsdirs in our list */
992 24 : tt_int_op(smartlist_len(ns->routerstatus_list), OP_EQ, 250);
993 :
994 24 : done:
995 24 : ;
996 24 : }
997 :
998 : /** Initialize service and publish its descriptor as needed. Return the newly
999 : * allocated service object to the caller. */
1000 : static hs_service_t *
1001 12 : helper_init_service(time_t now)
1002 : {
1003 12 : int retval;
1004 12 : hs_service_t *service = hs_service_new(get_options());
1005 12 : tt_assert(service);
1006 12 : service->config.version = HS_VERSION_THREE;
1007 12 : ed25519_secret_key_generate(&service->keys.identity_sk, 0);
1008 12 : ed25519_public_key_generate(&service->keys.identity_pk,
1009 : &service->keys.identity_sk);
1010 : /* Register service to global map. */
1011 12 : retval = register_service(get_hs_service_map(), service);
1012 12 : tt_int_op(retval, OP_EQ, 0);
1013 :
1014 : /* Initialize service descriptor */
1015 12 : build_all_descriptors(now);
1016 12 : tt_assert(service->desc_current);
1017 12 : tt_assert(service->desc_next);
1018 :
1019 12 : done:
1020 12 : return service;
1021 : }
1022 :
1023 : /* Helper function to parse the RFC 1123 time string timestr into t. */
1024 : static void
1025 36 : set_consensus_times(const char *timestr, time_t *t)
1026 : {
1027 36 : tt_assert(timestr);
1028 36 : tt_assert(t);
1029 :
1030 36 : int ret = parse_rfc1123_time(timestr, t);
1031 36 : tt_int_op(ret, OP_EQ, 0);
1032 :
1033 36 : done:
1034 36 : return;
1035 : }
1036 :
1037 : /* Helper function to clean up the mock consensuses (client and service) */
1038 : static void
1039 6 : cleanup_mock_ns(void)
1040 : {
1041 6 : if (mock_service_ns) {
1042 1506 : SMARTLIST_FOREACH(mock_service_ns->routerstatus_list,
1043 : routerstatus_t *, rs, routerstatus_free(rs));
1044 6 : smartlist_clear(mock_service_ns->routerstatus_list);
1045 6 : mock_service_ns->sr_info.current_srv = NULL;
1046 6 : mock_service_ns->sr_info.previous_srv = NULL;
1047 6 : networkstatus_vote_free(mock_service_ns);
1048 6 : mock_service_ns = NULL;
1049 : }
1050 :
1051 6 : if (mock_client_ns) {
1052 1506 : SMARTLIST_FOREACH(mock_client_ns->routerstatus_list,
1053 : routerstatus_t *, rs, routerstatus_free(rs));
1054 6 : smartlist_clear(mock_client_ns->routerstatus_list);
1055 6 : mock_client_ns->sr_info.current_srv = NULL;
1056 6 : mock_client_ns->sr_info.previous_srv = NULL;
1057 6 : networkstatus_vote_free(mock_client_ns);
1058 6 : mock_client_ns = NULL;
1059 : }
1060 6 : }
1061 :
1062 : /* Helper function to set up a reachability test. Once called,
1063 : * cleanup_reachability_test() MUST be called at the end. */
1064 : static void
1065 6 : setup_reachability_test(void)
1066 : {
1067 6 : MOCK(circuit_mark_for_close_, mock_circuit_mark_for_close);
1068 6 : MOCK(get_or_state, get_or_state_replacement);
1069 :
1070 6 : hs_init();
1071 :
1072 : /* Baseline to start with. */
1073 6 : memset(&current_srv, 0, sizeof(current_srv));
1074 6 : memset(&previous_srv, 1, sizeof(previous_srv));
1075 :
1076 : /* Initialize the consensuses. */
1077 6 : mock_networkstatus_get_latest_consensus_service();
1078 6 : mock_networkstatus_get_latest_consensus_client();
1079 :
1080 6 : service_responsible_hsdirs = smartlist_new();
1081 6 : client_responsible_hsdirs = smartlist_new();
1082 6 : }
1083 :
1084 : /* Helper function to clean up a reachability test's initial setup. */
1085 : static void
1086 6 : cleanup_reachability_test(void)
1087 : {
1088 6 : smartlist_free(service_responsible_hsdirs);
1089 6 : service_responsible_hsdirs = NULL;
1090 6 : smartlist_free(client_responsible_hsdirs);
1091 6 : client_responsible_hsdirs = NULL;
1092 6 : hs_free_all();
1093 6 : cleanup_mock_ns();
1094 6 : UNMOCK(get_or_state);
1095 6 : UNMOCK(circuit_mark_for_close_);
1096 6 : }
1097 :
1098 : /* A reachability test always checks whether the service and client
1099 : * responsible HSDirs computed for the given parameters match.
1100 : *
1101 : * Return true iff all 6 client HSDirs appear in the service's list of 8. */
1102 : static int
1103 6 : are_responsible_hsdirs_equal(void)
1104 : {
1105 6 : int count = 0;
1106 6 : tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
1107 6 : tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);
1108 :
1109 42 : SMARTLIST_FOREACH_BEGIN(client_responsible_hsdirs,
1110 : const routerstatus_t *, c_rs) {
1111 144 : SMARTLIST_FOREACH_BEGIN(service_responsible_hsdirs,
1112 : const routerstatus_t *, s_rs) {
1113 144 : if (tor_memeq(c_rs->identity_digest, s_rs->identity_digest,
1114 : DIGEST_LEN)) {
1115 36 : count++;
1116 36 : break;
1117 : }
1118 108 : } SMARTLIST_FOREACH_END(s_rs);
1119 36 : } SMARTLIST_FOREACH_END(c_rs);
1120 :
1121 6 : done:
1122 6 : return (count == 6);
1123 : }
1124 :
1125 : /* Tor itself has no function to get the previous time period; that logic
1126 : * only appears inside node_set_hsdir_index(). We need it here so we can test
1127 : * reachability scenario 6, which requires the previous time period to compute
1128 : * the responsible HSDirs because of the client state timing. */
1129 : static uint64_t
1130 1 : get_previous_time_period(time_t now)
1131 : {
1132 1 : return hs_get_time_period_num(now) - 1;
1133 : }
1134 :
1135 : /* Configuration of a reachability test scenario. */
1136 : typedef struct reachability_cfg_t {
1137 : /* Consensus timings to be set. They have to be compliant with
1138 : * RFC 1123 time format. */
1139 : const char *service_valid_after;
1140 : const char *service_valid_until;
1141 : const char *client_valid_after;
1142 : const char *client_valid_until;
1143 :
1144 : /* SRVs that the service and client should use. */
1145 : sr_srv_t *service_current_srv;
1146 : sr_srv_t *service_previous_srv;
1147 : sr_srv_t *client_current_srv;
1148 : sr_srv_t *client_previous_srv;
1149 :
1150 : /* A time period function for the service to use for this scenario. For a
1151 : * successful reachability test, the client always uses the current time
1152 : * period, which is why there is no client function. */
1153 : uint64_t (*service_time_period_fn)(time_t);
1154 :
1155 : /* Are the client and service expected to be in a new time period? After
1156 : * setting the consensus time, the reachability test calls
1157 : * hs_in_period_between_tp_and_srv() and tests the returned value against
1158 : * this. */
1159 : unsigned int service_in_new_tp;
1160 : unsigned int client_in_new_tp;
1161 :
1162 : /* Some scenarios require a hint that the client, because of its consensus
1163 : * time, will request the "next" service descriptor; this indicates whether
1164 : * that is the case. */
1165 : unsigned int client_fetch_next_desc;
1166 : } reachability_cfg_t;
1167 :
1168 : /* Some defines to help with semantics while reading the configurations below. */
1169 : #define NOT_IN_NEW_TP 0
1170 : #define IN_NEW_TP 1
1171 : #define DONT_NEED_NEXT_DESC 0
1172 : #define NEED_NEXT_DESC 1
1173 :
1174 : static reachability_cfg_t reachability_scenarios[] = {
1175 : /* Scenario 1
1176 : *
1177 : * +------------------------------------------------------------------+
1178 : * | |
1179 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1180 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1181 : * | |
1182 : * | $==========|-----------$===========|-----------$===========| |
1183 : * | ^ ^ |
1184 : * | S C |
1185 : * +------------------------------------------------------------------+
1186 : *
1187 : * S: Service, C: Client
1188 : *
1189 : * Service consensus valid_after time is set to 13:00 and client to 15:00,
1190 : * both are after TP#1 thus have access to SRV#1. Service and client should
1191 : * be using TP#1.
1192 : */
1193 :
1194 : { "Sat, 26 Oct 1985 13:00:00 UTC", /* Service valid_after */
1195 : "Sat, 26 Oct 1985 14:00:00 UTC", /* Service valid_until */
1196 : "Sat, 26 Oct 1985 15:00:00 UTC", /* Client valid_after */
1197 : "Sat, 26 Oct 1985 16:00:00 UTC", /* Client valid_until. */
1198 : &current_srv, NULL, /* Service current and previous SRV */
1199 : &current_srv, NULL, /* Client current and previous SRV */
1200 : hs_get_time_period_num, /* Service time period function. */
1201 : IN_NEW_TP, /* Is service in new TP? */
1202 : IN_NEW_TP, /* Is client in new TP? */
1203 : NEED_NEXT_DESC },
1204 :
1205 : /* Scenario 2
1206 : *
1207 : * +------------------------------------------------------------------+
1208 : * | |
1209 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1210 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1211 : * | |
1212 : * | $==========|-----------$===========|-----------$===========| |
1213 : * | ^ ^ |
1214 : * | S C |
1215 : * +------------------------------------------------------------------+
1216 : *
1217 : * S: Service, C: Client
1218 : *
1219 : * Service consensus valid_after time is set to 23:00 and client to 01:00,
1220 : * which makes the client after the SRV#2 and the service just before. The
1221 : * service should only be using TP#1. The client should be using TP#1.
1222 : */
1223 :
1224 : { "Sat, 26 Oct 1985 23:00:00 UTC", /* Service valid_after */
1225 : "Sat, 27 Oct 1985 00:00:00 UTC", /* Service valid_until */
1226 : "Sat, 27 Oct 1985 01:00:00 UTC", /* Client valid_after */
1227 : "Sat, 27 Oct 1985 02:00:00 UTC", /* Client valid_until. */
1228 : &previous_srv, NULL, /* Service current and previous SRV */
1229 : &current_srv, &previous_srv, /* Client current and previous SRV */
1230 : hs_get_time_period_num, /* Service time period function. */
1231 : IN_NEW_TP, /* Is service in new TP? */
1232 : NOT_IN_NEW_TP, /* Is client in new TP? */
1233 : NEED_NEXT_DESC },
1234 :
1235 : /* Scenario 3
1236 : *
1237 : * +------------------------------------------------------------------+
1238 : * | |
1239 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1240 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1241 : * | |
1242 : * | $==========|-----------$===========|----------$===========| |
1243 : * | ^ ^ |
1244 : * | S C |
1245 : * +------------------------------------------------------------------+
1246 : *
1247 : * S: Service, C: Client
1248 : *
1249 : * Service consensus valid_after time is set to 03:00 and client to 05:00,
1250 : * which makes both after SRV#2. The service should be using TP#1 as its
1251 : * current time period. The client should be using TP#1.
1252 : */
1253 :
1254 : { "Sat, 27 Oct 1985 03:00:00 UTC", /* Service valid_after */
1255 : "Sat, 27 Oct 1985 04:00:00 UTC", /* Service valid_until */
1256 : "Sat, 27 Oct 1985 05:00:00 UTC", /* Client valid_after */
1257 : "Sat, 27 Oct 1985 06:00:00 UTC", /* Client valid_until. */
1258 : &current_srv, &previous_srv, /* Service current and previous SRV */
1259 : &current_srv, &previous_srv, /* Client current and previous SRV */
1260 : hs_get_time_period_num, /* Service time period function. */
1261 : NOT_IN_NEW_TP, /* Is service in new TP? */
1262 : NOT_IN_NEW_TP, /* Is client in new TP? */
1263 : DONT_NEED_NEXT_DESC },
1264 :
1265 : /* Scenario 4
1266 : *
1267 : * +------------------------------------------------------------------+
1268 : * | |
1269 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1270 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1271 : * | |
1272 : * | $==========|-----------$===========|-----------$===========| |
1273 : * | ^ ^ |
1274 : * | S C |
1275 : * +------------------------------------------------------------------+
1276 : *
1277 : * S: Service, C: Client
1278 : *
1279 : * Service consensus valid_after time is set to 11:00 and client to 13:00,
1280 : * which makes the service before TP#2 and the client just after. The
1281 : * service should be using TP#1 as its current time period and TP#2 as the
1282 : * next. The client should be using TP#2 time period.
1283 : */
1284 :
1285 : { "Sat, 27 Oct 1985 11:00:00 UTC", /* Service valid_after */
1286 : "Sat, 27 Oct 1985 12:00:00 UTC", /* Service valid_until */
1287 : "Sat, 27 Oct 1985 13:00:00 UTC", /* Client valid_after */
1288 : "Sat, 27 Oct 1985 14:00:00 UTC", /* Client valid_until. */
1289 : &current_srv, &previous_srv, /* Service current and previous SRV */
1290 : &current_srv, &previous_srv, /* Client current and previous SRV */
1291 : hs_get_next_time_period_num, /* Service time period function. */
1292 : NOT_IN_NEW_TP, /* Is service in new TP? */
1293 : IN_NEW_TP, /* Is client in new TP? */
1294 : NEED_NEXT_DESC },
1295 :
1296 : /* Scenario 5
1297 : *
1298 : * +------------------------------------------------------------------+
1299 : * | |
1300 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1301 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1302 : * | |
1303 : * | $==========|-----------$===========|-----------$===========| |
1304 : * | ^ ^ |
1305 : * | C S |
1306 : * +------------------------------------------------------------------+
1307 : *
1308 : * S: Service, C: Client
1309 : *
1310 : * Service consensus valid_after time is set to 01:00 and client to 23:00,
1311 : * which makes the service after SRV#2 and the client just before. The
1312 : * service should be using TP#1 as its current time period and TP#2 as the
1313 : * next. The client should be using TP#1 time period.
1314 : */
1315 :
1316 : { "Sat, 27 Oct 1985 01:00:00 UTC", /* Service valid_after */
1317 : "Sat, 27 Oct 1985 02:00:00 UTC", /* Service valid_until */
1318 : "Sat, 26 Oct 1985 23:00:00 UTC", /* Client valid_after */
1319 : "Sat, 27 Oct 1985 00:00:00 UTC", /* Client valid_until. */
1320 : &current_srv, &previous_srv, /* Service current and previous SRV */
1321 : &previous_srv, NULL, /* Client current and previous SRV */
1322 : hs_get_time_period_num, /* Service time period function. */
1323 : NOT_IN_NEW_TP, /* Is service in new TP? */
1324 : IN_NEW_TP, /* Is client in new TP? */
1325 : DONT_NEED_NEXT_DESC },
1326 :
1327 : /* Scenario 6
1328 : *
1329 : * +------------------------------------------------------------------+
1330 : * | |
1331 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1332 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1333 : * | |
1334 : * | $==========|-----------$===========|-----------$===========| |
1335 : * | ^ ^ |
1336 : * | C S |
1337 : * +------------------------------------------------------------------+
1338 : *
1339 : * S: Service, C: Client
1340 : *
1341 : * Service consensus valid_after time is set to 13:00 and client to 11:00,
1342 : * which makes the service after TP#2 and the client just before it.
1343 : * The service should be using TP#1 as its current time period and TP#2 as
1344 : * its next. The client should be using TP#1 time period.
1345 : */
1346 :
1347 : { "Sat, 27 Oct 1985 13:00:00 UTC", /* Service valid_after */
1348 : "Sat, 27 Oct 1985 14:00:00 UTC", /* Service valid_until */
1349 : "Sat, 27 Oct 1985 11:00:00 UTC", /* Client valid_after */
1350 : "Sat, 27 Oct 1985 12:00:00 UTC", /* Client valid_until. */
1351 : &current_srv, &previous_srv, /* Service current and previous SRV */
1352 : &current_srv, &previous_srv, /* Client current and previous SRV */
1353 : get_previous_time_period, /* Service time period function. */
1354 : IN_NEW_TP, /* Is service in new TP? */
1355 : NOT_IN_NEW_TP, /* Is client in new TP? */
1356 : DONT_NEED_NEXT_DESC },
1357 :
1358 : /* End marker. */
1359 : { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0}
1360 : };
1361 :
1362 : /* Run a single reachability scenario. num_scenario is the corresponding
1363 : * scenario number from the documentation. It is logged in case of failure so
1364 : * we know which scenario failed. */
1365 : static int
1366 6 : run_reachability_scenario(const reachability_cfg_t *cfg, int num_scenario)
1367 : {
1368 6 : int ret = -1;
1369 6 : hs_service_t *service;
1370 6 : uint64_t service_tp, client_tp;
1371 6 : ed25519_public_key_t service_blinded_pk, client_blinded_pk;
1372 :
1373 6 : setup_reachability_test();
1374 :
1375 6 : tt_assert(cfg);
1376 :
1377 : /* Set service consensus time. */
1378 6 : set_consensus_times(cfg->service_valid_after,
1379 6 : &mock_service_ns->valid_after);
1380 6 : set_consensus_times(cfg->service_valid_until,
1381 6 : &mock_service_ns->valid_until);
1382 6 : set_consensus_times(cfg->service_valid_until,
1383 6 : &mock_service_ns->fresh_until);
1384 6 : dirauth_sched_recalculate_timing(get_options(),
1385 6 : mock_service_ns->valid_after);
1386 : /* Check that service is in the right time period point */
1387 6 : tt_int_op(hs_in_period_between_tp_and_srv(mock_service_ns, 0), OP_EQ,
1388 : cfg->service_in_new_tp);
1389 :
1390 : /* Set client consensus time. */
1391 6 : set_consensus_times(cfg->client_valid_after,
1392 6 : &mock_client_ns->valid_after);
1393 6 : set_consensus_times(cfg->client_valid_until,
1394 6 : &mock_client_ns->valid_until);
1395 6 : set_consensus_times(cfg->client_valid_until,
1396 6 : &mock_client_ns->fresh_until);
1397 6 : dirauth_sched_recalculate_timing(get_options(),
1398 6 : mock_client_ns->valid_after);
1399 : /* Check that client is in the right time period point */
1400 6 : tt_int_op(hs_in_period_between_tp_and_srv(mock_client_ns, 0), OP_EQ,
1401 : cfg->client_in_new_tp);
1402 :
1403 : /* Set the SRVs for this scenario. */
1404 6 : mock_client_ns->sr_info.current_srv = cfg->client_current_srv;
1405 6 : mock_client_ns->sr_info.previous_srv = cfg->client_previous_srv;
1406 6 : mock_service_ns->sr_info.current_srv = cfg->service_current_srv;
1407 6 : mock_service_ns->sr_info.previous_srv = cfg->service_previous_srv;
1408 :
1409 : /* Initialize a service to get keys. */
1410 6 : update_approx_time(mock_service_ns->valid_after);
1411 6 : service = helper_init_service(mock_service_ns->valid_after+1);
1412 :
1413 : /*
1414 : * === Client setup ===
1415 : */
1416 :
1417 6 : MOCK(networkstatus_get_reasonably_live_consensus,
1418 : mock_networkstatus_get_reasonably_live_consensus_client);
1419 6 : MOCK(networkstatus_get_latest_consensus,
1420 : mock_networkstatus_get_latest_consensus_client);
1421 :
1422 : /* Make networkstatus_is_live() happy. */
1423 6 : update_approx_time(mock_client_ns->valid_after);
1424 : /* Initialize a big hashring for this consensus with the hsdir index set. */
1425 6 : helper_initialize_big_hash_ring(mock_client_ns);
1426 :
1427 : /* The client ONLY uses the current time period. This is the whole point of
1428 : * these reachability tests: make sure the client can always reach the
1429 : * service using only its current time period. */
1430 6 : client_tp = hs_get_time_period_num(0);
1431 :
1432 6 : hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
1433 : client_tp, &client_blinded_pk);
1434 6 : hs_get_responsible_hsdirs(&client_blinded_pk, client_tp, 0, 1,
1435 : client_responsible_hsdirs);
1436 : /* Clean up the nodelist so the service can compute its own set of nodes
1437 : * with its own hashring. */
1438 6 : cleanup_nodelist();
1439 6 : tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
1440 :
1441 6 : UNMOCK(networkstatus_get_latest_consensus);
1442 6 : UNMOCK(networkstatus_get_reasonably_live_consensus);
1443 :
1444 : /*
1445 : * === Service setup ===
1446 : */
1447 :
1448 6 : MOCK(networkstatus_get_reasonably_live_consensus,
1449 : mock_networkstatus_get_reasonably_live_consensus_service);
1450 6 : MOCK(networkstatus_get_latest_consensus,
1451 : mock_networkstatus_get_latest_consensus_service);
1452 :
1453 : /* Make networkstatus_is_live() happy. */
1454 6 : update_approx_time(mock_service_ns->valid_after);
1455 : /* Initialize a big hashring for this consensus with the hsdir index set. */
1456 6 : helper_initialize_big_hash_ring(mock_service_ns);
1457 :
1458 6 : service_tp = cfg->service_time_period_fn(0);
1459 :
1460 6 : hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
1461 : service_tp, &service_blinded_pk);
1462 :
 1463              :   /* A service builds two lists of responsible HSDirs: one for the current
 1464              :    * descriptor and one for the next. Depending on the scenario, the client
 1465              :    * timing indicates whether it is fetching the current or the next
 1466              :    * descriptor, so we use "client_fetch_next_desc" to know which one the
 1467              :    * client is trying to get, and confirm that the service computes the same
 1468              :    * hashring for the same blinded key and service time period function. */
1469 6 : hs_get_responsible_hsdirs(&service_blinded_pk, service_tp,
1470 6 : cfg->client_fetch_next_desc, 0,
1471 : service_responsible_hsdirs);
1472 6 : cleanup_nodelist();
1473 6 : tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);
1474 :
1475 6 : UNMOCK(networkstatus_get_latest_consensus);
1476 6 : UNMOCK(networkstatus_get_reasonably_live_consensus);
1477 :
1478 : /* Some testing of the values we just got from the client and service. */
1479 6 : tt_mem_op(&client_blinded_pk, OP_EQ, &service_blinded_pk,
1480 6 : ED25519_PUBKEY_LEN);
1481 6 : tt_int_op(are_responsible_hsdirs_equal(), OP_EQ, 1);
1482 :
1483 : /* Everything went well. */
1484 6 : ret = 0;
1485 :
1486 : done:
1487 6 : cleanup_reachability_test();
1488 6 : if (ret == -1) {
1489 : /* Do this so we can know which scenario failed. */
1490 0 : char msg[32];
1491 0 : tor_snprintf(msg, sizeof(msg), "Scenario %d failed", num_scenario);
1492 0 : tt_fail_msg(msg);
1493 : }
1494 6 : return ret;
1495 : }
1496 :
1497 : static void
1498 1 : test_reachability(void *arg)
1499 : {
1500 1 : (void) arg;
1501 :
 1502              :   /* NOTE: An important axiom to understand here is that SRV#N must only ever
 1503              :    * be used with TP#N. For example, SRV#2 and TP#1 should NEVER be used
 1504              :    * together. The HSDir index computation is based on this axiom. */
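                   :
                   :   /* An illustrative sketch of that axiom (compiled out; node_pk, srv_n,
                   :    * srv_n_plus_1 and tp_n are hypothetical placeholders, not part of this
                   :    * test): */
                   : #if 0
                   :   const ed25519_public_key_t *node_pk = NULL; /* hypothetical node key */
                   :   const uint8_t *srv_n = NULL;        /* hypothetical SRV#N value */
                   :   const uint8_t *srv_n_plus_1 = NULL; /* hypothetical SRV#N+1 value */
                   :   uint64_t tp_n = 0;                  /* hypothetical TP#N number */
                   :   uint8_t hsdir_index[DIGEST256_LEN];
                   :   /* Correct: SRV#N is paired with TP#N. */
                   :   hs_build_hsdir_index(node_pk, srv_n, tp_n, hsdir_index);
                   :   /* WRONG: pairing SRV#N+1 with TP#N would put the service and its
                   :    * clients on different hashrings. */
                   :   hs_build_hsdir_index(node_pk, srv_n_plus_1, tp_n, hsdir_index);
                   : #endif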
1505 :
1506 7 : for (int i = 0; reachability_scenarios[i].service_valid_after; ++i) {
1507 6 : int ret = run_reachability_scenario(&reachability_scenarios[i], i + 1);
1508 6 : if (ret < 0) {
1509 : return;
1510 : }
1511 : }
1512 : }
1513 :
 1514              : /** As a client, pick an HSDir for the service with <b>onion_identity_pk</b>.
 1515              :  *  Put its identity digest in <b>hsdir_digest_out</b>. */
1516 : static void
1517 36 : helper_client_pick_hsdir(const ed25519_public_key_t *onion_identity_pk,
1518 : char *hsdir_digest_out)
1519 : {
1520 36 : tt_assert(onion_identity_pk);
1521 :
1522 36 : routerstatus_t *client_hsdir = pick_hsdir_v3(onion_identity_pk);
1523 36 : tt_assert(client_hsdir);
1524 36 : digest_to_base64(hsdir_digest_out, client_hsdir->identity_digest);
1525 :
1526 36 : done:
1527 36 : ;
1528 36 : }
1529 :
1530 : static void
1531 1 : test_hs_indexes(void *arg)
1532 : {
1533 1 : int ret;
1534 1 : uint64_t period_num = 42;
1535 1 : ed25519_public_key_t pubkey;
1536 :
1537 1 : (void) arg;
1538 :
1539 : /* Build the hs_index */
1540 : {
1541 1 : uint8_t hs_index[DIGEST256_LEN];
 1542           1 :     const char *b16_test_vector =
 1543              :       "37e5cbbd56a22823714f18f1623ece5983a0d64c78495a8cfab854245e5f9a8a";
 1544           1 :     char test_vector[DIGEST256_LEN];
 1545           1 :     ret = base16_decode(test_vector, sizeof(test_vector), b16_test_vector,
 1546              :                         strlen(b16_test_vector));
1547 1 : tt_int_op(ret, OP_EQ, sizeof(test_vector));
1548 : /* Our test vector uses a public key set to 32 bytes of \x42. */
1549 1 : memset(&pubkey, '\x42', sizeof(pubkey));
1550 1 : hs_build_hs_index(1, &pubkey, period_num, hs_index);
1551 1 : tt_mem_op(hs_index, OP_EQ, test_vector, sizeof(hs_index));
1552 : }
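                   :
                   :   /* For reference, the construction exercised above is (per rend-spec-v3,
                   :    * restated here as a sketch; H is SHA3-256, hence the DIGEST256_LEN
                   :    * output):
                   :    *
                   :    *   hs_index(replica) = H("store-at-idx" | blinded_public_key |
                   :    *                         INT_8(replica) | INT_8(period_length) |
                   :    *                         INT_8(period_num))
                   :    */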
1553 :
1554 : /* Build the hsdir_index */
1555 : {
1556 1 : uint8_t srv[DIGEST256_LEN];
1557 1 : uint8_t hsdir_index[DIGEST256_LEN];
 1558           1 :     const char *b16_test_vector =
 1559              :       "db475361014a09965e7e5e4d4a25b8f8d4b8f16cb1d8a7e95eed50249cc1a2d5";
 1560           1 :     char test_vector[DIGEST256_LEN];
 1561           1 :     ret = base16_decode(test_vector, sizeof(test_vector), b16_test_vector,
 1562              :                         strlen(b16_test_vector));
1563 1 : tt_int_op(ret, OP_EQ, sizeof(test_vector));
1564 : /* Our test vector uses a public key set to 32 bytes of \x42. */
1565 1 : memset(&pubkey, '\x42', sizeof(pubkey));
1566 1 : memset(srv, '\x43', sizeof(srv));
1567 1 : hs_build_hsdir_index(&pubkey, srv, period_num, hsdir_index);
1568 1 : tt_mem_op(hsdir_index, OP_EQ, test_vector, sizeof(hsdir_index));
1569 : }
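                   :
                   :   /* Likewise for the relay-side index tested above (per rend-spec-v3,
                   :    * restated as a sketch):
                   :    *
                   :    *   hsdir_index(node) = H("node-idx" | node_identity |
                   :    *                         shared_random_value |
                   :    *                         INT_8(period_num) | INT_8(period_length))
                   :    *
                   :    * Note the period_num/period_length order differs from hs_index. */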
1570 :
1571 1 : done:
1572 1 : ;
1573 1 : }
1574 :
1575 : #define EARLY_IN_SRV_TO_TP 0
1576 : #define LATE_IN_SRV_TO_TP 1
1577 : #define EARLY_IN_TP_TO_SRV 2
1578 : #define LATE_IN_TP_TO_SRV 3
1579 :
1580 : /** Set the consensus and system time based on <b>position</b>. See the
1581 : * following diagram for details:
1582 : *
1583 : * +------------------------------------------------------------------+
1584 : * | |
1585 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1586 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1587 : * | |
1588 : * | $==========|-----------$===========|----------$===========| |
1589 : * | |
1590 : * | |
1591 : * +------------------------------------------------------------------+
1592 : */
1593 : static time_t
1594 12 : helper_set_consensus_and_system_time(networkstatus_t *ns, int position)
1595 : {
1596 12 : time_t real_time = 0;
1597 :
 1598              :   /* The period between SRV#N and TP#N is from 00:00 to 12:00 UTC. Consensus
 1599              :    * valid_after is what matters here; the rest just specifies the voting
 1600              :    * period correctly. */
1601 12 : if (position == LATE_IN_SRV_TO_TP) {
1602 4 : parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC", &ns->valid_after);
1603 4 : parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC", &ns->fresh_until);
1604 4 : parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->valid_until);
1605 8 : } else if (position == EARLY_IN_TP_TO_SRV) {
1606 2 : parse_rfc1123_time("Wed, 13 Apr 2016 13:00:00 UTC", &ns->valid_after);
1607 2 : parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->fresh_until);
1608 2 : parse_rfc1123_time("Wed, 13 Apr 2016 16:00:00 UTC", &ns->valid_until);
1609 6 : } else if (position == LATE_IN_TP_TO_SRV) {
1610 4 : parse_rfc1123_time("Wed, 13 Apr 2016 23:00:00 UTC", &ns->valid_after);
1611 4 : parse_rfc1123_time("Wed, 14 Apr 2016 00:00:00 UTC", &ns->fresh_until);
1612 4 : parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->valid_until);
1613 2 : } else if (position == EARLY_IN_SRV_TO_TP) {
1614 2 : parse_rfc1123_time("Wed, 14 Apr 2016 01:00:00 UTC", &ns->valid_after);
1615 2 : parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->fresh_until);
1616 2 : parse_rfc1123_time("Wed, 14 Apr 2016 04:00:00 UTC", &ns->valid_until);
1617 : } else {
1618 0 : tt_assert(0);
1619 : }
1620 12 : dirauth_sched_recalculate_timing(get_options(), ns->valid_after);
1621 :
1622 : /* Set system time: pretend to be just 2 minutes before consensus expiry */
1623 12 : real_time = ns->valid_until - 120;
1624 12 : update_approx_time(real_time);
1625 :
1626 12 : done:
1627 12 : return real_time;
1628 : }
1629 :
 1630              : /** Helper function that carries out the actual test for
 1631              :  *  test_client_service_hsdir_set_sync(). */
1632 : static void
1633 6 : helper_test_hsdir_sync(networkstatus_t *ns,
1634 : int service_position, int client_position,
1635 : int client_fetches_next_desc)
1636 : {
1637 6 : hs_service_descriptor_t *desc;
1638 6 : int retval;
1639 :
1640 : /** Test logic:
1641 : * 1) Initialize service time: consensus and system time.
1642 : * 1.1) Initialize service hash ring
1643 : * 2) Initialize service and publish descriptors.
1644 : * 3) Initialize client time: consensus and system time.
1645 : * 3.1) Initialize client hash ring
 1646              :    *  4) Try to fetch the descriptor as a client, and CHECK that the HSDir
 1647              :    *     picked by the client was also picked by the service.
1648 : */
1649 :
1650 : /* 1) Initialize service time: consensus and real time */
1651 6 : time_t now = helper_set_consensus_and_system_time(ns, service_position);
1652 6 : helper_initialize_big_hash_ring(ns);
1653 :
1654 : /* 2) Initialize service */
1655 6 : hs_service_t *service = helper_init_service(now);
1656 6 : desc = client_fetches_next_desc ? service->desc_next : service->desc_current;
1657 :
1658 : /* Now let's upload our desc to all hsdirs */
1659 6 : upload_descriptor_to_all(service, desc);
1660 : /* Cleanup right now so we don't memleak on error. */
1661 6 : cleanup_nodelist();
1662 : /* Check that previous hsdirs were populated */
1663 6 : tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 8);
1664 :
1665 : /* 3) Initialize client time */
1666 6 : helper_set_consensus_and_system_time(ns, client_position);
1667 :
1668 6 : cleanup_nodelist();
1669 1506 : SMARTLIST_FOREACH(ns->routerstatus_list,
1670 : routerstatus_t *, rs, routerstatus_free(rs));
1671 6 : smartlist_clear(ns->routerstatus_list);
1672 6 : helper_initialize_big_hash_ring(ns);
1673 :
1674 : /* 4) Pick 6 HSDirs as a client and check that they were also chosen by the
1675 : service. */
1676 42 : for (int y = 0 ; y < 6 ; y++) {
1677 36 : char client_hsdir_b64_digest[BASE64_DIGEST_LEN+1] = {0};
1678 36 : helper_client_pick_hsdir(&service->keys.identity_pk,
1679 : client_hsdir_b64_digest);
1680 :
 1681              :     /* CHECK: Go through the hsdirs chosen by the service and make sure that
 1682              :      * the list contains the one picked by the client! */
1683 36 : retval = smartlist_contains_string(desc->previous_hsdirs,
1684 : client_hsdir_b64_digest);
1685 36 : tt_int_op(retval, OP_EQ, 1);
1686 : }
1687 :
1688 : /* Finally, try to pick a 7th hsdir and see that NULL is returned since we
1689 : * exhausted all of them: */
1690 6 : tt_assert(!pick_hsdir_v3(&service->keys.identity_pk));
1691 :
1692 6 : done:
 1693              :   /* At the end: free all services and initialize the subsystem again; we will
 1694              :    * need it for the next scenario. */
1695 6 : cleanup_nodelist();
1696 6 : hs_service_free_all();
1697 6 : hs_service_init();
1698 1506 : SMARTLIST_FOREACH(ns->routerstatus_list,
1699 : routerstatus_t *, rs, routerstatus_free(rs));
1700 6 : smartlist_clear(ns->routerstatus_list);
1701 6 : }
1702 :
 1703              : /** This test ensures that client and service will pick the same HSDirs,
 1704              :  *  under various timing scenarios:
 1705              :  *  a) Scenario where both client and service are in the time segment between
 1706              :  *     SRV#N and TP#N.
 1707              :  *  b) Scenario where both client and service are in the time segment between
 1708              :  *     TP#N and SRV#N+1.
 1709              :  *  c) Scenario where service is between SRV#N and TP#N, but client is between
 1710              :  *     TP#N and SRV#N+1.
 1711              :  *  d) Scenario where service is between TP#N and SRV#N+1, but client is
 1712              :  *     between SRV#N and TP#N.
                   :  *  e) Scenario where service is between SRV#N and TP#N, but client is between
                   :  *     TP#N-1 and SRV#N.
                   :  *  f) Scenario where service is between TP#N and SRV#N+1, but client is
                   :  *     between SRV#N+1 and TP#N+1.
 1713              :  *
 1714              :  *  This test is important because it tests that upload_descriptor_to_all() is
 1715              :  *  in sync with pick_hsdir_v3(). That's not the case for the
 1716              :  *  test_reachability() test which only compares the responsible hsdir sets.
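                   :  *
                   :  *  In rough pseudocode, the invariant each scenario checks is (a sketch of
                   :  *  helper_test_hsdir_sync() above, not additional test code):
                   :  *
                   :  *    upload_descriptor_to_all(service, desc);          <- service side
                   :  *    helper_client_pick_hsdir(&identity_pk, digest);   <- client side
                   :  *    smartlist_contains_string(desc->previous_hsdirs, digest) == 1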
1717 : */
1718 : static void
1719 1 : test_client_service_hsdir_set_sync(void *arg)
1720 : {
1721 1 : networkstatus_t *ns = NULL;
1722 :
1723 1 : (void) arg;
1724 :
1725 1 : MOCK(networkstatus_get_latest_consensus,
1726 : mock_networkstatus_get_latest_consensus);
1727 1 : MOCK(networkstatus_get_reasonably_live_consensus,
1728 : mock_networkstatus_get_reasonably_live_consensus);
1729 1 : MOCK(get_or_state,
1730 : get_or_state_replacement);
1731 1 : MOCK(hs_desc_encode_descriptor,
1732 : mock_hs_desc_encode_descriptor);
1733 1 : MOCK(directory_initiate_request,
1734 : mock_directory_initiate_request);
1735 :
1736 1 : hs_init();
1737 :
 1738              :   /* Initialize a big hash ring: we want it to be big so that client and
 1739              :    * service cannot accidentally select the same HSDirs. */
1740 1 : ns = networkstatus_get_latest_consensus();
1741 1 : tt_assert(ns);
1742 :
 1743              :   /** Now test the various sync scenarios. See the helper function for more
1744 : details: */
1745 :
1746 : /* a) Scenario where both client and service are in the time segment between
1747 : * SRV#N and TP#N. At this time the client fetches the first HS desc:
1748 : *
1749 : * +------------------------------------------------------------------+
1750 : * | |
1751 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1752 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1753 : * | |
1754 : * | $==========|-----------$===========|----------$===========| |
1755 : * | ^ ^ |
1756 : * | S C |
1757 : * +------------------------------------------------------------------+
1758 : */
1759 1 : helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, LATE_IN_SRV_TO_TP, 0);
1760 :
1761 : /* b) Scenario where both client and service are in the time segment between
1762 : * TP#N and SRV#N+1. At this time the client fetches the second HS
1763 : * desc:
1764 : *
1765 : * +------------------------------------------------------------------+
1766 : * | |
1767 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1768 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1769 : * | |
1770 : * | $==========|-----------$===========|-----------$===========| |
1771 : * | ^ ^ |
1772 : * | S C |
1773 : * +------------------------------------------------------------------+
1774 : */
1775 1 : helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, LATE_IN_TP_TO_SRV, 1);
1776 :
1777 : /* c) Scenario where service is between SRV#N and TP#N, but client is
1778 : * between TP#N and SRV#N+1. Client is forward in time so it fetches the
1779 : * second HS desc.
1780 : *
1781 : * +------------------------------------------------------------------+
1782 : * | |
1783 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1784 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1785 : * | |
1786 : * | $==========|-----------$===========|-----------$===========| |
1787 : * | ^ ^ |
1788 : * | S C |
1789 : * +------------------------------------------------------------------+
1790 : */
1791 1 : helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, EARLY_IN_TP_TO_SRV, 1);
1792 :
1793 : /* d) Scenario where service is between TP#N and SRV#N+1, but client is
1794 : * between SRV#N and TP#N. Client is backwards in time so it fetches the
1795 : * first HS desc.
1796 : *
1797 : * +------------------------------------------------------------------+
1798 : * | |
1799 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1800 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1801 : * | |
1802 : * | $==========|-----------$===========|-----------$===========| |
1803 : * | ^ ^ |
1804 : * | C S |
1805 : * +------------------------------------------------------------------+
1806 : */
1807 1 : helper_test_hsdir_sync(ns, EARLY_IN_TP_TO_SRV, LATE_IN_SRV_TO_TP, 0);
1808 :
1809 : /* e) Scenario where service is between SRV#N and TP#N, but client is
 1810              :    *    between TP#N-1 and SRV#N. Client is backwards in time so it fetches
1811 : * the first HS desc.
1812 : *
1813 : * +------------------------------------------------------------------+
1814 : * | |
1815 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1816 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1817 : * | |
1818 : * | $==========|-----------$===========|-----------$===========| |
1819 : * | ^ ^ |
1820 : * | C S |
1821 : * +------------------------------------------------------------------+
1822 : */
1823 1 : helper_test_hsdir_sync(ns, EARLY_IN_SRV_TO_TP, LATE_IN_TP_TO_SRV, 0);
1824 :
1825 : /* f) Scenario where service is between TP#N and SRV#N+1, but client is
1826 : * between SRV#N+1 and TP#N+1. Client is forward in time so it fetches
1827 : * the second HS desc.
1828 : *
1829 : * +------------------------------------------------------------------+
1830 : * | |
1831 : * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1832 : * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1833 : * | |
1834 : * | $==========|-----------$===========|-----------$===========| |
1835 : * | ^ ^ |
1836 : * | S C |
1837 : * +------------------------------------------------------------------+
1838 : */
1839 1 : helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, EARLY_IN_SRV_TO_TP, 1);
1840 :
1841 1 : done:
1842 1 : networkstatus_vote_free(ns);
1843 1 : nodelist_free_all();
1844 1 : hs_free_all();
1845 1 : }
1846 :
1847 : struct testcase_t hs_common_tests[] = {
1848 : { "build_address", test_build_address, TT_FORK,
1849 : NULL, NULL },
1850 : { "validate_address", test_validate_address, TT_FORK,
1851 : NULL, NULL },
1852 : { "time_period", test_time_period, TT_FORK,
1853 : NULL, NULL },
1854 : { "start_time_of_next_time_period", test_start_time_of_next_time_period,
1855 : TT_FORK, NULL, NULL },
1856 : { "responsible_hsdirs", test_responsible_hsdirs, TT_FORK,
1857 : NULL, NULL },
1858 : { "desc_reupload_logic", test_desc_reupload_logic, TT_FORK,
1859 : NULL, NULL },
1860 : { "disaster_srv", test_disaster_srv, TT_FORK,
1861 : NULL, NULL },
1862 : { "hid_serv_request_tracker", test_hid_serv_request_tracker, TT_FORK,
1863 : NULL, NULL },
1864 : { "parse_extended_hostname", test_parse_extended_hostname, TT_FORK,
1865 : NULL, NULL },
1866 : { "time_between_tp_and_srv", test_time_between_tp_and_srv, TT_FORK,
1867 : NULL, NULL },
1868 : { "reachability", test_reachability, TT_FORK,
1869 : NULL, NULL },
1870 : { "client_service_hsdir_set_sync", test_client_service_hsdir_set_sync,
1871 : TT_FORK, NULL, NULL },
1872 : { "hs_indexes", test_hs_indexes, TT_FORK,
1873 : NULL, NULL },
1874 :
1875 : END_OF_TESTCASES
1876 : };
|