diff --git a/core/pom.xml b/core/pom.xml index f552d799..6b80ce02 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -280,6 +280,11 @@ 9.1-901.jdbc4 --> + + com.subgraph + orchid + 1.0-SNAPSHOT + diff --git a/core/src/main/java/com/google/bitcoin/net/BlockingClient.java b/core/src/main/java/com/google/bitcoin/net/BlockingClient.java index ee4c90ae..aad4b831 100644 --- a/core/src/main/java/com/google/bitcoin/net/BlockingClient.java +++ b/core/src/main/java/com/google/bitcoin/net/BlockingClient.java @@ -23,6 +23,7 @@ import javax.net.SocketFactory; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketAddress; import java.nio.ByteBuffer; @@ -65,14 +66,15 @@ public class BlockingClient implements MessageWriteTarget { // sure it doesnt get too large or have to call read too often. dbuf = ByteBuffer.allocateDirect(Math.min(Math.max(parser.getMaxMessageSize(), BUFFER_SIZE_LOWER_BOUND), BUFFER_SIZE_UPPER_BOUND)); parser.setWriteTarget(this); - socket = socketFactory.createSocket(); Thread t = new Thread() { @Override public void run() { if (clientSet != null) clientSet.add(BlockingClient.this); try { - socket.connect(serverAddress, connectTimeoutMillis); + InetSocketAddress iServerAddress = (InetSocketAddress)serverAddress; + socket = socketFactory.createSocket(iServerAddress.getAddress(), iServerAddress.getPort()); + //socket.connect(serverAddress, connectTimeoutMillis); parser.connectionOpened(); InputStream stream = socket.getInputStream(); byte[] readBuff = new byte[dbuf.capacity()]; diff --git a/core/src/main/java/com/google/bitcoin/net/discovery/TorDiscovery.java b/core/src/main/java/com/google/bitcoin/net/discovery/TorDiscovery.java new file mode 100644 index 00000000..23b40388 --- /dev/null +++ b/core/src/main/java/com/google/bitcoin/net/discovery/TorDiscovery.java @@ -0,0 +1,270 @@ +/** + * Copyright 2014 Miron Cuperman + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.bitcoin.net.discovery; + +import com.google.bitcoin.core.NetworkParameters; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.subgraph.orchid.Circuit; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorClient; +import com.subgraph.orchid.circuits.path.CircuitPathChooser; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.exitpolicy.ExitTarget; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +/** + *

Supports peer discovery through Tor.
+ *
+ * Failure to obtain at least four different peers through different exit nodes will cause
+ * a PeerDiscoveryException to be thrown during getPeers().
+ *
+ * DNS seeds do not attempt to enumerate every peer on the network. If you want more peers
+ * to connect to, you need to discover them via other means (like addr broadcasts).
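+ *
+ * A minimal usage sketch (illustrative only, not part of the original change). It assumes a
+ * NetworkParameters instance named params, and that the bundled Orchid TorClient exposes
+ * start() and waitUntilReady(); getPeers() may throw PeerDiscoveryException:
+ * <pre>
+ *   TorClient tor = new TorClient();
+ *   tor.start();                // begin bootstrapping the Tor client
+ *   tor.waitUntilReady();       // assumed Orchid call: block until circuits can be built
+ *   PeerDiscovery discovery = new TorDiscovery(params, tor);
+ *   InetSocketAddress[] peers = discovery.getPeers(30, TimeUnit.SECONDS);
+ * </pre>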

+ */ +public class TorDiscovery implements PeerDiscovery { + private static final Logger log = LoggerFactory.getLogger(TorDiscovery.class); + public static final int MINIMUM_ROUTER_COUNT = 4; + public static final int MINIMUM_ROUTER_LOOKUP_COUNT = 10; + public static final int RECEIVE_RETRIES = 3; + public static final int RESOLVE_STREAM_ID = 0x1000; // An arbitrary stream ID + public static final int RESOLVE_CNAME = 0x00; + public static final int RESOLVE_ERROR = 0xf0; + public static final int RESOLVE_IPV4 = 0x04; + public static final int RESOLVE_IPV6 = 0x06; + + private final String[] hostNames; + private final NetworkParameters netParams; + private final CircuitPathChooser pathChooser; + private final TorClient torClient; + private ListeningExecutorService threadPool; + + /** + * Supports finding peers through Tor. Community run DNS entry points will be used. + * + * @param netParams Network parameters to be used for port information. + */ + public TorDiscovery(NetworkParameters netParams, TorClient torClient) { + this(netParams.getDnsSeeds(), netParams, torClient); + } + + /** + * Supports finding peers through Tor. + * + * @param hostNames Host names to be examined for seed addresses. + * @param netParams Network parameters to be used for port information. + * @param torClient an already-started Tor client. + */ + public TorDiscovery(String[] hostNames, NetworkParameters netParams, TorClient torClient) { + this.hostNames = hostNames; + this.netParams = netParams; + + this.torClient = torClient; + this.pathChooser = CircuitPathChooser.create(torClient.getConfig(), torClient.getDirectory()); + } + + private static class Lookup { + final Router router; + final InetAddress address; + + Lookup(Router router, InetAddress address) { + this.router = router; + this.address = address; + } + } + + public InetSocketAddress[] getPeers(long timeoutValue, TimeUnit timeoutUnit) throws PeerDiscoveryException { + if (hostNames == null) + throw new PeerDiscoveryException("Unable to find any peers via DNS"); + + Set routers = Sets.newHashSet(); + ArrayList dummyTargets = Lists.newArrayList(); + + // Collect exit nodes until we have enough + while (routers.size() < MINIMUM_ROUTER_LOOKUP_COUNT) { + Router router = pathChooser.chooseExitNodeForTargets(dummyTargets); + routers.add(router); + } + + try { + List circuits = getCircuits(timeoutValue, timeoutUnit, routers); + + Collection addresses = lookupAddresses(timeoutValue, timeoutUnit, circuits); + + if (addresses.size() < MINIMUM_ROUTER_COUNT) + throw new PeerDiscoveryException("Unable to find enough peers via Tor - got " + addresses.size()); + ArrayList addressList = Lists.newArrayList(); + addressList.addAll(addresses); + Collections.shuffle(addressList); + return addressList.toArray(new InetSocketAddress[addressList.size()]); + } catch (InterruptedException e) { + throw new PeerDiscoveryException(e); + } + } + + private List getCircuits(long timeoutValue, TimeUnit timeoutUnit, Set routers) throws InterruptedException { + createThreadPool(routers.size()); + + try { + List> circuitFutures = Lists.newArrayList(); + for (final Router router : routers) { + circuitFutures.add(threadPool.submit(new Callable() { + public Circuit call() throws Exception { + return torClient.getCircuitManager().openInternalCircuitTo(Lists.newArrayList(router)); + } + })); + } + + threadPool.awaitTermination(timeoutValue, timeoutUnit); + for (ListenableFuture future : circuitFutures) { + if (!future.isDone()) { + log.warn("circuit timed out"); + future.cancel(true); + } + } + + 
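+            // The pool was never shut down, so awaitTermination above simply waits out the full timeout;
+            // futures cancelled in the loop above (and any that failed) become null entries in
+            // Futures.successfulAsList() below and are stripped, leaving only successfully opened circuits.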
List circuits; + try { + circuits = Futures.successfulAsList(circuitFutures).get(); + // Any failures will result in null entries. Remove them. + circuits.removeAll(Collections.singleton(null)); + return circuits; + } catch (ExecutionException e) { + // Cannot happen, successfulAsList accepts failures + throw new RuntimeException(e); + } + } finally { + shutdownThreadPool(); + } + } + + private Collection lookupAddresses(long timeoutValue, TimeUnit timeoutUnit, List circuits) throws InterruptedException { + createThreadPool(circuits.size() * hostNames.length); + + try { + List> lookupFutures = Lists.newArrayList(); + for (final Circuit circuit : circuits) { + for (final String seed : hostNames) { + lookupFutures.add(threadPool.submit(new Callable() { + public Lookup call() throws Exception { + return new Lookup(circuit.getFinalCircuitNode().getRouter(), lookup(circuit, seed)); + } + })); + } + } + + threadPool.awaitTermination(timeoutValue, timeoutUnit); + for (ListenableFuture future : lookupFutures) { + if (!future.isDone()) { + log.warn("circuit timed out"); + future.cancel(true); + } + } + + try { + List lookups = Futures.successfulAsList(lookupFutures).get(); + // Any failures will result in null entries. Remove them. + lookups.removeAll(Collections.singleton(null)); + + // Use a map to enforce one result per exit node + // TODO: randomize result selection better + Map lookupMap = Maps.newHashMap(); + + for (Lookup lookup : lookups) { + InetSocketAddress address = new InetSocketAddress(lookup.address, netParams.getPort()); + lookupMap.put(lookup.router.getIdentityHash(), address); + } + + return lookupMap.values(); + } catch (ExecutionException e) { + // Cannot happen, successfulAsList accepts failures + throw new RuntimeException(e); + } + } finally { + shutdownThreadPool(); + } + } + + private synchronized void shutdownThreadPool() { + threadPool.shutdownNow(); + threadPool = null; + } + + private synchronized void createThreadPool(int size) { + threadPool = + MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(size)); + } + + private InetAddress lookup(Circuit circuit, String seed) throws UnknownHostException { + // Send a resolve cell to the exit node + RelayCell cell = circuit.createRelayCell(RelayCell.RELAY_RESOLVE, RESOLVE_STREAM_ID, circuit.getFinalCircuitNode()); + cell.putString(seed); + circuit.sendRelayCell(cell); + + // Wait a few cell timeout periods (3 * 20 sec) for replies, in case the path is slow + for (int i = 0 ; i < RECEIVE_RETRIES; i++) { + RelayCell res = circuit.receiveRelayCell(); + if (res != null) { + while (res.cellBytesRemaining() > 0) { + int type = res.getByte(); + int len = res.getByte(); + byte[] value = new byte[len]; + res.getByteArray(value); + int ttl = res.getInt(); + + if (type == RESOLVE_CNAME || type >= RESOLVE_ERROR) { + // TODO handle .onion CNAME replies + throw new RuntimeException(new String(value)); + } else if (type == RESOLVE_IPV4 || type == RESOLVE_IPV6) { + return InetAddress.getByAddress(value); + } + } + break; + } + } + throw new RuntimeException("Could not look up " + seed); + } + + public synchronized void shutdown() { + if (threadPool != null) { + shutdownThreadPool(); + } + } +} diff --git a/orchid/.gitignore b/orchid/.gitignore new file mode 100644 index 00000000..f58915aa --- /dev/null +++ b/orchid/.gitignore @@ -0,0 +1,5 @@ +bin/ +orchid-*.jar +orchid-*.zip +build-revision +lib/xmlrpc-* diff --git a/orchid/LICENSE b/orchid/LICENSE new file mode 100644 index 00000000..2738761a --- /dev/null +++ b/orchid/LICENSE @@ 
-0,0 +1,25 @@ +Copyright (c) 2009-2011, Bruce Leidl +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the author nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/orchid/README b/orchid/README new file mode 100644 index 00000000..e69de29b diff --git a/orchid/build.xml b/orchid/build.xml new file mode 100644 index 00000000..8aa956c1 --- /dev/null +++ b/orchid/build.xml @@ -0,0 +1,116 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/orchid/data/GeoIP.dat b/orchid/data/GeoIP.dat new file mode 100644 index 00000000..fe563297 Binary files /dev/null and b/orchid/data/GeoIP.dat differ diff --git a/orchid/data/README b/orchid/data/README new file mode 100644 index 00000000..3eccbba5 --- /dev/null +++ b/orchid/data/README @@ -0,0 +1,3 @@ +GeoIP.dat GeoLite Country database downloaded September, 2013 + + http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz diff --git a/orchid/doc/spec/address-spec.txt b/orchid/doc/spec/address-spec.txt new file mode 100644 index 00000000..2e1aff2b --- /dev/null +++ b/orchid/doc/spec/address-spec.txt @@ -0,0 +1,58 @@ + + Special Hostnames in Tor + Nick Mathewson + +1. Overview + + Most of the time, Tor treats user-specified hostnames as opaque: When + the user connects to www.torproject.org, Tor picks an exit node and uses + that node to connect to "www.torproject.org". Some hostnames, however, + can be used to override Tor's default behavior and circuit-building + rules. + + These hostnames can be passed to Tor as the address part of a SOCKS4a or + SOCKS5 request. If the application is connected to Tor using an IP-only + method (such as SOCKS4, TransPort, or NatdPort), these hostnames can be + substituted for certain IP addresses using the MapAddress configuration + option or the MAPADDRESS control command. + +2. .exit + + SYNTAX: [hostname].[name-or-digest].exit + [name-or-digest].exit + + Hostname is a valid hostname; [name-or-digest] is either the nickname of a + Tor node or the hex-encoded digest of that node's public key. 
+ + When Tor sees an address in this format, it uses the specified hostname as + the exit node. If no "hostname" component is given, Tor defaults to the + published IPv4 address of the exit node. + + It is valid to try to resolve hostnames, and in fact upon success Tor + will cache an internal mapaddress of the form + "www.google.com.foo.exit=64.233.161.99.foo.exit" to speed subsequent + lookups. + + The .exit notation is disabled by default as of Tor 0.2.2.1-alpha, due + to potential application-level attacks. + + EXAMPLES: + www.example.com.exampletornode.exit + + Connect to www.example.com from the node called "exampletornode". + + exampletornode.exit + + Connect to the published IP address of "exampletornode" using + "exampletornode" as the exit. + +3. .onion + + SYNTAX: [digest].onion + + The digest is the first eighty bits of a SHA1 hash of the identity key for + a hidden service, encoded in base32. + + When Tor sees an address in this format, it tries to look up and connect to + the specified hidden service. See rend-spec.txt for full details. + diff --git a/orchid/doc/spec/bridges-spec.txt b/orchid/doc/spec/bridges-spec.txt new file mode 100644 index 00000000..64711881 --- /dev/null +++ b/orchid/doc/spec/bridges-spec.txt @@ -0,0 +1,249 @@ + + Tor bridges specification + +0. Preface + + This document describes the design decisions around support for bridge + users, bridge relays, and bridge authorities. It acts as an overview + of the bridge design and deployment for developers, and it also tries + to point out limitations in the current design and implementation. + + For more details on what all of these mean, look at blocking.tex in + /doc/design-paper/ + +1. Bridge relays + + Bridge relays are just like normal Tor relays except they don't publish + their server descriptors to the main directory authorities. + +1.1. PublishServerDescriptor + + To configure your relay to be a bridge relay, just add + BridgeRelay 1 + PublishServerDescriptor bridge + to your torrc. This will cause your relay to publish its descriptor + to the bridge authorities rather than to the default authorities. + + Alternatively, you can say + BridgeRelay 1 + PublishServerDescriptor 0 + which will cause your relay to not publish anywhere. This could be + useful for private bridges. + +1.2. Recommendations. + + Bridge relays should use an exit policy of "reject *:*". This is + because they only need to relay traffic between the bridge users + and the rest of the Tor network, so there's no need to let people + exit directly from them. + + We invented the RelayBandwidth* options for this situation: Tor clients + who want to allow relaying too. See proposal 111 for details. Relay + operators should feel free to rate-limit their relayed traffic. + +1.3. Implementation note. + + Vidalia 0.0.15 has turned its "Relay" settings page into a tri-state + "Don't relay" / "Relay for the Tor network" / "Help censored users". + + If you click the third choice, it forces your exit policy to reject *:*. + + If all the bridges end up on port 9001, that's not so good. On the + other hand, putting the bridges on a low-numbered port in the Unix + world requires jumping through extra hoops. The current compromise is + that Vidalia makes the ORPort default to 443 on Windows, and 9001 on + other platforms. + + At the bottom of the relay config settings window, Vidalia displays + the bridge identifier to the operator (see Section 3.1) so he can pass + it on to bridge users. + +2. Bridge authorities. 
+ + Bridge authorities are like normal v3 directory authorities, except + they don't create their own network-status documents or votes. So if + you ask a bridge authority for a network-status document or consensus, + they behave like a directory mirror: they give you one from one of + the main authorities. But if you ask the bridge authority for the + descriptor corresponding to a particular identity fingerprint, it will + happily give you the latest descriptor for that fingerprint. + + To become a bridge authority, add these lines to your torrc: + AuthoritativeDirectory 1 + BridgeAuthoritativeDir 1 + + Right now there's one bridge authority, running on the Tonga relay. + +2.1. Exporting bridge-purpose descriptors + + We've added a new purpose for server descriptors: the "bridge" + purpose. With the new router-descriptors file format that includes + annotations, it's easy to look through it and find the bridge-purpose + descriptors. + + Currently we export the bridge descriptors from Tonga to the + BridgeDB server, so it can give them out according to the policies + in blocking.pdf. + +2.2. Reachability/uptime testing + + Right now the bridge authorities do active reachability testing of + bridges, so we know which ones to recommend for users. + + But in the design document, we suggested that bridges should publish + anonymously (i.e. via Tor) to the bridge authority, so somebody watching + the bridge authority can't just enumerate all the bridges. But if we're + doing active measurement, the game is up. Perhaps we should back off on + this goal, or perhaps we should do our active measurement anonymously? + + Answering this issue is scheduled for 0.2.1.x. + +2.3. Future work: migrating to multiple bridge authorities + + Having only one bridge authority is both a trust bottleneck (if you + break into one place you learn about every single bridge we've got) + and a robustness bottleneck (when it's down, bridge users become sad). + + Right now if we put up a second bridge authority, all the bridges would + publish to it, and (assuming the code works) bridge users would query + a random bridge authority. This resolves the robustness bottleneck, + but makes the trust bottleneck even worse. + + In 0.2.2.x and later we should think about better ways to have multiple + bridge authorities. + +3. Bridge users. + + Bridge users are like ordinary Tor users except they use encrypted + directory connections by default, and they use bridge relays as both + entry guards (their first hop) and directory guards (the source of + all their directory information). + + To become a bridge user, add the following line to your torrc: + UseBridges 1 + + and then add at least one "Bridge" line to your torrc based on the + format below. + +3.1. Format of the bridge identifier. + + The canonical format for a bridge identifier contains an IP address, + an ORPort, and an identity fingerprint: + bridge 128.31.0.34:9009 4C17 FB53 2E20 B2A8 AC19 9441 ECD2 B017 7B39 E4B1 + + However, the identity fingerprint can be left out, in which case the + bridge user will connect to that relay and use it as a bridge regardless + of what identity key it presents: + bridge 128.31.0.34:9009 + This might be useful for cases where only short bridge identifiers + can be communicated to bridge users. 
+ + In a future version we may also support bridge identifiers that are + only a key fingerprint: + bridge 4C17 FB53 2E20 B2A8 AC19 9441 ECD2 B017 7B39 E4B1 + and the bridge user can fetch the latest descriptor from the bridge + authority (see Section 3.4). + +3.2. Bridges as entry guards + + For now, bridge users add their bridge relays to their list of "entry + guards" (see path-spec.txt for background on entry guards). They are + managed by the entry guard algorithms exactly as if they were a normal + entry guard -- their keys and timing get cached in the "state" file, + etc. This means that when the Tor user starts up with "UseBridges" + disabled, he will skip past the bridge entries since they won't be + listed as up and usable in his networkstatus consensus. But to be clear, + the "entry_guards" list doesn't currently distinguish guards by purpose. + + Internally, each bridge user keeps a smartlist of "bridge_info_t" + that reflects the "bridge" lines from his torrc along with a download + schedule (see Section 3.5 below). When he starts Tor, he attempts + to fetch a descriptor for each configured bridge (see Section 3.4 + below). When he succeeds at getting a descriptor for one of the bridges + in his list, he adds it directly to the entry guard list using the + normal add_an_entry_guard() interface. Once a bridge descriptor has + been added, should_delay_dir_fetches() will stop delaying further + directory fetches, and the user begins to bootstrap his directory + information from that bridge (see Section 3.3). + + Currently bridge users cache their bridge descriptors to the + "cached-descriptors" file (annotated with purpose "bridge"), but + they don't make any attempt to reuse descriptors they find in this + file. The theory is that either the bridge is available now, in which + case you can get a fresh descriptor, or it's not, in which case an + old descriptor won't do you much good. + + We could disable writing out the bridge lines to the state file, if + we think this is a problem. + + As an exception, if we get an application request when we have one + or more bridge descriptors but we believe none of them are running, + we mark them all as running again. This is similar to the exception + already in place to help long-idle Tor clients realize they should + fetch fresh directory information rather than just refuse requests. + +3.3. Bridges as directory guards + + In addition to using bridges as the first hop in their circuits, bridge + users also use them to fetch directory updates. Other than initial + bootstrapping to find a working bridge descriptor (see Section 3.4 + below), all further non-anonymized directory fetches will be redirected + to the bridge. + + This means that bridge relays need to have cached answers for all + questions the bridge user might ask. This makes the upgrade path + tricky --- for example, if we migrate to a v4 directory design, the + bridge user would need to keep using v3 so long as his bridge relays + only knew how to answer v3 queries. + + In a future design, for cases where the user has enough information + to build circuits yet the chosen bridge doesn't know how to answer a + given query, we might teach bridge users to make an anonymized request + to a more suitable directory server. + +3.4. How bridge users get their bridge descriptor + + Bridge users can fetch bridge descriptors in two ways: by going directly + to the bridge and asking for "/tor/server/authority", or by going to + the bridge authority and asking for "/tor/server/fp/ID". 
By default, + they will only try the direct queries. If the user sets + UpdateBridgesFromAuthority 1 + in his config file, then he will try querying the bridge authority + first for bridges where he knows a digest (if he only knows an IP + address and ORPort, then his only option is a direct query). + + If the user has at least one working bridge, then he will do further + queries to the bridge authority through a full three-hop Tor circuit. + But when bootstrapping, he will make a direct begin_dir-style connection + to the bridge authority. + + As of Tor 0.2.0.10-alpha, if the user attempts to fetch a descriptor + from the bridge authority and it returns a 404 not found, the user + will automatically fall back to trying a direct query. Therefore it is + recommended that bridge users always set UpdateBridgesFromAuthority, + since at worst it will delay their fetches a little bit and notify + the bridge authority of the identity fingerprint (but not location) + of their intended bridges. + +3.5. Bridge descriptor retry schedule + + Bridge users try to fetch a descriptor for each bridge (using the + steps in Section 3.4 above) on startup. Whenever they receive a + bridge descriptor, they reschedule a new descriptor download for 1 + hour from then. + + If on the other hand it fails, they try again after 15 minutes for the + first attempt, after 15 minutes for the second attempt, and after 60 + minutes for subsequent attempts. + + In 0.2.2.x we should come up with some smarter retry schedules. + +3.6. Implementation note. + + Vidalia 0.1.0 has a new checkbox in its Network config window called + "My ISP blocks connections to the Tor network." Users who click that + box change their configuration to: + UseBridges 1 + UpdateBridgesFromAuthority 1 + and should add at least one bridge identifier. + diff --git a/orchid/doc/spec/control-spec.txt b/orchid/doc/spec/control-spec.txt new file mode 100644 index 00000000..1a463afc --- /dev/null +++ b/orchid/doc/spec/control-spec.txt @@ -0,0 +1,1853 @@ + + TC: A Tor control protocol (Version 1) + +0. Scope + + This document describes an implementation-specific protocol that is used + for other programs (such as frontend user-interfaces) to communicate with a + locally running Tor process. It is not part of the Tor onion routing + protocol. + + This protocol replaces version 0 of TC, which is now deprecated. For + reference, TC is described in "control-spec-v0.txt". Implementors are + recommended to avoid using TC directly, but instead to use a library that + can easily be updated to use the newer protocol. (Version 0 is used by Tor + versions 0.1.0.x; the protocol in this document only works with Tor + versions in the 0.1.1.x series and later.) + +1. Protocol outline + + TC is a bidirectional message-based protocol. It assumes an underlying + stream for communication between a controlling process (the "client" + or "controller") and a Tor process (or "server"). The stream may be + implemented via TCP, TLS-over-TCP, a Unix-domain socket, or so on, + but it must provide reliable in-order delivery. For security, the + stream should not be accessible by untrusted parties. + + In TC, the client and server send typed messages to each other over the + underlying stream. The client sends "commands" and the server sends + "replies". + + By default, all messages from the server are in response to messages from + the client. Some client requests, however, will cause the server to send + messages to the client indefinitely far into the future. 
Such + "asynchronous" replies are marked as such. + + Servers respond to messages in the order messages are received. + +2. Message format + +2.1. Description format + + The message formats listed below use ABNF as described in RFC 2234. + The protocol itself is loosely based on SMTP (see RFC 2821). + + We use the following nonterminals from RFC 2822: atom, qcontent + + We define the following general-use nonterminals: + + String = DQUOTE *qcontent DQUOTE + + There are explicitly no limits on line length. All 8-bit characters are + permitted unless explicitly disallowed. + + Wherever CRLF is specified to be accepted from the controller, Tor MAY also + accept LF. Tor, however, MUST NOT generate LF instead of CRLF. + Controllers SHOULD always send CRLF. + +2.2. Commands from controller to Tor + + Command = Keyword Arguments CRLF / "+" Keyword Arguments CRLF Data + Keyword = 1*ALPHA + Arguments = *(SP / VCHAR) + + Specific commands and their arguments are described below in section 3. + +2.3. Replies from Tor to the controller + + Reply = SyncReply / AsyncReply + SyncReply = *(MidReplyLine / DataReplyLine) EndReplyLine + AsyncReply = *(MidReplyLine / DataReplyLine) EndReplyLine + + MidReplyLine = StatusCode "-" ReplyLine + DataReplyLine = StatusCode "+" ReplyLine Data + EndReplyLine = StatusCode SP ReplyLine + ReplyLine = [ReplyText] CRLF + ReplyText = XXXX + StatusCode = 3DIGIT + + Specific replies are mentioned below in section 3, and described more fully + in section 4. + + [Compatibility note: versions of Tor before 0.2.0.3-alpha sometimes + generate AsyncReplies of the form "*(MidReplyLine / DataReplyLine)". + This is incorrect, but controllers that need to work with these + versions of Tor should be prepared to get multi-line AsyncReplies with + the final line (usually "650 OK") omitted.] + +2.4. General-use tokens + + ; Identifiers for servers. + ServerID = Nickname / Fingerprint + + Nickname = 1*19 NicknameChar + NicknameChar = "a"-"z" / "A"-"Z" / "0" - "9" + Fingerprint = "$" 40*HEXDIG + + ; A "=" indicates that the given nickname is canonical; a "~" indicates + ; that the given nickname is not canonical. If no nickname is given at + ; all, Tor does not even have a guess for what this router calls itself. + LongName = Fingerprint [ ( "=" / "~" ) Nickname ] + + ; How a controller tells Tor about a particular OR. There are four + ; possible formats: + ; $Digest -- The router whose identity key hashes to the given digest. + ; This is the preferred way to refer to an OR. + ; $Digest~Name -- The router whose identity key hashes to the given + ; digest, but only if the router has the given nickname. + ; $Digest=Name -- The router whose identity key hashes to the given + ; digest, but only if the router is Named and has the given + ; nickname. + ; Name -- The Named router with the given nickname, or, if no such + ; router exists, any router whose nickname matches the one given. + ; This is not a safe way to refer to routers, since Named status + ; could under some circumstances change over time. + ServerSpec = LongName / Nickname + + ; Unique identifiers for streams or circuits. Currently, Tor only + ; uses digits, but this may change + StreamID = 1*16 IDChar + CircuitID = 1*16 IDChar + IDChar = ALPHA / DIGIT + + Address = ip4-address / ip6-address / hostname (XXXX Define these) + + ; A "Data" section is a sequence of octets concluded by the terminating + ; sequence CRLF "." CRLF. The terminating sequence may not appear in the + ; body of the data. 
Leading periods on lines in the data are escaped with + ; an additional leading period as in RFC 2821 section 4.5.2. + Data = *DataLine "." CRLF + DataLine = CRLF / "." 1*LineItem CRLF / NonDotItem *LineItem CRLF + LineItem = NonCR / 1*CR NonCRLF + NonDotItem = NonDotCR / 1*CR NonCRLF + +3. Commands + + All commands are case-insensitive, but most keywords are case-sensitive. + +3.1. SETCONF + + Change the value of one or more configuration variables. The syntax is: + + "SETCONF" 1*(SP keyword ["=" value]) CRLF + value = String / QuotedString + + Tor behaves as though it had just read each of the key-value pairs + from its configuration file. Keywords with no corresponding values have + their configuration values reset to 0 or NULL (use RESETCONF if you want + to set it back to its default). SETCONF is all-or-nothing: if there + is an error in any of the configuration settings, Tor sets none of them. + + Tor responds with a "250 configuration values set" reply on success. + If some of the listed keywords can't be found, Tor replies with a + "552 Unrecognized option" message. Otherwise, Tor responds with a + "513 syntax error in configuration values" reply on syntax error, or a + "553 impossible configuration setting" reply on a semantic error. + + When a configuration option takes multiple values, or when multiple + configuration keys form a context-sensitive group (see GETCONF below), then + setting _any_ of the options in a SETCONF command is taken to reset all of + the others. For example, if two ORBindAddress values are configured, and a + SETCONF command arrives containing a single ORBindAddress value, the new + command's value replaces the two old values. + + Sometimes it is not possible to change configuration options solely by + issuing a series of SETCONF commands, because the value of one of the + configuration options depends on the value of another which has not yet + been set. Such situations can be overcome by setting multiple configuration + options with a single SETCONF command (e.g. SETCONF ORPort=443 + ORListenAddress=9001). + +3.2. RESETCONF + + Remove all settings for a given configuration option entirely, assign + its default value (if any), and then assign the String provided. + Typically the String is left empty, to simply set an option back to + its default. The syntax is: + + "RESETCONF" 1*(SP keyword ["=" String]) CRLF + + Otherwise it behaves like SETCONF above. + +3.3. GETCONF + + Request the value of a configuration variable. The syntax is: + + "GETCONF" 1*(SP keyword) CRLF + + If all of the listed keywords exist in the Tor configuration, Tor replies + with a series of reply lines of the form: + 250 keyword=value + If any option is set to a 'default' value semantically different from an + empty string, Tor may reply with a reply line of the form: + 250 keyword + + Value may be a raw value or a quoted string. Tor will try to use + unquoted values except when the value could be misinterpreted through + not being quoted. + + If some of the listed keywords can't be found, Tor replies with a + "552 unknown configuration keyword" message. + + If an option appears multiple times in the configuration, all of its + key-value pairs are returned in order. + + Some options are context-sensitive, and depend on other options with + different keywords. These cannot be fetched directly. 
Currently there + is only one such option: clients should use the "HiddenServiceOptions" + virtual keyword to get all HiddenServiceDir, HiddenServicePort, + HiddenServiceNodes, and HiddenServiceExcludeNodes option settings. + +3.4. SETEVENTS + + Request the server to inform the client about interesting events. The + syntax is: + + "SETEVENTS" [SP "EXTENDED"] *(SP EventCode) CRLF + + EventCode = "CIRC" / "STREAM" / "ORCONN" / "BW" / "DEBUG" / + "INFO" / "NOTICE" / "WARN" / "ERR" / "NEWDESC" / "ADDRMAP" / + "AUTHDIR_NEWDESCS" / "DESCCHANGED" / "STATUS_GENERAL" / + "STATUS_CLIENT" / "STATUS_SERVER" / "GUARD" / "NS" / "STREAM_BW" / + "CLIENTS_SEEN" / "NEWCONSENSUS" + + Any events *not* listed in the SETEVENTS line are turned off; thus, sending + SETEVENTS with an empty body turns off all event reporting. + + The server responds with a "250 OK" reply on success, and a "552 + Unrecognized event" reply if one of the event codes isn't recognized. (On + error, the list of active event codes isn't changed.) + + If the flag string "EXTENDED" is provided, Tor may provide extra + information with events for this connection; see 4.1 for more information. + NOTE: All events on a given connection will be provided in extended format, + or none. + NOTE: "EXTENDED" is only supported in Tor 0.1.1.9-alpha or later. + + Each event is described in more detail in Section 4.1. + +3.5. AUTHENTICATE + + Sent from the client to the server. The syntax is: + "AUTHENTICATE" [ SP 1*HEXDIG / QuotedString ] CRLF + + The server responds with "250 OK" on success or "515 Bad authentication" if + the authentication cookie is incorrect. Tor closes the connection on an + authentication failure. + + The format of the 'cookie' is implementation-dependent; see 5.1 below for + information on how the standard Tor implementation handles it. + + Before the client has authenticated, no command other than PROTOCOLINFO, + AUTHENTICATE, or QUIT is valid. If the controller sends any other command, + or sends a malformed command, or sends an unsuccessful AUTHENTICATE + command, or sends PROTOCOLINFO more than once, Tor sends an error reply and + closes the connection. + + To prevent some cross-protocol attacks, the AUTHENTICATE command is still + required even if all authentication methods in Tor are disabled. In this + case, the controller should just send "AUTHENTICATE" CRLF. + + (Versions of Tor before 0.1.2.16 and 0.2.0.4-alpha did not close the + connection after an authentication failure.) + +3.6. SAVECONF + + Sent from the client to the server. The syntax is: + "SAVECONF" CRLF + + Instructs the server to write out its config options into its torrc. Server + returns "250 OK" if successful, or "551 Unable to write configuration + to disk" if it can't write the file or some other error occurs. + +3.7. SIGNAL + + Sent from the client to the server. The syntax is: + + "SIGNAL" SP Signal CRLF + + Signal = "RELOAD" / "SHUTDOWN" / "DUMP" / "DEBUG" / "HALT" / + "HUP" / "INT" / "USR1" / "USR2" / "TERM" / "NEWNYM" / + "CLEARDNSCACHE" + + The meaning of the signals are: + + RELOAD -- Reload: reload config items, refetch directory. (like HUP) + SHUTDOWN -- Controlled shutdown: if server is an OP, exit immediately. + If it's an OR, close listeners and exit after 30 seconds. + (like INT) + DUMP -- Dump stats: log information about open connections and + circuits. (like USR1) + DEBUG -- Debug: switch all open logs to loglevel debug. (like USR2) + HALT -- Immediate shutdown: clean up and exit now. 
(like TERM) + CLEARDNSCACHE -- Forget the client-side cached IPs for all hostnames. + NEWNYM -- Switch to clean circuits, so new application requests + don't share any circuits with old ones. Also clears + the client-side DNS cache. (Tor MAY rate-limit its + response to this signal.) + + The server responds with "250 OK" if the signal is recognized (or simply + closes the socket if it was asked to close immediately), or "552 + Unrecognized signal" if the signal is unrecognized. + +3.8. MAPADDRESS + + Sent from the client to the server. The syntax is: + + "MAPADDRESS" 1*(Address "=" Address SP) CRLF + + The first address in each pair is an "original" address; the second is a + "replacement" address. The client sends this message to the server in + order to tell it that future SOCKS requests for connections to the original + address should be replaced with connections to the specified replacement + address. If the addresses are well-formed, and the server is able to + fulfill the request, the server replies with a 250 message: + 250-OldAddress1=NewAddress1 + 250 OldAddress2=NewAddress2 + + containing the source and destination addresses. If request is + malformed, the server replies with "512 syntax error in command + argument". If the server can't fulfill the request, it replies with + "451 resource exhausted". + + The client may decline to provide a body for the original address, and + instead send a special null address ("0.0.0.0" for IPv4, "::0" for IPv6, or + "." for hostname), signifying that the server should choose the original + address itself, and return that address in the reply. The server + should ensure that it returns an element of address space that is unlikely + to be in actual use. If there is already an address mapped to the + destination address, the server may reuse that mapping. + + If the original address is already mapped to a different address, the old + mapping is removed. If the original address and the destination address + are the same, the server removes any mapping in place for the original + address. + + Example: + C: MAPADDRESS 0.0.0.0=torproject.org 1.2.3.4=tor.freehaven.net + S: 250-127.192.10.10=torproject.org + S: 250 1.2.3.4=tor.freehaven.net + + {Note: This feature is designed to be used to help Tor-ify applications + that need to use SOCKS4 or hostname-less SOCKS5. There are three + approaches to doing this: + 1. Somehow make them use SOCKS4a or SOCKS5-with-hostnames instead. + 2. Use tor-resolve (or another interface to Tor's resolve-over-SOCKS + feature) to resolve the hostname remotely. This doesn't work + with special addresses like x.onion or x.y.exit. + 3. Use MAPADDRESS to map an IP address to the desired hostname, and then + arrange to fool the application into thinking that the hostname + has resolved to that IP. + This functionality is designed to help implement the 3rd approach.} + + Mappings set by the controller last until the Tor process exits: + they never expire. If the controller wants the mapping to last only + a certain time, then it must explicitly un-map the address when that + time has elapsed. + +3.9. GETINFO + + Sent from the client to the server. The syntax is as for GETCONF: + "GETINFO" 1*(SP keyword) CRLF + one or more NL-terminated strings. The server replies with an INFOVALUE + message, or a 551 or 552 error. + + Unlike GETCONF, this message is used for data that are not stored in the Tor + configuration file, and that may be longer than a single line. 
On success, + one ReplyLine is sent for each requested value, followed by a final 250 OK + ReplyLine. If a value fits on a single line, the format is: + 250-keyword=value + If a value must be split over multiple lines, the format is: + 250+keyword= + value + . + Recognized keys and their values include: + + "version" -- The version of the server's software, including the name + of the software. (example: "Tor 0.0.9.4") + + "config-file" -- The location of Tor's configuration file ("torrc"). + + ["exit-policy/prepend" -- The default exit policy lines that Tor will + *prepend* to the ExitPolicy config option. + -- Never implemented. Useful?] + + "exit-policy/default" -- The default exit policy lines that Tor will + *append* to the ExitPolicy config option. + + "desc/id/" or "desc/name/" -- the latest + server descriptor for a given OR, NUL-terminated. + + "desc-annotations/id/" -- outputs the annotations string + (source, timestamp of arrival, purpose, etc) for the corresponding + descriptor. [First implemented in 0.2.0.13-alpha.] + + "extra-info/digest/" -- the extrainfo document whose digest (in + hex) is . Only available if we're downloading extra-info + documents. + + "ns/id/" or "ns/name/" -- the latest router + status info (v2 directory style) for a given OR. Router status + info is as given in + dir-spec.txt, and reflects the current beliefs of this Tor about the + router in question. Like directory clients, controllers MUST + tolerate unrecognized flags and lines. The published date and + descriptor digest are those believed to be best by this Tor, + not necessarily those for a descriptor that Tor currently has. + [First implemented in 0.1.2.3-alpha.] + + "ns/all" -- Router status info (v2 directory style) for all ORs we + have an opinion about, joined by newlines. [First implemented + in 0.1.2.3-alpha.] + + "ns/purpose/" -- Router status info (v2 directory style) + for all ORs of this purpose. Mostly designed for /ns/purpose/bridge + queries. [First implemented in 0.2.0.13-alpha.] + + "desc/all-recent" -- the latest server descriptor for every router that + Tor knows about. + + "network-status" -- a space-separated list (v1 directory style) + of all known OR identities. This is in the same format as the + router-status line in v1 directories; see dir-spec-v1.txt section + 3 for details. (If VERBOSE_NAMES is enabled, the output will + not conform to dir-spec-v1.txt; instead, the result will be a + space-separated list of LongName, each preceded by a "!" if it is + believed to be not running.) This option is deprecated; use + "ns/all" instead. + + "address-mappings/all" + "address-mappings/config" + "address-mappings/cache" + "address-mappings/control" -- a \r\n-separated list of address + mappings, each in the form of "from-address to-address expiry". + The 'config' key returns those address mappings set in the + configuration; the 'cache' key returns the mappings in the + client-side DNS cache; the 'control' key returns the mappings set + via the control interface; the 'all' target returns the mappings + set through any mechanism. + Expiry is formatted as with ADDRMAP events, except that "expiry" is + always a time in GMT or the string "NEVER"; see section 4.1.7. + First introduced in 0.2.0.3-alpha. + + "addr-mappings/*" -- as for address-mappings/*, but without the + expiry portion of the value. Use of this value is deprecated + since 0.2.0.3-alpha; use address-mappings instead. + + "address" -- the best guess at our external IP address. If we + have no guess, return a 551 error. 
(Added in 0.1.2.2-alpha) + + "fingerprint" -- the contents of the fingerprint file that Tor + writes as a server, or a 551 if we're not a server currently. + (Added in 0.1.2.3-alpha) + + "circuit-status" + A series of lines as for a circuit status event. Each line is of + the form: + CircuitID SP CircStatus [SP Path] CRLF + + "stream-status" + A series of lines as for a stream status event. Each is of the form: + StreamID SP StreamStatus SP CircID SP Target CRLF + + "orconn-status" + A series of lines as for an OR connection status event. Each is of the + form: + ServerID SP ORStatus CRLF + + "entry-guards" + A series of lines listing the currently chosen entry guards, if any. + Each is of the form: + ServerID2 SP Status [SP ISOTime] CRLF + + Status-with-time = ("unlisted") SP ISOTime + Status = ("up" / "never-connected" / "down" / + "unusable" / "unlisted" ) + + ServerID2 = Nickname / 40*HEXDIG + + [From 0.1.1.4-alpha to 0.1.1.10-alpha, this was called "helper-nodes". + Tor still supports calling it that for now, but support will be + removed in 0.1.3.x.] + + [Older versions of Tor (before 0.1.2.x-final) generated 'down' instead + of unlisted/unusable. Current Tors never generate 'down'.] + + [XXXX ServerID2 differs from ServerID in not prefixing fingerprints + with a $. This is an implementation error. It would be nice to add + the $ back in if we can do so without breaking compatibility.] + + "accounting/enabled" + "accounting/hibernating" + "accounting/bytes" + "accounting/bytes-left" + "accounting/interval-start" + "accounting/interval-wake" + "accounting/interval-end" + Information about accounting status. If accounting is enabled, + "enabled" is 1; otherwise it is 0. The "hibernating" field is "hard" + if we are accepting no data; "soft" if we're accepting no new + connections, and "awake" if we're not hibernating at all. The "bytes" + and "bytes-left" fields contain (read-bytes SP write-bytes), for the + start and the rest of the interval respectively. The 'interval-start' + and 'interval-end' fields are the borders of the current interval; the + 'interval-wake' field is the time within the current interval (if any) + where we plan[ned] to start being active. The times are GMT. + + "config/names" + A series of lines listing the available configuration options. Each is + of the form: + OptionName SP OptionType [ SP Documentation ] CRLF + OptionName = Keyword + OptionType = "Integer" / "TimeInterval" / "DataSize" / "Float" / + "Boolean" / "Time" / "CommaList" / "Dependant" / "Virtual" / + "String" / "LineList" + Documentation = Text + + "info/names" + A series of lines listing the available GETINFO options. Each is of + one of these forms: + OptionName SP Documentation CRLF + OptionPrefix SP Documentation CRLF + OptionPrefix = OptionName "/*" + + "events/names" + A space-separated list of all the events supported by this version of + Tor's SETEVENTS. + + "features/names" + A space-separated list of all the events supported by this version of + Tor's USEFEATURE. + + "ip-to-country/*" + Maps IP addresses to 2-letter country codes. For example, + "GETINFO ip-to-country/18.0.0.1" should give "US". + + "next-circuit/IP:port" + XXX todo. 
+ + "dir/status-vote/current/consensus" [added in Tor 0.2.1.6-alpha] + "dir/status/authority" + "dir/status/fp/" + "dir/status/fp/++" + "dir/status/all" + "dir/server/fp/" + "dir/server/fp/++" + "dir/server/d/" + "dir/server/d/++" + "dir/server/authority" + "dir/server/all" + A series of lines listing directory contents, provided according to the + specification for the URLs listed in Section 4.4 of dir-spec.txt. Note + that Tor MUST NOT provide private information, such as descriptors for + routers not marked as general-purpose. When asked for 'authority' + information for which this Tor is not authoritative, Tor replies with + an empty string. + + "status/circuit-established" + "status/enough-dir-info" + "status/good-server-descriptor" + "status/accepted-server-descriptor" + "status/..." + These provide the current internal Tor values for various Tor + states. See Section 4.1.10 for explanations. (Only a few of the + status events are available as getinfo's currently. Let us know if + you want more exposed.) + "status/reachability-succeeded/or" + 0 or 1, depending on whether we've found our ORPort reachable. + "status/reachability-succeeded/dir" + 0 or 1, depending on whether we've found our DirPort reachable. + "status/reachability-succeeded" + "OR=" ("0"/"1") SP "DIR=" ("0"/"1") + Combines status/reachability-succeeded/*; controllers MUST ignore + unrecognized elements in this entry. + "status/bootstrap-phase" + Returns the most recent bootstrap phase status event + sent. Specifically, it returns a string starting with either + "NOTICE BOOTSTRAP ..." or "WARN BOOTSTRAP ...". Controllers should + use this getinfo when they connect or attach to Tor to learn its + current bootstrap state. + "status/version/recommended" + List of currently recommended versions. + "status/version/current" + Status of the current version. One of: new, old, unrecommended, + recommended, new in series, obsolete. + "status/clients-seen" + A summary of which countries we've seen clients from recently, + formatted the same as the CLIENTS_SEEN status event described in + Section 4.1.14. This GETINFO option is currently available only + for bridge relays. + + Examples: + C: GETINFO version desc/name/moria1 + S: 250+desc/name/moria= + S: [Descriptor for moria] + S: . + S: 250-version=Tor 0.1.1.0-alpha-cvs + S: 250 OK + +3.10. EXTENDCIRCUIT + + Sent from the client to the server. The format is: + "EXTENDCIRCUIT" SP CircuitID SP + ServerSpec *("," ServerSpec) + [SP "purpose=" Purpose] CRLF + + This request takes one of two forms: either the CircuitID is zero, in + which case it is a request for the server to build a new circuit according + to the specified path, or the CircuitID is nonzero, in which case it is a + request for the server to extend an existing circuit with that ID according + to the specified path. + + If CircuitID is 0 and "purpose=" is specified, then the circuit's + purpose is set. Two choices are recognized: "general" and + "controller". If not specified, circuits are created as "general". + + If the request is successful, the server sends a reply containing a + message body consisting of the CircuitID of the (maybe newly created) + circuit. The syntax is "250" SP "EXTENDED" SP CircuitID CRLF. + +3.11. SETCIRCUITPURPOSE + + Sent from the client to the server. The format is: + "SETCIRCUITPURPOSE" SP CircuitID SP Purpose CRLF + + This changes the circuit's purpose. See EXTENDCIRCUIT above for details. + +3.12. SETROUTERPURPOSE + + Sent from the client to the server. 
The format is: + "SETROUTERPURPOSE" SP NicknameOrKey SP Purpose CRLF + + This changes the descriptor's purpose. See +POSTDESCRIPTOR below + for details. + + NOTE: This command was disabled and made obsolete as of Tor + 0.2.0.8-alpha. It doesn't exist anymore, and is listed here only for + historical interest. + +3.13. ATTACHSTREAM + + Sent from the client to the server. The syntax is: + "ATTACHSTREAM" SP StreamID SP CircuitID [SP "HOP=" HopNum] CRLF + + This message informs the server that the specified stream should be + associated with the specified circuit. Each stream may be associated with + at most one circuit, and multiple streams may share the same circuit. + Streams can only be attached to completed circuits (that is, circuits that + have sent a circuit status 'BUILT' event or are listed as built in a + GETINFO circuit-status request). + + If the circuit ID is 0, responsibility for attaching the given stream is + returned to Tor. + + If HOP=HopNum is specified, Tor will choose the HopNumth hop in the + circuit as the exit node, rather than the last node in the circuit. + Hops are 1-indexed; generally, it is not permitted to attach to hop 1. + + Tor responds with "250 OK" if it can attach the stream, 552 if the circuit + or stream didn't exist, or 551 if the stream couldn't be attached for + another reason. + + {Implementation note: Tor will close unattached streams by itself, + roughly two minutes after they are born. Let the developers know if + that turns out to be a problem.} + + {Implementation note: By default, Tor automatically attaches streams to + circuits itself, unless the configuration variable + "__LeaveStreamsUnattached" is set to "1". Attempting to attach streams + via TC when "__LeaveStreamsUnattached" is false may cause a race between + Tor and the controller, as both attempt to attach streams to circuits.} + + {Implementation note: You can try to attachstream to a stream that + has already sent a connect or resolve request but hasn't succeeded + yet, in which case Tor will detach the stream from its current circuit + before proceeding with the new attach request.} + +3.14. POSTDESCRIPTOR + + Sent from the client to the server. The syntax is: + "+POSTDESCRIPTOR" [SP "purpose=" Purpose] [SP "cache=" Cache] + CRLF Descriptor CRLF "." CRLF + + This message informs the server about a new descriptor. If Purpose is + specified, it must be either "general", "controller", or "bridge", + else we return a 552 error. The default is "general". + + If Cache is specified, it must be either "no" or "yes", else we + return a 552 error. If Cache is not specified, Tor will decide for + itself whether it wants to cache the descriptor, and controllers + must not rely on its choice. + + The descriptor, when parsed, must contain a number of well-specified + fields, including fields for its nickname and identity. + + If there is an error in parsing the descriptor, the server must send a + "554 Invalid descriptor" reply. If the descriptor is well-formed but + the server chooses not to add it, it must reply with a 251 message + whose body explains why the server was not added. If the descriptor + is added, Tor replies with "250 OK". + +3.15. REDIRECTSTREAM + + Sent from the client to the server. The syntax is: + "REDIRECTSTREAM" SP StreamID SP Address [SP Port] CRLF + + Tells the server to change the exit address on the specified stream. If + Port is specified, changes the destination port as well. No remapping + is performed on the new provided address. 
+ + To be sure that the modified address will be used, this event must be sent + after a new stream event is received, and before attaching this stream to + a circuit. + + Tor replies with "250 OK" on success. + +3.16. CLOSESTREAM + + Sent from the client to the server. The syntax is: + + "CLOSESTREAM" SP StreamID SP Reason *(SP Flag) CRLF + + Tells the server to close the specified stream. The reason should be one + of the Tor RELAY_END reasons given in tor-spec.txt, as a decimal. Flags is + not used currently; Tor servers SHOULD ignore unrecognized flags. Tor may + hold the stream open for a while to flush any data that is pending. + + Tor replies with "250 OK" on success, or a 512 if there aren't enough + arguments, or a 552 if it doesn't recognize the StreamID or reason. + +3.17. CLOSECIRCUIT + + The syntax is: + CLOSECIRCUIT SP CircuitID *(SP Flag) CRLF + Flag = "IfUnused" + + Tells the server to close the specified circuit. If "IfUnused" is + provided, do not close the circuit unless it is unused. + + Other flags may be defined in the future; Tor SHOULD ignore unrecognized + flags. + + Tor replies with "250 OK" on success, or a 512 if there aren't enough + arguments, or a 552 if it doesn't recognize the CircuitID. + +3.18. QUIT + + Tells the server to hang up on this controller connection. This command + can be used before authenticating. + +3.19. USEFEATURE + + The syntax is: + + "USEFEATURE" *(SP FeatureName) CRLF + FeatureName = 1*(ALPHA / DIGIT / "_" / "-") + + Sometimes extensions to the controller protocol break compatibility with + older controllers. In this case, whenever possible, the extensions are + first included in Tor disabled by default, and only enabled on a given + controller connection when the "USEFEATURE" command is given. Once a + "USEFEATURE" command is given, it applies to all subsequent interactions on + the same connection; to disable an enabled feature, a new controller + connection must be opened. + + This is a forward-compatibility mechanism; each feature will eventually + become a regular part of the control protocol in some future version of Tor. + Tor will ignore a request to use any feature that is already on by default. + Tor will give a "552" error if any requested feature is not recognized. + + Feature names are case-insensitive. + + EXTENDED_EVENTS + + Same as passing 'EXTENDED' to SETEVENTS; this is the preferred way to + request the extended event syntax. + + This feature was first used in 0.1.2.3-alpha. It is always-on in + Tor 0.2.2.1-alpha and later. + + VERBOSE_NAMES + + Instead of ServerID as specified above, the controller should + identify ORs by LongName in events and GETINFO results. This format is + strictly more informative: rather than including Nickname for + known Named routers and Fingerprint for unknown or unNamed routers, the + LongName format includes a Fingerprint, an indication of Named status, + and a Nickname (if one is known). + + This will not be always-enabled until at least two stable + releases after 0.1.2.2-alpha, the release where it was first + available. It is always-on in Tor 0.2.2.1-alpha and later. + +3.20. RESOLVE + + The syntax is + "RESOLVE" *Option *Address CRLF + Option = "mode=reverse" + Address = a hostname or IPv4 address + + This command launches a remote hostname lookup request for every specified + request (or reverse lookup if "mode=reverse" is specified). Note that the + request is done in the background: to see the answers, your controller will + need to listen for ADDRMAP events; see 4.1.7 below. 
+ + [Added in Tor 0.2.0.3-alpha] + +3.21. PROTOCOLINFO + + The syntax is: + "PROTOCOLINFO" *(SP PIVERSION) CRLF + + The server reply format is: + "250-PROTOCOLINFO" SP PIVERSION CRLF *InfoLine "250 OK" CRLF + + InfoLine = AuthLine / VersionLine / OtherLine + + AuthLine = "250-AUTH" SP "METHODS=" AuthMethod *(",")AuthMethod + *(SP "COOKIEFILE=" AuthCookieFile) CRLF + VersionLine = "250-VERSION" SP "Tor=" TorVersion [SP Arguments] CRLF + + AuthMethod = + "NULL" / ; No authentication is required + "HASHEDPASSWORD" / ; A controller must supply the original password + "COOKIE" / ; A controller must supply the contents of a cookie + + AuthCookieFile = QuotedString + TorVersion = QuotedString + + OtherLine = "250-" Keyword [SP Arguments] CRLF + + PIVERSION: 1*DIGIT + + Tor MAY give its InfoLines in any order; controllers MUST ignore InfoLines + with keywords they do not recognize. Controllers MUST ignore extraneous + data on any InfoLine. + + PIVERSION is there in case we drastically change the syntax one day. For + now it should always be "1". Controllers MAY provide a list of the + protocolinfo versions they support; Tor MAY select a version that the + controller does not support. + + AuthMethod is used to specify one or more control authentication + methods that Tor currently accepts. + + AuthCookieFile specifies the absolute path and filename of the + authentication cookie that Tor is expecting and is provided iff + the METHODS field contains the method "COOKIE". Controllers MUST handle + escape sequences inside this string. + + The VERSION line contains the Tor version. + + [Unlike other commands besides AUTHENTICATE, PROTOCOLINFO may be used (but + only once!) before AUTHENTICATE.] + + [PROTOCOLINFO was not supported before Tor 0.2.0.5-alpha.] + +4. Replies + + Reply codes follow the same 3-character format as used by SMTP, with the + first character defining a status, the second character defining a + subsystem, and the third designating fine-grained information. + + The TC protocol currently uses the following first characters: + + 2yz Positive Completion Reply + The command was successful; a new request can be started. + + 4yz Temporary Negative Completion reply + The command was unsuccessful but might be reattempted later. + + 5yz Permanent Negative Completion Reply + The command was unsuccessful; the client should not try exactly + that sequence of commands again. + + 6yz Asynchronous Reply + Sent out-of-order in response to an earlier SETEVENTS command. + + The following second characters are used: + + x0z Syntax + Sent in response to ill-formed or nonsensical commands. + + x1z Protocol + Refers to operations of the Tor Control protocol. + + x5z Tor + Refers to actual operations of Tor system. + + The following codes are defined: + + 250 OK + 251 Operation was unnecessary + [Tor has declined to perform the operation, but no harm was done.] + + 451 Resource exhausted + + 500 Syntax error: protocol + + 510 Unrecognized command + 511 Unimplemented command + 512 Syntax error in command argument + 513 Unrecognized command argument + 514 Authentication required + 515 Bad authentication + + 550 Unspecified Tor error + + 551 Internal error + [Something went wrong inside Tor, so that the client's + request couldn't be fulfilled.] + + 552 Unrecognized entity + [A configuration key, a stream ID, circuit ID, event, + mentioned in the command did not actually exist.] 
+ + 553 Invalid configuration value + [The client tried to set a configuration option to an + incorrect, ill-formed, or impossible value.] + + 554 Invalid descriptor + + 555 Unmanaged entity + + 650 Asynchronous event notification + + Unless specified to have specific contents, the human-readable messages + in error replies should not be relied upon to match those in this document. + +4.1. Asynchronous events + + These replies can be sent after a corresponding SETEVENTS command has been + received. They will not be interleaved with other Reply elements, but they + can appear between a command and its corresponding reply. For example, + this sequence is possible: + + C: SETEVENTS CIRC + S: 250 OK + C: GETCONF SOCKSPORT ORPORT + S: 650 CIRC 1000 EXTENDED moria1,moria2 + S: 250-SOCKSPORT=9050 + S: 250 ORPORT=0 + + But this sequence is disallowed: + C: SETEVENTS CIRC + S: 250 OK + C: GETCONF SOCKSPORT ORPORT + S: 250-SOCKSPORT=9050 + S: 650 CIRC 1000 EXTENDED moria1,moria2 + S: 250 ORPORT=0 + + Clients MUST tolerate more arguments in an asynchonous reply than + expected, and MUST tolerate more lines in an asynchronous reply than + expected. For instance, a client that expects a CIRC message like: + 650 CIRC 1000 EXTENDED moria1,moria2 + must tolerate: + 650-CIRC 1000 EXTENDED moria1,moria2 0xBEEF + 650-EXTRAMAGIC=99 + 650 ANONYMITY=high + + If clients ask for extended events, then each event line as specified below + will be followed by additional extensions. Additional lines will be of the + form + "650" ("-"/" ") KEYWORD ["=" ARGUMENTS] CRLF + Additional arguments will be of the form + SP KEYWORD ["=" ( QuotedString / * NonSpDquote ) ] + Such clients MUST tolerate lines with keywords they do not recognize. + +4.1.1. Circuit status changed + + The syntax is: + + "650" SP "CIRC" SP CircuitID SP CircStatus [SP Path] + [SP "REASON=" Reason [SP "REMOTE_REASON=" Reason]] CRLF + + CircStatus = + "LAUNCHED" / ; circuit ID assigned to new circuit + "BUILT" / ; all hops finished, can now accept streams + "EXTENDED" / ; one more hop has been completed + "FAILED" / ; circuit closed (was not built) + "CLOSED" ; circuit closed (was built) + + Path = ServerID *("," ServerID) + + Reason = "NONE" / "TORPROTOCOL" / "INTERNAL" / "REQUESTED" / + "HIBERNATING" / "RESOURCELIMIT" / "CONNECTFAILED" / + "OR_IDENTITY" / "OR_CONN_CLOSED" / "TIMEOUT" / + "FINISHED" / "DESTROYED" / "NOPATH" / "NOSUCHSERVICE" + + The path is provided only when the circuit has been extended at least one + hop. + + The "REASON" field is provided only for FAILED and CLOSED events, and only + if extended events are enabled (see 3.19). Clients MUST accept reasons + not listed above. Reasons are as given in tor-spec.txt, except for: + + NOPATH (Not enough nodes to make circuit) + + The "REMOTE_REASON" field is provided only when we receive a DESTROY or + TRUNCATE cell, and only if extended events are enabled. It contains the + actual reason given by the remote OR for closing the circuit. Clients MUST + accept reasons not listed above. Reasons are as listed in tor-spec.txt. + +4.1.2. 
Stream status changed + + The syntax is: + + "650" SP "STREAM" SP StreamID SP StreamStatus SP CircID SP Target + [SP "REASON=" Reason [ SP "REMOTE_REASON=" Reason ]] + [SP "SOURCE=" Source] [ SP "SOURCE_ADDR=" Address ":" Port ] + [SP "PURPOSE=" Purpose] + CRLF + + StreamStatus = + "NEW" / ; New request to connect + "NEWRESOLVE" / ; New request to resolve an address + "REMAP" / ; Address re-mapped to another + "SENTCONNECT" / ; Sent a connect cell along a circuit + "SENTRESOLVE" / ; Sent a resolve cell along a circuit + "SUCCEEDED" / ; Received a reply; stream established + "FAILED" / ; Stream failed and not retriable + "CLOSED" / ; Stream closed + "DETACHED" ; Detached from circuit; still retriable + + Target = Address ":" Port + + The circuit ID designates which circuit this stream is attached to. If + the stream is unattached, the circuit ID "0" is given. + + Reason = "MISC" / "RESOLVEFAILED" / "CONNECTREFUSED" / + "EXITPOLICY" / "DESTROY" / "DONE" / "TIMEOUT" / + "HIBERNATING" / "INTERNAL"/ "RESOURCELIMIT" / + "CONNRESET" / "TORPROTOCOL" / "NOTDIRECTORY" / "END" + + The "REASON" field is provided only for FAILED, CLOSED, and DETACHED + events, and only if extended events are enabled (see 3.19). Clients MUST + accept reasons not listed above. Reasons are as given in tor-spec.txt, + except for: + + END (We received a RELAY_END cell from the other side of this + stream.) + [XXXX document more. -NM] + + The "REMOTE_REASON" field is provided only when we receive a RELAY_END + cell, and only if extended events are enabled. It contains the actual + reason given by the remote OR for closing the stream. Clients MUST accept + reasons not listed above. Reasons are as listed in tor-spec.txt. + + "REMAP" events include a Source if extended events are enabled: + Source = "CACHE" / "EXIT" + Clients MUST accept sources not listed above. "CACHE" is given if + the Tor client decided to remap the address because of a cached + answer, and "EXIT" is given if the remote node we queried gave us + the new address as a response. + + The "SOURCE_ADDR" field is included with NEW and NEWRESOLVE events if + extended events are enabled. It indicates the address and port + that requested the connection, and can be (e.g.) used to look up the + requesting program. + + Purpose = "DIR_FETCH" / "UPLOAD_DESC" / "DNS_REQUEST" / + "USER" / "DIRPORT_TEST" + + The "PURPOSE" field is provided only for NEW and NEWRESOLVE events, and + only if extended events are enabled (see 3.19). Clients MUST accept + purposes not listed above. + +4.1.3. OR Connection status changed + + The syntax is: + "650" SP "ORCONN" SP (ServerID / Target) SP ORStatus [ SP "REASON=" + Reason ] [ SP "NCIRCS=" NumCircuits ] CRLF + + ORStatus = "NEW" / "LAUNCHED" / "CONNECTED" / "FAILED" / "CLOSED" + + NEW is for incoming connections, and LAUNCHED is for outgoing + connections. CONNECTED means the TLS handshake has finished (in + either direction). FAILED means a connection is being closed that + hasn't finished its handshake, and CLOSED is for connections that + have handshaked. + + A ServerID is specified unless it's a NEW connection, in which + case we don't know what server it is yet, so we use Address:Port. + + If extended events are enabled (see 3.19), optional reason and + circuit counting information is provided for CLOSED and FAILED + events. + + Reason = "MISC" / "DONE" / "CONNECTREFUSED" / + "IDENTITY" / "CONNECTRESET" / "TIMEOUT" / "NOROUTE" / + "IOERROR" / "RESOURCELIMIT" + + NumCircuits counts both established and pending circuits. + +4.1.4. 
Bandwidth used in the last second + + The syntax is: + "650" SP "BW" SP BytesRead SP BytesWritten *(SP Type "=" Num) CRLF + BytesRead = 1*DIGIT + BytesWritten = 1*DIGIT + Type = "DIR" / "OR" / "EXIT" / "APP" / ... + Num = 1*DIGIT + + BytesRead and BytesWritten are the totals. [In a future Tor version, + we may also include a breakdown of the connection types that used + bandwidth this second (not implemented yet).] + +4.1.5. Log messages + + The syntax is: + "650" SP Severity SP ReplyText CRLF + or + "650+" Severity CRLF Data 650 SP "OK" CRLF + + Severity = "DEBUG" / "INFO" / "NOTICE" / "WARN"/ "ERR" + +4.1.6. New descriptors available + + Syntax: + "650" SP "NEWDESC" 1*(SP ServerID) CRLF + +4.1.7. New Address mapping + + Syntax: + "650" SP "ADDRMAP" SP Address SP NewAddress SP Expiry + [SP Error] SP GMTExpiry CRLF + + NewAddress = Address / "" + Expiry = DQUOTE ISOTime DQUOTE / "NEVER" + + Error = "error=" ErrorCode + ErrorCode = XXXX + GMTExpiry = "EXPIRES=" DQUOTE IsoTime DQUOTE + + Error and GMTExpiry are only provided if extended events are enabled. + + Expiry is expressed as the local time (rather than GMT). This is a bug, + left in for backward compatibility; new code should look at GMTExpiry + instead. + + These events are generated when a new address mapping is entered in the + cache, or when the answer for a RESOLVE command is found. + +4.1.8. Descriptors uploaded to us in our role as authoritative dirserver + + Syntax: + "650" "+" "AUTHDIR_NEWDESCS" CRLF Action CRLF Message CRLF + Descriptor CRLF "." CRLF "650" SP "OK" CRLF + Action = "ACCEPTED" / "DROPPED" / "REJECTED" + Message = Text + +4.1.9. Our descriptor changed + + Syntax: + "650" SP "DESCCHANGED" CRLF + + [First added in 0.1.2.2-alpha.] + +4.1.10. Status events + + Status events (STATUS_GENERAL, STATUS_CLIENT, and STATUS_SERVER) are sent + based on occurrences in the Tor process pertaining to the general state of + the program. Generally, they correspond to log messages of severity Notice + or higher. They differ from log messages in that their format is a + specified interface. + + Syntax: + "650" SP StatusType SP StatusSeverity SP StatusAction + [SP StatusArguments] CRLF + + StatusType = "STATUS_GENERAL" / "STATUS_CLIENT" / "STATUS_SERVER" + StatusSeverity = "NOTICE" / "WARN" / "ERR" + StatusAction = 1*ALPHA + StatusArguments = StatusArgument *(SP StatusArgument) + StatusArgument = StatusKeyword '=' StatusValue + StatusKeyword = 1*(ALNUM / "_") + StatusValue = 1*(ALNUM / '_') / QuotedString + + Action is a string, and Arguments is a series of keyword=value + pairs on the same line. Values may be space-terminated strings, + or quoted strings. + + These events are always produced with EXTENDED_EVENTS and + VERBOSE_NAMES; see the explanations in the USEFEATURE section + for details. + + Controllers MUST tolerate unrecognized actions, MUST tolerate + unrecognized arguments, MUST tolerate missing arguments, and MUST + tolerate arguments that arrive in any order. + + Each event description below is accompanied by a recommendation for + controllers. These recommendations are suggestions only; no controller + is required to implement them. + + Compatibility note: versions of Tor before 0.2.0.22-rc incorrectly + generated "STATUS_SERVER" as "STATUS_SEVER". To be compatible with those + versions, tools should accept both. + + Actions for STATUS_GENERAL events can be as follows: + + CLOCK_JUMPED + "TIME=NUM" + Tor spent enough time without CPU cycles that it has closed all + its circuits and will establish them anew. 
This typically + happens when a laptop goes to sleep and then wakes up again. It + also happens when the system is swapping so heavily that Tor is + starving. The "time" argument specifies the number of seconds Tor + thinks it was unconscious for (or alternatively, the number of + seconds it went back in time). + + This status event is sent as NOTICE severity normally, but WARN + severity if Tor is acting as a server currently. + + {Recommendation for controller: ignore it, since we don't really + know what the user should do anyway. Hm.} + + DANGEROUS_VERSION + "CURRENT=version" + "REASON=NEW/OBSOLETE/UNRECOMMENDED" + "RECOMMENDED=\"version, version, ...\"" + Tor has found that directory servers don't recommend its version of + the Tor software. RECOMMENDED is a comma-and-space-separated string + of Tor versions that are recommended. REASON is NEW if this version + of Tor is newer than any recommended version, OBSOLETE if + this version of Tor is older than any recommended version, and + UNRECOMMENDED if some recommended versions of Tor are newer and + some are older than this version. (The "OBSOLETE" reason was called + "OLD" from Tor 0.1.2.3-alpha up to and including 0.2.0.12-alpha.) + + {Controllers may want to suggest that the user upgrade OLD or + UNRECOMMENDED versions. NEW versions may be known-insecure, or may + simply be development versions.} + + TOO_MANY_CONNECTIONS + "CURRENT=NUM" + Tor has reached its ulimit -n or whatever the native limit is on file + descriptors or sockets. CURRENT is the number of sockets Tor + currently has open. The user should really do something about + this. The "current" argument shows the number of connections currently + open. + + {Controllers may recommend that the user increase the limit, or + increase it for them. Recommendations should be phrased in an + OS-appropriate way and automated when possible.} + + BUG + "REASON=STRING" + Tor has encountered a situation that its developers never expected, + and the developers would like to learn that it happened. Perhaps + the controller can explain this to the user and encourage her to + file a bug report? + + {Controllers should log bugs, but shouldn't annoy the user in case a + bug appears frequently.} + + CLOCK_SKEW + SKEW="+" / "-" SECONDS + MIN_SKEW="+" / "-" SECONDS. + SOURCE="DIRSERV:" IP ":" Port / + "NETWORKSTATUS:" IP ":" Port / + "OR:" IP ":" Port / + "CONSENSUS" + If "SKEW" is present, it's an estimate of how far we are from the + time declared in the source. (In other words, if we're an hour in + the past, the value is -3600.) "MIN_SKEW" is present, it's a lower + bound. If the source is a DIRSERV, we got the current time from a + connection to a dirserver. If the source is a NETWORKSTATUS, we + decided we're skewed because we got a v2 networkstatus from far in + the future. If the source is OR, the skew comes from a NETINFO + cell from a connection to another relay. If the source is + CONSENSUS, we decided we're skewed because we got a networkstatus + consensus from the future. + + {Tor should send this message to controllers when it thinks the + skew is so high that it will interfere with proper Tor operation. + Controllers shouldn't blindly adjust the clock, since the more + accurate source of skew info (DIRSERV) is currently + unauthenticated.} + + BAD_LIBEVENT + "METHOD=" libevent method + "VERSION=" libevent version + "BADNESS=" "BROKEN" / "BUGGY" / "SLOW" + "RECOVERED=" "NO" / "YES" + Tor knows about bugs in using the configured event method in this + version of libevent. 
"BROKEN" libevents won't work at all; + "BUGGY" libevents might work okay; "SLOW" libevents will work + fine, but not quickly. If "RECOVERED" is YES, Tor managed to + switch to a more reliable (but probably slower!) libevent method. + + {Controllers may want to warn the user if this event occurs, though + generally it's the fault of whoever built the Tor binary and there's + not much the user can do besides upgrade libevent or upgrade the + binary.} + + DIR_ALL_UNREACHABLE + Tor believes that none of the known directory servers are + reachable -- this is most likely because the local network is + down or otherwise not working, and might help to explain for the + user why Tor appears to be broken. + + {Controllers may want to warn the user if this event occurs; further + action is generally not possible.} + + CONSENSUS_ARRIVED + Tor has received and validated a new consensus networkstatus. + (This event can be delayed a little while after the consensus + is received, if Tor needs to fetch certificates.) + + Actions for STATUS_CLIENT events can be as follows: + + BOOTSTRAP + "PROGRESS=" num + "TAG=" Keyword + "SUMMARY=" String + ["WARNING=" String + "REASON=" Keyword + "COUNT=" num + "RECOMMENDATION=" Keyword + ] + + Tor has made some progress at establishing a connection to the + Tor network, fetching directory information, or making its first + circuit; or it has encountered a problem while bootstrapping. This + status event is especially useful for users with slow connections + or with connectivity problems. + + "Progress" gives a number between 0 and 100 for how far through + the bootstrapping process we are. "Summary" is a string that can + be displayed to the user to describe the *next* task that Tor + will tackle, i.e., the task it is working on after sending the + status event. "Tag" is a string that controllers can use to + recognize bootstrap phases, if they want to do something smarter + than just blindly displaying the summary string; see Section 5 + for the current tags that Tor issues. + + The StatusSeverity describes whether this is a normal bootstrap + phase (severity notice) or an indication of a bootstrapping + problem (severity warn). + + For bootstrap problems, we include the same progress, tag, and + summary values as we would for a normal bootstrap event, but we + also include "warning", "reason", "count", and "recommendation" + key/value combos. The "count" number tells how many bootstrap + problems there have been so far at this phase. The "reason" + string lists one of the reasons allowed in the ORCONN event. The + "warning" argument string with any hints Tor has to offer about + why it's having troubles bootstrapping. + + The "reason" values are long-term-stable controller-facing tags to + identify particular issues in a bootstrapping step. The warning + strings, on the other hand, are human-readable. Controllers + SHOULD NOT rely on the format of any warning string. Currently + the possible values for "recommendation" are either "ignore" or + "warn" -- if ignore, the controller can accumulate the string in + a pile of problems to show the user if the user asks; if warn, + the controller should alert the user that Tor is pretty sure + there's a bootstrapping problem. + + Currently Tor uses recommendation=ignore for the first + nine bootstrap problem reports for a given phase, and then + uses recommendation=warn for subsequent problems at that + phase. Hopefully this is a good balance between tolerating + occasional errors and reporting serious problems quickly. 
+ + ENOUGH_DIR_INFO + Tor now knows enough network-status documents and enough server + descriptors that it's going to start trying to build circuits now. + + {Controllers may want to use this event to decide when to indicate + progress to their users, but should not interrupt the user's browsing + to tell them so.} + + NOT_ENOUGH_DIR_INFO + We discarded expired statuses and router descriptors to fall + below the desired threshold of directory information. We won't + try to build any circuits until ENOUGH_DIR_INFO occurs again. + + {Controllers may want to use this event to decide when to indicate + progress to their users, but should not interrupt the user's browsing + to tell them so.} + + CIRCUIT_ESTABLISHED + Tor is able to establish circuits for client use. This event will + only be sent if we just built a circuit that changed our mind -- + that is, prior to this event we didn't know whether we could + establish circuits. + + {Suggested use: controllers can notify their users that Tor is + ready for use as a client once they see this status event. [Perhaps + controllers should also have a timeout if too much time passes and + this event hasn't arrived, to give tips on how to troubleshoot. + On the other hand, hopefully Tor will send further status events + if it can identify the problem.]} + + CIRCUIT_NOT_ESTABLISHED + "REASON=" "EXTERNAL_ADDRESS" / "DIR_ALL_UNREACHABLE" / "CLOCK_JUMPED" + We are no longer confident that we can build circuits. The "reason" + keyword provides an explanation: which other status event type caused + our lack of confidence. + + {Controllers may want to use this event to decide when to indicate + progress to their users, but should not interrupt the user's browsing + to do so.} + [Note: only REASON=CLOCK_JUMPED is implemented currently.] + + DANGEROUS_PORT + "PORT=" port + "RESULT=" "REJECT" / "WARN" + A stream was initiated to a port that's commonly used for + vulnerable-plaintext protocols. If the Result is "reject", we + refused the connection; whereas if it's "warn", we allowed it. + + {Controllers should warn their users when this occurs, unless they + happen to know that the application using Tor is in fact doing so + correctly (e.g., because it is part of a distributed bundle). They + might also want some sort of interface to let the user configure + their RejectPlaintextPorts and WarnPlaintextPorts config options.} + + DANGEROUS_SOCKS + "PROTOCOL=" "SOCKS4" / "SOCKS5" + "ADDRESS=" IP:port + A connection was made to Tor's SOCKS port using one of the SOCKS + approaches that doesn't support hostnames -- only raw IP addresses. + If the client application got this address from gethostbyname(), + it may be leaking target addresses via DNS. + + {Controllers should warn their users when this occurs, unless they + happen to know that the application using Tor is in fact doing so + correctly (e.g., because it is part of a distributed bundle).} + + SOCKS_UNKNOWN_PROTOCOL + "DATA=string" + A connection was made to Tor's SOCKS port that tried to use it + for something other than the SOCKS protocol. Perhaps the user is + using Tor as an HTTP proxy? The DATA is the first few characters + sent to Tor on the SOCKS port. + + {Controllers may want to warn their users when this occurs: it + indicates a misconfigured application.} + + SOCKS_BAD_HOSTNAME + "HOSTNAME=QuotedString" + Some application gave us a funny-looking hostname. Perhaps + it is broken? In any case it won't work with Tor and the user + should know. 
+ + {Controllers may want to warn their users when this occurs: it + usually indicates a misconfigured application.} + + Actions for STATUS_SERVER can be as follows: + + EXTERNAL_ADDRESS + "ADDRESS=IP" + "HOSTNAME=NAME" + "METHOD=CONFIGURED/DIRSERV/RESOLVED/INTERFACE/GETHOSTNAME" + Our best idea for our externally visible IP has changed to 'IP'. + If 'HOSTNAME' is present, we got the new IP by resolving 'NAME'. If the + method is 'CONFIGURED', the IP was given verbatim as a configuration + option. If the method is 'RESOLVED', we resolved the Address + configuration option to get the IP. If the method is 'GETHOSTNAME', + we resolved our hostname to get the IP. If the method is 'INTERFACE', + we got the address of one of our network interfaces to get the IP. If + the method is 'DIRSERV', a directory server told us a guess for what + our IP might be. + + {Controllers may want to record this info and display it to the user.} + + CHECKING_REACHABILITY + "ORADDRESS=IP:port" + "DIRADDRESS=IP:port" + We're going to start testing the reachability of our external OR port + or directory port. + + {This event could affect the controller's idea of server status, but + the controller should not interrupt the user to tell them so.} + + REACHABILITY_SUCCEEDED + "ORADDRESS=IP:port" + "DIRADDRESS=IP:port" + We successfully verified the reachability of our external OR port or + directory port (depending on which of ORADDRESS or DIRADDRESS is + given.) + + {This event could affect the controller's idea of server status, but + the controller should not interrupt the user to tell them so.} + + GOOD_SERVER_DESCRIPTOR + We successfully uploaded our server descriptor to at least one + of the directory authorities, with no complaints. + + {Originally, the goal of this event was to declare "every authority + has accepted the descriptor, so there will be no complaints + about it." But since some authorities might be offline, it's + harder to get certainty than we had thought. As such, this event + is equivalent to ACCEPTED_SERVER_DESCRIPTOR below. Controllers + should just look at ACCEPTED_SERVER_DESCRIPTOR and should ignore + this event for now.} + + NAMESERVER_STATUS + "NS=addr" + "STATUS=" "UP" / "DOWN" + "ERR=" message + One of our nameservers has changed status. + + {This event could affect the controller's idea of server status, but + the controller should not interrupt the user to tell them so.} + + NAMESERVER_ALL_DOWN + All of our nameservers have gone down. + + {This is a problem; if it happens often without the nameservers + coming up again, the user needs to configure more or better + nameservers.} + + DNS_HIJACKED + Our DNS provider is providing an address when it should be saying + "NOTFOUND"; Tor will treat the address as a synonym for "NOTFOUND". + + {This is an annoyance; controllers may want to tell admins that their + DNS provider is not to be trusted.} + + DNS_USELESS + Our DNS provider is giving a hijacked address instead of well-known + websites; Tor will not try to be an exit node. + + {Controllers could warn the admin if the server is running as an + exit server: the admin needs to configure a good DNS server. + Alternatively, this happens a lot in some restrictive environments + (hotels, universities, coffeeshops) when the user hasn't registered.} + + BAD_SERVER_DESCRIPTOR + "DIRAUTH=addr:port" + "REASON=string" + A directory authority rejected our descriptor. Possible reasons + include malformed descriptors, incorrect keys, highly skewed clocks, + and so on. 
+ + {Controllers should warn the admin, and try to cope if they can.} + + ACCEPTED_SERVER_DESCRIPTOR + "DIRAUTH=addr:port" + A single directory authority accepted our descriptor. + // actually notice + + {This event could affect the controller's idea of server status, but + the controller should not interrupt the user to tell them so.} + + REACHABILITY_FAILED + "ORADDRESS=IP:port" + "DIRADDRESS=IP:port" + We failed to connect to our external OR port or directory port + successfully. + + {This event could affect the controller's idea of server status. The + controller should warn the admin and suggest reasonable steps to take.} + +4.1.11. Our set of guard nodes has changed + + Syntax: + "650" SP "GUARD" SP Type SP Name SP Status ... CRLF + Type = "ENTRY" + Name = The (possibly verbose) nickname of the guard affected. + Status = "NEW" | "UP" | "DOWN" | "BAD" | "GOOD" | "DROPPED" + + [explain states. XXX] + +4.1.12. Network status has changed + + Syntax: + "650" "+" "NS" CRLF 1*NetworkStatus "." CRLF "650" SP "OK" CRLF + + The event is used whenever our local view of a relay status changes. + This happens when we get a new v3 consensus (in which case the entries + we see are a duplicate of what we see in the NEWCONSENSUS event, + below), but it also happens when we decide to mark a relay as up or + down in our local status, for example based on connection attempts. + + [First added in 0.1.2.3-alpha] + +4.1.13. Bandwidth used on an application stream + + The syntax is: + "650" SP "STREAM_BW" SP StreamID SP BytesRead SP BytesWritten CRLF + BytesRead = 1*DIGIT + BytesWritten = 1*DIGIT + + BytesRead and BytesWritten are the number of bytes read and written since + the last STREAM_BW event on this stream. These events are generated about + once per second per stream; no events are generated for streams that have + not read or written. + + These events apply only to streams entering Tor (such as on a SOCKSPort, + TransPort, or so on). They are not generated for exiting streams. + +4.1.14. Per-country client stats + + The syntax is: + "650" SP "CLIENTS_SEEN" SP TimeStarted SP CountrySummary CRLF + + We just generated a new summary of which countries we've seen clients + from recently. The controller could display this for the user, e.g. + in their "relay" configuration window, to give them a sense that they + are actually being useful. + + Currently only bridge relays will receive this event, but once we figure + out how to sufficiently aggregate and sanitize the client counts on + main relays, we might start sending these events in other cases too. + + TimeStarted is a quoted string indicating when the reported summary + counts from (in GMT). + + The CountrySummary keyword has as its argument a comma-separated + set of "countrycode=count" pairs. For example, + 650-CLIENTS_SEEN TimeStarted="Thu Dec 25 23:50:43 EST 2008" + 650 CountrySummary=us=16,de=8,uk=8 +[XXX Matt Edman informs me that the time format above is wrong. -RD] + +4.1.15. New consensus networkstatus has arrived. + + The syntax is: + "650" "+" "NEWCONSENSUS" CRLF 1*NetworkStatus "." CRLF "650" SP + "OK" CRLF + + A new consensus networkstatus has arrived. We include NS-style lines for + every relay in the consensus. NEWCONSENSUS is a separate event from the + NS event, because the list here represents every usable relay: so any + relay *not* mentioned in this list is implicitly no longer recommended. + + [First added in 0.2.1.13-alpha] + +5. Implementation notes + +5.1. 
Authentication + + If the control port is open and no authentication operation is enabled, Tor + trusts any local user that connects to the control port. This is generally + a poor idea. + + If the 'CookieAuthentication' option is true, Tor writes a "magic cookie" + file named "control_auth_cookie" into its data directory. To authenticate, + the controller must send the contents of this file, encoded in hexadecimal. + + If the 'HashedControlPassword' option is set, it must contain the salted + hash of a secret password. The salted hash is computed according to the + S2K algorithm in RFC 2440 (OpenPGP), and prefixed with the s2k specifier. + This is then encoded in hexadecimal, prefixed by the indicator sequence + "16:". Thus, for example, the password 'foo' could encode to: + 16:660537E3E1CD49996044A3BF558097A981F539FEA2F9DA662B4626C1C2 + ++++++++++++++++**^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + salt hashed value + indicator + You can generate the salt of a password by calling + 'tor --hash-password ' + or by using the example code in the Python and Java controller libraries. + To authenticate under this scheme, the controller sends Tor the original + secret that was used to generate the password, either as a quoted string + or encoded in hexadecimal. + +5.2. Don't let the buffer get too big. + + If you ask for lots of events, and 16MB of them queue up on the buffer, + the Tor process will close the socket. + +5.3. Backward compatibility with v0 control protocol. + + The 'version 0' control protocol was replaced in Tor 0.1.1.x. Support + was removed in Tor 0.2.0.x. Every non-obsolete version of Tor now + supports the version 1 control protocol. + + For backward compatibility with the "version 0" control protocol, + Tor used to check whether the third octet of the first command is zero. + (If it was, Tor assumed that version 0 is in use.) + + This compatibility was removed in Tor 0.1.2.16 and 0.2.0.4-alpha. + +5.4. Tor config options for use by controllers + + Tor provides a few special configuration options for use by controllers. + These options can be set and examined by the SETCONF and GETCONF commands, + but are not saved to disk by SAVECONF. + + Generally, these options make Tor unusable by disabling a portion of Tor's + normal operations. Unless a controller provides replacement functionality + to fill this gap, Tor will not correctly handle user requests. + + __AllDirOptionsPrivate + + If true, Tor will try to launch all directory operations through + anonymous connections. (Ordinarily, Tor only tries to anonymize + requests related to hidden services.) This option will slow down + directory access, and may stop Tor from working entirely if it does not + yet have enough directory information to build circuits. + + (Boolean. Default: "0".) + + __DisablePredictedCircuits + + If true, Tor will not launch preemptive "general-purpose" circuits for + streams to attach to. (It will still launch circuits for testing and + for hidden services.) + + (Boolean. Default: "0".) + + __LeaveStreamsUnattached + + If true, Tor will not automatically attach new streams to circuits; + instead, the controller must attach them with ATTACHSTREAM. If the + controller does not attach the streams, their data will never be routed. + + (Boolean. Default: "0".) + + __HashedControlSessionPassword + + As HashedControlPassword, but is not saved to the torrc file by + SAVECONF. Added in Tor 0.2.0.20-rc. 
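+
+  [Non-normative illustration.  A Java sketch of producing a value suitable
+  for HashedControlPassword or __HashedControlSessionPassword, following the
+  salted-hash scheme of section 5.1 above.  It assumes the RFC 2440
+  iterated-salted S2K with count byte 0x60 (65536 octets hashed), which is
+  what current Tor uses by default, a fresh 8-byte salt, and a UTF-8 encoded
+  password; treat it as a sketch of the scheme, not a reference
+  implementation.]
+
+    import java.nio.charset.StandardCharsets;
+    import java.security.MessageDigest;
+    import java.security.NoSuchAlgorithmException;
+    import java.security.SecureRandom;
+
+    public class HashedControlPassword {
+        // Returns "16:" + hex(salt) + hex(specifier byte) + hex(SHA-1 digest).
+        static String hash(String password) throws NoSuchAlgorithmException {
+            byte[] salt = new byte[8];
+            new SecureRandom().nextBytes(salt);
+            int specifier = 0x60;                    // assumed default count byte
+            int count = (16 + (specifier & 15)) << ((specifier >> 4) + 6);  // 65536
+
+            byte[] pw = password.getBytes(StandardCharsets.UTF_8);
+            byte[] secret = new byte[salt.length + pw.length];
+            System.arraycopy(salt, 0, secret, 0, salt.length);
+            System.arraycopy(pw, 0, secret, salt.length, pw.length);
+
+            // Iterated-salted S2K: feed salt||password into SHA-1 repeatedly
+            // until `count` octets have been hashed, truncating the last pass.
+            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+            int hashed = 0;
+            while (hashed < count) {
+                int n = Math.min(secret.length, count - hashed);
+                sha1.update(secret, 0, n);
+                hashed += n;
+            }
+            return "16:" + hex(salt) + String.format("%02X", specifier)
+                    + hex(sha1.digest());
+        }
+
+        private static String hex(byte[] bytes) {
+            StringBuilder sb = new StringBuilder();
+            for (byte b : bytes) sb.append(String.format("%02X", b & 0xff));
+            return sb.toString();
+        }
+
+        public static void main(String[] args) throws Exception {
+            // The result can be placed in the torrc as
+            //   HashedControlPassword 16:...
+            // or set at runtime with SETCONF __HashedControlSessionPassword=16:...
+            System.out.println(hash("foo"));
+        }
+    }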
+ + __ReloadTorrcOnSIGHUP + + If this option is true (the default), we reload the torrc from disk + every time we get a SIGHUP (from the controller or via a signal). + Otherwise, we don't. This option exists so that controllers can keep + their options from getting overwritten when a user sends Tor a HUP for + some other reason (for example, to rotate the logs). + + (Boolean. Default: "1") + +5.5. Phases from the Bootstrap status event. + + This section describes the various bootstrap phases currently reported + by Tor. Controllers should not assume that the percentages and tags + listed here will continue to match up, or even that the tags will stay + in the same order. Some phases might also be skipped (not reported) + if the associated bootstrap step is already complete, or if the phase + no longer is necessary. Only "starting" and "done" are guaranteed to + exist in all future versions. + + Current Tor versions enter these phases in order, monotonically. + Future Tors MAY revisit earlier stages. + + Phase 0: + tag=starting summary="Starting" + + Tor starts out in this phase. + + Phase 5: + tag=conn_dir summary="Connecting to directory mirror" + + Tor sends this event as soon as Tor has chosen a directory mirror -- + e.g. one of the authorities if bootstrapping for the first time or + after a long downtime, or one of the relays listed in its cached + directory information otherwise. + + Tor will stay at this phase until it has successfully established + a TCP connection with some directory mirror. Problems in this phase + generally happen because Tor doesn't have a network connection, or + because the local firewall is dropping SYN packets. + + Phase 10: + tag=handshake_dir summary="Finishing handshake with directory mirror" + + This event occurs when Tor establishes a TCP connection with a relay used + as a directory mirror (or its https proxy if it's using one). Tor remains + in this phase until the TLS handshake with the relay is finished. + + Problems in this phase generally happen because Tor's firewall is + doing more sophisticated MITM attacks on it, or doing packet-level + keyword recognition of Tor's handshake. + + Phase 15: + tag=onehop_create summary="Establishing one-hop circuit for dir info" + + Once TLS is finished with a relay, Tor will send a CREATE_FAST cell + to establish a one-hop circuit for retrieving directory information. + It will remain in this phase until it receives the CREATED_FAST cell + back, indicating that the circuit is ready. + + Phase 20: + tag=requesting_status summary="Asking for networkstatus consensus" + + Once we've finished our one-hop circuit, we will start a new stream + for fetching the networkstatus consensus. We'll stay in this phase + until we get the 'connected' relay cell back, indicating that we've + established a directory connection. + + Phase 25: + tag=loading_status summary="Loading networkstatus consensus" + + Once we've established a directory connection, we will start fetching + the networkstatus consensus document. This could take a while; this + phase is a good opportunity for using the "progress" keyword to indicate + partial progress. + + This phase could stall if the directory mirror we picked doesn't + have a copy of the networkstatus consensus so we have to ask another, + or it does give us a copy but we don't find it valid. 
+ + Phase 40: + tag=loading_keys summary="Loading authority key certs" + + Sometimes when we've finished loading the networkstatus consensus, + we find that we don't have all the authority key certificates for the + keys that signed the consensus. At that point we put the consensus we + fetched on hold and fetch the keys so we can verify the signatures. + + Phase 45 + tag=requesting_descriptors summary="Asking for relay descriptors" + + Once we have a valid networkstatus consensus and we've checked all + its signatures, we start asking for relay descriptors. We stay in this + phase until we have received a 'connected' relay cell in response to + a request for descriptors. + + Phase 50: + tag=loading_descriptors summary="Loading relay descriptors" + + We will ask for relay descriptors from several different locations, + so this step will probably make up the bulk of the bootstrapping, + especially for users with slow connections. We stay in this phase until + we have descriptors for at least 1/4 of the usable relays listed in + the networkstatus consensus. This phase is also a good opportunity to + use the "progress" keyword to indicate partial steps. + + Phase 80: + tag=conn_or summary="Connecting to entry guard" + + Once we have a valid consensus and enough relay descriptors, we choose + some entry guards and start trying to build some circuits. This step + is similar to the "conn_dir" phase above; the only difference is + the context. + + If a Tor starts with enough recent cached directory information, + its first bootstrap status event will be for the conn_or phase. + + Phase 85: + tag=handshake_or summary="Finishing handshake with entry guard" + + This phase is similar to the "handshake_dir" phase, but it gets reached + if we finish a TCP connection to a Tor relay and we have already reached + the "conn_or" phase. We'll stay in this phase until we complete a TLS + handshake with a Tor relay. + + Phase 90: + tag=circuit_create summary="Establishing circuits" + + Once we've finished our TLS handshake with an entry guard, we will + set about trying to make some 3-hop circuits in case we need them soon. + + Phase 100: + tag=done summary="Done" + + A full 3-hop exit circuit has been established. Tor is ready to handle + application connections now. + diff --git a/orchid/doc/spec/dir-spec.txt b/orchid/doc/spec/dir-spec.txt new file mode 100644 index 00000000..faa3a660 --- /dev/null +++ b/orchid/doc/spec/dir-spec.txt @@ -0,0 +1,2132 @@ + + Tor directory protocol, version 3 + +0. Scope and preliminaries + + This directory protocol is used by Tor version 0.2.0.x-alpha and later. + See dir-spec-v1.txt for information on the protocol used up to the + 0.1.0.x series, and dir-spec-v2.txt for information on the protocol + used by the 0.1.1.x and 0.1.2.x series. + + Caches and authorities must still support older versions of the + directory protocols, until the versions of Tor that require them are + finally out of commission. See Section XXXX on backward compatibility. + + This document merges and supersedes the following proposals: + + 101 Voting on the Tor Directory System + 103 Splitting identity key from regularly used signing key + 104 Long and Short Router Descriptors + + AS OF 14 JUNE 2007, THIS SPECIFICATION HAS NOT YET BEEN COMPLETELY + IMPLEMENTED, OR COMPLETELY COMPLETED. + + XXX when to download certificates. + XXX timeline + XXX fill in XXXXs + +0.1. History + + The earliest versions of Onion Routing shipped with a list of known + routers and their keys. 
When the set of routers changed, users needed to + fetch a new list. + + The Version 1 Directory protocol + -------------------------------- + + Early versions of Tor (0.0.2) introduced "Directory authorities": servers + that served signed "directory" documents containing a list of signed + "router descriptors", along with short summary of the status of each + router. Thus, clients could get up-to-date information on the state of + the network automatically, and be certain that the list they were getting + was attested by a trusted directory authority. + + Later versions (0.0.8) added directory caches, which download + directories from the authorities and serve them to clients. Non-caches + fetch from the caches in preference to fetching from the authorities, thus + distributing bandwidth requirements. + + Also added during the version 1 directory protocol were "router status" + documents: short documents that listed only the up/down status of the + routers on the network, rather than a complete list of all the + descriptors. Clients and caches would fetch these documents far more + frequently than they would fetch full directories. + + The Version 2 Directory Protocol + -------------------------------- + + During the Tor 0.1.1.x series, Tor revised its handling of directory + documents in order to address two major problems: + + * Directories had grown quite large (over 1MB), and most directory + downloads consisted mainly of router descriptors that clients + already had. + + * Every directory authority was a trust bottleneck: if a single + directory authority lied, it could make clients believe for a time + an arbitrarily distorted view of the Tor network. (Clients + trusted the most recent signed document they downloaded.) Thus, + adding more authorities would make the system less secure, not + more. + + To address these, we extended the directory protocol so that + authorities now published signed "network status" documents. Each + network status listed, for every router in the network: a hash of its + identity key, a hash of its most recent descriptor, and a summary of + what the authority believed about its status. Clients would download + the authorities' network status documents in turn, and believe + statements about routers iff they were attested to by more than half of + the authorities. + + Instead of downloading all router descriptors at once, clients + downloaded only the descriptors that they did not have. Descriptors + were indexed by their digests, in order to prevent malicious caches + from giving different versions of a router descriptor to different + clients. + + Routers began working harder to upload new descriptors only when their + contents were substantially changed. + + +0.2. Goals of the version 3 protocol + + Version 3 of the Tor directory protocol tries to solve the following + issues: + + * A great deal of bandwidth used to transmit router descriptors was + used by two fields that are not actually used by Tor routers + (namely read-history and write-history). We save about 60% by + moving them into a separate document that most clients do not + fetch or use. + + * It was possible under certain perverse circumstances for clients + to download an unusual set of network status documents, thus + partitioning themselves from clients who have a more recent and/or + typical set of documents. Even under the best of circumstances, + clients were sensitive to the ages of the network status documents + they downloaded. 
Therefore, instead of having the clients + correlate multiple network status documents, we have the + authorities collectively vote on a single consensus network status + document. + + * The most sensitive data in the entire network (the identity keys + of the directory authorities) needed to be stored unencrypted so + that the authorities can sign network-status documents on the fly. + Now, the authorities' identity keys are stored offline, and used + to certify medium-term signing keys that can be rotated. + +0.3. Some Remaining questions + + Things we could solve on a v3 timeframe: + + The SHA-1 hash is showing its age. We should do something about our + dependency on it. We could probably future-proof ourselves here in + this revision, at least so far as documents from the authorities are + concerned. + + Too many things about the authorities are hardcoded by IP. + + Perhaps we should start accepting longer identity keys for routers + too. + + Things to solve eventually: + + Requiring every client to know about every router won't scale forever. + + Requiring every directory cache to know every router won't scale + forever. + + +1. Outline + + There is a small set (say, around 5-10) of semi-trusted directory + authorities. A default list of authorities is shipped with the Tor + software. Users can change this list, but are encouraged not to do so, + in order to avoid partitioning attacks. + + Every authority has a very-secret, long-term "Authority Identity Key". + This is stored encrypted and/or offline, and is used to sign "key + certificate" documents. Every key certificate contains a medium-term + (3-12 months) "authority signing key", that is used by the authority to + sign other directory information. (Note that the authority identity + key is distinct from the router identity key that the authority uses + in its role as an ordinary router.) + + Routers periodically upload signed "routers descriptors" to the + directory authorities describing their keys, capabilities, and other + information. Routers may also upload signed "extra info documents" + containing information that is not required for the Tor protocol. + Directory authorities serve router descriptors indexed by router + identity, or by hash of the descriptor. + + Routers may act as directory caches to reduce load on the directory + authorities. They announce this in their descriptors. + + Periodically, each directory authority generates a view of + the current descriptors and status for known routers. They send a + signed summary of this view (a "status vote") to the other + authorities. The authorities compute the result of this vote, and sign + a "consensus status" document containing the result of the vote. + + Directory caches download, cache, and re-serve consensus documents. + + Clients, directory caches, and directory authorities all use consensus + documents to find out when their list of routers is out-of-date. + (Directory authorities also use vote statuses.) If it is, they download + any missing router descriptors. Clients download missing descriptors + from caches; caches and authorities download from authorities. + Descriptors are downloaded by the hash of the descriptor, not by the + server's identity key: this prevents servers from attacking clients by + giving them descriptors nobody else uses. + + All directory information is uploaded and downloaded with HTTP. 
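+
+  [Non-normative illustration.  A Java sketch of the client-side bookkeeping
+  described in this outline: deciding which router descriptors still have to
+  be downloaded, keyed by descriptor digest rather than by identity.  The
+  surrounding types are placeholders; the actual documents, URLs and formats
+  are specified later in this document.]
+
+    import java.util.ArrayList;
+    import java.util.List;
+    import java.util.Set;
+
+    public class DescriptorUpdater {
+        // One consensus entry: the router's identity and the digest of the
+        // descriptor the authorities saw for it.
+        public static class RouterStatus {
+            public final String identityHex;
+            public final String descriptorDigestHex;
+            public RouterStatus(String identityHex, String descriptorDigestHex) {
+                this.identityHex = identityHex;
+                this.descriptorDigestHex = descriptorDigestHex;
+            }
+        }
+
+        // Returns the descriptor digests listed in the consensus that are not
+        // in the local store yet.  Clients request exactly these digests from
+        // a cache; caches and authorities request them from the authorities.
+        // Fetching is by digest, never by identity, for the reason given above.
+        public static List<String> missingDescriptors(List<RouterStatus> consensus,
+                                                      Set<String> storedDigests) {
+            List<String> missing = new ArrayList<String>();
+            for (RouterStatus rs : consensus)
+                if (!storedDigests.contains(rs.descriptorDigestHex))
+                    missing.add(rs.descriptorDigestHex);
+            return missing;
+        }
+    }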
+ + [Authorities also generate and caches also cache documents produced and + used by earlier versions of this protocol; see section XXX for notes.] + +1.1. What's different from version 2? + + Clients used to download multiple network status documents, + corresponding roughly to "status votes" above. They would compute the + result of the vote on the client side. + + Authorities used to sign documents using the same private keys they used + for their roles as routers. This forced them to keep these extremely + sensitive keys in memory unencrypted. + + All of the information in extra-info documents used to be kept in the + main descriptors. + +1.2. Document meta-format + + Router descriptors, directories, and running-routers documents all obey the + following lightweight extensible information format. + + The highest level object is a Document, which consists of one or more + Items. Every Item begins with a KeywordLine, followed by zero or more + Objects. A KeywordLine begins with a Keyword, optionally followed by + whitespace and more non-newline characters, and ends with a newline. A + Keyword is a sequence of one or more characters in the set [A-Za-z0-9-]. + An Object is a block of encoded data in pseudo-Open-PGP-style + armor. (cf. RFC 2440) + + More formally: + + NL = The ascii LF character (hex value 0x0a). + Document ::= (Item | NL)+ + Item ::= KeywordLine Object* + KeywordLine ::= Keyword NL | Keyword WS ArgumentChar+ NL + Keyword = KeywordChar+ + KeywordChar ::= 'A' ... 'Z' | 'a' ... 'z' | '0' ... '9' | '-' + ArgumentChar ::= any printing ASCII character except NL. + WS = (SP | TAB)+ + Object ::= BeginLine Base-64-encoded-data EndLine + BeginLine ::= "-----BEGIN " Keyword "-----" NL + EndLine ::= "-----END " Keyword "-----" NL + + The BeginLine and EndLine of an Object must use the same keyword. + + When interpreting a Document, software MUST ignore any KeywordLine that + starts with a keyword it doesn't recognize; future implementations MUST NOT + require current clients to understand any KeywordLine not currently + described. + + The "opt" keyword was used until Tor 0.1.2.5-alpha for non-critical future + extensions. All implementations MUST ignore any item of the form "opt + keyword ....." when they would not recognize "keyword ....."; and MUST + treat "opt keyword ....." as synonymous with "keyword ......" when keyword + is recognized. + + Implementations before 0.1.2.5-alpha rejected any document with a + KeywordLine that started with a keyword that they didn't recognize. + When generating documents that need to be read by older versions of Tor, + implementations MUST prefix items not recognized by older versions of + Tor with an "opt" until those versions of Tor are obsolete. [Note that + key certificates, status vote documents, extra info documents, and + status consensus documents will never be read by older versions of Tor.] + + Other implementations that want to extend Tor's directory format MAY + introduce their own items. The keywords for extension items SHOULD start + with the characters "x-" or "X-", to guarantee that they will not conflict + with keywords used by future versions of Tor. + + In our document descriptions below, we tag Items with a multiplicity in + brackets. Possible tags are: + + "At start, exactly once": These items MUST occur in every instance of + the document type, and MUST appear exactly once, and MUST be the + first item in their documents. + + "Exactly once": These items MUST occur exactly one time in every + instance of the document type. 
+ + "At end, exactly once": These items MUST occur in every instance of + the document type, and MUST appear exactly once, and MUST be the + last item in their documents. + + "At most once": These items MAY occur zero or one times in any + instance of the document type, but MUST NOT occur more than once. + + "Any number": These items MAY occur zero, one, or more times in any + instance of the document type. + + "Once or more": These items MUST occur at least once in any instance + of the document type, and MAY occur more. + +1.3. Signing documents + + Every signable document below is signed in a similar manner, using a + given "Initial Item", a final "Signature Item", a digest algorithm, and + a signing key. + + The Initial Item must be the first item in the document. + + The Signature Item has the following format: + + [arguments] NL SIGNATURE NL + + The "SIGNATURE" Object contains a signature (using the signing key) of + the PKCS1-padded digest of the entire document, taken from the + beginning of the Initial item, through the newline after the Signature + Item's keyword and its arguments. + + Unless otherwise, the digest algorithm is SHA-1. + + All documents are invalid unless signed with the correct signing key. + + The "Digest" of a document, unless stated otherwise, is its digest *as + signed by this signature scheme*. + +1.4. Voting timeline + + Every consensus document has a "valid-after" (VA) time, a "fresh-until" + (FU) time and a "valid-until" (VU) time. VA MUST precede FU, which MUST + in turn precede VU. Times are chosen so that every consensus will be + "fresh" until the next consensus becomes valid, and "valid" for a while + after. At least 3 consensuses should be valid at any given time. + + The timeline for a given consensus is as follows: + + VA-DistSeconds-VoteSeconds: The authorities exchange votes. + + VA-DistSeconds-VoteSeconds/2: The authorities try to download any + votes they don't have. + + VA-DistSeconds: The authorities calculate the consensus and exchange + signatures. + + VA-DistSeconds/2: The authorities try to download any signatures + they don't have. + + VA: All authorities have a multiply signed consensus. + + VA ... FU: Caches download the consensus. (Note that since caches have + no way of telling what VA and FU are until they have downloaded + the consensus, they assume that the present consensus's VA is + equal to the previous one's FU, and that its FU is one interval after + that.) + + FU: The consensus is no longer the freshest consensus. + + FU ... (the current consensus's VU): Clients download the consensus. + (See note above: clients guess that the next consensus's FU will be + two intervals after the current VA.) + + VU: The consensus is no longer valid. + + VoteSeconds and DistSeconds MUST each be at least 20 seconds; FU-VA and + VU-FU MUST each be at least 5 minutes. + +2. Router operation and formats + + ORs SHOULD generate a new router descriptor and a new extra-info + document whenever any of the following events have occurred: + + - A period of time (18 hrs by default) has passed since the last + time a descriptor was generated. + + - A descriptor field other than bandwidth or uptime has changed. + + - Bandwidth has changed by a factor of 2 from the last time a + descriptor was generated, and at least a given interval of time + (20 mins by default) has passed since then. + + - Its uptime has been reset (by restarting). 
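+
+  [Non-normative illustration.  A Java sketch of the republish decision
+  implied by the list of events above; the 18-hour and 20-minute figures are
+  the defaults given in this section, and the parameter names are
+  illustrative.]
+
+    import java.util.concurrent.TimeUnit;
+
+    public class DescriptorPublisher {
+        private static final long MAX_AGE_MS = TimeUnit.HOURS.toMillis(18);
+        private static final long MIN_BANDWIDTH_INTERVAL_MS =
+                TimeUnit.MINUTES.toMillis(20);
+
+        // True if any of the regeneration events listed above has occurred.
+        public static boolean shouldRegenerate(long nowMs,
+                                               long lastGeneratedMs,
+                                               boolean otherFieldChanged,
+                                               long lastBandwidth,
+                                               long currentBandwidth,
+                                               boolean uptimeWasReset) {
+            if (nowMs - lastGeneratedMs >= MAX_AGE_MS)
+                return true;
+            if (otherFieldChanged)
+                return true;
+            boolean changedByFactorOfTwo =
+                    currentBandwidth >= 2 * lastBandwidth
+                    || 2 * currentBandwidth <= lastBandwidth;
+            if (changedByFactorOfTwo
+                    && nowMs - lastGeneratedMs >= MIN_BANDWIDTH_INTERVAL_MS)
+                return true;
+            return uptimeWasReset;
+        }
+    }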
+ + [XXX this list is incomplete; see router_differences_are_cosmetic() + in routerlist.c for others] + + ORs SHOULD NOT publish a new router descriptor or extra-info document + if none of the above events have occurred and not much time has passed + (12 hours by default). + + After generating a descriptor, ORs upload them to every directory + authority they know, by posting them (in order) to the URL + + http:///tor/ + +2.1. Router descriptor format + + Router descriptors consist of the following items. For backward + compatibility, there should be an extra NL at the end of each router + descriptor. + + In lines that take multiple arguments, extra arguments SHOULD be + accepted and ignored. Many of the nonterminals below are defined in + section 2.3. + + "router" nickname address ORPort SOCKSPort DirPort NL + + [At start, exactly once.] + + Indicates the beginning of a router descriptor. "nickname" must be a + valid router nickname as specified in 2.3. "address" must be an IPv4 + address in dotted-quad format. The last three numbers indicate the + TCP ports at which this OR exposes functionality. ORPort is a port at + which this OR accepts TLS connections for the main OR protocol; + SOCKSPort is deprecated and should always be 0; and DirPort is the + port at which this OR accepts directory-related HTTP connections. If + any port is not supported, the value 0 is given instead of a port + number. (At least one of DirPort and ORPort SHOULD be set; + authorities MAY reject any descriptor with both DirPort and ORPort of + 0.) + + "bandwidth" bandwidth-avg bandwidth-burst bandwidth-observed NL + + [Exactly once] + + Estimated bandwidth for this router, in bytes per second. The + "average" bandwidth is the volume per second that the OR is willing to + sustain over long periods; the "burst" bandwidth is the volume that + the OR is willing to sustain in very short intervals. The "observed" + value is an estimate of the capacity this server can handle. The + server remembers the max bandwidth sustained output over any ten + second period in the past day, and another sustained input. The + "observed" value is the lesser of these two numbers. + + "platform" string NL + + [At most once] + + A human-readable string describing the system on which this OR is + running. This MAY include the operating system, and SHOULD include + the name and version of the software implementing the Tor protocol. + + "published" YYYY-MM-DD HH:MM:SS NL + + [Exactly once] + + The time, in GMT, when this descriptor (and its corresponding + extra-info document if any) was generated. + + "fingerprint" fingerprint NL + + [At most once] + + A fingerprint (a HASH_LEN-byte of asn1 encoded public key, encoded in + hex, with a single space after every 4 characters) for this router's + identity key. A descriptor is considered invalid (and MUST be + rejected) if the fingerprint line does not match the public key. + + [We didn't start parsing this line until Tor 0.1.0.6-rc; it should + be marked with "opt" until earlier versions of Tor are obsolete.] + + "hibernating" bool NL + + [At most once] + + If the value is 1, then the Tor server was hibernating when the + descriptor was published, and shouldn't be used to build circuits. + + [We didn't start parsing this line until Tor 0.1.0.6-rc; it should be + marked with "opt" until earlier versions of Tor are obsolete.] + + "uptime" number NL + + [At most once] + + The number of seconds that this OR process has been running. 
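+
+  [Non-normative illustration.  A Java sketch of reading the "router" and
+  "bandwidth" items described above from a descriptor, one KeywordLine at a
+  time.  Only these two items are handled; the document framing of section
+  1.2, the remaining items, and the signature checking of section 1.3 are
+  omitted.  Names are illustrative.]
+
+    public class RouterDescriptorLines {
+        public String nickname, address;
+        public int orPort, socksPort, dirPort;
+        public long bandwidthAvg, bandwidthBurst, bandwidthObserved;
+
+        // Handles one KeywordLine; unknown keywords and extra arguments are
+        // ignored, as the descriptor format above requires.
+        public void acceptLine(String line) {
+            String[] w = line.trim().split("\\s+");
+            if (w[0].equals("router") && w.length >= 6) {
+                // "router" nickname address ORPort SOCKSPort DirPort
+                nickname = w[1];
+                address = w[2];
+                orPort = Integer.parseInt(w[3]);
+                socksPort = Integer.parseInt(w[4]);   // deprecated; expected to be 0
+                dirPort = Integer.parseInt(w[5]);
+            } else if (w[0].equals("bandwidth") && w.length >= 4) {
+                // "bandwidth" bandwidth-avg bandwidth-burst bandwidth-observed
+                bandwidthAvg = Long.parseLong(w[1]);
+                bandwidthBurst = Long.parseLong(w[2]);
+                bandwidthObserved = Long.parseLong(w[3]);
+            }
+        }
+    }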
+ + "onion-key" NL a public key in PEM format + + [Exactly once] + + This key is used to encrypt EXTEND cells for this OR. The key MUST be + accepted for at least 1 week after any new key is published in a + subsequent descriptor. It MUST be 1024 bits. + + "signing-key" NL a public key in PEM format + + [Exactly once] + + The OR's long-term identity key. It MUST be 1024 bits. + + "accept" exitpattern NL + "reject" exitpattern NL + + [Any number] + + These lines describe an "exit policy": the rules that an OR follows + when deciding whether to allow a new stream to a given address. The + 'exitpattern' syntax is described below. There MUST be at least one + such entry. The rules are considered in order; if no rule matches, + the address will be accepted. For clarity, the last such entry SHOULD + be accept *:* or reject *:*. + + "router-signature" NL Signature NL + + [At end, exactly once] + + The "SIGNATURE" object contains a signature of the PKCS1-padded + hash of the entire router descriptor, taken from the beginning of the + "router" line, through the newline after the "router-signature" line. + The router descriptor is invalid unless the signature is performed + with the router's identity key. + + "contact" info NL + + [At most once] + + Describes a way to contact the server's administrator, preferably + including an email address and a PGP key fingerprint. + + "family" names NL + + [At most once] + + 'Names' is a space-separated list of server nicknames or + hexdigests. If two ORs list one another in their "family" entries, + then OPs should treat them as a single OR for the purpose of path + selection. + + For example, if node A's descriptor contains "family B", and node B's + descriptor contains "family A", then node A and node B should never + be used on the same circuit. + + "read-history" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM... NL + [At most once] + "write-history" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM... NL + [At most once] + + Declare how much bandwidth the OR has used recently. Usage is divided + into intervals of NSEC seconds. The YYYY-MM-DD HH:MM:SS field + defines the end of the most recent interval. The numbers are the + number of bytes used in the most recent intervals, ordered from + oldest to newest. + + [We didn't start parsing these lines until Tor 0.1.0.6-rc; they should + be marked with "opt" until earlier versions of Tor are obsolete.] + + [See also migration notes in section 2.2.1.] + + "eventdns" bool NL + + [At most once] + + Declare whether this version of Tor is using the newer enhanced + dns logic. Versions of Tor with this field set to false SHOULD NOT + be used for reverse hostname lookups. + + [All versions of Tor before 0.1.2.2-alpha should be assumed to have + this option set to 0 if it is not present. All Tor versions at + 0.1.2.2-alpha or later should be assumed to have this option set to + 1 if it is not present. Until 0.1.2.1-alpha-dev, this option was + not generated, even when the new DNS code was in use. Versions of Tor + before 0.1.2.1-alpha-dev did not parse this option, so it should be + marked "opt". The dnsworker logic has been removed, so this option + should not be used by new server code. However, it can still be + used, and should still be recognized by new code until Tor 0.1.2.x + is obsolete.] + + "caches-extra-info" NL + + [At most once.] + + Present only if this router is a directory cache that provides + extra-info documents. 
+ + [Versions before 0.2.0.1-alpha don't recognize this, and versions + before 0.1.2.5-alpha will reject descriptors containing it unless + it is prefixed with "opt"; it should be so prefixed until these + versions are obsolete.] + + "extra-info-digest" digest NL + + [At most once] + + "Digest" is a hex-encoded digest (using upper-case characters) of the + router's extra-info document, as signed in the router's extra-info + (that is, not including the signature). (If this field is absent, the + router is not uploading a corresponding extra-info document.) + + [Versions before 0.2.0.1-alpha don't recognize this, and versions + before 0.1.2.5-alpha will reject descriptors containing it unless + it is prefixed with "opt"; it should be so prefixed until these + versions are obsolete.] + + "hidden-service-dir" *(SP VersionNum) NL + + [At most once.] + + Present only if this router stores and serves hidden service + descriptors. If any VersionNum(s) are specified, this router + supports those descriptor versions. If none are specified, it + defaults to version 2 descriptors. + + [Versions of Tor before 0.1.2.5-alpha rejected router descriptors + with unrecognized items; the protocols line should be preceded with + an "opt" until these Tors are obsolete.] + + "protocols" SP "Link" SP LINK-VERSION-LIST SP "Circuit" SP + CIRCUIT-VERSION-LIST NL + + [At most once.] + + Both lists are space-separated sequences of numbers, to indicate which + protocols the server supports. As of 30 Mar 2008, specified + protocols are "Link 1 2 Circuit 1". See section 4.1 of tor-spec.txt + for more information about link protocol versions. + + [Versions of Tor before 0.1.2.5-alpha rejected router descriptors + with unrecognized items; the protocols line should be preceded with + an "opt" until these Tors are obsolete.] + + "allow-single-hop-exits" + + [At most once.] + + Present only if the router allows single-hop circuits to make exit + connections. Most Tor servers do not support this: this is + included for specialized controllers designed to support perspective + access and such. + + +2.2. Extra-info documents + + Extra-info documents consist of the following items: + + "extra-info" Nickname Fingerprint NL + [At start, exactly once.] + + Identifies what router this is an extra info descriptor for. + Fingerprint is encoded in hex (using upper-case letters), with + no spaces. + + "published" + + [Exactly once.] + + The time, in GMT, when this document (and its corresponding router + descriptor if any) was generated. It MUST match the published time + in the corresponding router descriptor. + + "read-history" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM... NL + [At most once.] + "write-history" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM... NL + [At most once.] + + As documented in 2.1 above. See migration notes in section 2.2.1. + + "geoip-start" YYYY-MM-DD HH:MM:SS NL + "geoip-client-origins" CC=N,CC=N,... NL + + Only generated by bridge routers (see blocking.pdf), and only + when they have been configured with a geoip database. + Non-bridges SHOULD NOT generate these fields. Contains a list + of mappings from two-letter country codes (CC) to the number + of clients that have connected to that bridge from that + country (approximate, and rounded up to the nearest multiple of 8 + in order to hamper traffic analysis). A country is included + only if it has at least one address. The time in + "geoip-start" is the time at which we began collecting geoip + statistics. 
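   The CC=N country-code lists used by "geoip-client-origins" above (and by
   several "dirreq-*" lines below) are simple to parse, and counts are rounded
   up to a multiple of 8 before publication. A minimal, hypothetical Java
   sketch of both steps:

      import java.util.LinkedHashMap;
      import java.util.Map;

      final class CountryCounts {
          // Parse "us=24,de=16,??=8" into an ordered map.
          static Map<String, Integer> parse(String list) {
              Map<String, Integer> out = new LinkedHashMap<>();
              if (list.isEmpty()) return out;
              for (String pair : list.split(",")) {
                  String[] kv = pair.split("=", 2);
                  out.put(kv[0], Integer.parseInt(kv[1]));
              }
              return out;
          }

          // Round a raw count up to the nearest multiple of 8 before reporting.
          static int roundUpTo8(int n) {
              return ((n + 7) / 8) * 8;
          }
      }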
+ + "dirreq-stats-end" YYYY-MM-DD HH:MM:SS (NSEC s) NL + [At most once.] + + YYYY-MM-DD HH:MM:SS defines the end of the included measurement + interval of length NSEC seconds (86400 seconds by default). + + A "dirreq-stats-end" line, as well as any other "dirreq-*" line, + is only added when the relay has opened its Dir port and after 24 + hours of measuring directory requests. + + "dirreq-v2-ips" CC=N,CC=N,... NL + [At most once.] + "dirreq-v3-ips" CC=N,CC=N,... NL + [At most once.] + + List of mappings from two-letter country codes to the number of + unique IP addresses that have connected from that country to + request a v2/v3 network status, rounded up to the nearest multiple + of 8. Only those IP addresses are counted that the directory can + answer with a 200 OK status code. + + "dirreq-v2-reqs" CC=N,CC=N,... NL + [At most once.] + "dirreq-v3-reqs" CC=N,CC=N,... NL + [At most once.] + + List of mappings from two-letter country codes to the number of + requests for v2/v3 network statuses from that country, rounded up + to the nearest multiple of 8. Only those requests are counted that + the directory can answer with a 200 OK status code. + + "dirreq-v2-share" num% NL + [At most once.] + "dirreq-v3-share" num% NL + [At most once.] + + The share of v2/v3 network status requests that the directory + expects to receive from clients based on its advertised bandwidth + compared to the overall network bandwidth capacity. Shares are + formatted in percent with two decimal places. Shares are + calculated as means over the whole 24-hour interval. + + "dirreq-v2-resp" status=num,... NL + [At most once.] + "dirreq-v3-resp" status=nul,... NL + [At most once.] + + List of mappings from response statuses to the number of requests + for v2/v3 network statuses that were answered with that response + status, rounded up to the nearest multiple of 4. Only response + statuses with at least 1 response are reported. New response + statuses can be added at any time. The current list of response + statuses is as follows: + + "ok": a network status request is answered; this number + corresponds to the sum of all requests as reported in + "dirreq-v2-reqs" or "dirreq-v3-reqs", respectively, before + rounding up. + "not-enough-sigs: a version 3 network status is not signed by a + sufficient number of requested authorities. + "unavailable": a requested network status object is unavailable. + "not-found": a requested network status is not found. + "not-modified": a network status has not been modified since the + If-Modified-Since time that is included in the request. + "busy": the directory is busy. + + "dirreq-v2-direct-dl" key=val,... NL + [At most once.] + "dirreq-v3-direct-dl" key=val,... NL + [At most once.] + "dirreq-v2-tunneled-dl" key=val,... NL + [At most once.] + "dirreq-v3-tunneled-dl" key=val,... NL + [At most once.] + + List of statistics about possible failures in the download process + of v2/v3 network statuses. Requests are either "direct" + HTTP-encoded requests over the relay's directory port, or + "tunneled" requests using a BEGIN_DIR cell over the relay's OR + port. The list of possible statistics can change, and statistics + can be left out from reporting. The current list of statistics is + as follows: + + Successful downloads and failures: + + "complete": a client has finished the download successfully. + "timeout": a download did not finish within 10 minutes after + starting to send the response. 
+ "running": a download is still running at the end of the + measurement period for less than 10 minutes after starting to + send the response. + + Download times: + + "min", "max": smallest and largest measured bandwidth in B/s. + "d[1-4,6-9]": 1st to 4th and 6th to 9th decile of measured + bandwidth in B/s. For a given decile i, i/10 of all downloads + had a smaller bandwidth than di, and (10-i)/10 of all downloads + had a larger bandwidth than di. + "q[1,3]": 1st and 3rd quartile of measured bandwidth in B/s. One + fourth of all downloads had a smaller bandwidth than q1, one + fourth of all downloads had a larger bandwidth than q3, and the + remaining half of all downloads had a bandwidth between q1 and + q3. + "md": median of measured bandwidth in B/s. Half of the downloads + had a smaller bandwidth than md, the other half had a larger + bandwidth than md. + + "entry-stats-end" YYYY-MM-DD HH:MM:SS (NSEC s) NL + [At most once.] + + YYYY-MM-DD HH:MM:SS defines the end of the included measurement + interval of length NSEC seconds (86400 seconds by default). + + An "entry-stats-end" line, as well as any other "entry-*" + line, is first added after the relay has been running for at least + 24 hours. + + "entry-ips" CC=N,CC=N,... NL + [At most once.] + + List of mappings from two-letter country codes to the number of + unique IP addresses that have connected from that country to the + relay and which are no known other relays, rounded up to the + nearest multiple of 8. + + "cell-stats-end" YYYY-MM-DD HH:MM:SS (NSEC s) NL + [At most once.] + + YYYY-MM-DD HH:MM:SS defines the end of the included measurement + interval of length NSEC seconds (86400 seconds by default). + + A "cell-stats-end" line, as well as any other "cell-*" line, + is first added after the relay has been running for at least 24 + hours. + + "cell-processed-cells" num,...,num NL + [At most once.] + + Mean number of processed cells per circuit, subdivided into + deciles of circuits by the number of cells they have processed in + descending order from loudest to quietest circuits. + + "cell-queued-cells" num,...,num NL + [At most once.] + + Mean number of cells contained in queues by circuit decile. These + means are calculated by 1) determining the mean number of cells in + a single circuit between its creation and its termination and 2) + calculating the mean for all circuits in a given decile as + determined in "cell-processed-cells". Numbers have a precision of + two decimal places. + + "cell-time-in-queue" num,...,num NL + [At most once.] + + Mean time cells spend in circuit queues in milliseconds. Times are + calculated by 1) determining the mean time cells spend in the + queue of a single circuit and 2) calculating the mean for all + circuits in a given decile as determined in + "cell-processed-cells". + + "cell-circuits-per-decile" num NL + [At most once.] + + Mean number of circuits that are included in any of the deciles, + rounded up to the next integer. + + "exit-stats-end" YYYY-MM-DD HH:MM:SS (NSEC s) NL + [At most once.] + + YYYY-MM-DD HH:MM:SS defines the end of the included measurement + interval of length NSEC seconds (86400 seconds by default). + + An "exit-stats-end" line, as well as any other "exit-*" line, is + first added after the relay has been running for at least 24 hours + and only if the relay permits exiting (where exiting to a single + port and IP address is sufficient). + + "exit-kibibytes-written" port=N,port=N,... NL + [At most once.] + "exit-kibibytes-read" port=N,port=N,... 
NL + [At most once.] + + List of mappings from ports to the number of kibibytes that the + relay has written to or read from exit connections to that port, + rounded up to the next full kibibyte. + + "exit-streams-opened" port=N,port=N,... NL + [At most once.] + + List of mappings from ports to the number of opened exit streams + to that port, rounded up to the nearest multiple of 4. + + "router-signature" NL Signature NL + [At end, exactly once.] + + A document signature as documented in section 1.3, using the + initial item "extra-info" and the final item "router-signature", + signed with the router's identity key. + +2.2.1. Moving history fields to extra-info documents. + + Tools that want to use the read-history and write-history values SHOULD + download extra-info documents as well as router descriptors. Such + tools SHOULD accept history values from both sources; if they appear in + both documents, the values in the extra-info documents are authoritative. + + New versions of Tor no longer generate router descriptors + containing read-history or write-history. Tools should continue to + accept read-history and write-history values in router descriptors + produced by older versions of Tor until all Tor versions earlier + than 0.2.0.x are obsolete. + +2.3. Nonterminals in router descriptors + + nickname ::= between 1 and 19 alphanumeric characters ([A-Za-z0-9]), + case-insensitive. + hexdigest ::= a '$', followed by 40 hexadecimal characters + ([A-Fa-f0-9]). [Represents a server by the digest of its identity + key.] + + exitpattern ::= addrspec ":" portspec + portspec ::= "*" | port | port "-" port + port ::= an integer between 1 and 65535, inclusive. + + [Some implementations incorrectly generate ports with value 0. + Implementations SHOULD accept this, and SHOULD NOT generate it. + Connections to port 0 are never permitted.] + + addrspec ::= "*" | ip4spec | ip6spec + ipv4spec ::= ip4 | ip4 "/" num_ip4_bits | ip4 "/" ip4mask + ip4 ::= an IPv4 address in dotted-quad format + ip4mask ::= an IPv4 mask in dotted-quad format + num_ip4_bits ::= an integer between 0 and 32 + ip6spec ::= ip6 | ip6 "/" num_ip6_bits + ip6 ::= an IPv6 address, surrounded by square brackets. + num_ip6_bits ::= an integer between 0 and 128 + + bool ::= "0" | "1" + +3. Formats produced by directory authorities. + + Every authority has two keys used in this protocol: a signing key, and + an authority identity key. (Authorities also have a router identity + key used in their role as a router and by earlier versions of the + directory protocol.) The identity key is used from time to time to + sign new key certificates using new signing keys; it is very sensitive. + The signing key is used to sign key certificates and status documents. + + There are three kinds of documents generated by directory authorities: + + Key certificates + Status votes + Status consensuses + + Each is discussed below. + +3.1. Key certificates + + Key certificates consist of the following items: + + "dir-key-certificate-version" version NL + + [At start, exactly once.] + + Determines the version of the key certificate. MUST be "3" for + the protocol described in this document. Implementations MUST + reject formats they don't understand. + + "dir-address" IPPort NL + [At most once] + + An IP:Port for this authority's directory port. + + "fingerprint" fingerprint NL + + [Exactly once.] + + Hexadecimal encoding without spaces based on the authority's + identity key. + + "dir-identity-key" NL a public key in PEM format + + [Exactly once.] 
+ + The long-term authority identity key for this authority. This key + SHOULD be at least 2048 bits long; it MUST NOT be shorter than + 1024 bits. + + "dir-key-published" YYYY-MM-DD HH:MM:SS NL + + [Exactly once.] + + The time (in GMT) when this document and corresponding key were + last generated. + + "dir-key-expires" YYYY-MM-DD HH:MM:SS NL + + [Exactly once.] + + A time (in GMT) after which this key is no longer valid. + + "dir-signing-key" NL a key in PEM format + + [Exactly once.] + + The directory server's public signing key. This key MUST be at + least 1024 bits, and MAY be longer. + + "dir-key-crosscert" NL CrossSignature NL + + [At most once.] + + NOTE: Authorities MUST include this field in all newly generated + certificates. A future version of this specification will make + the field required. + + CrossSignature is a signature, made using the certificate's signing + key, of the digest of the PKCS1-padded hash of the certificate's + identity key. For backward compatibility with broken versions of the + parser, we wrap the base64-encoded signature in -----BEGIN ID + SIGNATURE---- and -----END ID SIGNATURE----- tags. Implementations + MUST allow the "ID " portion to be omitted, however. + + When encountering a certificate with a dir-key-crosscert entry, + implementations MUST verify that the signature is a correct signature + of the hash of the identity key using the signing key. + + "dir-key-certification" NL Signature NL + + [At end, exactly once.] + + A document signature as documented in section 1.3, using the + initial item "dir-key-certificate-version" and the final item + "dir-key-certification", signed with the authority identity key. + + Authorities MUST generate a new signing key and corresponding + certificate before the key expires. + +3.2. Vote and consensus status documents + + Votes and consensuses are more strictly formatted then other documents + in this specification, since different authorities must be able to + generate exactly the same consensus given the same set of votes. + + The procedure for deciding when to generate vote and consensus status + documents are described in section XXX below. + + Status documents contain a preamble, an authority section, a list of + router status entries, and one or more footer signature, in that order. + + Unlike other formats described above, a SP in these documents must be a + single space character (hex 20). + + Some items appear only in votes, and some items appear only in + consensuses. Unless specified, items occur in both. + + The preamble contains the following items. They MUST occur in the + order given here: + + "network-status-version" SP version NL. + + [At start, exactly once.] + + A document format version. For this specification, the version is + "3". + + "vote-status" SP type NL + + [Exactly once.] + + The status MUST be "vote" or "consensus", depending on the type of + the document. + + "consensus-methods" SP IntegerList NL + + [Exactly once for votes; does not occur in consensuses.] + + A space-separated list of supported methods for generating + consensuses from votes. See section 3.4.1 for details. Method "1" + MUST be included. + + "consensus-method" SP Integer NL + + [Exactly once for consensuses; does not occur in votes.] + + See section 3.4.1 for details. + + (Only included when the vote is generated with consensus-method 2 or + later.) + + "published" SP YYYY-MM-DD SP HH:MM:SS NL + + [Exactly once for votes; does not occur in consensuses.] 
+ + The publication time for this status document (if a vote). + + "valid-after" SP YYYY-MM-DD SP HH:MM:SS NL + + [Exactly once.] + + The start of the Interval for this vote. Before this time, the + consensus document produced from this vote should not be used. + See 1.4 for voting timeline information. + + "fresh-until" SP YYYY-MM-DD SP HH:MM:SS NL + + [Exactly once.] + + The time at which the next consensus should be produced; before this + time, there is no point in downloading another consensus, since there + won't be a new one. See 1.4 for voting timeline information. + + "valid-until" SP YYYY-MM-DD SP HH:MM:SS NL + + [Exactly once.] + + The end of the Interval for this vote. After this time, the + consensus produced by this vote should not be used. See 1.4 for + voting timeline information. + + "voting-delay" SP VoteSeconds SP DistSeconds NL + + [Exactly once.] + + VoteSeconds is the number of seconds that we will allow to collect + votes from all authorities; DistSeconds is the number of seconds + we'll allow to collect signatures from all authorities. See 1.4 for + voting timeline information. + + "client-versions" SP VersionList NL + + [At most once.] + + A comma-separated list of recommended client versions, in + ascending order. If absent, no opinion is held about client + versions. + + "server-versions" SP VersionList NL + + [At most once.] + + A comma-separated list of recommended server versions, in + ascending order. If absent, no opinion is held about server + versions. + + "known-flags" SP FlagList NL + + [Exactly once.] + + A space-separated list of all of the flags that this document + might contain. A flag is "known" either because the authority + knows about them and might set them (if in a vote), or because + enough votes were counted for the consensus for an authoritative + opinion to have been formed about their status. + + "params" SP [Parameters] NL + + [At most once] + + Parameter ::= Keyword '=' Int32 + Int32 ::= A decimal integer between -2147483648 and 2147483647. + Parameters ::= Parameter | Parameters SP Parameter + + The parameters list, if present, contains a space-separated list of + key-value pairs, sorted in lexical order by their keyword. Each + parameter has its own meaning. + + (Only included when the vote is generated with consensus-method 7 or + later.) + + The authority section of a vote contains the following items, followed + in turn by the authority's current key certificate: + + "dir-source" SP nickname SP identity SP address SP IP SP dirport SP + orport NL + + [Exactly once, at start] + + Describes this authority. The nickname is a convenient identifier + for the authority. The identity is an uppercase hex fingerprint of + the authority's current (v3 authority) identity key. The address is + the server's hostname. The IP is the server's current IP address, + and dirport is its current directory port. XXXXorport + + "contact" SP string NL + + [At most once.] + + An arbitrary string describing how to contact the directory + server's administrator. Administrators should include at least an + email address and a PGP fingerprint. + + "legacy-key" SP FINGERPRINT NL + + [At most once] + + Lists a fingerprint for an obsolete _identity_ key still used + by this authority to keep older clients working. This option + is used to keep key around for a little while in case the + authorities need to migrate many identity keys at once. 
+ (Generally, this would only happen because of a security + vulnerability that affected multiple authorities, like the + Debian OpenSSL RNG bug of May 2008.) + + The authority section of a consensus contains groups the following items, + in the order given, with one group for each authority that contributed to + the consensus, with groups sorted by authority identity digest: + + "dir-source" SP nickname SP identity SP address SP IP SP dirport SP + orport NL + + [Exactly once, at start] + + As in the authority section of a vote. + + "contact" SP string NL + + [At most once.] + + As in the authority section of a vote. + + "vote-digest" SP digest NL + + [Exactly once.] + + A digest of the vote from the authority that contributed to this + consensus, as signed (that is, not including the signature). + (Hex, upper-case.) + + Each router status entry contains the following items. Router status + entries are sorted in ascending order by identity digest. + + "r" SP nickname SP identity SP digest SP publication SP IP SP ORPort + SP DirPort NL + + [At start, exactly once.] + + "Nickname" is the OR's nickname. "Identity" is a hash of its + identity key, encoded in base64, with trailing equals sign(s) + removed. "Digest" is a hash of its most recent descriptor as + signed (that is, not including the signature), encoded in base64. + "Publication" is the + publication time of its most recent descriptor, in the form + YYYY-MM-DD HH:MM:SS, in GMT. "IP" is its current IP address; + ORPort is its current OR port, "DirPort" is it's current directory + port, or "0" for "none". + + "s" SP Flags NL + + [At most once.] + + A series of space-separated status flags, in alphabetical order. + Currently documented flags are: + + "Authority" if the router is a directory authority. + "BadExit" if the router is believed to be useless as an exit node + (because its ISP censors it, because it is behind a restrictive + proxy, or for some similar reason). + "BadDirectory" if the router is believed to be useless as a + directory cache (because its directory port isn't working, + its bandwidth is always throttled, or for some similar + reason). + "Exit" if the router is more useful for building + general-purpose exit circuits than for relay circuits. The + path building algorithm uses this flag; see path-spec.txt. + "Fast" if the router is suitable for high-bandwidth circuits. + "Guard" if the router is suitable for use as an entry guard. + "HSDir" if the router is considered a v2 hidden service directory. + "Named" if the router's identity-nickname mapping is canonical, + and this authority binds names. + "Stable" if the router is suitable for long-lived circuits. + "Running" if the router is currently usable. + "Unnamed" if another router has bound the name used by this + router, and this authority binds names. + "Valid" if the router has been 'validated'. + "V2Dir" if the router implements the v2 directory protocol. + "V3Dir" if the router implements this protocol. + + "v" SP version NL + + [At most once.] + + The version of the Tor protocol that this server is running. If + the value begins with "Tor" SP, the rest of the string is a Tor + version number, and the protocol is "The Tor protocol as supported + by the given version of Tor." Otherwise, if the value begins with + some other string, Tor has upgraded to a more sophisticated + protocol versioning system, and the protocol is "a version of the + Tor protocol more recent than any we recognize." 
+ + Directory authorities SHOULD omit version strings they receive from + descriptors if they would cause "v" lines to be over 128 characters + long. + + "w" SP "Bandwidth=" INT [SP "Measured=" INT] NL + + [At most once.] + + An estimate of the bandwidth of this server, in an arbitrary + unit (currently kilobytes per second). Used to weight router + selection. + + Additionally, the Measured= keyword is present in votes by + participating bandwidth measurement authorites to indicate + a measured bandwidth currently produced by measuring stream + capacities. + + Other weighting keywords may be added later. + Clients MUST ignore keywords they do not recognize. + + "p" SP ("accept" / "reject") SP PortList NL + + [At most once.] + + PortList = PortOrRange + PortList = PortList "," PortOrRange + PortOrRange = INT "-" INT / INT + + A list of those ports that this router supports (if 'accept') + or does not support (if 'reject') for exit to "most + addresses". + + The signature section contains the following item, which appears + Exactly Once for a vote, and At Least Once for a consensus. + + "directory-signature" SP identity SP signing-key-digest NL Signature + + This is a signature of the status document, with the initial item + "network-status-version", and the signature item + "directory-signature", using the signing key. (In this case, we take + the hash through the _space_ after directory-signature, not the + newline: this ensures that all authorities sign the same thing.) + "identity" is the hex-encoded digest of the authority identity key of + the signing authority, and "signing-key-digest" is the hex-encoded + digest of the current authority signing key of the signing authority. + +3.3. Deciding how to vote. + + (This section describes how directory authorities choose which status + flags to apply to routers, as of Tor 0.2.0.0-alpha-dev. Later directory + authorities MAY do things differently, so long as clients keep working + well. Clients MUST NOT depend on the exact behaviors in this section.) + + In the below definitions, a router is considered "active" if it is + running, valid, and not hibernating. + + "Valid" -- a router is 'Valid' if it is running a version of Tor not + known to be broken, and the directory authority has not blacklisted + it as suspicious. + + "Named" -- Directory authority administrators may decide to support name + binding. If they do, then they must maintain a file of + nickname-to-identity-key mappings, and try to keep this file consistent + with other directory authorities. If they don't, they act as clients, and + report bindings made by other directory authorities (name X is bound to + identity Y if at least one binding directory lists it, and no directory + binds X to some other Y'.) A router is called 'Named' if the router + believes the given name should be bound to the given key. + + Two strategies exist on the current network for deciding on + values for the Named flag. In the original version, server + operators were asked to send nickname-identity pairs to a + mailing list of Naming directory authorities operators. The + operators were then supposed to add the pairs to their + mapping files; in practice, they didn't get to this often. + + Newer Naming authorities run a script that registers routers + in their mapping files once the routers have been online at + least two weeks, no other router has that nickname, and no + other router has wanted the nickname for a month. If a router + has not been online for six months, the router is removed. 
+ + "Unnamed" -- Directory authorities that support naming should vote for a + router to be 'Unnamed' if its given nickname is mapped to a different + identity. + + "Running" -- A router is 'Running' if the authority managed to connect to + it successfully within the last 30 minutes. + + "Stable" -- A router is 'Stable' if it is active, and either its Weighted + MTBF is at least the median for known active routers or its Weighted MTBF + corresponds to at least 7 days. Routers are never called Stable if they are + running a version of Tor known to drop circuits stupidly. (0.1.1.10-alpha + through 0.1.1.16-rc are stupid this way.) + + To calculate weighted MTBF, compute the weighted mean of the lengths + of all intervals when the router was observed to be up, weighting + intervals by $\alpha^n$, where $n$ is the amount of time that has + passed since the interval ended, and $\alpha$ is chosen so that + measurements over approximately one month old no longer influence the + weighted MTBF much. + + [XXXX what happens when we have less than 4 days of MTBF info.] + + "Exit" -- A router is called an 'Exit' iff it allows exits to at + least two of the ports 80, 443, and 6667 and allows exits to at + least one /8 address space. + + "Fast" -- A router is 'Fast' if it is active, and its bandwidth is + either in the top 7/8ths for known active routers or at least 100KB/s. + + "Guard" -- A router is a possible 'Guard' if its Weighted Fractional + Uptime is at least the median for "familiar" active routers, and if + its bandwidth is at least median or at least 250KB/s. + If the total bandwidth of active non-BadExit Exit servers is less + than one third of the total bandwidth of all active servers, no Exit is + listed as a Guard. + + To calculate weighted fractional uptime, compute the fraction + of time that the router is up in any given day, weighting so that + downtime and uptime in the past counts less. + + A node is 'familiar' if 1/8 of all active nodes have appeared more + recently than it, OR it has been around for a few weeks. + + "Authority" -- A router is called an 'Authority' if the authority + generating the network-status document believes it is an authority. + + "V2Dir" -- A router supports the v2 directory protocol if it has an open + directory port, and it is running a version of the directory protocol that + supports the functionality clients need. (Currently, this is + 0.1.1.9-alpha or later.) + + "V3Dir" -- A router supports the v3 directory protocol if it has an open + directory port, and it is running a version of the directory protocol that + supports the functionality clients need. (Currently, this is + 0.2.0.?????-alpha or later.) + + "HSDir" -- A router is a v2 hidden service directory if it stores and + serves v2 hidden service descriptors and the authority managed to connect + to it successfully within the last 24 hours. + + Directory server administrators may label some servers or IPs as + blacklisted, and elect not to include them in their network-status lists. + + Authorities SHOULD 'disable' any servers in excess of 3 on any single IP. + When there are more than 3 to choose from, authorities should first prefer + authorities to non-authorities, then prefer Running to non-Running, and + then prefer high-bandwidth to low-bandwidth. To 'disable' a server, the + authority *should* advertise it without the Running or Valid flag. + + Thus, the network-status vote includes all non-blacklisted, + non-expired, non-superseded descriptors. 
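   As an illustration of the bandwidth thresholds above, the sketch below
   shows how the Fast test and the bandwidth part of the Guard test might be
   computed over the set of active routers. The class and method names are
   invented, and taking the value at the 1/8 index of the sorted array is a
   simplification of "in the top 7/8ths":

      import java.util.Arrays;

      final class FlagThresholds {
          // bw[] holds the bandwidth (bytes/s) of every known active router.
          static boolean isFast(double routerBw, double[] bw) {
              double[] sorted = bw.clone();
              Arrays.sort(sorted);
              double cutoff = sorted[sorted.length / 8]; // bottom-1/8 boundary
              return routerBw >= cutoff || routerBw >= 100 * 1024; // 100KB/s
          }

          // Only the bandwidth half of the Guard test; the Weighted Fractional
          // Uptime requirement described above is not covered here.
          static boolean bandwidthOkForGuard(double routerBw, double[] bw) {
              double[] sorted = bw.clone();
              Arrays.sort(sorted);
              double median = sorted[sorted.length / 2];
              return routerBw >= median || routerBw >= 250 * 1024; // 250KB/s
          }
      }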
+ + The bandwidth in a "w" line should be taken as the best estimate + of the router's actual capacity that the authority has. For now, + this should be the lesser of the observed bandwidth and bandwidth + rate limit from the router descriptor. It is given in kilobytes + per second, and capped at some arbitrary value (currently 10 MB/s). + + The Measured= keyword on a "w" line vote is currently computed + by multiplying the previous published consensus bandwidth by the + ratio of the measured average node stream capacity to the network + average. If 3 or more authorities provide a Measured= keyword for + a router, the authorites produce a consensus containing a "w" + Bandwidth= keyword equal to the median of the Measured= votes. + + The ports listed in a "p" line should be taken as those ports for + which the router's exit policy permits 'most' addresses, ignoring any + accept not for all addresses, ignoring all rejects for private + netblocks. "Most" addresses are permitted if no more than 2^25 + IPv4 addresses (two /8 networks) were blocked. The list is encoded + as described in 3.4.2. + +3.4. Computing a consensus from a set of votes + + Given a set of votes, authorities compute the contents of the consensus + document as follows: + + The "valid-after", "valid-until", and "fresh-until" times are taken as + the median of the respective values from all the votes. + + The times in the "voting-delay" line are taken as the median of the + VoteSeconds and DistSeconds times in the votes. + + Known-flags is the union of all flags known by any voter. + + Entries are given on the "params" line for every keyword on which any + authority voted. The values given are the low-median of all votes on + that keyword. + + "client-versions" and "server-versions" are sorted in ascending + order; A version is recommended in the consensus if it is recommended + by more than half of the voting authorities that included a + client-versions or server-versions lines in their votes. + + The authority item groups (dir-source, contact, fingerprint, + vote-digest) are taken from the votes of the voting + authorities. These groups are sorted by the digests of the + authorities identity keys, in ascending order. If the consensus + method is 3 or later, a dir-source line must be included for + every vote with legacy-key entry, using the legacy-key's + fingerprint, the voter's ordinary nickname with the string + "-legacy" appended, and all other fields as from the original + vote's dir-source line. + + A router status entry: + * is included in the result if some router status entry with the same + identity is included by more than half of the authorities (total + authorities, not just those whose votes we have). + + * For any given identity, we include at most one router status entry. + + * A router entry has a flag set if that is included by more than half + of the authorities who care about that flag. + + * Two router entries are "the same" if they have the same + tuple. + We choose the tuple for a given router as whichever tuple appears + for that router in the most votes. We break ties first in favor of + the more recently published, then in favor of smaller server + descriptor digest. + + * The Named flag appears if it is included for this routerstatus by + _any_ authority, and if all authorities that list it list the same + nickname. However, if consensus-method 2 or later is in use, and + any authority calls this identity/nickname pair Unnamed, then + this routerstatus does not get the Named flag. 
+ + * If consensus-method 2 or later is in use, the Unnamed flag is + set for a routerstatus if any authorities have voted for a different + identities to be Named with that nickname, or if any authority + lists that nickname/ID pair as Unnamed. + + (With consensus-method 1, Unnamed is set like any other flag.) + + * The version is given as whichever version is listed by the most + voters, with ties decided in favor of more recent versions. + + * If consensus-method 4 or later is in use, then routers that + do not have the Running flag are not listed at all. + + * If consensus-method 5 or later is in use, then the "w" line + is generated using a low-median of the bandwidth values from + the votes that included "w" lines for this router. + + * If consensus-method 5 or later is in use, then the "p" line + is taken from the votes that have the same policy summary + for the descriptor we are listing. (They should all be the + same. If they are not, we pick the most commonly listed + one, breaking ties in favor of the lexicographically larger + vote.) The port list is encoded as specified in 3.4.2. + + * If consensus-method 6 or later is in use and if 3 or more + authorities provide a Measured= keyword in their votes for + a router, the authorities produce a consensus containing a + Bandwidth= keyword equal to the median of the Measured= votes. + + * If consensus-method 7 or later is in use, the params line is + included in the output. + + The signatures at the end of a consensus document are sorted in + ascending order by identity digest. + + All ties in computing medians are broken in favor of the smaller or + earlier item. + +3.4.1. Forward compatibility + + Future versions of Tor will need to include new information in the + consensus documents, but it is important that all authorities (or at least + half) generate and sign the same signed consensus. + + To achieve this, authorities list in their votes their supported methods + for generating consensuses from votes. Later methods will be assigned + higher numbers. Currently recognized methods: + "1" -- The first implemented version. + "2" -- Added support for the Unnamed flag. + "3" -- Added legacy ID key support to aid in authority ID key rollovers + "4" -- No longer list routers that are not running in the consensus + "5" -- adds support for "w" and "p" lines. + "6" -- Prefers measured bandwidth values rather than advertised + + Before generating a consensus, an authority must decide which consensus + method to use. To do this, it looks for the highest version number + supported by more than 2/3 of the authorities voting. If it supports this + method, then it uses it. Otherwise, it falls back to method 1. + + (The consensuses generated by new methods must be parsable by + implementations that only understand the old methods, and must not cause + those implementations to compromise their anonymity. This is a means for + making changes in the contents of consensus; not for making + backward-incompatible changes in their format.) + +3.4.2. Encoding port lists + + Whether the summary shows the list of accepted ports or the list of + rejected ports depends on which list is shorter (has a shorter string + representation). In case of ties we choose the list of accepted + ports. As an exception to this rule an allow-all policy is + represented as "accept 1-65535" instead of "reject " and a reject-all + policy is similarly given as "reject 1-65535". 
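   A hypothetical sketch of the two mechanical parts of this encoding:
   compressing a sorted port list into ranges, and choosing the shorter of the
   accept/reject forms, with ties going to accept. The special cases for
   allow-all and reject-all policies noted above are omitted:

      import java.util.List;

      final class PortSummary {
          // Compress a sorted list of ports into "80-100,443"-style ranges.
          static String compress(List<Integer> sortedPorts) {
              StringBuilder sb = new StringBuilder();
              int i = 0;
              while (i < sortedPorts.size()) {
                  int start = sortedPorts.get(i), end = start;
                  while (i + 1 < sortedPorts.size()
                          && sortedPorts.get(i + 1) == end + 1) {
                      end = sortedPorts.get(++i);
                  }
                  if (sb.length() > 0) sb.append(',');
                  sb.append(start == end ? Integer.toString(start)
                                         : start + "-" + end);
                  i++;
              }
              return sb.toString();
          }

          // Pick whichever encoding is shorter; ties go to the accept form.
          static String choose(String acceptList, String rejectList) {
              String accept = "accept " + acceptList;
              String reject = "reject " + rejectList;
              return accept.length() <= reject.length() ? accept : reject;
          }
      }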
+ + Summary items are compressed, that is instead of "80-88,89-100" there + only is a single item of "80-100", similarly instead of "20,21" a + summary will say "20-21". + + Port lists are sorted in ascending order. + + The maximum allowed length of a policy summary (including the "accept " + or "reject ") is 1000 characters. If a summary exceeds that length we + use an accept-style summary and list as much of the port list as is + possible within these 1000 bytes. [XXXX be more specific.] + +3.5. Detached signatures + + Assuming full connectivity, every authority should compute and sign the + same consensus directory in each period. Therefore, it isn't necessary to + download the consensus computed by each authority; instead, the + authorities only push/fetch each others' signatures. A "detached + signature" document contains items as follows: + + "consensus-digest" SP Digest NL + + [At start, at most once.] + + The digest of the consensus being signed. + + "valid-after" SP YYYY-MM-DD SP HH:MM:SS NL + "fresh-until" SP YYYY-MM-DD SP HH:MM:SS NL + "valid-until" SP YYYY-MM-DD SP HH:MM:SS NL + + [As in the consensus] + + "directory-signature" + + [As in the consensus; the signature object is the same as in the + consensus document.] + + +4. Directory server operation + + All directory authorities and directory caches ("directory servers") + implement this section, except as noted. + +4.1. Accepting uploads (authorities only) + + When a router posts a signed descriptor to a directory authority, the + authority first checks whether it is well-formed and correctly + self-signed. If it is, the authority next verifies that the nickname + in question is not already assigned to a router with a different + public key. + Finally, the authority MAY check that the router is not blacklisted + because of its key, IP, or another reason. + + If the descriptor passes these tests, and the authority does not already + have a descriptor for a router with this public key, it accepts the + descriptor and remembers it. + + If the authority _does_ have a descriptor with the same public key, the + newly uploaded descriptor is remembered if its publication time is more + recent than the most recent old descriptor for that router, and either: + - There are non-cosmetic differences between the old descriptor and the + new one. + - Enough time has passed between the descriptors' publication times. + (Currently, 12 hours.) + + Differences between router descriptors are "non-cosmetic" if they would be + sufficient to force an upload as described in section 2 above. + + Note that the "cosmetic difference" test only applies to uploaded + descriptors, not to descriptors that the authority downloads from other + authorities. + + When a router posts a signed extra-info document to a directory authority, + the authority again checks it for well-formedness and correct signature, + and checks that its matches the extra-info-digest in some router + descriptor that it believes is currently useful. If so, it accepts it and + stores it and serves it as requested. If not, it drops it. + +4.2. Voting (authorities only) + + Authorities divide time into Intervals. Authority administrators SHOULD + try to all pick the same interval length, and SHOULD pick intervals that + are commonly used divisions of time (e.g., 5 minutes, 15 minutes, 30 + minutes, 60 minutes, 90 minutes). Voting intervals SHOULD be chosen to + divide evenly into a 24-hour day. + + Authorities SHOULD act according to interval and delays in the + latest consensus. 
Lacking a latest consensus, they SHOULD default to a
+   30-minute Interval, a 5 minute VotingDelay, and a 5 minute DistDelay.
+
+   Authorities MUST take pains to ensure that their clocks remain accurate
+   within a few seconds.  (Running NTP is usually sufficient.)
+
+   The first voting period of each day begins at 00:00 (midnight) GMT.  If
+   the last period of the day would be truncated by one-half or more, it is
+   merged with the second-to-last period.
+
+   An authority SHOULD publish its vote immediately at the start of each voting
+   period (minus VoteSeconds+DistSeconds).  It does this by making it
+   available at
+      http://<hostname>/tor/status-vote/next/authority.z
+   and sending it in an HTTP POST request to each other authority at the URL
+      http://<hostname>/tor/post/vote
+
+   If, at the start of the voting period, minus DistSeconds, an authority
+   does not have a current statement from another authority, the first
+   authority downloads the other's statement.
+
+   Once an authority has a vote from another authority, it makes it available
+   at
+      http://<hostname>/tor/status-vote/next/<fp>.z
+   where <fp> is the fingerprint of the other authority's identity key.
+   And at
+      http://<hostname>/tor/status-vote/next/d/<d>.z
+   where <d> is the digest of the vote document.
+
+   The consensus status, along with as many signatures as the server
+   currently knows, should be available at
+      http://<hostname>/tor/status-vote/next/consensus.z
+   All of the detached signatures it knows for consensus status should be
+   available at:
+      http://<hostname>/tor/status-vote/next/consensus-signatures.z
+
+   Once there are enough signatures, or once the voting period starts,
+   these documents are available at
+      http://<hostname>/tor/status-vote/current/consensus.z
+   and
+      http://<hostname>/tor/status-vote/current/consensus-signatures.z
+   [XXX current/consensus-signatures is not currently implemented, as it
+    is not used in the voting protocol.]
+
+   The other vote documents are analogously made available under
+      http://<hostname>/tor/status-vote/current/authority.z
+      http://<hostname>/tor/status-vote/current/<fp>.z
+      http://<hostname>/tor/status-vote/current/d/<d>.z
+   once the consensus is complete.
+
+   Once an authority has computed and signed a consensus network status, it
+   should send its detached signature to each other authority in an HTTP POST
+   request to the URL:
+      http://<hostname>/tor/post/consensus-signature
+
+   [XXX Note why we support push-and-then-pull.]
+
+   [XXX possible future features include support for downloading old
+    consensuses.]
+
+4.3. Downloading consensus status documents (caches only)
+
+   All directory servers (authorities and caches) try to keep a recent
+   network-status consensus document to serve to clients.  A cache ALWAYS
+   downloads a network-status consensus if any of the following are true:
+     - The cache has no consensus document.
+     - The cache's consensus document is no longer valid.
+   Otherwise, the cache downloads a new consensus document at a randomly
+   chosen time in the first half-interval after its current consensus
+   stops being fresh.  (This time is chosen at random to avoid swarming
+   the authorities at the start of each period.  The interval size is
+   inferred from the difference between the valid-after time and the
+   fresh-until time on the consensus.)
+
+   [For example, if a cache has a consensus that became valid at 1:00,
+    and is fresh until 2:00, that cache will fetch a new consensus at
+    a random time between 2:00 and 2:30.]
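   The bracketed example above amounts to a small calculation; a hypothetical
   Java sketch (times are milliseconds since the epoch, and all names are
   invented for illustration):

      import java.util.Random;

      final class CacheFetchTime {
          // Pick a random time in the first half-interval after the current
          // consensus stops being fresh, as described above.
          static long chooseFetchTime(long validAfter, long freshUntil,
                                      Random rng) {
              long interval  = freshUntil - validAfter;    // e.g. one hour
              long windowEnd = freshUntil + interval / 2;  // e.g. 2:00 .. 2:30
              return freshUntil
                      + (long) (rng.nextDouble() * (windowEnd - freshUntil));
          }
      }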
+
+4.4. Downloading and storing router descriptors (authorities and caches)
+
+   Periodically (currently, every 10 seconds), directory servers check
+   whether there are any specific descriptors that they do not have and that
+   they are not currently trying to download.  Caches identify these
+   descriptors by hash in the recent network-status consensus documents;
+   authorities identify them by hash in votes (if the publication date is more
+   recent than the descriptor we currently have).
+
+   [XXXX need a way to fetch descriptors ahead of the vote?  v2 status docs can
+    do that for now.]
+
+   If so, the directory server launches requests to the authorities for these
+   descriptors, such that each authority is only asked for descriptors listed
+   in its most recent vote (if the requester is an authority) or in the
+   consensus (if the requester is a cache).  If we're an authority, and more
+   than one authority lists the descriptor, we choose which to ask at random.
+
+   If one of these downloads fails, we do not try to download that descriptor
+   from the authority that failed to serve it again unless we receive a newer
+   network-status (consensus or vote) from that authority that lists the same
+   descriptor.
+
+   Directory servers must potentially cache multiple descriptors for each
+   router.  Servers must not discard any descriptor listed by any recent
+   consensus.  If there is enough space to store additional descriptors,
+   servers SHOULD try to hold those which clients are likely to download the
+   most.  (Currently, this is judged based on the interval for which each
+   descriptor seemed newest.)
+   [XXXX define recent]
+
+   Authorities SHOULD NOT download descriptors for routers that they would
+   immediately reject for reasons listed in 3.1.
+
+4.5. Downloading and storing extra-info documents
+
+   All authorities, and any cache that chooses to cache extra-info documents,
+   and any client that uses extra-info documents, should implement this
+   section.
+
+   Note that generally, clients don't need extra-info documents.
+
+   Periodically, the Tor instance checks whether it is missing any extra-info
+   documents: in other words, if it has any router descriptors with an
+   extra-info-digest field that does not match any of the extra-info
+   documents currently held.  If so, it downloads whatever extra-info
+   documents are missing.  Caches download from authorities; non-caches try
+   to download from caches.  We follow the same splitting and back-off rules
+   as in 4.4 (if a cache) or 5.3 (if a client).
+
+4.6. General-use HTTP URLs
+
+   "Fingerprints" in these URLs are base-16-encoded SHA1 hashes.
+
+   The most recent v3 consensus should be available at:
+      http://<hostname>/tor/status-vote/current/consensus.z
+
+   Starting with Tor version 0.2.1.1-alpha, it is also available at:
+      http://<hostname>/tor/status-vote/current/consensus/<F1>+<F2>+<F3>.z
+
+   Where F1, F2, etc. are authority identity fingerprints the client trusts.
+   Servers will only return a consensus if more than half of the requested
+   authorities have signed the document; otherwise a 404 error will be sent
+   back.  The fingerprints can be shortened to a length of any multiple of
+   two, using only the leftmost part of the encoded fingerprint.  Tor uses
+   3 bytes (6 hex characters) of the fingerprint.
+
+   Clients SHOULD sort the fingerprints in ascending order.  Servers MUST
+   accept any order.
+
+   Clients SHOULD use this format when requesting consensus documents from
+   directory authority servers and from caches running a version of Tor
+   that is known to support this URL format.
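   A hypothetical sketch of how a client might build the fingerprint-qualified
   consensus URL described above, using 6 hex characters per fingerprint and
   ascending order. The hostname argument and the helper names are
   placeholders:

      import java.util.ArrayList;
      import java.util.Collections;
      import java.util.List;

      final class ConsensusUrl {
          // fingerprints: full 40-character hex authority identity fingerprints.
          static String build(String hostname, List<String> fingerprints) {
              List<String> prefixes = new ArrayList<>();
              for (String fp : fingerprints)
                  prefixes.add(fp.substring(0, 6).toUpperCase()); // 3 bytes
              Collections.sort(prefixes);                          // ascending
              return "http://" + hostname
                      + "/tor/status-vote/current/consensus/"
                      + String.join("+", prefixes) + ".z";
          }
      }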
+
+   A concatenated set of all the current key certificates should be available
+   at:
+      http://<hostname>/tor/keys/all.z
+
+   The key certificate for this server (if it is an authority) should be
+   available at:
+      http://<hostname>/tor/keys/authority.z
+
+   The key certificate for an authority whose authority identity fingerprint
+   is <F> should be available at:
+      http://<hostname>/tor/keys/fp/<F>.z
+
+   The key certificate whose signing key fingerprint is <F> should be
+   available at:
+      http://<hostname>/tor/keys/sk/<F>.z
+
+   The key certificate whose identity key fingerprint is <F> and whose signing
+   key fingerprint is <S> should be available at:
+
+      http://<hostname>/tor/keys/fp-sk/<F>-<S>.z
+
+   (As usual, clients may request multiple certificates using:
+      http://<hostname>/tor/keys/fp-sk/<F1>-<S1>+<F2>-<S2>.z )
+   [The above fp-sk format was not supported before Tor 0.2.1.9-alpha.]
+
+   The most recent descriptor for a server whose identity key has a
+   fingerprint of <F> should be available at:
+      http://<hostname>/tor/server/fp/<F>.z
+
+   The most recent descriptors for servers with identity fingerprints
+   <F1>,<F2>,<F3> should be available at:
+      http://<hostname>/tor/server/fp/<F1>+<F2>+<F3>.z
+
+   (NOTE: Implementations SHOULD NOT download descriptors by identity key
+    fingerprint.  This allows a corrupted server (in collusion with a cache) to
+    provide a unique descriptor to a client, and thereby partition that client
+    from the rest of the network.)
+
+   The server descriptor with (descriptor) digest <D> (in hex) should be
+   available at:
+      http://<hostname>/tor/server/d/<D>.z
+
+   The most recent descriptors with digests <D1>,<D2>,<D3> should be
+   available at:
+      http://<hostname>/tor/server/d/<D1>+<D2>+<D3>.z
+
+   The most recent descriptor for this server should be at:
+      http://<hostname>/tor/server/authority.z
+   [Nothing in the Tor protocol uses this resource yet, but it is useful
+    for debugging purposes.  Also, the official Tor implementations
+    (starting at 0.1.1.x) use this resource to test whether a server's
+    own DirPort is reachable.]
+
+   A concatenated set of the most recent descriptors for all known servers
+   should be available at:
+      http://<hostname>/tor/server/all.z
+
+   Extra-info documents are available at the URLs
+      http://<hostname>/tor/extra/d/...
+      http://<hostname>/tor/extra/fp/...
+      http://<hostname>/tor/extra/all[.z]
+      http://<hostname>/tor/extra/authority[.z]
+   (As for /tor/server/ URLs: supports fetching extra-info
+    documents by their digest, by the fingerprint of their servers,
+    or all at once.  When serving by fingerprint, we serve the
+    extra-info that corresponds to the descriptor we would serve by
+    that fingerprint.  Only directory authorities of version
+    0.2.0.1-alpha or later are guaranteed to support the first
+    three classes of URLs.  Caches may support them, and MUST
+    support them if they have advertised "caches-extra-info".)
+
+   For debugging, directories SHOULD expose non-compressed objects at URLs like
+   the above, but without the final ".z".
+   Clients MUST handle compressed concatenated information in two forms:
+     - A concatenated list of zlib-compressed objects.
+     - A zlib-compressed concatenated list of objects.
+   Directory servers MAY generate either format: the former requires less
+   CPU, but the latter requires less bandwidth.
+
+   Clients SHOULD use upper case letters (A-F) when base16-encoding
+   fingerprints.  Servers MUST accept both upper and lower case fingerprints
+   in requests.
+
+5. Client operation: downloading information
+
+   Every Tor that is not a directory server (that is, those that do
+   not have a DirPort set) implements this section.
+
+5.1. Downloading network-status documents
+
+   Each client maintains a list of directory authorities.
Insofar as + possible, clients SHOULD all use the same list. + + Clients try to have a live consensus network-status document at all times. + A network-status document is "live" if the time in its valid-until field + has not passed. + + If a client is missing a live network-status document, it tries to fetch + it from a directory cache (or from an authority if it knows no caches). + On failure, the client waits briefly, then tries that network-status + document again from another cache. The client does not build circuits + until it has a live network-status consensus document, and it has + descriptors for more than 1/4 of the routers that it believes are running. + + (Note: clients can and should pick caches based on the network-status + information they have: once they have first fetched network-status info + from an authority, they should not need to go to the authority directly + again.) + + To avoid swarming the caches whenever a consensus expires, the + clients download new consensuses at a randomly chosen time after the + caches are expected to have a fresh consensus, but before their + consensus will expire. (This time is chosen uniformly at random from + the interval between the time 3/4 into the first interval after the + consensus is no longer fresh, and 7/8 of the time remaining after + that before the consensus is invalid.) + + [For example, if a cache has a consensus that became valid at 1:00, + and is fresh until 2:00, and expires at 4:00, that cache will fetch + a new consensus at a random time between 2:45 and 3:50, since 3/4 + of the one-hour interval is 45 minutes, and 7/8 of the remaining 75 + minutes is 65 minutes.] + +5.2. Downloading and storing router descriptors + + Clients try to have the best descriptor for each router. A descriptor is + "best" if: + * It is listed in the consensus network-status document. + + Periodically (currently every 10 seconds) clients check whether there are + any "downloadable" descriptors. A descriptor is downloadable if: + - It is the "best" descriptor for some router. + - The descriptor was published at least 10 minutes in the past. + (This prevents clients from trying to fetch descriptors that the + mirrors have probably not yet retrieved and cached.) + - The client does not currently have it. + - The client is not currently trying to download it. + - The client would not discard it immediately upon receiving it. + - The client thinks it is running and valid (see 6.1 below). + + If at least 16 known routers have downloadable descriptors, or if + enough time (currently 10 minutes) has passed since the last time the + client tried to download descriptors, it launches requests for all + downloadable descriptors, as described in 5.3 below. + + When a descriptor download fails, the client notes it, and does not + consider the descriptor downloadable again until a certain amount of time + has passed. (Currently 0 seconds for the first failure, 60 seconds for the + second, 5 minutes for the third, 10 minutes for the fourth, and 1 day + thereafter.) Periodically (currently once an hour) clients reset the + failure count. + + Clients retain the most recent descriptor they have downloaded for each + router so long as it is not too old (currently, 48 hours), OR so long as + no better descriptor has been downloaded for the same router. + + [Versions of Tor before 0.1.2.3-alpha would discard descriptors simply for + being published too far in the past.] [The code seems to discard + descriptors in all cases after they're 5 days old. True? 
-RD] + +5.3. Managing downloads + + When a client has no consensus network-status document, it downloads it + from a randomly chosen authority. In all other cases, the client + downloads from caches randomly chosen from among those believed to be V2 + directory servers. (This information comes from the network-status + documents; see 6 below.) + + When downloading multiple router descriptors, the client chooses multiple + mirrors so that: + - At least 3 different mirrors are used, except when this would result + in more than one request for under 4 descriptors. + - No more than 128 descriptors are requested from a single mirror. + - Otherwise, as few mirrors as possible are used. + After choosing mirrors, the client divides the descriptors among them + randomly. + + After receiving any response client MUST discard any network-status + documents and descriptors that it did not request. + +6. Using directory information + + Everyone besides directory authorities uses the approaches in this section + to decide which servers to use and what their keys are likely to be. + (Directory authorities just believe their own opinions, as in 3.1 above.) + +6.1. Choosing routers for circuits. + + Circuits SHOULD NOT be built until the client has enough directory + information: a live consensus network status [XXXX fallback?] and + descriptors for at least 1/4 of the servers believed to be running. + + A server is "listed" if it is included by the consensus network-status + document. Clients SHOULD NOT use unlisted servers. + + These flags are used as follows: + + - Clients SHOULD NOT use non-'Valid' or non-'Running' routers unless + requested to do so. + + - Clients SHOULD NOT use non-'Fast' routers for any purpose other than + very-low-bandwidth circuits (such as introduction circuits). + + - Clients SHOULD NOT use non-'Stable' routers for circuits that are + likely to need to be open for a very long time (such as those used for + IRC or SSH connections). + + - Clients SHOULD NOT choose non-'Guard' nodes when picking entry guard + nodes. + + - Clients SHOULD NOT download directory information from non-'V2Dir' + caches. + + See the "path-spec.txt" document for more details. + +6.2. Managing naming + + In order to provide human-memorable names for individual server + identities, some directory servers bind names to IDs. Clients handle + names in two ways: + + When a client encounters a name it has not mapped before: + + If the consensus lists any router with that name as "Named", or if + consensus-method 2 or later is in use and the consensus lists any + router with that name as having the "Unnamed" flag, then the name is + bound. (It's bound to the ID listed in the entry with the Named, + or to an unknown ID if no name is found.) + + When the user refers to a bound name, the implementation SHOULD provide + only the router with ID bound to that name, and no other router, even + if the router with the right ID can't be found. + + When a user tries to refer to a non-bound name, the implementation SHOULD + warn the user. After warning the user, the implementation MAY use any + router that advertises the name. + + Not every router needs a nickname. When a router doesn't configure a + nickname, it publishes with the default nickname "Unnamed". Authorities + SHOULD NOT ever mark a router with this nickname as Named; client software + SHOULD NOT ever use a router in response to a user request for a router + called "Unnamed". + +6.3. 
Software versions + + An implementation of Tor SHOULD warn when it has fetched a consensus + network-status, and it is running a software version not listed. + +6.4. Warning about a router's status. + + If a router tries to publish its descriptor to a Naming authority + that has its nickname mapped to another key, the router SHOULD + warn the operator that it is either using the wrong key or is using + an already claimed nickname. + + If a router has fetched a consensus document,, and the + authorities do not publish a binding for the router's nickname, the + router MAY remind the operator that the chosen nickname is not + bound to this key at the authorities, and suggest contacting the + authority operators. + + ... + +6.5. Router protocol versions + + A client should believe that a router supports a given feature if that + feature is supported by the router or protocol versions in more than half + of the live networkstatuses' "v" entries for that router. In other words, + if the "v" entries for some router are: + v Tor 0.0.8pre1 (from authority 1) + v Tor 0.1.2.11 (from authority 2) + v FutureProtocolDescription 99 (from authority 3) + then the client should believe that the router supports any feature + supported by 0.1.2.11. + + This is currently equivalent to believing the median declared version for + a router in all live networkstatuses. + +7. Standards compliance + + All clients and servers MUST support HTTP 1.0. Clients and servers MAY + support later versions of HTTP as well. + +7.1. HTTP headers + + Servers MAY set the Content-Length: header. Servers SHOULD set + Content-Encoding to "deflate" or "identity". + + Servers MAY include an X-Your-Address-Is: header, whose value is the + apparent IP address of the client connecting to them (as a dotted quad). + For directory connections tunneled over a BEGIN_DIR stream, servers SHOULD + report the IP from which the circuit carrying the BEGIN_DIR stream reached + them. [Servers before version 0.1.2.5-alpha reported 127.0.0.1 for all + BEGIN_DIR-tunneled connections.] + + Servers SHOULD disable caching of multiple network statuses or multiple + router descriptors. Servers MAY enable caching of single descriptors, + single network statuses, the list of all router descriptors, a v1 + directory, or a v1 running routers document. XXX mention times. + +7.2. HTTP status codes + + Tor delivers the following status codes. Some were chosen without much + thought; other code SHOULD NOT rely on specific status codes yet. + + 200 -- the operation completed successfully + -- the user requested statuses or serverdescs, and none of the ones we + requested were found (0.2.0.4-alpha and earlier). + + 304 -- the client specified an if-modified-since time, and none of the + requested resources have changed since that time. + + 400 -- the request is malformed, or + -- the URL is for a malformed variation of one of the URLs we support, + or + -- the client tried to post to a non-authority, or + -- the authority rejected a malformed posted document, or + + 404 -- the requested document was not found. + -- the user requested statuses or serverdescs, and none of the ones + requested were found (0.2.0.5-alpha and later). + + 503 -- we are declining the request in order to save bandwidth + -- user requested some items that we ordinarily generate or store, + but we do not have any available. + +9. 
Backward compatibility and migration plans + + Until Tor versions before 0.1.1.x are completely obsolete, directory + authorities should generate, and mirrors should download and cache, v1 + directories and running-routers lists, and allow old clients to download + them. These documents and the rules for retrieving, serving, and caching + them are described in dir-spec-v1.txt. + + Until Tor versions before 0.2.0.x are completely obsolete, directory + authorities should generate, mirrors should download and cache, v2 + network-status documents, and allow old clients to download them. + Additionally, all directory servers and caches should download, store, and + serve any router descriptor that is required because of v2 network-status + documents. These documents and the rules for retrieving, serving, and + caching them are described in dir-spec-v1.txt. + +A. Consensus-negotiation timeline. + + + Period begins: this is the Published time. + Everybody sends votes + Reconciliation: everybody tries to fetch missing votes. + consensus may exist at this point. + End of voting period: + everyone swaps signatures. + Now it's okay for caches to download + Now it's okay for clients to download. + + Valid-after/valid-until switchover + diff --git a/orchid/doc/spec/path-spec.txt b/orchid/doc/spec/path-spec.txt new file mode 100644 index 00000000..78f3b63b --- /dev/null +++ b/orchid/doc/spec/path-spec.txt @@ -0,0 +1,437 @@ + + Tor Path Specification + + Roger Dingledine + Nick Mathewson + +Note: This is an attempt to specify Tor as currently implemented. Future +versions of Tor will implement improved algorithms. + +This document tries to cover how Tor chooses to build circuits and assign +streams to circuits. Other implementations MAY take other approaches, but +implementors should be aware of the anonymity and load-balancing implications +of their choices. + + THIS SPEC ISN'T DONE YET. + +1. General operation + + Tor begins building circuits as soon as it has enough directory + information to do so (see section 5 of dir-spec.txt). Some circuits are + built preemptively because we expect to need them later (for user + traffic), and some are built because of immediate need (for user traffic + that no current circuit can handle, for testing the network or our + reachability, and so on). + + When a client application creates a new stream (by opening a SOCKS + connection or launching a resolve request), we attach it to an appropriate + open circuit if one exists, or wait if an appropriate circuit is + in-progress. We launch a new circuit only + if no current circuit can handle the request. We rotate circuits over + time to avoid some profiling attacks. + + To build a circuit, we choose all the nodes we want to use, and then + construct the circuit. Sometimes, when we want a circuit that ends at a + given hop, and we have an appropriate unused circuit, we "cannibalize" the + existing circuit and extend it to the new terminus. + + These processes are described in more detail below. + + This document describes Tor's automatic path selection logic only; path + selection can be overridden by a controller (with the EXTENDCIRCUIT and + ATTACHSTREAM commands). Paths constructed through these means may + violate some constraints given below. + +1.1. Terminology + + A "path" is an ordered sequence of nodes, not yet built as a circuit. + + A "clean" circuit is one that has not yet been used for any traffic. 
+ + A "fast" or "stable" or "valid" node is one that has the 'Fast' or + 'Stable' or 'Valid' flag + set respectively, based on our current directory information. A "fast" + or "stable" circuit is one consisting only of "fast" or "stable" nodes. + + In an "exit" circuit, the final node is chosen based on waiting stream + requests if any, and in any case it avoids nodes with exit policy of + "reject *:*". An "internal" circuit, on the other hand, is one where + the final node is chosen just like a middle node (ignoring its exit + policy). + + A "request" is a client-side stream or DNS resolve that needs to be + served by a circuit. + + A "pending" circuit is one that we have started to build, but which has + not yet completed. + + A circuit or path "supports" a request if it is okay to use the + circuit/path to fulfill the request, according to the rules given below. + A circuit or path "might support" a request if some aspect of the request + is unknown (usually its target IP), but we believe the path probably + supports the request according to the rules given below. + +1.1. A server's bandwidth + + Old versions of Tor did not report bandwidths in network status + documents, so clients had to learn them from the routers' advertised + server descriptors. + + For versions of Tor prior to 0.2.1.17-rc, everywhere below where we + refer to a server's "bandwidth", we mean its clipped advertised + bandwidth, computed by taking the smaller of the 'rate' and + 'observed' arguments to the "bandwidth" element in the server's + descriptor. If a router's advertised bandwidth is greater than + MAX_BELIEVABLE_BANDWIDTH (currently 10 MB/s), we clipped to that + value. + + For more recent versions of Tor, we take the bandwidth value declared + in the consensus, and fall back to the clipped advertised bandwidth + only if the consensus does not have bandwidths listed. + +2. Building circuits + +2.1. When we build + +2.1.1. Clients build circuits preemptively + + When running as a client, Tor tries to maintain at least a certain + number of clean circuits, so that new streams can be handled + quickly. To increase the likelihood of success, Tor tries to + predict what circuits will be useful by choosing from among nodes + that support the ports we have used in the recent past (by default + one hour). Specifically, on startup Tor tries to maintain one clean + fast exit circuit that allows connections to port 80, and at least + two fast clean stable internal circuits in case we get a resolve + request or hidden service request (at least three if we _run_ a + hidden service). + + After that, Tor will adapt the circuits that it preemptively builds + based on the requests it sees from the user: it tries to have two fast + clean exit circuits available for every port seen within the past hour + (each circuit can be adequate for many predicted ports -- it doesn't + need two separate circuits for each port), and it tries to have the + above internal circuits available if we've seen resolves or hidden + service activity within the past hour. If there are 12 or more clean + circuits open, it doesn't open more even if it has more predictions. + + Only stable circuits can "cover" a port that is listed in the + LongLivedPorts config option. Similarly, hidden service requests + to ports listed in LongLivedPorts make us create stable internal + circuits. + + Note that if there are no requests from the user for an hour, Tor + will predict no use and build no preemptive circuits. 
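
   [Illustrative only: the port-prediction bookkeeping described above can be
   expressed as a small structure that remembers when each port was last
   requested, expires entries after the one-hour window, and stops requesting
   new preemptive circuits once the 12-circuit cap is reached.  The Java
   sketch below is not taken from any Tor implementation; the class and
   method names are invented, and only the constants come from this section.]

      import java.util.HashMap;
      import java.util.HashSet;
      import java.util.Iterator;
      import java.util.Map;
      import java.util.Set;

      // Hypothetical sketch of the predicted-port tracking in section 2.1.1.
      class PortPredictor {
          private static final long PREDICTION_WINDOW_MS = 60 * 60 * 1000L; // "recent past": one hour
          private static final int MAX_CLEAN_CIRCUITS = 12;                 // don't build beyond this
          private final Map<Integer, Long> lastUsed = new HashMap<Integer, Long>();

          // Called whenever a client stream requests some port.
          void notePortRequest(int port, long nowMs) {
              lastUsed.put(port, nowMs);
          }

          // Ports for which fast clean exit circuits should be kept available.
          Set<Integer> predictedPorts(long nowMs) {
              Set<Integer> predicted = new HashSet<Integer>();
              Iterator<Map.Entry<Integer, Long>> it = lastUsed.entrySet().iterator();
              while (it.hasNext()) {
                  Map.Entry<Integer, Long> entry = it.next();
                  if (nowMs - entry.getValue() > PREDICTION_WINDOW_MS)
                      it.remove();               // no request for an hour: predict no use
                  else
                      predicted.add(entry.getKey());
              }
              return predicted;
          }

          // Whether another preemptive circuit may be built at all.
          boolean mayBuildPreemptive(int cleanCircuitsOpen) {
              return cleanCircuitsOpen < MAX_CLEAN_CIRCUITS;
          }
      }
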
+ + The Tor client SHOULD NOT store its list of predicted requests to a + persistent medium. + +2.1.2. Clients build circuits on demand + + Additionally, when a client request exists that no circuit (built or + pending) might support, we create a new circuit to support the request. + For exit connections, we pick an exit node that will handle the + most pending requests (choosing arbitrarily among ties), launch a + circuit to end there, and repeat until every unattached request + might be supported by a pending or built circuit. For internal + circuits, we pick an arbitrary acceptable path, repeating as needed. + + In some cases we can reuse an already established circuit if it's + clean; see Section 2.3 (cannibalizing circuits) for details. + +2.1.3. Servers build circuits for testing reachability and bandwidth + + Tor servers test reachability of their ORPort once they have + successfully built a circuit (on start and whenever their IP address + changes). They build an ordinary fast internal circuit with themselves + as the last hop. As soon as any testing circuit succeeds, the Tor + server decides it's reachable and is willing to publish a descriptor. + + We launch multiple testing circuits (one at a time), until we + have NUM_PARALLEL_TESTING_CIRC (4) such circuits open. Then we + do a "bandwidth test" by sending a certain number of relay drop + cells down each circuit: BandwidthRate * 10 / CELL_NETWORK_SIZE + total cells divided across the four circuits, but never more than + CIRCWINDOW_START (1000) cells total. This exercises both outgoing and + incoming bandwidth, and helps to jumpstart the observed bandwidth + (see dir-spec.txt). + + Tor servers also test reachability of their DirPort once they have + established a circuit, but they use an ordinary exit circuit for + this purpose. + +2.1.4. Hidden-service circuits + + See section 4 below. + +2.1.5. Rate limiting of failed circuits + + If we fail to build a circuit N times in a X second period (see Section + 2.3 for how this works), we stop building circuits until the X seconds + have elapsed. + XXXX + +2.1.6. When to tear down circuits + + XXXX + +2.2. Path selection and constraints + + We choose the path for each new circuit before we build it. We choose the + exit node first, followed by the other nodes in the circuit. All paths + we generate obey the following constraints: + - We do not choose the same router twice for the same path. + - We do not choose any router in the same family as another in the same + path. + - We do not choose more than one router in a given /16 subnet + (unless EnforceDistinctSubnets is 0). + - We don't choose any non-running or non-valid router unless we have + been configured to do so. By default, we are configured to allow + non-valid routers in "middle" and "rendezvous" positions. + - If we're using Guard nodes, the first node must be a Guard (see 5 + below) + - XXXX Choosing the length + + For circuits that do not need to be "fast", when choosing among + multiple candidates for a path element, we choose randomly. + + For "fast" circuits, we pick a given router as an exit with probability + proportional to its bandwidth. + + For non-exit positions on "fast" circuits, we pick routers as above, but + we weight the bandwidth of Exit-flagged nodes depending + on the fraction of bandwidth available from non-Exit nodes. Call the + total bandwidth for Exit nodes under consideration E, + and the total bandwidth for all nodes under + consideration T. 
If E..exit, the request is rewritten to a request for + , and the request is only supported by the exit whose nickname + or fingerprint is . + +2.3. Cannibalizing circuits + + If we need a circuit and have a clean one already established, in + some cases we can adapt the clean circuit for our new + purpose. Specifically, + + For hidden service interactions, we can "cannibalize" a clean internal + circuit if one is available, so we don't need to build those circuits + from scratch on demand. + + We can also cannibalize clean circuits when the client asks to exit + at a given node -- either via the ".exit" notation or because the + destination is running at the same location as an exit node. + + +2.4. Handling failure + + If an attempt to extend a circuit fails (either because the first create + failed or a subsequent extend failed) then the circuit is torn down and is + no longer pending. (XXXX really?) Requests that might have been + supported by the pending circuit thus become unsupported, and a new + circuit needs to be constructed. + + If a stream "begin" attempt fails with an EXITPOLICY error, we + decide that the exit node's exit policy is not correctly advertised, + so we treat the exit node as if it were a non-exit until we retrieve + a fresh descriptor for it. + + XXXX + +3. Attaching streams to circuits + + When a circuit that might support a request is built, Tor tries to attach + the request's stream to the circuit and sends a BEGIN, BEGIN_DIR, + or RESOLVE relay + cell as appropriate. If the request completes unsuccessfully, Tor + considers the reason given in the CLOSE relay cell. [XXX yes, and?] + + + After a request has remained unattached for SocksTimeout (2 minutes + by default), Tor abandons the attempt and signals an error to the + client as appropriate (e.g., by closing the SOCKS connection). + + XXX Timeouts and when Tor auto-retries. + * What stream-end-reasons are appropriate for retrying. + + If no reply to BEGIN/RESOLVE, then the stream will timeout and fail. + +4. Hidden-service related circuits + + XXX Tracking expected hidden service use (client-side and hidserv-side) + +5. Guard nodes + + We use Guard nodes (also called "helper nodes" in the literature) to + prevent certain profiling attacks. Here's the risk: if we choose entry and + exit nodes at random, and an attacker controls C out of N servers + (ignoring bandwidth), then the + attacker will control the entry and exit node of any given circuit with + probability (C/N)^2. But as we make many different circuits over time, + then the probability that the attacker will see a sample of about (C/N)^2 + of our traffic goes to 1. Since statistical sampling works, the attacker + can be sure of learning a profile of our behavior. + + If, on the other hand, we picked an entry node and held it fixed, we would + have probability C/N of choosing a bad entry and being profiled, and + probability (N-C)/N of choosing a good entry and not being profiled. + + When guard nodes are enabled, Tor maintains an ordered list of entry nodes + as our chosen guards, and stores this list persistently to disk. If a Guard + node becomes unusable, rather than replacing it, Tor adds new guards to the + end of the list. When choosing the first hop of a circuit, Tor + chooses at + random from among the first NumEntryGuards (default 3) usable guards on the + list. If there are not at least 2 usable guards on the list, Tor adds + routers until there are, or until there are no more usable routers to add. 
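
   [Illustrative only: the selection rule above keeps the guard list ordered,
   filters out unusable entries, appends new guards when fewer than two usable
   ones remain, and then picks uniformly from the first NumEntryGuards usable
   guards.  The Java sketch below uses invented type and method names; only
   the constants and the ordering rule come from this section.]

      import java.util.ArrayList;
      import java.util.List;
      import java.util.Random;

      // Hypothetical sketch of entry-guard selection as described in section 5.
      class GuardList {
          interface Guard { boolean isUsable(); }

          private static final int NUM_ENTRY_GUARDS = 3;             // default NumEntryGuards
          private final List<Guard> guards = new ArrayList<Guard>(); // ordered; persisted to disk
          private final Random rng = new Random();

          // Choose the first hop of a new circuit; "candidates" are routers we
          // would add as new guards, in the order we would add them.
          Guard chooseEntryGuard(List<Guard> candidates) {
              List<Guard> usable = new ArrayList<Guard>();
              for (Guard g : guards)
                  if (g.isUsable())
                      usable.add(g);
              // Fewer than 2 usable guards: extend the list rather than replacing entries.
              while (usable.size() < 2 && !candidates.isEmpty()) {
                  Guard added = candidates.remove(0);
                  guards.add(added);                 // new guards always go on the end
                  if (added.isUsable())
                      usable.add(added);
              }
              if (usable.isEmpty())
                  return null;                       // no usable guard available
              int limit = Math.min(NUM_ENTRY_GUARDS, usable.size());
              return usable.get(rng.nextInt(limit)); // uniform among the first NumEntryGuards usable
          }
      }
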
+ + A guard is unusable if any of the following hold: + - it is not marked as a Guard by the networkstatuses, + - it is not marked Valid (and the user hasn't set AllowInvalid entry) + - it is not marked Running + - Tor couldn't reach it the last time it tried to connect + + A guard is unusable for a particular circuit if any of the rules for path + selection in 2.2 are not met. In particular, if the circuit is "fast" + and the guard is not Fast, or if the circuit is "stable" and the guard is + not Stable, or if the guard has already been chosen as the exit node in + that circuit, Tor can't use it as a guard node for that circuit. + + If the guard is excluded because of its status in the networkstatuses for + over 30 days, Tor removes it from the list entirely, preserving order. + + If Tor fails to connect to an otherwise usable guard, it retries + periodically: every hour for six hours, every 4 hours for 3 days, every + 18 hours for a week, and every 36 hours thereafter. Additionally, Tor + retries unreachable guards the first time it adds a new guard to the list, + since it is possible that the old guards were only marked as unreachable + because the network was unreachable or down. + + Tor does not add a guard persistently to the list until the first time we + have connected to it successfully. + +6. Router descriptor purposes + + There are currently three "purposes" supported for router descriptors: + general, controller, and bridge. Most descriptors are of type general + -- these are the ones listed in the consensus, and the ones fetched + and used in normal cases. + + Controller-purpose descriptors are those delivered by the controller + and labelled as such: they will be kept around (and expire like + normal descriptors), and they can be used by the controller in its + CIRCUITEXTEND commands. Otherwise they are ignored by Tor when it + chooses paths. + + Bridge-purpose descriptors are for routers that are used as bridges. See + doc/design-paper/blocking.pdf for more design explanation, or proposal + 125 for specific details. Currently bridge descriptors are used in place + of normal entry guards, for Tor clients that have UseBridges enabled. + + +X. Old notes + +X.1. Do we actually do this? + +How to deal with network down. + - While all helpers are down/unreachable and there are no established + or on-the-way testing circuits, launch a testing circuit. (Do this + periodically in the same way we try to establish normal circuits + when things are working normally.) + (Testing circuits are a special type of circuit, that streams won't + attach to by accident.) + - When a testing circuit succeeds, mark all helpers up and hold + the testing circuit open. + - If a connection to a helper succeeds, close all testing circuits. + Else mark that helper down and try another. + - If the last helper is marked down and we already have a testing + circuit established, then add the first hop of that testing circuit + to the end of our helper node list, close that testing circuit, + and go back to square one. (Actually, rather than closing the + testing circuit, can we get away with converting it to a normal + circuit and beginning to use it immediately?) + + [Do we actually do any of the above? If so, let's spec it. If not, let's + remove it. -NM] + +X.2. A thing we could do to deal with reachability. 
+ +And as a bonus, it leads to an answer to Nick's attack ("If I pick +my helper nodes all on 18.0.0.0:*, then I move, you'll know where I +bootstrapped") -- the answer is to pick your original three helper nodes +without regard for reachability. Then the above algorithm will add some +more that are reachable for you, and if you move somewhere, it's more +likely (though not certain) that some of the originals will become useful. +Is that smart or just complex? + +X.3. Some stuff that worries me about entry guards. 2006 Jun, Nickm. + + It is unlikely for two users to have the same set of entry guards. + Observing a user is sufficient to learn its entry guards. So, as we move + around, entry guards make us linkable. If we want to change guards when + our location (IP? subnet?) changes, we have two bad options. We could + - Drop the old guards. But if we go back to our old location, + we'll not use our old guards. For a laptop that sometimes gets used + from work and sometimes from home, this is pretty fatal. + - Remember the old guards as associated with the old location, and use + them again if we ever go back to the old location. This would be + nasty, since it would force us to record where we've been. + + [Do we do any of this now? If not, this should move into 099-misc or + 098-todo. -NM] + diff --git a/orchid/doc/spec/rend-spec.txt b/orchid/doc/spec/rend-spec.txt new file mode 100644 index 00000000..f0300926 --- /dev/null +++ b/orchid/doc/spec/rend-spec.txt @@ -0,0 +1,751 @@ + + Tor Rendezvous Specification + +0. Overview and preliminaries + + Read + https://www.torproject.org/doc/design-paper/tor-design.html#sec:rendezvous + before you read this specification. It will make more sense. + + Rendezvous points provide location-hidden services (server + anonymity) for the onion routing network. With rendezvous points, + Bob can offer a TCP service (say, a webserver) via the onion + routing network, without revealing the IP of that service. + + Bob does this by anonymously advertising a public key for his + service, along with a list of onion routers to act as "Introduction + Points" for his service. He creates forward circuits to those + introduction points, and tells them about his public key. To + connect to Bob, Alice first builds a circuit to an OR to act as + her "Rendezvous Point." She then connects to one of Bob's chosen + introduction points, optionally provides authentication or + authorization information, and asks it to tell him about her Rendezvous + Point (RP). If Bob chooses to answer, he builds a circuit to her + RP, and tells it to connect him to Alice. The RP joins their + circuits together, and begins relaying cells. Alice's 'BEGIN' + cells are received directly by Bob's OP, which passes data to + and from the local server implementing Bob's service. + + Below we describe a network-level specification of this service, + along with interfaces to make this process transparent to Alice + (so long as she is using an OP). + +0.1. Notation, conventions and prerequisites + + In the specifications below, we use the same notation and terminology + as in "tor-spec.txt". The service specified here also requires the + existence of an onion routing network as specified in that file. + + H(x) is a SHA1 digest of x. + PKSign(SK,x) is a PKCS.1-padded RSA signature of x with SK. + PKEncrypt(SK,x) is a PKCS.1-padded RSA encryption of x with SK. + Public keys are all RSA, and encoded in ASN.1. + All integers are stored in network (big-endian) order. 
+ All symmetric encryption uses AES in counter mode, except where + otherwise noted. + + In all discussions, "Alice" will refer to a user connecting to a + location-hidden service, and "Bob" will refer to a user running a + location-hidden service. + + An OP is (as defined elsewhere) an "Onion Proxy" or Tor client. + + An OR is (as defined elsewhere) an "Onion Router" or Tor server. + + An "Introduction point" is a Tor server chosen to be Bob's medium-term + 'meeting place'. A "Rendezvous point" is a Tor server chosen by Alice to + be a short-term communication relay between her and Bob. All Tor servers + potentially act as introduction and rendezvous points. + +0.2. Protocol outline + + 1. Bob->Bob's OP: "Offer IP:Port as + public-key-name:Port". [configuration] + (We do not specify this step; it is left to the implementor of + Bob's OP.) + + 2. Bob's OP generates keypair and rendezvous service descriptor: + "Meet public-key X at introduction point A, B, or C." (signed) + + 3. Bob's OP->Introduction point via Tor: [introduction setup] + "This pk is me." + + 4. Bob's OP->directory service via Tor: publishes Bob's service + descriptor [advertisement] + + 5. Out of band, Alice receives a [x.y.]z.onion:port address. + She opens a SOCKS connection to her OP, and requests + x.y.z.onion:port. + + 6. Alice's OP retrieves Bob's descriptor via Tor. [descriptor lookup.] + + 7. Alice's OP chooses a rendezvous point, opens a circuit to that + rendezvous point, and establishes a rendezvous circuit. [rendezvous + setup.] + + 8. Alice connects to the Introduction point via Tor, and tells it about + her rendezvous point and optional authentication/authorization + information. (Encrypted to Bob.) [Introduction 1] + + 9. The Introduction point passes this on to Bob's OP via Tor, along the + introduction circuit. [Introduction 2] + + 10. Bob's OP decides whether to connect to Alice, and if so, creates a + circuit to Alice's RP via Tor. Establishes a shared circuit. + [Rendezvous.] + + 11. Alice's OP sends begin cells to Bob's OP. [Connection] + +0.3. Constants and new cell types + + Relay cell types + 32 -- RELAY_ESTABLISH_INTRO + 33 -- RELAY_ESTABLISH_RENDEZVOUS + 34 -- RELAY_INTRODUCE1 + 35 -- RELAY_INTRODUCE2 + 36 -- RELAY_RENDEZVOUS1 + 37 -- RELAY_RENDEZVOUS2 + 38 -- RELAY_INTRO_ESTABLISHED + 39 -- RELAY_RENDEZVOUS_ESTABLISHED + 40 -- RELAY_COMMAND_INTRODUCE_ACK + +0.4. Version overview + + There are several parts in the hidden service protocol that have + changed over time, each of them having its own version number, whereas + other parts remained the same. The following list of potentially + versioned protocol parts should help reduce some confusion: + + - Hidden service descriptor: the binary-based v0 was the default for + a long time, and an ascii-based v2 has been added by proposal + 114. See 1.2. + + - Hidden service descriptor propagation mechanism: currently related to + the hidden service descriptor version -- v0 publishes to the original + hs directory authorities, whereas v2 publishes to a rotating subset + of relays with the "hsdir" flag; see 1.4 and 1.6. + + - Introduction protocol for how to generate an introduction cell: + v0 specified a nickname for the rendezvous point and assumed the + relay would know about it, whereas v2 now specifies IP address, + port, and onion key so the relay doesn't need to already recognize + it. See 1.8. + +1. The Protocol + +1.1. Bob configures his local OP. + + We do not specify a format for the OP configuration file. 
However, + OPs SHOULD allow Bob to provide more than one advertised service + per OP, and MUST allow Bob to specify one or more virtual ports per + service. Bob provides a mapping from each of these virtual ports + to a local IP:Port pair. + +1.2. Bob's OP generates service descriptors. + + The first time the OP provides an advertised service, it generates + a public/private keypair (stored locally). + + Beginning with 0.2.0.10-alpha, Bob's OP encodes "V2" descriptors. The + format of a "V2" descriptor is as follows: + + "rendezvous-service-descriptor" descriptor-id NL + + [At start, exactly once] + + Indicates the beginning of the descriptor. "descriptor-id" is a + periodically changing identifier of 160 bits formatted as 32 base32 + chars that is calculated by the hidden service and its clients. If + the optional "descriptor-cookie" is used, this "descriptor-id" + cannot be computed by anyone else. (Everyone can verify that this + "descriptor-id" belongs to the rest of the descriptor, even without + knowing the optional "descriptor-cookie", as described below.) The + "descriptor-id" is calculated by performing the following operation: + + descriptor-id = + H(permanent-id | H(time-period | descriptor-cookie | replica)) + + "permanent-id" is the permanent identifier of the hidden service, + consisting of 80 bits. It can be calculated by computing the hash value + of the public hidden service key and truncating after the first 80 bits: + + permanent-id = H(public-key)[:10] + + "H(time-period | descriptor-cookie | replica)" is the (possibly + secret) id part that is + necessary to verify that the hidden service is the true originator + of this descriptor. It can only be created by the hidden service + and its clients, but the "signature" below can only be created by + the service. + + "descriptor-cookie" is an optional secret password of 128 bits that + is shared between the hidden service provider and its clients. + + "replica" denotes the number of the non-consecutive replica. + + (Each descriptor is replicated on a number of _consecutive_ nodes + in the identifier ring by making every storing node responsible + for the identifier intervals starting from its 3rd predecessor's + ID to its own ID. In addition to that, every service publishes + multiple descriptors with different descriptor IDs in order to + distribute them to different places on the ring. Therefore, + "replica" chooses one of the _non-consecutive_ replicas. -KL) + + The "time-period" changes periodically depending on the global time and + as a function of "permanent-id". The current value for "time-period" can + be calculated using the following formula: + + time-period = (current-time + permanent-id-byte * 86400 / 256) + / 86400 + + "current-time" contains the current system time in seconds since + 1970-01-01 00:00, e.g. 1188241957. "permanent-id-byte" is the first + (unsigned) byte of the permanent identifier (which is in network + order), e.g. 143. Adding the product of "permanent-id-byte" and + 86400 (seconds per day), divided by 256, prevents "time-period" from + changing for all descriptors at the same time of the day. The result + of the overall operation is a (network-ordered) 32-bit integer, e.g. + 13753 or 0x000035B9 with the example values given above. + + "version" version-number NL + + [Exactly once] + + The version number of this descriptor's format. In this case: 2. 
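
   [Illustrative only: a minimal Java rendering of the "time-period" formula
   defined earlier in this section, checked against the example values given
   above (current-time 1188241957 and permanent-id-byte 143 yield 13753).
   The class name is invented for this note.]

      // Hypothetical sketch of the time-period computation from section 1.2.
      class TimePeriod {
          static long timePeriod(long currentTimeSeconds, int permanentIdByte) {
              return (currentTimeSeconds + (long) permanentIdByte * 86400 / 256) / 86400;
          }

          public static void main(String[] args) {
              // Prints 13753 (0x35B9) for the example values in the text.
              System.out.println(timePeriod(1188241957L, 143));
          }
      }
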
+ + "permanent-key" NL a public key in PEM format + + [Exactly once] + + The public key of the hidden service which is required to verify the + "descriptor-id" and the "signature". + + "secret-id-part" secret-id-part NL + + [Exactly once] + + The result of the following operation as explained above, formatted as + 32 base32 chars. Using this secret id part, everyone can verify that + the signed descriptor belongs to "descriptor-id". + + secret-id-part = H(time-period | descriptor-cookie | replica) + + "publication-time" YYYY-MM-DD HH:MM:SS NL + + [Exactly once] + + A timestamp when this descriptor has been created. + + "protocol-versions" version-string NL + + [Exactly once] + + A comma-separated list of recognized and permitted version numbers + for use in INTRODUCE cells; these versions are described in section + 1.8 below. + + "introduction-points" NL encrypted-string + + [At most once] + + A list of introduction points. If the optional "descriptor-cookie" is + used, this list is encrypted with AES in CTR mode with a random + initialization vector of 128 bits that is written to + the beginning of the encrypted string, and the "descriptor-cookie" as + secret key of 128 bits length. + + The string containing the introduction point data (either encrypted + or not) is encoded in base64, and surrounded with + "-----BEGIN MESSAGE-----" and "-----END MESSAGE-----". + + The unencrypted string may begin with: + + ["service-authentication" auth-type NL auth-data ... reserved] + + [At start, any number] + + The service-specific authentication data can be used to perform + client authentication. This data is independent of the selected + introduction point as opposed to "intro-authentication" below. + + Subsequently, an arbitrary number of introduction point entries may + follow, each containing the following data: + + "introduction-point" identifier NL + + [At start, exactly once] + + The identifier of this introduction point: the base-32 encoded + hash of this introduction point's identity key. + + "ip-address" ip-address NL + + [Exactly once] + + The IP address of this introduction point. + + "onion-port" port NL + + [Exactly once] + + The TCP port on which the introduction point is listening for + incoming onion requests. + + "onion-key" NL a public key in PEM format + + [Exactly once] + + The public key that can be used to encrypt messages to this + introduction point. + + "service-key" NL a public key in PEM format + + [Exactly once] + + The public key that can be used to encrypt messages to the hidden + service. + + ["intro-authentication" auth-type NL auth-data ... reserved] + + [Any number] + + The introduction-point-specific authentication data can be used + to perform client authentication. This data depends on the + selected introduction point as opposed to "service-authentication" + above. + + (This ends the fields in the encrypted portion of the descriptor.) + + [It's ok for Bob to advertise 0 introduction points. He might want + to do that if he previously advertised some introduction points, + and now he doesn't have any. -RD] + + "signature" NL signature-string + + [At end, exactly once] + + A signature of all fields above with the private key of the hidden + service. + +1.2.1. Other descriptor formats we don't use. 
+ + Support for the V0 descriptor format was dropped in 0.2.2.0-alpha-dev: + + KL Key length [2 octets] + PK Bob's public key [KL octets] + TS A timestamp [4 octets] + NI Number of introduction points [2 octets] + Ipt A list of NUL-terminated ORs [variable] + SIG Signature of above fields [variable] + + KL is the length of PK, in octets. + TS is the number of seconds elapsed since Jan 1, 1970. + + The members of Ipt may be either (a) nicknames, or (b) identity key + digests, encoded in hex, and prefixed with a '$'. + + The V1 descriptor format was understood and accepted from + 0.1.1.5-alpha-cvs to 0.2.0.6-alpha-dev, but no Tors generated it and + it was removed: + + V Format byte: set to 255 [1 octet] + V Version byte: set to 1 [1 octet] + KL Key length [2 octets] + PK Bob's public key [KL octets] + TS A timestamp [4 octets] + PROTO Protocol versions: bitmask [2 octets] + NI Number of introduction points [2 octets] + For each introduction point: (as in INTRODUCE2 cells) + IP Introduction point's address [4 octets] + PORT Introduction point's OR port [2 octets] + ID Introduction point identity ID [20 octets] + KLEN Length of onion key [2 octets] + KEY Introduction point onion key [KLEN octets] + SIG Signature of above fields [variable] + + A hypothetical "V1" descriptor, that has never been used but might + be useful for historical reasons, contains: + + V Format byte: set to 255 [1 octet] + V Version byte: set to 1 [1 octet] + KL Key length [2 octets] + PK Bob's public key [KL octets] + TS A timestamp [4 octets] + PROTO Rendezvous protocol versions: bitmask [2 octets] + NA Number of auth mechanisms accepted [1 octet] + For each auth mechanism: + AUTHT The auth type that is supported [2 octets] + AUTHL Length of auth data [1 octet] + AUTHD Auth data [variable] + NI Number of introduction points [2 octets] + For each introduction point: (as in INTRODUCE2 cells) + ATYPE An address type (typically 4) [1 octet] + ADDR Introduction point's IP address [4 or 16 octets] + PORT Introduction point's OR port [2 octets] + AUTHT The auth type that is supported [2 octets] + AUTHL Length of auth data [1 octet] + AUTHD Auth data [variable] + ID Introduction point identity ID [20 octets] + KLEN Length of onion key [2 octets] + KEY Introduction point onion key [KLEN octets] + SIG Signature of above fields [variable] + + AUTHT specifies which authentication/authorization mechanism is + required by the hidden service or the introduction point. AUTHD + is arbitrary data that can be associated with an auth approach. + Currently only AUTHT of [00 00] is supported, with an AUTHL of 0. + See section 2 of this document for details on auth mechanisms. + +1.3. Bob's OP establishes his introduction points. + + The OP establishes a new introduction circuit to each introduction + point. These circuits MUST NOT be used for anything but hidden service + introduction. To establish the introduction, Bob sends a + RELAY_ESTABLISH_INTRO cell, containing: + + KL Key length [2 octets] + PK Introduction public key [KL octets] + HS Hash of session info [20 octets] + SIG Signature of above information [variable] + + [XXX011, need to add auth information here. -RD] + + To prevent replay attacks, the HS field contains a SHA-1 hash based on the + shared secret KH between Bob's OP and the introduction point, as + follows: + HS = H(KH | "INTRODUCE") + That is: + HS = H(KH | [49 4E 54 52 4F 44 55 43 45]) + (KH, as specified in tor-spec.txt, is H(g^xy | [00]) .) 
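
   [Illustrative only: the HS computation above, written out in Java.  This is
   a sketch of the formula HS = H(KH | "INTRODUCE") with H = SHA-1, not code
   from any particular implementation; the class and method names are
   invented.]

      import java.nio.charset.Charset;
      import java.security.MessageDigest;
      import java.security.NoSuchAlgorithmException;

      // Hypothetical sketch: HS = H(KH | "INTRODUCE"), where KH is the 20-byte
      // shared handshake digest from tor-spec.txt.
      class IntroHash {
          static byte[] computeHS(byte[] kh) throws NoSuchAlgorithmException {
              MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
              sha1.update(kh);                                                // KH
              sha1.update("INTRODUCE".getBytes(Charset.forName("US-ASCII"))); // [49 4E 54 52 4F 44 55 43 45]
              return sha1.digest();                                           // 20-byte HS
          }
      }
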
+ + Upon receiving such a cell, the OR first checks that the signature is + correct with the included public key. If so, it checks whether HS is + correct given the shared state between Bob's OP and the OR. If either + check fails, the OP discards the cell; otherwise, it associates the + circuit with Bob's public key, and dissociates any other circuits + currently associated with PK. On success, the OR sends Bob a + RELAY_INTRO_ESTABLISHED cell with an empty payload. + + Bob's OP does not include its own public key in the RELAY_ESTABLISH_INTRO + cell, but the public key of a freshly generated introduction key pair. + The OP also includes these fresh public keys in the v2 hidden service + descriptor together with the other introduction point information. The + reason is that the introduction point does not need to and therefore + should not know for which hidden service it works, so as to prevent it + from tracking the hidden service's activity. + +1.4. Bob's OP advertises his service descriptor(s). + + Bob's OP opens a stream to each directory server's directory port via Tor. + (He may re-use old circuits for this.) Over this stream, Bob's OP makes + an HTTP 'POST' request, to a URL "/tor/rendezvous/publish" relative to the + directory server's root, containing as its body Bob's service descriptor. + + Bob should upload a service descriptor for each version format that + is supported in the current Tor network. + + Upon receiving a descriptor, the directory server checks the signature, + and discards the descriptor if the signature does not match the enclosed + public key. Next, the directory server checks the timestamp. If the + timestamp is more than 24 hours in the past or more than 1 hour in the + future, or the directory server already has a newer descriptor with the + same public key, the server discards the descriptor. Otherwise, the + server discards any older descriptors with the same public key and + version format, and associates the new descriptor with the public key. + The directory server remembers this descriptor for at least 24 hours + after its timestamp. At least every 18 hours, Bob's OP uploads a + fresh descriptor. + + Bob's OP publishes v2 descriptors to a changing subset of all v2 hidden + service directories. Therefore, Bob's OP opens a stream via Tor to each + responsible hidden service directory. (He may re-use old circuits + for this.) Over this stream, Bob's OP makes an HTTP 'POST' request to a + URL "/tor/rendezvous2/publish" relative to the hidden service + directory's root, containing as its body Bob's service descriptor. + + At any time, there are 6 hidden service directories responsible for + keeping replicas of a descriptor; they consist of 2 sets of 3 hidden + service directories with consecutive onion IDs. Bob's OP learns about + the complete list of hidden service directories by filtering the + consensus status document received from the directory authorities. A + hidden service directory is deemed responsible for all descriptor IDs in + the interval from its direct predecessor, exclusive, to its own ID, + inclusive; it further holds replicas for its 2 predecessors. A + participant only trusts its own routing list and never learns about + routing information from other parties. + + Bob's OP publishes a new v2 descriptor once an hour or whenever its + content changes. V2 descriptors can be found by clients within a given + time period of 24 hours, after which they change their ID as described + under 1.2. 
If a published descriptor would be valid for less than 60 + minutes (= 2 x 30 minutes to allow the server to be 30 minutes behind + and the client 30 minutes ahead), Bob's OP publishes the descriptor + under the ID of both, the current and the next publication period. + +1.5. Alice receives a x.y.z.onion address. + + When Alice receives a pointer to a location-hidden service, it is as a + hostname of the form "z.onion" or "y.z.onion" or "x.y.z.onion", where + z is a base-32 encoding of a 10-octet hash of Bob's service's public + key, computed as follows: + + 1. Let H = H(PK). + 2. Let H' = the first 80 bits of H, considering each octet from + most significant bit to least significant bit. + 2. Generate a 16-character encoding of H', using base32 as defined + in RFC 3548. + + (We only use 80 bits instead of the 160 bits from SHA1 because we + don't need to worry about arbitrary collisions, and because it will + make handling the url's more convenient.) + + The string "x", if present, is the base-32 encoding of the + authentication/authorization required by the introduction point. + The string "y", if present, is the base-32 encoding of the + authentication/authorization required by the hidden service. + Omitting a string is taken to mean auth type [00 00]. + See section 2 of this document for details on auth mechanisms. + + [Yes, numbers are allowed at the beginning. See RFC 1123. -NM] + +1.6. Alice's OP retrieves a service descriptor. + + Similarly to the description in section 1.4, Alice's OP fetches a v2 + descriptor from a randomly chosen hidden service directory out of the + changing subset of 6 nodes. If the request is unsuccessful, Alice retries + the other remaining responsible hidden service directories in a random + order. Alice relies on Bob to care about a potential clock skew between + the two by possibly storing two sets of descriptors (see end of section + 1.4). + + Alice's OP opens a stream via Tor to the chosen v2 hidden service + directory. (She may re-use old circuits for this.) Over this stream, + Alice's OP makes an HTTP 'GET' request for the document + "/tor/rendezvous2/", where z is replaced with the encoding of the + descriptor ID. The directory replies with a 404 HTTP response if it does + not recognize , and otherwise returns Bob's most recently uploaded + service descriptor. + + If Alice's OP receives a 404 response, it tries the other directory + servers, and only fails the lookup if none recognize the public key hash. + + Upon receiving a service descriptor, Alice verifies with the same process + as the directory server uses, described above in section 1.4. + + The directory server gives a 400 response if it cannot understand Alice's + request. + + Alice should cache the descriptor locally, but should not use + descriptors that are more than 24 hours older than their timestamp. + [Caching may make her partitionable, but she fetched it anonymously, + and we can't very well *not* cache it. -RD] + +1.7. Alice's OP establishes a rendezvous point. + + When Alice requests a connection to a given location-hidden service, + and Alice's OP does not have an established circuit to that service, + the OP builds a rendezvous circuit. It does this by establishing + a circuit to a randomly chosen OR, and sending a + RELAY_ESTABLISH_RENDEZVOUS cell to that OR. The body of that cell + contains: + + RC Rendezvous cookie [20 octets] + + [XXX011 this looks like an auth mechanism. should we generalize here? 
-RD] + + The rendezvous cookie is an arbitrary 20-byte value, chosen randomly by + Alice's OP. + + Upon receiving a RELAY_ESTABLISH_RENDEZVOUS cell, the OR associates the + RC with the circuit that sent it. It replies to Alice with an empty + RELAY_RENDEZVOUS_ESTABLISHED cell to indicate success. + + Alice's OP MUST NOT use the circuit which sent the cell for any purpose + other than rendezvous with the given location-hidden service. + +1.8. Introduction: from Alice's OP to Introduction Point + + Alice builds a separate circuit to one of Bob's chosen introduction + points, and sends it a RELAY_INTRODUCE1 cell containing: + + Cleartext + PK_ID Identifier for Bob's PK [20 octets] + Encrypted to Bob's PK: (in the v0 intro protocol) + RP Rendezvous point's nickname [20 octets] + RC Rendezvous cookie [20 octets] + g^x Diffie-Hellman data, part 1 [128 octets] + OR (in the v1 intro protocol) + VER Version byte: set to 1. [1 octet] + RP Rendezvous point nick or ID [42 octets] + RC Rendezvous cookie [20 octets] + g^x Diffie-Hellman data, part 1 [128 octets] + OR (in the v2 intro protocol) + VER Version byte: set to 2. [1 octet] + IP Rendezvous point's address [4 octets] + PORT Rendezvous point's OR port [2 octets] + ID Rendezvous point identity ID [20 octets] + KLEN Length of onion key [2 octets] + KEY Rendezvous point onion key [KLEN octets] + RC Rendezvous cookie [20 octets] + g^x Diffie-Hellman data, part 1 [128 octets] + + PK_ID is the hash of Bob's public key. RP is NUL-padded and + terminated. In version 0, it must contain a nickname. In version 1, + it must contain EITHER a nickname or an identity key digest that is + encoded in hex and prefixed with a '$'. + + The hybrid encryption to Bob's PK works just like the hybrid + encryption in CREATE cells (see tor-spec). Thus the payload of the + version 0 RELAY_INTRODUCE1 cell on the wire will contain + 20+42+16+20+20+128=246 bytes, and the version 1 and version 2 + introduction formats have other sizes. + + Through Tor 0.2.0.6-alpha, clients only generated the v0 introduction + format, whereas hidden services have understood and accepted v0, + v1, and v2 since 0.1.1.x. As of Tor 0.2.0.7-alpha and 0.1.2.18, + clients switched to using the v2 intro format. + + If Alice has downloaded a v2 descriptor, she uses the contained public + key ("service-key") instead of Bob's public key to create the + RELAY_INTRODUCE1 cell as described above. + +1.8.1. Other introduction formats we don't use. + + We briefly speculated about using the following format for the + "encrypted to Bob's PK" part of the introduction, but no Tors have + ever generated these. + + VER Version byte: set to 3. [1 octet] + ATYPE An address type (typically 4) [1 octet] + ADDR Rendezvous point's IP address [4 or 16 octets] + PORT Rendezvous point's OR port [2 octets] + AUTHT The auth type that is supported [2 octets] + AUTHL Length of auth data [1 octet] + AUTHD Auth data [variable] + ID Rendezvous point identity ID [20 octets] + KLEN Length of onion key [2 octets] + KEY Rendezvous point onion key [KLEN octets] + RC Rendezvous cookie [20 octets] + g^x Diffie-Hellman data, part 1 [128 octets] + +1.9. Introduction: From the Introduction Point to Bob's OP + + If the Introduction Point recognizes PK_ID as a public key which has + established a circuit for introductions as in 1.3 above, it sends the body + of the cell in a new RELAY_INTRODUCE2 cell down the corresponding circuit. + (If the PK_ID is unrecognized, the RELAY_INTRODUCE1 cell is discarded.) 
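
   [Illustrative only: the forwarding rule in the paragraph above, expressed
   as a small Java sketch.  The types and method names are invented; the
   sketch only shows the control flow of looking up PK_ID and relaying the
   cell body down the matching introduction circuit.]

      import java.util.HashMap;
      import java.util.Map;

      // Hypothetical sketch of an introduction point handling RELAY_INTRODUCE1
      // cells as described in section 1.9.
      class IntroductionPoint {
          interface IntroCircuit { void sendIntroduce2(byte[] body); }

          // Introduction circuits established as in 1.3, keyed by hex-encoded PK_ID.
          private final Map<String, IntroCircuit> establishedIntros =
                  new HashMap<String, IntroCircuit>();

          void registerIntro(String pkIdHex, IntroCircuit circuit) {
              establishedIntros.put(pkIdHex, circuit);
          }

          // Returns true if the body was relayed, false if the cell was discarded.
          boolean handleIntroduce1(String pkIdHex, byte[] body) {
              IntroCircuit circuit = establishedIntros.get(pkIdHex);
              if (circuit == null)
                  return false;                 // unrecognized PK_ID: discard the cell
              circuit.sendIntroduce2(body);     // relay the body in a new RELAY_INTRODUCE2 cell
              return true;
          }
      }
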
+ + After sending the RELAY_INTRODUCE2 cell, the OR replies to Alice with an + empty RELAY_COMMAND_INTRODUCE_ACK cell. If no RELAY_INTRODUCE2 cell can + be sent, the OR replies to Alice with a non-empty cell to indicate an + error. (The semantics of the cell body may be determined later; the + current implementation sends a single '1' byte on failure.) + + When Bob's OP receives the RELAY_INTRODUCE2 cell, it decrypts it with + the private key for the corresponding hidden service, and extracts the + rendezvous point's nickname, the rendezvous cookie, and the value of g^x + chosen by Alice. + +1.10. Rendezvous + + Bob's OP builds a new Tor circuit ending at Alice's chosen rendezvous + point, and sends a RELAY_RENDEZVOUS1 cell along this circuit, containing: + RC Rendezvous cookie [20 octets] + g^y Diffie-Hellman [128 octets] + KH Handshake digest [20 octets] + + (Bob's OP MUST NOT use this circuit for any other purpose.) + + If the RP recognizes RC, it relays the rest of the cell down the + corresponding circuit in a RELAY_RENDEZVOUS2 cell, containing: + + g^y Diffie-Hellman [128 octets] + KH Handshake digest [20 octets] + + (If the RP does not recognize the RC, it discards the cell and + tears down the circuit.) + + When Alice's OP receives a RELAY_RENDEZVOUS2 cell on a circuit which + has sent a RELAY_ESTABLISH_RENDEZVOUS cell but which has not yet received + a reply, it uses g^y and H(g^xy) to complete the handshake as in the Tor + circuit extend process: they establish a 60-octet string as + K = SHA1(g^xy | [00]) | SHA1(g^xy | [01]) | SHA1(g^xy | [02]) + and generate + KH = K[0..15] + Kf = K[16..31] + Kb = K[32..47] + + Subsequently, the rendezvous point passes relay cells, unchanged, from + each of the two circuits to the other. When Alice's OP sends + RELAY cells along the circuit, it first encrypts them with the + Kf, then with all of the keys for the ORs in Alice's side of the circuit; + and when Alice's OP receives RELAY cells from the circuit, it decrypts + them with the keys for the ORs in Alice's side of the circuit, then + decrypts them with Kb. Bob's OP does the same, with Kf and Kb + interchanged. + +1.11. Creating streams + + To open TCP connections to Bob's location-hidden service, Alice's OP sends + a RELAY_BEGIN cell along the established circuit, using the special + address "", and a chosen port. Bob's OP chooses a destination IP and + port, based on the configuration of the service connected to the circuit, + and opens a TCP stream. From then on, Bob's OP treats the stream as an + ordinary exit connection. + [ Except he doesn't include addr in the connected cell or the end + cell. -RD] + + Alice MAY send multiple RELAY_BEGIN cells along the circuit, to open + multiple streams to Bob. Alice SHOULD NOT send RELAY_BEGIN cells for any + other address along her circuit to Bob; if she does, Bob MUST reject them. + +2. Authentication and authorization. + +Foo. + +3. Hidden service directory operation + + This section has been introduced with the v2 hidden service descriptor + format. It describes all operations of the v2 hidden service descriptor + fetching and propagation mechanism that are required for the protocol + described in section 1 to succeed with v2 hidden service descriptors. + +3.1. Configuring as hidden service directory + + Every onion router that has its directory port open can decide whether it + wants to store and serve hidden service descriptors. 
An onion router which + is configured as such includes the "hidden-service-dir" flag in its router + descriptors that it sends to directory authorities. + + The directory authorities include a new flag "HSDir" for routers that + decided to provide storage for hidden service descriptors and that + have been running for at least 24 hours. + +3.2. Accepting publish requests + + Hidden service directory nodes accept publish requests for v2 hidden service + descriptors and store them to their local memory. (It is not necessary to + make descriptors persistent, because after restarting, the onion router + would not be accepted as a storing node anyway, because it has not been + running for at least 24 hours.) All requests and replies are formatted as + HTTP messages. Requests are initiated via BEGIN_DIR cells directed to + the router's directory port, and formatted as HTTP POST requests to the URL + "/tor/rendezvous2/publish" relative to the hidden service directory's root, + containing as its body a v2 service descriptor. + + A hidden service directory node parses every received descriptor and only + stores it when it thinks that it is responsible for storing that descriptor + based on its own routing table. See section 1.4 for more information on how + to determine responsibility for a certain descriptor ID. + +3.3. Processing fetch requests + + Hidden service directory nodes process fetch requests for hidden service + descriptors by looking them up in their local memory. (They do not need to + determine if they are responsible for the passed ID, because it does no harm + if they deliver a descriptor for which they are not (any more) responsible.) + All requests and replies are formatted as HTTP messages. Requests are + initiated via BEGIN_DIR cells directed to the router's directory port, + and formatted as HTTP GET requests for the document "/tor/rendezvous2/", + where z is replaced with the encoding of the descriptor ID. + diff --git a/orchid/doc/spec/socks-extensions.txt b/orchid/doc/spec/socks-extensions.txt new file mode 100644 index 00000000..62d86acd --- /dev/null +++ b/orchid/doc/spec/socks-extensions.txt @@ -0,0 +1,78 @@ +Tor's extensions to the SOCKS protocol + +1. Overview + + The SOCKS protocol provides a generic interface for TCP proxies. Client + software connects to a SOCKS server via TCP, and requests a TCP connection + to another address and port. The SOCKS server establishes the connection, + and reports success or failure to the client. After the connection has + been established, the client application uses the TCP stream as usual. + + Tor supports SOCKS4 as defined in [1], SOCKS4A as defined in [2], and + SOCKS5 as defined in [3]. + + The stickiest issue for Tor in supporting clients, in practice, is forcing + DNS lookups to occur at the OR side: if clients do their own DNS lookup, + the DNS server can learn which addresses the client wants to reach. + SOCKS4 supports addressing by IPv4 address; SOCKS4A is a kludge on top of + SOCKS4 to allow addressing by hostname; SOCKS5 supports IPv4, IPv6, and + hostnames. + +1.1. Extent of support + + Tor supports the SOCKS4, SOCKS4A, and SOCKS5 standards, except as follows: + + BOTH: + - The BIND command is not supported. + + SOCKS4,4A: + - SOCKS4 usernames are ignored. + + SOCKS5: + - The (SOCKS5) "UDP ASSOCIATE" command is not supported. + - IPv6 is not supported in CONNECT commands. + - Only the "NO AUTHENTICATION" (SOCKS5) authentication method [00] is + supported. + +2. 
Name lookup + + As an extension to SOCKS4A and SOCKS5, Tor implements a new command value, + "RESOLVE" [F0]. When Tor receives a "RESOLVE" SOCKS command, it initiates + a remote lookup of the hostname provided as the target address in the SOCKS + request. The reply is either an error (if the address couldn't be + resolved) or a success response. In the case of success, the address is + stored in the portion of the SOCKS response reserved for remote IP address. + + (We support RESOLVE in SOCKS4 too, even though it is unnecessary.) + + For SOCKS5 only, we support reverse resolution with a new command value, + "RESOLVE_PTR" [F1]. In response to a "RESOLVE_PTR" SOCKS5 command with + an IPv4 address as its target, Tor attempts to find the canonical + hostname for that IPv4 record, and returns it in the "server bound + address" portion of the reply. + (This command was not supported before Tor 0.1.2.2-alpha.) + +3. Other command extensions. + + Tor 0.1.2.4-alpha added a new command value: "CONNECT_DIR" [F2]. + In this case, Tor will open an encrypted direct TCP connection to the + directory port of the Tor server specified by address:port (the port + specified should be the ORPort of the server). It uses a one-hop tunnel + and a "BEGIN_DIR" relay cell to accomplish this secure connection. + + The F2 command value was removed in Tor 0.2.0.10-alpha in favor of a + new use_begindir flag in edge_connection_t. + +4. HTTP-resistance + + Tor checks the first byte of each SOCKS request to see whether it looks + more like an HTTP request (that is, it starts with a "G", "H", or "P"). If + so, Tor returns a small webpage, telling the user that his/her browser is + misconfigured. This is helpful for the many users who mistakenly try to + use Tor as an HTTP proxy instead of a SOCKS proxy. + +References: + [1] http://archive.socks.permeo.com/protocol/socks4.protocol + [2] http://archive.socks.permeo.com/protocol/socks4a.protocol + [3] SOCKS5: RFC1928 + diff --git a/orchid/doc/spec/tor-spec.txt b/orchid/doc/spec/tor-spec.txt new file mode 100644 index 00000000..efa6029f --- /dev/null +++ b/orchid/doc/spec/tor-spec.txt @@ -0,0 +1,992 @@ + + Tor Protocol Specification + + Roger Dingledine + Nick Mathewson + +Note: This document aims to specify Tor as implemented in 0.2.1.x. Future +versions of Tor may implement improved protocols, and compatibility is not +guaranteed. Compatibility notes are given for versions 0.1.1.15-rc and +later; earlier versions are not compatible with the Tor network as of this +writing. + +This specification is not a design document; most design criteria +are not examined. For more information on why Tor acts as it does, +see tor-design.pdf. + +0. Preliminaries + +0.1. Notation and encoding + + PK -- a public key. + SK -- a private key. + K -- a key for a symmetric cypher. + + a|b -- concatenation of 'a' and 'b'. + + [A0 B1 C2] -- a three-byte sequence, containing the bytes with + hexadecimal values A0, B1, and C2, in that order. + + All numeric values are encoded in network (big-endian) order. + + H(m) -- a cryptographic hash of m. + +0.2. Security parameters + + Tor uses a stream cipher, a public-key cipher, the Diffie-Hellman + protocol, and a hash function. + + KEY_LEN -- the length of the stream cipher's key, in bytes. + + PK_ENC_LEN -- the length of a public-key encrypted message, in bytes. + PK_PAD_LEN -- the number of bytes added in padding for public-key + encryption, in bytes. 
(The largest number of bytes that can be encrypted + in a single public-key operation is therefore PK_ENC_LEN-PK_PAD_LEN.) + + DH_LEN -- the number of bytes used to represent a member of the + Diffie-Hellman group. + DH_SEC_LEN -- the number of bytes used in a Diffie-Hellman private key (x). + + HASH_LEN -- the length of the hash function's output, in bytes. + + PAYLOAD_LEN -- The longest allowable cell payload, in bytes. (509) + + CELL_LEN -- The length of a Tor cell, in bytes. + +0.3. Ciphers + + For a stream cipher, we use 128-bit AES in counter mode, with an IV of all + 0 bytes. + + For a public-key cipher, we use RSA with 1024-bit keys and a fixed + exponent of 65537. We use OAEP-MGF1 padding, with SHA-1 as its digest + function. We leave the optional "Label" parameter unset. (For OAEP + padding, see ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1.pdf) + + For Diffie-Hellman, we use a generator (g) of 2. For the modulus (p), we + use the 1024-bit safe prime from rfc2409 section 6.2 whose hex + representation is: + + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE65381FFFFFFFFFFFFFFFF" + + As an optimization, implementations SHOULD choose DH private keys (x) of + 320 bits. Implementations that do this MUST never use any DH key more + than once. + [May other implementations reuse their DH keys?? -RD] + [Probably not. Conceivably, you could get away with changing DH keys once + per second, but there are too many oddball attacks for me to be + comfortable that this is safe. -NM] + + For a hash function, we use SHA-1. + + KEY_LEN=16. + DH_LEN=128; DH_SEC_LEN=40. + PK_ENC_LEN=128; PK_PAD_LEN=42. + HASH_LEN=20. + + When we refer to "the hash of a public key", we mean the SHA-1 hash of the + DER encoding of an ASN.1 RSA public key (as specified in PKCS.1). + + All "random" values should be generated with a cryptographically strong + random number generator, unless otherwise noted. + + The "hybrid encryption" of a byte sequence M with a public key PK is + computed as follows: + 1. If M is less than PK_ENC_LEN-PK_PAD_LEN, pad and encrypt M with PK. + 2. Otherwise, generate a KEY_LEN byte random key K. + Let M1 = the first PK_ENC_LEN-PK_PAD_LEN-KEY_LEN bytes of M, + and let M2 = the rest of M. + Pad and encrypt K|M1 with PK. Encrypt M2 with our stream cipher, + using the key K. Concatenate these encrypted values. + [XXX Note that this "hybrid encryption" approach does not prevent + an attacker from adding or removing bytes to the end of M. It also + allows attackers to modify the bytes not covered by the OAEP -- + see Goldberg's PET2006 paper for details. We will add a MAC to this + scheme one day. -RD] + +0.4. Other parameter values + + CELL_LEN=512 + +1. System overview + + Tor is a distributed overlay network designed to anonymize + low-latency TCP-based applications such as web browsing, secure shell, + and instant messaging. Clients choose a path through the network and + build a ``circuit'', in which each node (or ``onion router'' or ``OR'') + in the path knows its predecessor and successor, but no other nodes in + the circuit. Traffic flowing down the circuit is sent in fixed-size + ``cells'', which are unwrapped by a symmetric key at each node (like + the layers of an onion) and relayed downstream. + +1.1. 
Keys and names + + Every Tor server has multiple public/private keypairs: + + - A long-term signing-only "Identity key" used to sign documents and + certificates, and used to establish server identity. + - A medium-term "Onion key" used to decrypt onion skins when accepting + circuit extend attempts. (See 5.1.) Old keys MUST be accepted for at + least one week after they are no longer advertised. Because of this, + servers MUST retain old keys for a while after they're rotated. + - A short-term "Connection key" used to negotiate TLS connections. + Tor implementations MAY rotate this key as often as they like, and + SHOULD rotate this key at least once a day. + + Tor servers are also identified by "nicknames"; these are specified in + dir-spec.txt. + +2. Connections + + Connections between two Tor servers, or between a client and a server, + use TLS/SSLv3 for link authentication and encryption. All + implementations MUST support the SSLv3 ciphersuite + "SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA", and SHOULD support the TLS + ciphersuite "TLS_DHE_RSA_WITH_AES_128_CBC_SHA" if it is available. + + There are three acceptable ways to perform a TLS handshake when + connecting to a Tor server: "certificates up-front", "renegotiation", and + "backwards-compatible renegotiation". ("Backwards-compatible + renegotiation" is, as the name implies, compatible with both other + handshake types.) + + Before Tor 0.2.0.21, only "certificates up-front" was supported. In Tor + 0.2.0.21 or later, "backwards-compatible renegotiation" is used. + + In "certificates up-front", the connection initiator always sends a + two-certificate chain, consisting of an X.509 certificate using a + short-term connection public key and a second, self- signed X.509 + certificate containing its identity key. The other party sends a similar + certificate chain. The initiator's ClientHello MUST NOT include any + ciphersuites other than: + TLS_DHE_RSA_WITH_AES_256_CBC_SHA + TLS_DHE_RSA_WITH_AES_128_CBC_SHA + SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA + SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA + + In "renegotiation", the connection initiator sends no certificates, and + the responder sends a single connection certificate. Once the TLS + handshake is complete, the initiator renegotiates the handshake, with each + parties sending a two-certificate chain as in "certificates up-front". + The initiator's ClientHello MUST include at least once ciphersuite not in + the list above. The responder SHOULD NOT select any ciphersuite besides + those in the list above. + [The above "should not" is because some of the ciphers that + clients list may be fake.] + + In "backwards-compatible renegotiation", the connection initiator's + ClientHello MUST include at least one ciphersuite other than those listed + above. The connection responder examines the initiator's ciphersuite list + to see whether it includes any ciphers other than those included in the + list above. If extra ciphers are included, the responder proceeds as in + "renegotiation": it sends a single certificate and does not request + client certificates. Otherwise (in the case that no extra ciphersuites + are included in the ClientHello) the responder proceeds as in + "certificates up-front": it requests client certificates, and sends a + two-certificate chain. In either case, once the responder has sent its + certificate or certificates, the initiator counts them. If two + certificates have been sent, it proceeds as in "certificates up-front"; + otherwise, it proceeds as in "renegotiation". 
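+
+  [Illustrative sketch only: the responder's choice above reduces to a set
+  test on the ClientHello ciphersuites.  The Java fragment below is invented
+  for this note (class and method names are hypothetical) and is not taken
+  from any Tor or Orchid implementation.]
+
+      import java.util.Arrays;
+      import java.util.HashSet;
+      import java.util.List;
+      import java.util.Set;
+
+      final class HandshakeVariantChooser {
+          // The fixed "certificates up-front" ciphersuite list given above.
+          private static final Set<String> V1_SUITES = new HashSet<String>(Arrays.asList(
+                  "TLS_DHE_RSA_WITH_AES_256_CBC_SHA",
+                  "TLS_DHE_RSA_WITH_AES_128_CBC_SHA",
+                  "SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA",
+                  "SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA"));
+
+          /** True: respond as in "renegotiation" (send one certificate, request no
+           *  client certificates).  False: respond as in "certificates up-front". */
+          static boolean respondAsRenegotiation(List<String> clientHelloSuites) {
+              for (String suite : clientHelloSuites) {
+                  if (!V1_SUITES.contains(suite)) {
+                      return true;   // at least one extra ciphersuite was offered
+                  }
+              }
+              return false;          // only ciphersuites from the list above
+          }
+      }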
+ + All new implementations of the Tor server protocol MUST support + "backwards-compatible renegotiation"; clients SHOULD do this too. If + this is not possible, new client implementations MUST support both + "renegotiation" and "certificates up-front" and use the router's + published link protocols list (see dir-spec.txt on the "protocols" entry) + to decide which to use. + + In all of the above handshake variants, certificates sent in the clear + SHOULD NOT include any strings to identify the host as a Tor server. In + the "renegotation" and "backwards-compatible renegotiation", the + initiator SHOULD chose a list of ciphersuites and TLS extensions chosen + to mimic one used by a popular web browser. + + Responders MUST NOT select any TLS ciphersuite that lacks ephemeral keys, + or whose symmetric keys are less then KEY_LEN bits, or whose digests are + less than HASH_LEN bits. Responders SHOULD NOT select any SSLv3 + ciphersuite other than those listed above. + + Even though the connection protocol is identical, we will think of the + initiator as either an onion router (OR) if it is willing to relay + traffic for other Tor users, or an onion proxy (OP) if it only handles + local requests. Onion proxies SHOULD NOT provide long-term-trackable + identifiers in their handshakes. + + In all handshake variants, once all certificates are exchanged, all + parties receiving certificates must confirm that the identity key is as + expected. (When initiating a connection, the expected identity key is + the one given in the directory; when creating a connection because of an + EXTEND cell, the expected identity key is the one given in the cell.) If + the key is not as expected, the party must close the connection. + + When connecting to an OR, all parties SHOULD reject the connection if that + OR has a malformed or missing certificate. When accepting an incoming + connection, an OR SHOULD NOT reject incoming connections from parties with + malformed or missing certificates. (However, an OR should not believe + that an incoming connection is from another OR unless the certificates + are present and well-formed.) + + [Before version 0.1.2.8-rc, ORs rejected incoming connections from ORs and + OPs alike if their certificates were missing or malformed.] + + Once a TLS connection is established, the two sides send cells + (specified below) to one another. Cells are sent serially. All + cells are CELL_LEN bytes long. Cells may be sent embedded in TLS + records of any size or divided across TLS records, but the framing + of TLS records MUST NOT leak information about the type or contents + of the cells. + + TLS connections are not permanent. Either side MAY close a connection + if there are no circuits running over it and an amount of time + (KeepalivePeriod, defaults to 5 minutes) has passed since the last time + any traffic was transmitted over the TLS connection. Clients SHOULD + also hold a TLS connection with no circuits open, if it is likely that a + circuit will be built soon using that connection. + + (As an exception, directory servers may try to stay connected to all of + the ORs -- though this will be phased out for the Tor 0.1.2.x release.) + + To avoid being trivially distinguished from servers, client-only Tor + instances are encouraged but not required to use a two-certificate chain + as well. Clients SHOULD NOT keep using the same certificates when + their IP address changes. Clients MAY send no certificates at all. + +3. 
Cell Packet format + + The basic unit of communication for onion routers and onion + proxies is a fixed-width "cell". + + On a version 1 connection, each cell contains the following + fields: + + CircID [2 bytes] + Command [1 byte] + Payload (padded with 0 bytes) [PAYLOAD_LEN bytes] + + On a version 2 connection, all cells are as in version 1 connections, + except for the initial VERSIONS cell, whose format is: + + Circuit [2 octets; set to 0] + Command [1 octet; set to 7 for VERSIONS] + Length [2 octets; big-endian integer] + Payload [Length bytes] + + The CircID field determines which circuit, if any, the cell is + associated with. + + The 'Command' field holds one of the following values: + 0 -- PADDING (Padding) (See Sec 7.2) + 1 -- CREATE (Create a circuit) (See Sec 5.1) + 2 -- CREATED (Acknowledge create) (See Sec 5.1) + 3 -- RELAY (End-to-end data) (See Sec 5.5 and 6) + 4 -- DESTROY (Stop using a circuit) (See Sec 5.4) + 5 -- CREATE_FAST (Create a circuit, no PK) (See Sec 5.1) + 6 -- CREATED_FAST (Circuit created, no PK) (See Sec 5.1) + 7 -- VERSIONS (Negotiate proto version) (See Sec 4) + 8 -- NETINFO (Time and address info) (See Sec 4) + 9 -- RELAY_EARLY (End-to-end data; limited) (See sec 5.6) + + The interpretation of 'Payload' depends on the type of the cell. + PADDING: Payload is unused. + CREATE: Payload contains the handshake challenge. + CREATED: Payload contains the handshake response. + RELAY: Payload contains the relay header and relay body. + DESTROY: Payload contains a reason for closing the circuit. + (see 5.4) + Upon receiving any other value for the command field, an OR must + drop the cell. Since more cell types may be added in the future, ORs + should generally not warn when encountering unrecognized commands. + + The payload is padded with 0 bytes. + + PADDING cells are currently used to implement connection keepalive. + If there is no other traffic, ORs and OPs send one another a PADDING + cell every few minutes. + + CREATE, CREATED, and DESTROY cells are used to manage circuits; + see section 5 below. + + RELAY cells are used to send commands and data along a circuit; see + section 6 below. + + VERSIONS and NETINFO cells are used to set up connections. See section 4 + below. + +4. Negotiating and initializing connections + +4.1. Negotiating versions with VERSIONS cells + + There are multiple instances of the Tor link connection protocol. Any + connection negotiated using the "certificates up front" handshake (see + section 2 above) is "version 1". In any connection where both parties + have behaved as in the "renegotiation" handshake, the link protocol + version is 2 or higher. + + To determine the version, in any connection where the "renegotiation" + handshake was used (that is, where the server sent only one certificate + at first and where the client did not send any certificates until + renegotiation), both parties MUST send a VERSIONS cell immediately after + the renegotiation is finished, before any other cells are sent. Parties + MUST NOT send any other cells on a connection until they have received a + VERSIONS cell. + + The payload in a VERSIONS cell is a series of big-endian two-byte + integers. Both parties MUST select as the link protocol version the + highest number contained both in the VERSIONS cell they sent and in the + versions cell they received. If they have no such version in common, + they cannot communicate and MUST close the connection. 
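+
+  [Illustrative sketch only: the version selection above amounts to taking
+  the highest value present in both VERSIONS payloads.  The helper below is
+  invented for this note and is not part of any implementation.]
+
+      // Each array holds the big-endian two-byte integers carried in one
+      // VERSIONS cell (ours as sent, theirs as received).
+      static int negotiateLinkVersion(int[] oursSent, int[] theirsReceived) {
+          int best = -1;
+          for (int ours : oursSent) {
+              for (int theirs : theirsReceived) {
+                  if (ours == theirs && ours > best) {
+                      best = ours;
+                  }
+              }
+          }
+          if (best < 0) {
+              // No version in common: the parties cannot communicate and MUST
+              // close the connection.
+              throw new IllegalStateException("no common link protocol version");
+          }
+          return best;
+      }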
+ + Since the version 1 link protocol does not use the "renegotiation" + handshake, implementations MUST NOT list version 1 in their VERSIONS + cell. + +4.2. NETINFO cells + + If version 2 or higher is negotiated, each party sends the other a + NETINFO cell. The cell's payload is: + + Timestamp [4 bytes] + Other OR's address [variable] + Number of addresses [1 byte] + This OR's addresses [variable] + + The address format is a type/length/value sequence as given in section + 6.4 below. The timestamp is a big-endian unsigned integer number of + seconds since the unix epoch. + + Implementations MAY use the timestamp value to help decide if their + clocks are skewed. Initiators MAY use "other OR's address" to help + learn which address their connections are originating from, if they do + not know it. Initiators SHOULD use "this OR's address" to make sure + that they have connected to another OR at its canonical address. + + [As of 0.2.0.23-rc, implementations use none of the above values.] + + +5. Circuit management + +5.1. CREATE and CREATED cells + + Users set up circuits incrementally, one hop at a time. To create a + new circuit, OPs send a CREATE cell to the first node, with the + first half of the DH handshake; that node responds with a CREATED + cell with the second half of the DH handshake plus the first 20 bytes + of derivative key data (see section 5.2). To extend a circuit past + the first hop, the OP sends an EXTEND relay cell (see section 5) + which instructs the last node in the circuit to send a CREATE cell + to extend the circuit. + + The payload for a CREATE cell is an 'onion skin', which consists + of the first step of the DH handshake data (also known as g^x). + This value is hybrid-encrypted (see 0.3) to Bob's onion key, giving + an onion-skin of: + PK-encrypted: + Padding [PK_PAD_LEN bytes] + Symmetric key [KEY_LEN bytes] + First part of g^x [PK_ENC_LEN-PK_PAD_LEN-KEY_LEN bytes] + Symmetrically encrypted: + Second part of g^x [DH_LEN-(PK_ENC_LEN-PK_PAD_LEN-KEY_LEN) + bytes] + + The relay payload for an EXTEND relay cell consists of: + Address [4 bytes] + Port [2 bytes] + Onion skin [DH_LEN+KEY_LEN+PK_PAD_LEN bytes] + Identity fingerprint [HASH_LEN bytes] + + The port and address field denote the IPV4 address and port of the next + onion router in the circuit; the public key hash is the hash of the PKCS#1 + ASN1 encoding of the next onion router's identity (signing) key. (See 0.3 + above.) Including this hash allows the extending OR verify that it is + indeed connected to the correct target OR, and prevents certain + man-in-the-middle attacks. + + The payload for a CREATED cell, or the relay payload for an + EXTENDED cell, contains: + DH data (g^y) [DH_LEN bytes] + Derivative key data (KH) [HASH_LEN bytes] + + The CircID for a CREATE cell is an arbitrarily chosen 2-byte integer, + selected by the node (OP or OR) that sends the CREATE cell. To prevent + CircID collisions, when one node sends a CREATE cell to another, it chooses + from only one half of the possible values based on the ORs' public + identity keys: if the sending node has a lower key, it chooses a CircID with + an MSB of 0; otherwise, it chooses a CircID with an MSB of 1. + + (An OP with no public key MAY choose any CircID it wishes, since an OP + never needs to process a CREATE cell.) + + Public keys are compared numerically by modulus. + + As usual with DH, x and y MUST be generated randomly. + +5.1.1. 
CREATE_FAST/CREATED_FAST cells + + When initializing the first hop of a circuit, the OP has already + established the OR's identity and negotiated a secret key using TLS. + Because of this, it is not always necessary for the OP to perform the + public key operations to create a circuit. In this case, the + OP MAY send a CREATE_FAST cell instead of a CREATE cell for the first + hop only. The OR responds with a CREATED_FAST cell, and the circuit is + created. + + A CREATE_FAST cell contains: + + Key material (X) [HASH_LEN bytes] + + A CREATED_FAST cell contains: + + Key material (Y) [HASH_LEN bytes] + Derivative key data [HASH_LEN bytes] (See 5.2 below) + + The values of X and Y must be generated randomly. + + If an OR sees a circuit created with CREATE_FAST, the OR is sure to be the + first hop of a circuit. ORs SHOULD reject attempts to create streams with + RELAY_BEGIN exiting the circuit at the first hop: letting Tor be used as a + single hop proxy makes exit nodes a more attractive target for compromise. + +5.2. Setting circuit keys + + Once the handshake between the OP and an OR is completed, both can + now calculate g^xy with ordinary DH. Before computing g^xy, both client + and server MUST verify that the received g^x or g^y value is not degenerate; + that is, it must be strictly greater than 1 and strictly less than p-1 + where p is the DH modulus. Implementations MUST NOT complete a handshake + with degenerate keys. Implementations MUST NOT discard other "weak" + g^x values. + + (Discarding degenerate keys is critical for security; if bad keys + are not discarded, an attacker can substitute the server's CREATED + cell's g^y with 0 or 1, thus creating a known g^xy and impersonating + the server. Discarding other keys may allow attacks to learn bits of + the private key.) + + If CREATE or EXTEND is used to extend a circuit, the client and server + base their key material on K0=g^xy, represented as a big-endian unsigned + integer. + + If CREATE_FAST is used, the client and server base their key material on + K0=X|Y. + + From the base key material K0, they compute KEY_LEN*2+HASH_LEN*3 bytes of + derivative key data as + K = H(K0 | [00]) | H(K0 | [01]) | H(K0 | [02]) | ... + + The first HASH_LEN bytes of K form KH; the next HASH_LEN form the forward + digest Df; the next HASH_LEN 41-60 form the backward digest Db; the next + KEY_LEN 61-76 form Kf, and the final KEY_LEN form Kb. Excess bytes from K + are discarded. + + KH is used in the handshake response to demonstrate knowledge of the + computed shared key. Df is used to seed the integrity-checking hash + for the stream of data going from the OP to the OR, and Db seeds the + integrity-checking hash for the data stream from the OR to the OP. Kf + is used to encrypt the stream of data going from the OP to the OR, and + Kb is used to encrypt the stream of data going from the OR to the OP. + +5.3. Creating circuits + + When creating a circuit through the network, the circuit creator + (OP) performs the following steps: + + 1. Choose an onion router as an exit node (R_N), such that the onion + router's exit policy includes at least one pending stream that + needs a circuit (if there are any). + + 2. Choose a chain of (N-1) onion routers + (R_1...R_N-1) to constitute the path, such that no router + appears in the path twice. + + 3. If not already connected to the first router in the chain, + open a new connection to that router. + + 4. 
Choose a circID not already in use on the connection with the + first router in the chain; send a CREATE cell along the + connection, to be received by the first onion router. + + 5. Wait until a CREATED cell is received; finish the handshake + and extract the forward key Kf_1 and the backward key Kb_1. + + 6. For each subsequent onion router R (R_2 through R_N), extend + the circuit to R. + + To extend the circuit by a single onion router R_M, the OP performs + these steps: + + 1. Create an onion skin, encrypted to R_M's public onion key. + + 2. Send the onion skin in a relay EXTEND cell along + the circuit (see section 5). + + 3. When a relay EXTENDED cell is received, verify KH, and + calculate the shared keys. The circuit is now extended. + + When an onion router receives an EXTEND relay cell, it sends a CREATE + cell to the next onion router, with the enclosed onion skin as its + payload. As special cases, if the extend cell includes a digest of + all zeroes, or asks to extend back to the relay that sent the extend + cell, the circuit will fail and be torn down. The initiating onion + router chooses some circID not yet used on the connection between the + two onion routers. (But see section 5.1. above, concerning choosing + circIDs based on lexicographic order of nicknames.) + + When an onion router receives a CREATE cell, if it already has a + circuit on the given connection with the given circID, it drops the + cell. Otherwise, after receiving the CREATE cell, it completes the + DH handshake, and replies with a CREATED cell. Upon receiving a + CREATED cell, an onion router packs it payload into an EXTENDED relay + cell (see section 5), and sends that cell up the circuit. Upon + receiving the EXTENDED relay cell, the OP can retrieve g^y. + + (As an optimization, OR implementations may delay processing onions + until a break in traffic allows time to do so without harming + network latency too greatly.) + +5.3.1. Canonical connections + + It is possible for an attacker to launch a man-in-the-middle attack + against a connection by telling OR Alice to extend to OR Bob at some + address X controlled by the attacker. The attacker cannot read the + encrypted traffic, but the attacker is now in a position to count all + bytes sent between Alice and Bob (assuming Alice was not already + connected to Bob.) + + To prevent this, when an OR we gets an extend request, it SHOULD use an + existing OR connection if the ID matches, and ANY of the following + conditions hold: + - The IP matches the requested IP. + - The OR knows that the IP of the connection it's using is canonical + because it was listed in the NETINFO cell. + - The OR knows that the IP of the connection it's using is canonical + because it was listed in the server descriptor. + + [This is not implemented in Tor 0.2.0.23-rc.] + +5.4. Tearing down circuits + + Circuits are torn down when an unrecoverable error occurs along + the circuit, or when all streams on a circuit are closed and the + circuit's intended lifetime is over. Circuits may be torn down + either completely or hop-by-hop. + + To tear down a circuit completely, an OR or OP sends a DESTROY + cell to the adjacent nodes on that circuit, using the appropriate + direction's circID. + + Upon receiving an outgoing DESTROY cell, an OR frees resources + associated with the corresponding circuit. If it's not the end of + the circuit, it sends a DESTROY cell for that circuit to the next OR + in the circuit. 
If the node is the end of the circuit, then it tears + down any associated edge connections (see section 6.1). + + After a DESTROY cell has been processed, an OR ignores all data or + destroy cells for the corresponding circuit. + + To tear down part of a circuit, the OP may send a RELAY_TRUNCATE cell + signaling a given OR (Stream ID zero). That OR sends a DESTROY + cell to the next node in the circuit, and replies to the OP with a + RELAY_TRUNCATED cell. + + When an unrecoverable error occurs along one connection in a + circuit, the nodes on either side of the connection should, if they + are able, act as follows: the node closer to the OP should send a + RELAY_TRUNCATED cell towards the OP; the node farther from the OP + should send a DESTROY cell down the circuit. + + The payload of a RELAY_TRUNCATED or DESTROY cell contains a single octet, + describing why the circuit is being closed or truncated. When sending a + TRUNCATED or DESTROY cell because of another TRUNCATED or DESTROY cell, + the error code should be propagated. The origin of a circuit always sets + this error code to 0, to avoid leaking its version. + + The error codes are: + 0 -- NONE (No reason given.) + 1 -- PROTOCOL (Tor protocol violation.) + 2 -- INTERNAL (Internal error.) + 3 -- REQUESTED (A client sent a TRUNCATE command.) + 4 -- HIBERNATING (Not currently operating; trying to save bandwidth.) + 5 -- RESOURCELIMIT (Out of memory, sockets, or circuit IDs.) + 6 -- CONNECTFAILED (Unable to reach server.) + 7 -- OR_IDENTITY (Connected to server, but its OR identity was not + as expected.) + 8 -- OR_CONN_CLOSED (The OR connection that was carrying this circuit + died.) + 9 -- FINISHED (The circuit has expired for being dirty or old.) + 10 -- TIMEOUT (Circuit construction took too long) + 11 -- DESTROYED (The circuit was destroyed w/o client TRUNCATE) + 12 -- NOSUCHSERVICE (Request for unknown hidden service) + +5.5. Routing relay cells + + When an OR receives a RELAY or RELAY_EARLY cell, it checks the cell's + circID and determines whether it has a corresponding circuit along that + connection. If not, the OR drops the cell. + + Otherwise, if the OR is not at the OP edge of the circuit (that is, + either an 'exit node' or a non-edge node), it de/encrypts the payload + with the stream cipher, as follows: + 'Forward' relay cell (same direction as CREATE): + Use Kf as key; decrypt. + 'Back' relay cell (opposite direction from CREATE): + Use Kb as key; encrypt. + Note that in counter mode, decrypt and encrypt are the same operation. + + The OR then decides whether it recognizes the relay cell, by + inspecting the payload as described in section 6.1 below. If the OR + recognizes the cell, it processes the contents of the relay cell. + Otherwise, it passes the decrypted relay cell along the circuit if + the circuit continues. If the OR at the end of the circuit + encounters an unrecognized relay cell, an error has occurred: the OR + sends a DESTROY cell to tear down the circuit. + + When a relay cell arrives at an OP, the OP decrypts the payload + with the stream cipher as follows: + OP receives data cell: + For I=N...1, + Decrypt with Kb_I. If the payload is recognized (see + section 6..1), then stop and process the payload. + + For more information, see section 6 below. + +5.6. Handling relay_early cells + + A RELAY_EARLY cell is designed to limit the length any circuit can reach. 
+ When an OR receives a RELAY_EARLY cell, and the next node in the circuit + is speaking v2 of the link protocol or later, the OR relays the cell as a + RELAY_EARLY cell. Otherwise, it relays it as a RELAY cell. + + If a node ever receives more than 8 RELAY_EARLY cells on a given + outbound circuit, it SHOULD close the circuit. (For historical reasons, + we don't limit the number of inbound RELAY_EARLY cells; they should + be harmless anyway because clients won't accept extend requests. See + bug 1038.) + + When speaking v2 of the link protocol or later, clients MUST only send + EXTEND cells inside RELAY_EARLY cells. Clients SHOULD send the first ~8 + RELAY cells that are not targeted at the first hop of any circuit as + RELAY_EARLY cells too, in order to partially conceal the circuit length. + + [In a future version of Tor, servers will reject any EXTEND cell not + received in a RELAY_EARLY cell. See proposal 110.] + +6. Application connections and stream management + +6.1. Relay cells + + Within a circuit, the OP and the exit node use the contents of + RELAY packets to tunnel end-to-end commands and TCP connections + ("Streams") across circuits. End-to-end commands can be initiated + by either edge; streams are initiated by the OP. + + The payload of each unencrypted RELAY cell consists of: + Relay command [1 byte] + 'Recognized' [2 bytes] + StreamID [2 bytes] + Digest [4 bytes] + Length [2 bytes] + Data [CELL_LEN-14 bytes] + + The relay commands are: + 1 -- RELAY_BEGIN [forward] + 2 -- RELAY_DATA [forward or backward] + 3 -- RELAY_END [forward or backward] + 4 -- RELAY_CONNECTED [backward] + 5 -- RELAY_SENDME [forward or backward] [sometimes control] + 6 -- RELAY_EXTEND [forward] [control] + 7 -- RELAY_EXTENDED [backward] [control] + 8 -- RELAY_TRUNCATE [forward] [control] + 9 -- RELAY_TRUNCATED [backward] [control] + 10 -- RELAY_DROP [forward or backward] [control] + 11 -- RELAY_RESOLVE [forward] + 12 -- RELAY_RESOLVED [backward] + 13 -- RELAY_BEGIN_DIR [forward] + + 32..40 -- Used for hidden services; see rend-spec.txt. + + Commands labelled as "forward" must only be sent by the originator + of the circuit. Commands labelled as "backward" must only be sent by + other nodes in the circuit back to the originator. Commands marked + as either can be sent either by the originator or other nodes. + + The 'recognized' field in any unencrypted relay payload is always set + to zero; the 'digest' field is computed as the first four bytes of + the running digest of all the bytes that have been destined for + this hop of the circuit or originated from this hop of the circuit, + seeded from Df or Db respectively (obtained in section 5.2 above), + and including this RELAY cell's entire payload (taken with the digest + field set to zero). + + When the 'recognized' field of a RELAY cell is zero, and the digest + is correct, the cell is considered "recognized" for the purposes of + decryption (see section 5.5 above). + + (The digest does not include any bytes from relay cells that do + not start or end at this hop of the circuit. That is, it does not + include forwarded data. Therefore if 'recognized' is zero but the + digest does not match, the running digest at that node should + not be updated, and the cell should be forwarded on.) + + All RELAY cells pertaining to the same tunneled stream have the + same stream ID. StreamIDs are chosen arbitrarily by the OP. 
RELAY + cells that affect the entire circuit rather than a particular + stream use a StreamID of zero -- they are marked in the table above + as "[control]" style cells. (Sendme cells are marked as "sometimes + control" because they can take include a StreamID or not depending + on their purpose -- see Section 7.) + + The 'Length' field of a relay cell contains the number of bytes in + the relay payload which contain real payload data. The remainder of + the payload is padded with NUL bytes. + + If the RELAY cell is recognized but the relay command is not + understood, the cell must be dropped and ignored. Its contents + still count with respect to the digests, though. + +6.2. Opening streams and transferring data + + To open a new anonymized TCP connection, the OP chooses an open + circuit to an exit that may be able to connect to the destination + address, selects an arbitrary StreamID not yet used on that circuit, + and constructs a RELAY_BEGIN cell with a payload encoding the address + and port of the destination host. The payload format is: + + ADDRESS | ':' | PORT | [00] + + where ADDRESS can be a DNS hostname, or an IPv4 address in + dotted-quad format, or an IPv6 address surrounded by square brackets; + and where PORT is a decimal integer between 1 and 65535, inclusive. + + [What is the [00] for? -NM] + [It's so the payload is easy to parse out with string funcs -RD] + + Upon receiving this cell, the exit node resolves the address as + necessary, and opens a new TCP connection to the target port. If the + address cannot be resolved, or a connection can't be established, the + exit node replies with a RELAY_END cell. (See 6.4 below.) + Otherwise, the exit node replies with a RELAY_CONNECTED cell, whose + payload is in one of the following formats: + The IPv4 address to which the connection was made [4 octets] + A number of seconds (TTL) for which the address may be cached [4 octets] + or + Four zero-valued octets [4 octets] + An address type (6) [1 octet] + The IPv6 address to which the connection was made [16 octets] + A number of seconds (TTL) for which the address may be cached [4 octets] + [XXXX No version of Tor currently generates the IPv6 format.] + + [Tor servers before 0.1.2.0 set the TTL field to a fixed value. Later + versions set the TTL to the last value seen from a DNS server, and expire + their own cached entries after a fixed interval. This prevents certain + attacks.] + + The OP waits for a RELAY_CONNECTED cell before sending any data. + Once a connection has been established, the OP and exit node + package stream data in RELAY_DATA cells, and upon receiving such + cells, echo their contents to the corresponding TCP stream. + RELAY_DATA cells sent to unrecognized streams are dropped. + + Relay RELAY_DROP cells are long-range dummies; upon receiving such + a cell, the OR or OP must drop it. + +6.2.1. Opening a directory stream + + If a Tor server is a directory server, it should respond to a + RELAY_BEGIN_DIR cell as if it had received a BEGIN cell requesting a + connection to its directory port. RELAY_BEGIN_DIR cells ignore exit + policy, since the stream is local to the Tor process. + + If the Tor server is not running a directory service, it should respond + with a REASON_NOTDIRECTORY RELAY_END cell. + + Clients MUST generate an all-zero payload for RELAY_BEGIN_DIR cells, + and servers MUST ignore the payload. + + [RELAY_BEGIN_DIR was not supported before Tor 0.1.2.2-alpha; clients + SHOULD NOT send it to routers running earlier versions of Tor.] + +6.3. 
Closing streams + + When an anonymized TCP connection is closed, or an edge node + encounters error on any stream, it sends a 'RELAY_END' cell along the + circuit (if possible) and closes the TCP connection immediately. If + an edge node receives a 'RELAY_END' cell for any stream, it closes + the TCP connection completely, and sends nothing more along the + circuit for that stream. + + The payload of a RELAY_END cell begins with a single 'reason' byte to + describe why the stream is closing, plus optional data (depending on + the reason.) The values are: + + 1 -- REASON_MISC (catch-all for unlisted reasons) + 2 -- REASON_RESOLVEFAILED (couldn't look up hostname) + 3 -- REASON_CONNECTREFUSED (remote host refused connection) [*] + 4 -- REASON_EXITPOLICY (OR refuses to connect to host or port) + 5 -- REASON_DESTROY (Circuit is being destroyed) + 6 -- REASON_DONE (Anonymized TCP connection was closed) + 7 -- REASON_TIMEOUT (Connection timed out, or OR timed out + while connecting) + 8 -- (unallocated) [**] + 9 -- REASON_HIBERNATING (OR is temporarily hibernating) + 10 -- REASON_INTERNAL (Internal error at the OR) + 11 -- REASON_RESOURCELIMIT (OR has no resources to fulfill request) + 12 -- REASON_CONNRESET (Connection was unexpectedly reset) + 13 -- REASON_TORPROTOCOL (Sent when closing connection because of + Tor protocol violations.) + 14 -- REASON_NOTDIRECTORY (Client sent RELAY_BEGIN_DIR to a + non-directory server.) + + (With REASON_EXITPOLICY, the 4-byte IPv4 address or 16-byte IPv6 address + forms the optional data, along with a 4-byte TTL; no other reason + currently has extra data.) + + OPs and ORs MUST accept reasons not on the above list, since future + versions of Tor may provide more fine-grained reasons. + + Tors SHOULD NOT send any reason except REASON_MISC for a stream that they + have originated. + + [*] Older versions of Tor also send this reason when connections are + reset. + [**] Due to a bug in versions of Tor through 0095, error reason 8 must + remain allocated until that version is obsolete. + + --- [The rest of this section describes unimplemented functionality.] + + Because TCP connections can be half-open, we follow an equivalent + to TCP's FIN/FIN-ACK/ACK protocol to close streams. + + An exit connection can have a TCP stream in one of three states: + 'OPEN', 'DONE_PACKAGING', and 'DONE_DELIVERING'. For the purposes + of modeling transitions, we treat 'CLOSED' as a fourth state, + although connections in this state are not, in fact, tracked by the + onion router. + + A stream begins in the 'OPEN' state. Upon receiving a 'FIN' from + the corresponding TCP connection, the edge node sends a 'RELAY_FIN' + cell along the circuit and changes its state to 'DONE_PACKAGING'. + Upon receiving a 'RELAY_FIN' cell, an edge node sends a 'FIN' to + the corresponding TCP connection (e.g., by calling + shutdown(SHUT_WR)) and changing its state to 'DONE_DELIVERING'. + + When a stream in already in 'DONE_DELIVERING' receives a 'FIN', it + also sends a 'RELAY_FIN' along the circuit, and changes its state + to 'CLOSED'. When a stream already in 'DONE_PACKAGING' receives a + 'RELAY_FIN' cell, it sends a 'FIN' and changes its state to + 'CLOSED'. + + If an edge node encounters an error on any stream, it sends a + 'RELAY_END' cell (if possible) and closes the stream immediately. + +6.4. Remote hostname lookup + + To find the address associated with a hostname, the OP sends a + RELAY_RESOLVE cell containing the hostname to be resolved with a nul + terminating byte. 
(For a reverse lookup, the OP sends a RELAY_RESOLVE + cell containing an in-addr.arpa address.) The OR replies with a + RELAY_RESOLVED cell containing a status byte, and any number of + answers. Each answer is of the form: + Type (1 octet) + Length (1 octet) + Value (variable-width) + TTL (4 octets) + "Length" is the length of the Value field. + "Type" is one of: + 0x00 -- Hostname + 0x04 -- IPv4 address + 0x06 -- IPv6 address + 0xF0 -- Error, transient + 0xF1 -- Error, nontransient + + If any answer has a type of 'Error', then no other answer may be given. + + The RELAY_RESOLVE cell must use a nonzero, distinct streamID; the + corresponding RELAY_RESOLVED cell must use the same streamID. No stream + is actually created by the OR when resolving the name. + +7. Flow control + +7.1. Link throttling + + Each client or relay should do appropriate bandwidth throttling to + keep its user happy. + + Communicants rely on TCP's default flow control to push back when they + stop reading. + + The mainline Tor implementation uses token buckets (one for reads, + one for writes) for the rate limiting. + + Since 0.2.0.x, Tor has let the user specify an additional pair of + token buckets for "relayed" traffic, so people can deploy a Tor relay + with strict rate limiting, but also use the same Tor as a client. To + avoid partitioning concerns we combine both classes of traffic over a + given OR connection, and keep track of the last time we read or wrote + a high-priority (non-relayed) cell. If it's been less than N seconds + (currently N=30), we give the whole connection high priority, else we + give the whole connection low priority. We also give low priority + to reads and writes for connections that are serving directory + information. See proposal 111 for details. + +7.2. Link padding + + Link padding can be created by sending PADDING cells along the + connection; relay cells of type "DROP" can be used for long-range + padding. + + Currently nodes are not required to do any sort of link padding or + dummy traffic. Because strong attacks exist even with link padding, + and because link padding greatly increases the bandwidth requirements + for running a node, we plan to leave out link padding until this + tradeoff is better understood. + +7.3. Circuit-level flow control + + To control a circuit's bandwidth usage, each OR keeps track of two + 'windows', consisting of how many RELAY_DATA cells it is allowed to + originate (package for transmission), and how many RELAY_DATA cells + it is willing to consume (receive for local streams). These limits + do not apply to cells that the OR receives from one host and relays + to another. + + Each 'window' value is initially set to 1000 data cells + in each direction (cells that are not data cells do not affect + the window). When an OR is willing to deliver more cells, it sends a + RELAY_SENDME cell towards the OP, with Stream ID zero. When an OR + receives a RELAY_SENDME cell with stream ID zero, it increments its + packaging window. + + Each of these cells increments the corresponding window by 100. + + The OP behaves identically, except that it must track a packaging + window and a delivery window for every OR in the circuit. + + An OR or OP sends cells to increment its delivery window when the + corresponding window value falls under some threshold (900). + + If a packaging window reaches 0, the OR or OP stops reading from + TCP connections for all streams on the corresponding circuit, and + sends no more RELAY_DATA cells until receiving a RELAY_SENDME cell. 
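+
+  [Illustrative sketch only: the circuit-level window bookkeeping described
+  in this section can be summarized as follows.  The class below is invented
+  for this note, uses the values given above (1000, 100, threshold 900), and
+  is not taken from any implementation.]
+
+      final class CircuitWindows {
+          private int packageWindow = 1000;  // RELAY_DATA cells we may still originate
+          private int deliverWindow = 1000;  // RELAY_DATA cells we will still accept
+
+          /** Called after originating a RELAY_DATA cell; false means stop reading
+           *  from local TCP streams until a circuit-level RELAY_SENDME arrives. */
+          boolean onDataCellPackaged() {
+              packageWindow--;
+              return packageWindow > 0;
+          }
+
+          /** Called on receiving a RELAY_SENDME cell with stream ID zero. */
+          void onCircuitSendmeReceived() {
+              packageWindow += 100;
+          }
+
+          /** Called after delivering a RELAY_DATA cell locally; true means a
+           *  circuit-level RELAY_SENDME should be sent back towards the other
+           *  end, and the delivery window is credited accordingly. */
+          boolean onDataCellDelivered() {
+              deliverWindow--;
+              if (deliverWindow <= 900) {      // threshold given above
+                  deliverWindow += 100;
+                  return true;
+              }
+              return false;
+          }
+      }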
+[this stuff is badly worded; copy in the tor-design section -RD] + +7.4. Stream-level flow control + + Edge nodes use RELAY_SENDME cells to implement end-to-end flow + control for individual connections across circuits. Similarly to + circuit-level flow control, edge nodes begin with a window of cells + (500) per stream, and increment the window by a fixed value (50) + upon receiving a RELAY_SENDME cell. Edge nodes initiate RELAY_SENDME + cells when both a) the window is <= 450, and b) there are less than + ten cell payloads remaining to be flushed at that edge. + +A.1. Differences between spec and implementation + +- The current specification requires all ORs to have IPv4 addresses, but + allows servers to exit and resolve to IPv6 addresses, and to declare IPv6 + addresses in their exit policies. The current codebase has no IPv6 + support at all. + diff --git a/orchid/logging.properties b/orchid/logging.properties new file mode 100644 index 00000000..5a212d03 --- /dev/null +++ b/orchid/logging.properties @@ -0,0 +1,8 @@ +handlers=java.util.logging.ConsoleHandler +.level = INFO + +java.util.logging.ConsoleHandler.level = FINEST +java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter +java.util.logging.SimpleFormatter.format =[%1$tT] %4$s: %5$s%6$s%n + +# com.subgraph.orchid.circuits.level=FINE diff --git a/orchid/opt/xmlrpc/com/subgraph/orchid/xmlrpc/OrchidXmlRpcTransport.java b/orchid/opt/xmlrpc/com/subgraph/orchid/xmlrpc/OrchidXmlRpcTransport.java new file mode 100644 index 00000000..bce6d6ab --- /dev/null +++ b/orchid/opt/xmlrpc/com/subgraph/orchid/xmlrpc/OrchidXmlRpcTransport.java @@ -0,0 +1,309 @@ +package com.subgraph.orchid.xmlrpc; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.UnsupportedEncodingException; +import java.net.ConnectException; +import java.net.Socket; +import java.net.URL; +import java.net.UnknownHostException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.StringTokenizer; +import java.util.logging.Logger; + +import javax.net.SocketFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocket; +import javax.net.ssl.SSLSocketFactory; + +import org.apache.xmlrpc.XmlRpcException; +import org.apache.xmlrpc.XmlRpcRequest; +import org.apache.xmlrpc.client.XmlRpcClient; +import org.apache.xmlrpc.client.XmlRpcClientException; +import org.apache.xmlrpc.client.XmlRpcHttpClientConfig; +import org.apache.xmlrpc.client.XmlRpcHttpTransport; +import org.apache.xmlrpc.client.XmlRpcHttpTransportException; +import org.apache.xmlrpc.client.XmlRpcLiteHttpTransport; +import org.apache.xmlrpc.common.XmlRpcStreamRequestConfig; +import org.apache.xmlrpc.util.HttpUtil; +import org.apache.xmlrpc.util.LimitedInputStream; +import org.xml.sax.SAXException; + +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.sockets.AndroidSSLSocketFactory; + +public class OrchidXmlRpcTransport extends XmlRpcHttpTransport { + + private final static Logger logger = Logger.getLogger(OrchidXmlRpcTransport.class.getName()); + + private final SocketFactory socketFactory; + private final SSLContext sslContext; + + private SSLSocketFactory sslSocketFactory; + + public OrchidXmlRpcTransport(XmlRpcClient pClient, SocketFactory socketFactory, SSLContext sslContext) { + super(pClient, userAgent); 
+ this.socketFactory = socketFactory; + this.sslContext = sslContext; + } + + public synchronized SSLSocketFactory getSSLSocketFactory() { + if(sslSocketFactory == null) { + sslSocketFactory = createSSLSocketFactory(); + } + return sslSocketFactory; + } + + private SSLSocketFactory createSSLSocketFactory() { + if(Tor.isAndroidRuntime()) { + return createAndroidSSLSocketFactory(); + } + if(sslContext == null) { + return (SSLSocketFactory) SSLSocketFactory.getDefault(); + } else { + return sslContext.getSocketFactory(); + } + } + + private SSLSocketFactory createAndroidSSLSocketFactory() { + if(sslContext == null) { + try { + return new AndroidSSLSocketFactory(); + } catch (NoSuchAlgorithmException e) { + logger.severe("Failed to create default ssl context"); + System.exit(1); + return null; + } + } else { + return new AndroidSSLSocketFactory(sslContext); + } + } + + protected Socket newSocket(boolean pSSL, String pHostName, int pPort) throws UnknownHostException, IOException { + final Socket s = socketFactory.createSocket(pHostName, pPort); + if(pSSL) { + return getSSLSocketFactory().createSocket(s, pHostName, pPort, true); + } else { + return s; + } + } + + private static final String userAgent = USER_AGENT + " (Lite HTTP Transport)"; + private boolean ssl; + private String hostname; + private String host; + private int port; + private String uri; + private Socket socket; + private OutputStream output; + private InputStream input; + private final Map headers = new HashMap(); + private boolean responseGzipCompressed = false; + private XmlRpcHttpClientConfig config; + + + public Object sendRequest(XmlRpcRequest pRequest) throws XmlRpcException { + config = (XmlRpcHttpClientConfig) pRequest.getConfig(); + URL url = config.getServerURL(); + ssl = "https".equals(url.getProtocol()); + hostname = url.getHost(); + int p = url.getPort(); + port = p < 1 ? 80 : p; + String u = url.getFile(); + uri = (u == null || "".equals(u)) ? "/" : u; + host = port == 80 ? hostname : hostname + ":" + port; + headers.put("Host", host); + return super.sendRequest(pRequest); + } + + protected void setRequestHeader(String pHeader, String pValue) { + Object value = headers.get(pHeader); + if (value == null) { + headers.put(pHeader, pValue); + } else { + List list; + if (value instanceof String) { + list = new ArrayList(); + list.add((String)value); + headers.put(pHeader, list); + } else { + list = (List) value; + } + list.add(pValue); + } + } + + protected void close() throws XmlRpcClientException { + IOException e = null; + if (input != null) { + try { + input.close(); + } catch (IOException ex) { + e = ex; + } + } + if (output != null) { + try { + output.close(); + } catch (IOException ex) { + if (e != null) { + e = ex; + } + } + } + if (socket != null) { + try { + socket.close(); + } catch (IOException ex) { + if (e != null) { + e = ex; + } + } + } + if (e != null) { + throw new XmlRpcClientException("Failed to close connection: " + e.getMessage(), e); + } + } + + private OutputStream getOutputStream() throws XmlRpcException { + try { + final int retries = 3; + final int delayMillis = 100; + + for (int tries = 0; ; tries++) { + try { + socket = newSocket(ssl, hostname, port); + output = new BufferedOutputStream(socket.getOutputStream()){ + /** Closing the output stream would close the whole socket, which we don't want, + * because the don't want until the request is processed completely. + * A close will later occur within + * {@link XmlRpcLiteHttpTransport#close()}. 
+ */ + public void close() throws IOException { + flush(); + if(!(socket instanceof SSLSocket)) { + socket.shutdownOutput(); + } + } + }; + break; + } catch (ConnectException e) { + if (tries >= retries) { + throw new XmlRpcException("Failed to connect to " + + hostname + ":" + port + ": " + e.getMessage(), e); + } else { + try { + Thread.sleep(delayMillis); + } catch (InterruptedException ignore) { + } + } + } + } + sendRequestHeaders(output); + return output; + } catch (IOException e) { + throw new XmlRpcException("Failed to open connection to " + + hostname + ":" + port + ": " + e.getMessage(), e); + } + } + + + + private byte[] toHTTPBytes(String pValue) throws UnsupportedEncodingException { + return pValue.getBytes("US-ASCII"); + } + + private void sendHeader(OutputStream pOut, String pKey, String pValue) throws IOException { + pOut.write(toHTTPBytes(pKey + ": " + pValue + "\r\n")); + } + + private void sendRequestHeaders(OutputStream pOut) throws IOException { + pOut.write(("POST " + uri + " HTTP/1.0\r\n").getBytes("US-ASCII")); + for (Iterator iter = headers.entrySet().iterator(); iter.hasNext(); ) { + Map.Entry entry = (Map.Entry) iter.next(); + String key = (String) entry.getKey(); + Object value = entry.getValue(); + if (value instanceof String) { + sendHeader(pOut, key, (String) value); + } else { + List list = (List) value; + for (int i = 0; i < list.size(); i++) { + sendHeader(pOut, key, (String) list.get(i)); + } + } + } + pOut.write(toHTTPBytes("\r\n")); + } + + protected boolean isResponseGzipCompressed(XmlRpcStreamRequestConfig pConfig) { + return responseGzipCompressed; + } + + protected InputStream getInputStream() throws XmlRpcException { + final byte[] buffer = new byte[2048]; + try { + // If reply timeout specified, set the socket timeout accordingly + if (config.getReplyTimeout() != 0) + socket.setSoTimeout(config.getReplyTimeout()); + input = new BufferedInputStream(socket.getInputStream()); + // start reading server response headers + String line = HttpUtil.readLine(input, buffer); + StringTokenizer tokens = new StringTokenizer(line); + tokens.nextToken(); // Skip HTTP version + String statusCode = tokens.nextToken(); + String statusMsg = tokens.nextToken("\n\r"); + final int code; + try { + code = Integer.parseInt(statusCode); + } catch (NumberFormatException e) { + throw new XmlRpcClientException("Server returned invalid status code: " + + statusCode + " " + statusMsg, null); + } + if (code < 200 || code > 299) { + throw new XmlRpcHttpTransportException(code, statusMsg); + } + int contentLength = -1; + for (;;) { + line = HttpUtil.readLine(input, buffer); + if (line == null || "".equals(line)) { + break; + } + line = line.toLowerCase(); + if (line.startsWith("content-length:")) { + contentLength = Integer.parseInt(line.substring("content-length:".length()).trim()); + } else if (line.startsWith("content-encoding:")) { + responseGzipCompressed = HttpUtil.isUsingGzipEncoding(line.substring("content-encoding:".length())); + } + } + InputStream result; + if (contentLength == -1) { + result = input; + } else { + result = new LimitedInputStream(input, contentLength); + } + return result; + } catch (IOException e) { + throw new XmlRpcClientException("Failed to read server response: " + e.getMessage(), e); + } + } + + protected boolean isUsingByteArrayOutput(XmlRpcHttpClientConfig pConfig) { + boolean result = super.isUsingByteArrayOutput(pConfig); + if (!result) { + throw new IllegalStateException("The Content-Length header is required with HTTP/1.0, and HTTP/1.1 is 
unsupported by the Lite HTTP Transport."); + } + return result; + } + + protected void writeRequest(ReqWriter pWriter) throws XmlRpcException, IOException, SAXException { + pWriter.write(getOutputStream()); + } +} diff --git a/orchid/opt/xmlrpc/com/subgraph/orchid/xmlrpc/OrchidXmlRpcTransportFactory.java b/orchid/opt/xmlrpc/com/subgraph/orchid/xmlrpc/OrchidXmlRpcTransportFactory.java new file mode 100644 index 00000000..3f7bcfc7 --- /dev/null +++ b/orchid/opt/xmlrpc/com/subgraph/orchid/xmlrpc/OrchidXmlRpcTransportFactory.java @@ -0,0 +1,30 @@ +package com.subgraph.orchid.xmlrpc; + +import javax.net.SocketFactory; +import javax.net.ssl.SSLContext; + +import org.apache.xmlrpc.client.XmlRpcClient; +import org.apache.xmlrpc.client.XmlRpcTransport; +import org.apache.xmlrpc.client.XmlRpcTransportFactory; +import com.subgraph.orchid.TorClient; +import com.subgraph.orchid.sockets.OrchidSocketFactory; + +public class OrchidXmlRpcTransportFactory implements XmlRpcTransportFactory { + private final XmlRpcClient client; + private final SSLContext sslContext; + private final SocketFactory socketFactory; + + public OrchidXmlRpcTransportFactory(XmlRpcClient client, TorClient torClient) { + this(client, torClient, null); + } + + public OrchidXmlRpcTransportFactory(XmlRpcClient client, TorClient torClient, SSLContext sslContext) { + this.client = client; + this.socketFactory = new OrchidSocketFactory(torClient); + this.sslContext = sslContext; + } + + public XmlRpcTransport getTransport() { + return new OrchidXmlRpcTransport(client, socketFactory, sslContext); + } +} diff --git a/orchid/pom.xml b/orchid/pom.xml new file mode 100644 index 00000000..3e255a65 --- /dev/null +++ b/orchid/pom.xml @@ -0,0 +1,99 @@ + + + 4.0.0 + + com.subgraph + orchid + 1.0-SNAPSHOT + + Orchid + Tor library + + jar + + + + The Apache Software License, Version 2.0 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + + + + + src + + + maven-compiler-plugin + 3.0 + + 1.6 + 1.6 + + + + + maven-surefire-plugin + 2.9 + + + + org.apache.maven.plugins + maven-source-plugin + 2.1.2 + + + attach-sources + verify + + jar-no-fork + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 1.6 + + false + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + + package + + shade + + + true + bundled + + + + + + + + + + junit + junit + 4.11 + test + + + + diff --git a/orchid/src/com/subgraph/orchid/BridgeRouter.java b/orchid/src/com/subgraph/orchid/BridgeRouter.java new file mode 100644 index 00000000..f7a2dfa0 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/BridgeRouter.java @@ -0,0 +1,8 @@ +package com.subgraph.orchid; + +import com.subgraph.orchid.data.HexDigest; + +public interface BridgeRouter extends Router { + void setIdentity(HexDigest identity); + void setDescriptor(RouterDescriptor descriptor); +} diff --git a/orchid/src/com/subgraph/orchid/Cell.java b/orchid/src/com/subgraph/orchid/Cell.java new file mode 100644 index 00000000..b5122210 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/Cell.java @@ -0,0 +1,230 @@ +package com.subgraph.orchid; + + +public interface Cell { + /** Command constant for a PADDING type cell. */ + final static int PADDING = 0; + + /** Command constant for a CREATE type cell. */ + final static int CREATE = 1; + + /** Command constant for a CREATED type cell. */ + final static int CREATED = 2; + + /** Command constant for a RELAY type cell. */ + final static int RELAY = 3; + + /** Command constant for a DESTROY type cell. 
*/ + final static int DESTROY = 4; + + /** Command constant for a CREATE_FAST type cell. */ + final static int CREATE_FAST = 5; + + /** Command constant for a CREATED_FAST type cell. */ + final static int CREATED_FAST = 6; + + /** Command constant for a VERSIONS type cell. */ + final static int VERSIONS = 7; + + /** Command constant for a NETINFO type cell. */ + final static int NETINFO = 8; + + /** Command constant for a RELAY_EARLY type cell. */ + final static int RELAY_EARLY = 9; + + final static int VPADDING = 128; + final static int CERTS = 129; + final static int AUTH_CHALLENGE = 130; + final static int AUTHENTICATE = 131; + final static int AUTHORIZE = 132; + + final static int ERROR_NONE = 0; + final static int ERROR_PROTOCOL = 1; + final static int ERROR_INTERNAL = 2; + final static int ERROR_REQUESTED = 3; + final static int ERROR_HIBERNATING = 4; + final static int ERROR_RESOURCELIMIT = 5; + final static int ERROR_CONNECTFAILED = 6; + final static int ERROR_OR_IDENTITY = 7; + final static int ERROR_OR_CONN_CLOSED = 8; + final static int ERROR_FINISHED = 9; + final static int ERROR_TIMEOUT = 10; + final static int ERROR_DESTROYED = 11; + final static int ERROR_NOSUCHSERVICE = 12; + + final static int ADDRESS_TYPE_HOSTNAME = 0x00; + final static int ADDRESS_TYPE_IPV4 = 0x04; + final static int ADRESS_TYPE_IPV6 = 0x06; + + /** + * The fixed size of a standard cell. + */ + final static int CELL_LEN = 512; + + /** + * The length of a standard cell header. + */ + final static int CELL_HEADER_LEN = 3; + + /** + * The header length for a variable length cell (ie: VERSIONS) + */ + final static int CELL_VAR_HEADER_LEN = 5; + + /** + * The length of the payload space in a standard cell. + */ + final static int CELL_PAYLOAD_LEN = CELL_LEN - CELL_HEADER_LEN; + + /** + * Return the circuit id field from this cell. + * + * @return The circuit id field of this cell. + */ + int getCircuitId(); + + /** + * Return the command field from this cell. + * + * @return The command field of this cell. + */ + int getCommand(); + + /** + * Set the internal pointer to the first byte after the cell header. + */ + void resetToPayload(); + + /** + * Return the next byte from the cell and increment the internal pointer by one byte. + * + * @return The byte at the current pointer location. + */ + int getByte(); + + /** + * Return the byte at the specified offset into the cell. + * + * @param index The cell offset. + * @return The byte at the specified offset. + */ + int getByteAt(int index); + + /** + * Return the next 16-bit big endian value from the cell and increment the internal pointer by two bytes. + * + * @return The 16-bit short value at the current pointer location. + */ + int getShort(); + + /** + * Return the 16-bit big endian value at the specified offset into the cell. + * + * @param index The cell offset. + * @return The 16-bit short value at the specified offset. + */ + int getShortAt(int index); + + /** + * Return the next 32-bit big endian value from the cell and increment the internal pointer by four bytes. + * + * @return The 32-bit integer value at the current pointer location. + */ + int getInt(); + + /** + * Copy buffer.length bytes from the cell into buffer. The data is copied starting + * from the current internal pointer location and afterwards the internal pointer is incremented by buffer.length + * bytes. + * + * @param buffer The array of bytes to copy the cell data into. 
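+ * <p>
+ * For example (an illustrative sketch only), the remaining payload of a received cell
+ * can be copied out as follows:
+ * <pre>{@code
+ * byte[] payload = new byte[cell.cellBytesRemaining()];
+ * cell.getByteArray(payload);
+ * }</pre>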
+ */ + void getByteArray(byte[] buffer); + + /** + * Return the number of bytes already packed (for outgoing cells) or unpacked (for incoming cells). This is + * equivalent to the internal pointer position. + * + * @return The number of bytes already consumed from this cell. + */ + int cellBytesConsumed(); + + /** + * Return the number of bytes remaining between the current internal pointer and the end of the cell. If fields + * are being added to a new cell for transmission then this value indicates the remaining space in bytes for + * adding new data. If fields are being read from a received cell then this value describes the number of bytes + * which can be read without overflowing the cell. + * + * @return The number of payload bytes remaining in this cell. + */ + int cellBytesRemaining(); + + /** + * Store a byte at the current pointer location and increment the pointer by one byte. + * + * @param value The byte value to store. + */ + void putByte(int value); + + /** + * Store a byte at the specified offset into the cell. + * + * @param index The offset in bytes into the cell. + * @param value The byte value to store. + */ + void putByteAt(int index, int value); + + /** + * Store a 16-bit short value in big endian order at the current pointer location and + * increment the pointer by two bytes. + * + * @param value The 16-bit short value to store. + */ + void putShort(int value); + + /** + * Store a 16-bit short value in big endian byte order at the specified offset into the cell + * and increment the pointer by two bytes. + * + * @param index The offset in bytes into the cell. + * @param value The 16-bit short value to store. + */ + void putShortAt(int index, int value); + + /** + * Store a 32-bit integer value in big endian order at the current pointer location and + * increment the pointer by 4 bytes. + * + * @param value The 32-bit integer value to store. + */ + void putInt(int value); + + /** + * Store the entire array data at the current pointer location and increment + * the pointer by data.length bytes. + * + * @param data The array of bytes to store in the cell. + */ + void putByteArray(byte[] data); + + /** + * Store length bytes of the byte array data starting from + * offset into the array at the current pointer location and increment + * the pointer by length bytes. + * + * @param data The source array of bytes. + * @param offset The offset into the source array. + * @param length The number of bytes from the source array to store. + */ + void putByteArray(byte[] data, int offset, int length); + + /** + * Return the entire cell data as a raw array of bytes. For all cells except + * VERSIONS, this array will be exactly CELL_LEN bytes long. + * + * @return The cell data as an array of bytes. + */ + byte[] getCellBytes(); + + void putString(String string); +} diff --git a/orchid/src/com/subgraph/orchid/Circuit.java b/orchid/src/com/subgraph/orchid/Circuit.java new file mode 100644 index 00000000..0a1341fe --- /dev/null +++ b/orchid/src/com/subgraph/orchid/Circuit.java @@ -0,0 +1,95 @@ +package com.subgraph.orchid; + +import java.util.List; + +/** + * A Circuit represents a logical path through multiple ORs. Circuits are described in + * section 5 of tor-spec.txt. + * + */ +public interface Circuit { + + /** + * Return true if the circuit is presently in the connected state or + * false otherwise. + * + * @return Returns true if the circuit is presently connected, or + * false otherwise. 
+ */ + boolean isConnected(); + + boolean isPending(); + + boolean isClean(); + + boolean isMarkedForClose(); + + int getSecondsDirty(); + + /** + * Returns the entry router Connection object of this Circuit. Throws + * a TorException if the circuit is not currently open. + * + * @return The Connection object for the network connection to the entry router of this + * circuit. + * @throws TorException If this circuit is not currently connected. + */ + Connection getConnection(); + + /** + * Returns the curcuit id value for this circuit. + * + * @return The circuit id value for this circuit. + */ + int getCircuitId(); + + /** + * Create a new relay cell which is configured for delivery to the specified + * circuit targetNode with command value relayCommand + * and a stream id value of streamId. The returned RelayCell + * can then be used to populate the payload of the cell before delivering it. + * + * @param relayCommand The command value to send in the relay cell header. + * @param streamId The stream id value to send in the relay cell header. + * @param targetNode The target circuit node to encrypt this cell for. + * @return A newly created relay cell object. + */ + RelayCell createRelayCell(int relayCommand, int streamId, CircuitNode targetNode); + + /** + * Returns the next relay response cell received on this circuit. If no response is + * received within CIRCUIT_RELAY_RESPONSE_TIMEOUT milliseconds, null + * is returned. + * + * @return The next relay response cell received on this circuit or null if + * a timeout is reached before the next relay cell arrives. + */ + RelayCell receiveRelayCell(); + + /** + * Encrypt and deliver the relay cell cell. + * + * @param cell The relay cell to deliver over this circuit. + */ + void sendRelayCell(RelayCell cell); + + /** + * Return the last node or 'hop' in this circuit. + * + * @return The final 'hop' or node of this circuit. + */ + CircuitNode getFinalCircuitNode(); + + + void destroyCircuit(); + + void deliverRelayCell(Cell cell); + + void deliverControlCell(Cell cell); + + List getActiveStreams(); + + void markForClose(); + + void appendNode(CircuitNode node); +} diff --git a/orchid/src/com/subgraph/orchid/CircuitBuildHandler.java b/orchid/src/com/subgraph/orchid/CircuitBuildHandler.java new file mode 100644 index 00000000..40f6d3bb --- /dev/null +++ b/orchid/src/com/subgraph/orchid/CircuitBuildHandler.java @@ -0,0 +1,60 @@ +package com.subgraph.orchid; + +/** + * This callback interface is used for reporting progress when + * opening a new circuit. An instance of this interface is passed + * to the {@link Circuit#openCircuit(java.util.List, CircuitBuildHandler)} + * method. + * + * The normal sequence of callbacks which are fired when a circuit is opened + * successfully is {@link #connectionCompleted(Connection)} for the initial + * connection to the entry router, followed by one or more + * {@link #nodeAdded(CircuitNode)} as the circuit is extended with new nodes. + * When all requested nodes in the path have been added successfully to the + * circuit {@link #circuitBuildCompleted(Circuit)} is called and passed the + * newly constructed circuit. + * + * @see Circuit#openCircuit() + * + */ +public interface CircuitBuildHandler { + /** + * Called when a network connection to the entry node has completed + * successfully or if a network connection to the specified entry router + * already exists. + * + * @param connection The completed connection instance. 
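+ * <p>
+ * In practice this callback is implemented together with the others on this interface;
+ * a minimal logging handler, shown only as an illustrative sketch (the log() helper is
+ * hypothetical, not part of the library):
+ * <pre>{@code
+ * CircuitBuildHandler handler = new CircuitBuildHandler() {
+ *     public void connectionCompleted(Connection connection) { log("connected to entry router"); }
+ *     public void connectionFailed(String reason) { log("connection failed: " + reason); }
+ *     public void nodeAdded(CircuitNode node) { log("extended to " + node.getRouter().getNickname()); }
+ *     public void circuitBuildCompleted(Circuit circuit) { log("circuit ready"); }
+ *     public void circuitBuildFailed(String reason) { log("build failed: " + reason); }
+ * };
+ * }</pre>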
+ */ + void connectionCompleted(Connection connection); + + /** + * The circuit build has failed because the network connection to the + * entry node failed. No further callback methods will be called after + * this failure has been reported. + * + * @param reason A description of the reason for failing to connect to + * the entry node. + */ + void connectionFailed(String reason); + + /** + * A node or 'hop' has been added to the circuit which is being created. + * + * @param node The newly added circuit node. + */ + void nodeAdded(CircuitNode node); + + /** + * The circuit has been successfully built and is ready for use. + * + * @param circuit The newly constructed circuit. + */ + void circuitBuildCompleted(Circuit circuit); + + /** + * Called if the circuit build fails after connecting to the entry node. + * + * @param reason A description of the reason the circuit build has failed. + */ + void circuitBuildFailed(String reason); +} diff --git a/orchid/src/com/subgraph/orchid/CircuitManager.java b/orchid/src/com/subgraph/orchid/CircuitManager.java new file mode 100644 index 00000000..ee4fbc87 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/CircuitManager.java @@ -0,0 +1,51 @@ +package com.subgraph.orchid; + +import java.util.List; +import java.util.concurrent.TimeoutException; + +import com.subgraph.orchid.data.IPv4Address; + + +public interface CircuitManager { + + static int DIRECTORY_PURPOSE_CONSENSUS = 1; + static int DIRECTORY_PURPOSE_CERTIFICATES = 2; + static int DIRECTORY_PURPOSE_DESCRIPTORS = 3; + + /** + * Begin automatically building new circuits in the background. + */ + void startBuildingCircuits(); + void stopBuildingCircuits(boolean killCircuits); + /** + * Attempt to open an exit stream to the specified destination hostname and + * port. + * + * @param hostname The name of the host to open an exit connection to. + * @param port The port to open an exit connection to. + * @return The status response result of attempting to open the exit connection. + */ + Stream openExitStreamTo(String hostname, int port) throws InterruptedException, TimeoutException, OpenFailedException; + + /** + * Attempt to open an exit stream to the destination specified by address and + * port. + * + * @param address The address to open an exit connection to. + * @param port The port to open an exit connection to. + * @return The status response result of attempting the open the exit connection. 
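+ * <p>
+ * Illustrative sketch using the hostname variant above (the destination and port are
+ * placeholders, and the checked exceptions still need to be handled by the caller):
+ * <pre>{@code
+ * Stream stream = circuitManager.openExitStreamTo("example.com", 80);
+ * OutputStream out = stream.getOutputStream();
+ * InputStream in = stream.getInputStream();
+ * }</pre>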
+ */ + Stream openExitStreamTo(IPv4Address address, int port) throws InterruptedException, TimeoutException, OpenFailedException; + + + Stream openDirectoryStream(int purpose) throws InterruptedException, TimeoutException, OpenFailedException; + + Stream openDirectoryStream() throws InterruptedException, TimeoutException, OpenFailedException; + + DirectoryCircuit openDirectoryCircuit() throws OpenFailedException; + Circuit getCleanInternalCircuit() throws InterruptedException; + + ExitCircuit openExitCircuitTo(List path) throws OpenFailedException; + InternalCircuit openInternalCircuitTo(List path) throws OpenFailedException; + DirectoryCircuit openDirectoryCircuitTo(List path) throws OpenFailedException; +} diff --git a/orchid/src/com/subgraph/orchid/CircuitNode.java b/orchid/src/com/subgraph/orchid/CircuitNode.java new file mode 100644 index 00000000..aa09b88e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/CircuitNode.java @@ -0,0 +1,90 @@ +package com.subgraph.orchid; + + + +/** + * Represents the state of a single onion router hop in a connected or connecting {@link Circuit} + */ +public interface CircuitNode { + /** + * Return the {@link Router} associated with this node. + * + * @return The {@link Router} for this hop of the circuit chain. + */ + Router getRouter(); + + /** + * Update the 'forward' cryptographic digest state for this + * node with the contents of cell + * + * @param cell The {@link RelayCell} to add to the digest. + */ + void updateForwardDigest(RelayCell cell); + + /** + * Return the current 'forward' running digest value for this + * node as an array of TOR_DIGEST_SIZE bytes. + * + * @return The current 'forward' running digest value for this node. + */ + byte[] getForwardDigestBytes(); + + /** + * Encrypt a {@link RelayCell} for this node with the current + * 'forward' cipher state. + * + * @param cell The {@link RelayCell} to encrypt. + */ + void encryptForwardCell(RelayCell cell); + + /** + * Return the {@link CircuitNode} which immediately preceeds this + * one in the circuit node chain or null if this is + * the first hop. + * + * @return The previous {@link CircuitNode} in the chain or + * null if this is the first node. + */ + CircuitNode getPreviousNode(); + + /** + * Return immediately if the packaging window for this node is open (ie: greater than 0), otherwise + * block until the circuit is destroyed or the window is incremented by receiving a RELAY_SENDME cell + * from this node. + */ + void waitForSendWindow(); + + /** + * If the packaging window for this node is open (ie: greater than 0) this method + * decrements the packaging window by 1 and returns immediately, otherwise it will + * block until the circuit is destroyed or the window is incremented by receiving + * a RELAY_SENDME cell from this node. This method will always decrement the packaging + * window before returning unless the circuit has been destroyed. + */ + void waitForSendWindowAndDecrement(); + + /** + * This method is called to signal that a RELAY_SENDME cell has been received from this + * node and the packaging window should be incremented. This will also wake up any threads + * that are waiting for the packaging window to open. + */ + void incrementSendWindow(); + + /** + * This method is called when a RELAY_DATA cell is received from this node to decrement + * the deliver window counter. + */ + void decrementDeliverWindow(); + + /** + * Examines the delivery window and determines if it would be an appropriate time to + * send a RELAY_SENDME cell. 
If this method returns true, it increments the delivery + * window assuming that a RELAY_SENDME cell will be transmitted. + * + * @return Returns true if the deliver window is small enough that sending a RELAY_SENDME + * cell would be appropriate. + */ + boolean considerSendingSendme(); + + boolean decryptBackwardCell(Cell cell); +} diff --git a/orchid/src/com/subgraph/orchid/Connection.java b/orchid/src/com/subgraph/orchid/Connection.java new file mode 100644 index 00000000..54f0dfed --- /dev/null +++ b/orchid/src/com/subgraph/orchid/Connection.java @@ -0,0 +1,47 @@ +package com.subgraph.orchid; + + +/** + * A network connection to a Tor onion router. + */ +public interface Connection { + /** + * Return the {@link Router} associated with this connection. + * + * @return The entry router this connection represents. + */ + Router getRouter(); + + /** + * Return true if the socket for this connection has been closed. Otherwise, false. + * + * @return true if this connection is closed or false otherwise. + */ + boolean isClosed(); + /** + * Send a protocol {@link Cell} on this connection. + * + * @param cell The {@link Cell} to transfer. + * @throws ConnectionIOException If the cell could not be send because the connection is not connected + * or if an error occured while sending the cell data. + */ + void sendCell(Cell cell) throws ConnectionIOException; + + /** + * Remove a Circuit which has been bound to this Connection by a previous call to {@link #bindCircuit(Circuit) bindCircuit}. + * After removing a Circuit, any further received incoming cells for the Circuit will be discarded. + * + * @param circuit The Circuit to remove. + */ + void removeCircuit(Circuit circuit); + + /** + * Choose an available circuit id value and bind this Circuit to that id value, returning the id value. + * Once bound, any incoming relay cells will be delivered to the Circuit with {@link Circuit#deliverRelayCell(Cell)} + * and other cells will be delivered with {@link Circuit#deliverControlCell(Cell)}. + * + * @param circuit The Circuit to bind to this connection. + * @return the circuit id value for this binding. + */ + int bindCircuit(Circuit circuit); +} diff --git a/orchid/src/com/subgraph/orchid/ConnectionCache.java b/orchid/src/com/subgraph/orchid/ConnectionCache.java new file mode 100644 index 00000000..5210970c --- /dev/null +++ b/orchid/src/com/subgraph/orchid/ConnectionCache.java @@ -0,0 +1,21 @@ +package com.subgraph.orchid; + + +public interface ConnectionCache { + /** + * Returns a completed connection to the specified router. If an open connection + * to the requested router already exists it is returned, otherwise a new connection + * is opened. + * + * @param router The router to which a connection is requested. + * @param isDirectoryConnection Is this going to be used as a directory connection. + * @return a completed connection to the specified router. + * @throws InterruptedException if thread is interrupted while waiting for connection to complete. + * @throws ConnectionTimeoutException if timeout expires before connection completes. 
+ * @throws ConnectionFailedException if connection fails due to I/O error + * @throws ConnectionHandshakeException if connection fails because an error occurred during handshake phase + */ + Connection getConnectionTo(Router router, boolean isDirectoryConnection) throws InterruptedException, ConnectionTimeoutException, ConnectionFailedException, ConnectionHandshakeException; + + void close(); +} diff --git a/orchid/src/com/subgraph/orchid/ConnectionFailedException.java b/orchid/src/com/subgraph/orchid/ConnectionFailedException.java new file mode 100644 index 00000000..b7f25c69 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/ConnectionFailedException.java @@ -0,0 +1,11 @@ +package com.subgraph.orchid; + +public class ConnectionFailedException extends ConnectionIOException { + + private static final long serialVersionUID = -4484347156587613574L; + + public ConnectionFailedException(String message) { + super(message); + } + +} diff --git a/orchid/src/com/subgraph/orchid/ConnectionHandshakeException.java b/orchid/src/com/subgraph/orchid/ConnectionHandshakeException.java new file mode 100644 index 00000000..5e059319 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/ConnectionHandshakeException.java @@ -0,0 +1,10 @@ +package com.subgraph.orchid; + +public class ConnectionHandshakeException extends ConnectionIOException { + + private static final long serialVersionUID = -2544633445932967966L; + + public ConnectionHandshakeException(String message) { + super(message); + } +} diff --git a/orchid/src/com/subgraph/orchid/ConnectionIOException.java b/orchid/src/com/subgraph/orchid/ConnectionIOException.java new file mode 100644 index 00000000..a80d0fd4 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/ConnectionIOException.java @@ -0,0 +1,14 @@ +package com.subgraph.orchid; + +public class ConnectionIOException extends Exception { + + private static final long serialVersionUID = -5537650738995969203L; + + public ConnectionIOException() { + super(); + } + + public ConnectionIOException(String message) { + super(message); + } +} diff --git a/orchid/src/com/subgraph/orchid/ConnectionTimeoutException.java b/orchid/src/com/subgraph/orchid/ConnectionTimeoutException.java new file mode 100644 index 00000000..7a539b65 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/ConnectionTimeoutException.java @@ -0,0 +1,14 @@ +package com.subgraph.orchid; + +public class ConnectionTimeoutException extends ConnectionIOException { + + private static final long serialVersionUID = -6098661610150140151L; + + public ConnectionTimeoutException() { + super(); + } + + public ConnectionTimeoutException(String message) { + super(message); + } +} diff --git a/orchid/src/com/subgraph/orchid/ConsensusDocument.java b/orchid/src/com/subgraph/orchid/ConsensusDocument.java new file mode 100644 index 00000000..ce31947f --- /dev/null +++ b/orchid/src/com/subgraph/orchid/ConsensusDocument.java @@ -0,0 +1,44 @@ +package com.subgraph.orchid; + +import java.util.List; +import java.util.Set; + +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.Timestamp; + +public interface ConsensusDocument extends Document { + enum ConsensusFlavor { NS, MICRODESC }; + enum SignatureStatus { STATUS_VERIFIED, STATUS_FAILED, STATUS_NEED_CERTS }; + + interface RequiredCertificate { + int getDownloadFailureCount(); + void incrementDownloadFailureCount(); + HexDigest getAuthorityIdentity(); + HexDigest getSigningKey(); + } + + ConsensusFlavor getFlavor(); + Timestamp getValidAfterTime(); + Timestamp getFreshUntilTime(); + Timestamp 
getValidUntilTime(); + int getConsensusMethod(); + int getVoteSeconds(); + int getDistSeconds(); + Set getClientVersions(); + Set getServerVersions(); + boolean isLive(); + List getRouterStatusEntries(); + + SignatureStatus verifySignatures(); + Set getRequiredCertificates(); + + HexDigest getSigningHash(); + HexDigest getSigningHash256(); + + int getCircWindowParameter(); + int getWeightScaleParameter(); + + int getBandwidthWeight(String tag); + + boolean getUseNTorHandshake(); +} diff --git a/orchid/src/com/subgraph/orchid/Descriptor.java b/orchid/src/com/subgraph/orchid/Descriptor.java new file mode 100644 index 00000000..4fa1cb07 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/Descriptor.java @@ -0,0 +1,65 @@ +package com.subgraph.orchid; + +import java.util.Set; + +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; + +public interface Descriptor extends Document { + enum CacheLocation { NOT_CACHED, CACHED_CACHEFILE, CACHED_JOURNAL } + + HexDigest getDescriptorDigest(); + void setLastListed(long timestamp); + long getLastListed(); + void setCacheLocation(CacheLocation location); + CacheLocation getCacheLocation(); + int getBodyLength(); + + /** + * Return the public key used to encrypt EXTEND cells while establishing + * a circuit through this router. + * + * @return The onion routing protocol key for this router. + */ + TorPublicKey getOnionKey(); + byte[] getNTorOnionKey(); + + /** + * Return the IPv4 address of this router. + * + * @return The IPv4 address of this router. + */ + IPv4Address getAddress(); + + /** + * Return the port on which this node accepts TLS connections + * for the main OR protocol, or 0 if no router service is advertised. + * + * @return The onion routing port, or 0 if not a router. + */ + int getRouterPort(); + Set getFamilyMembers(); + + /** + * Return true if the exit policy of this router permits connections + * to the specified destination endpoint. + * + * @param address The IPv4 address of the destination. + * @param port The destination port. + * + * @return True if an exit connection to the specified destination is allowed + * or false otherwise. + */ + boolean exitPolicyAccepts(IPv4Address address, int port); + + /** + * Return true if the exit policy of this router accepts most connections + * to the specified destination port. + * + * @param port The destination port. + * @return True if an exit connection to the specified destination port is generally allowed + * or false otherwise. + */ + boolean exitPolicyAccepts(int port); +} diff --git a/orchid/src/com/subgraph/orchid/Directory.java b/orchid/src/com/subgraph/orchid/Directory.java new file mode 100644 index 00000000..5370e884 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/Directory.java @@ -0,0 +1,48 @@ +package com.subgraph.orchid; + +import java.util.Collection; +import java.util.List; +import java.util.Set; + +import com.subgraph.orchid.ConsensusDocument.RequiredCertificate; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.events.EventHandler; + +/** + * + * Main interface for accessing directory information and interacting + * with directory authorities and caches. 
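+ * <p>
+ * Illustrative lookup sketch (the nickname is a placeholder):
+ * <pre>{@code
+ * directory.loadFromStore();
+ * directory.waitUntilLoaded();
+ * Router router = directory.getRouterByName("SomeNickname");
+ * }</pre>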
+ * + */ +public interface Directory { + boolean haveMinimumRouterInfo(); + void loadFromStore(); + void close(); + void waitUntilLoaded(); + void storeCertificates(); + + Collection getDirectoryAuthorities(); + DirectoryServer getRandomDirectoryAuthority(); + void addCertificate(KeyCertificate certificate); + Set getRequiredCertificates(); + void addRouterMicrodescriptors(List microdescriptors); + void addRouterDescriptors(List descriptors); + void addConsensusDocument(ConsensusDocument consensus, boolean fromCache); + ConsensusDocument getCurrentConsensusDocument(); + boolean hasPendingConsensus(); + void registerConsensusChangedHandler(EventHandler handler); + void unregisterConsensusChangedHandler(EventHandler handler); + Router getRouterByName(String name); + Router getRouterByIdentity(HexDigest identity); + List getRouterListByNames(List names); + List getRoutersWithDownloadableDescriptors(); + List getAllRouters(); + + RouterMicrodescriptor getMicrodescriptorFromCache(HexDigest descriptorDigest); + RouterDescriptor getBasicDescriptorFromCache(HexDigest descriptorDigest); + + GuardEntry createGuardEntryFor(Router router); + List getGuardEntries(); + void removeGuardEntry(GuardEntry entry); + void addGuardEntry(GuardEntry entry); +} diff --git a/orchid/src/com/subgraph/orchid/DirectoryCircuit.java b/orchid/src/com/subgraph/orchid/DirectoryCircuit.java new file mode 100644 index 00000000..e99f6863 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/DirectoryCircuit.java @@ -0,0 +1,16 @@ +package com.subgraph.orchid; + +import java.util.concurrent.TimeoutException; + +public interface DirectoryCircuit extends Circuit { + /** + * Open an anonymous connection to the directory service running on the + * final node in this circuit. + * + * @param timeout in milliseconds + * @param autoclose if set to true, closing stream also marks this circuit for close + * + * @return The status response returned by trying to open the stream. 
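+ * <p>
+ * Illustrative sketch (the timeout value is arbitrary):
+ * <pre>{@code
+ * Stream stream = directoryCircuit.openDirectoryStream(10000, true);
+ * try {
+ *     // write an HTTP directory request to stream.getOutputStream() and read the reply
+ * } finally {
+ *     stream.close();
+ * }
+ * }</pre>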
+ */ + Stream openDirectoryStream(long timeout, boolean autoclose) throws InterruptedException, TimeoutException, StreamConnectFailedException; +} diff --git a/orchid/src/com/subgraph/orchid/DirectoryDownloader.java b/orchid/src/com/subgraph/orchid/DirectoryDownloader.java new file mode 100644 index 00000000..5133212f --- /dev/null +++ b/orchid/src/com/subgraph/orchid/DirectoryDownloader.java @@ -0,0 +1,27 @@ +package com.subgraph.orchid; + +import java.util.List; +import java.util.Set; + +import com.subgraph.orchid.ConsensusDocument.RequiredCertificate; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.directory.downloader.DirectoryRequestFailedException; + +public interface DirectoryDownloader { + void start(Directory directory); + void stop(); + + RouterDescriptor downloadBridgeDescriptor(Router bridge) throws DirectoryRequestFailedException; + + ConsensusDocument downloadCurrentConsensus(boolean useMicrodescriptors) throws DirectoryRequestFailedException; + ConsensusDocument downloadCurrentConsensus(boolean useMicrodescriptors, DirectoryCircuit circuit) throws DirectoryRequestFailedException; + + List downloadKeyCertificates(Set required) throws DirectoryRequestFailedException; + List downloadKeyCertificates(Set required, DirectoryCircuit circuit) throws DirectoryRequestFailedException; + + List downloadRouterDescriptors(Set fingerprints) throws DirectoryRequestFailedException; + List downloadRouterDescriptors(Set fingerprints, DirectoryCircuit circuit) throws DirectoryRequestFailedException; + + List downloadRouterMicrodescriptors(Set fingerprints) throws DirectoryRequestFailedException; + List downloadRouterMicrodescriptors(Set fingerprints, DirectoryCircuit circuit) throws DirectoryRequestFailedException; +} diff --git a/orchid/src/com/subgraph/orchid/DirectoryServer.java b/orchid/src/com/subgraph/orchid/DirectoryServer.java new file mode 100644 index 00000000..7d443509 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/DirectoryServer.java @@ -0,0 +1,22 @@ +package com.subgraph.orchid; + +import java.util.List; + +import com.subgraph.orchid.data.HexDigest; + +/** + * Represents a directory authority server or a directory cache. 
+ */ +public interface DirectoryServer extends Router { + int getDirectoryPort(); + boolean isV2Authority(); + boolean isV3Authority(); + HexDigest getV3Identity(); + boolean isHiddenServiceAuthority(); + boolean isBridgeAuthority(); + boolean isExtraInfoCache(); + + KeyCertificate getCertificateByFingerprint(HexDigest fingerprint); + List getCertificates(); + void addCertificate(KeyCertificate certificate); +} diff --git a/orchid/src/com/subgraph/orchid/DirectoryStore.java b/orchid/src/com/subgraph/orchid/DirectoryStore.java new file mode 100644 index 00000000..fa9e02b0 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/DirectoryStore.java @@ -0,0 +1,36 @@ +package com.subgraph.orchid; + +import java.nio.ByteBuffer; +import java.util.List; + +public interface DirectoryStore { + enum CacheFile { + CERTIFICATES("certificates"), + CONSENSUS("consensus"), + CONSENSUS_MICRODESC("consensus-microdesc"), + MICRODESCRIPTOR_CACHE("cached-microdescs"), + MICRODESCRIPTOR_JOURNAL("cached-microdescs.new"), + DESCRIPTOR_CACHE("cached-descriptors"), + DESCRIPTOR_JOURNAL("cached-descriptors.new"), + STATE("state"); + + final private String filename; + + CacheFile(String filename) { + this.filename = filename; + } + + public String getFilename() { + return filename; + } + } + + ByteBuffer loadCacheFile(CacheFile cacheFile); + void writeData(CacheFile cacheFile, ByteBuffer data); + void writeDocument(CacheFile cacheFile, Document document); + void writeDocumentList(CacheFile cacheFile, List documents); + void appendDocumentList(CacheFile cacheFile, List documents); + + void removeCacheFile(CacheFile cacheFile); + void removeAllCacheFiles(); +} diff --git a/orchid/src/com/subgraph/orchid/Document.java b/orchid/src/com/subgraph/orchid/Document.java new file mode 100644 index 00000000..92e2bfaa --- /dev/null +++ b/orchid/src/com/subgraph/orchid/Document.java @@ -0,0 +1,9 @@ +package com.subgraph.orchid; + +import java.nio.ByteBuffer; + +public interface Document { + ByteBuffer getRawDocumentBytes(); + String getRawDocumentData(); + boolean isValidDocument(); +} diff --git a/orchid/src/com/subgraph/orchid/ExitCircuit.java b/orchid/src/com/subgraph/orchid/ExitCircuit.java new file mode 100644 index 00000000..2178f13a --- /dev/null +++ b/orchid/src/com/subgraph/orchid/ExitCircuit.java @@ -0,0 +1,50 @@ +package com.subgraph.orchid; + +import java.util.concurrent.TimeoutException; + +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.exitpolicy.ExitTarget; + +public interface ExitCircuit extends Circuit { + + /** + * Open an exit stream from the final node in this circuit to the + * specified target address and port. + * + * @param address The network address of the exit target. + * @param port The port of the exit target. + * @return The status response returned by trying to open the stream. + */ + Stream openExitStream(IPv4Address address, int port, long timeout) throws InterruptedException, TimeoutException, StreamConnectFailedException; + + /** + * Open an exit stream from the final node in this circuit to the + * specified target hostname and port. + * + * @param hostname The network hostname of the exit target. + * @param port The port of the exit target. + * @return The status response returned by trying to open the stream. 
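+ * <p>
+ * Illustrative sketch combined with the port check declared below (host, port and
+ * timeout are placeholders):
+ * <pre>{@code
+ * if (exitCircuit.canHandleExitToPort(443)) {
+ *     Stream stream = exitCircuit.openExitStream("example.com", 443, 15000);
+ * }
+ * }</pre>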
+ */ + Stream openExitStream(String hostname, int port, long timeout) throws InterruptedException, TimeoutException, StreamConnectFailedException; + + /** + * Return true if the final node of this circuit is believed to be able to connect to + * the specified ExitTarget. Returns false if the target destination is + * not permitted by the exit policy of the final node in this circuit or if the target + * has been previously recorded to have failed through this circuit. + * + * @param target The exit destination. + * @return Return true if is likely that the final node of this circuit can connect to the specified exit target. + */ + boolean canHandleExitTo(ExitTarget target); + + boolean canHandleExitToPort(int port); + /** + * Records the specified ExitTarget as a failed connection so that {@link #canHandleExitTo(ExitTarget)} will + * no longer return true for this exit destination. + * + * @param target The ExitTarget to which a connection has failed through this circuit. + */ + public void recordFailedExitTarget(ExitTarget target); + +} diff --git a/orchid/src/com/subgraph/orchid/GuardEntry.java b/orchid/src/com/subgraph/orchid/GuardEntry.java new file mode 100644 index 00000000..efed1405 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/GuardEntry.java @@ -0,0 +1,18 @@ +package com.subgraph.orchid; + +import java.util.Date; + +public interface GuardEntry { + boolean isAdded(); + void markAsDown(); + void clearDownSince(); + String getNickname(); + String getIdentity(); + String getVersion(); + Date getCreatedTime(); + Date getDownSince(); + Date getLastConnectAttempt(); + Date getUnlistedSince(); + boolean testCurrentlyUsable(); + Router getRouterForEntry(); +} diff --git a/orchid/src/com/subgraph/orchid/HiddenServiceCircuit.java b/orchid/src/com/subgraph/orchid/HiddenServiceCircuit.java new file mode 100644 index 00000000..83b057ad --- /dev/null +++ b/orchid/src/com/subgraph/orchid/HiddenServiceCircuit.java @@ -0,0 +1,8 @@ +package com.subgraph.orchid; + +import java.util.concurrent.TimeoutException; + + +public interface HiddenServiceCircuit extends Circuit { + Stream openStream(int port, long timeout) throws InterruptedException, TimeoutException, StreamConnectFailedException; +} diff --git a/orchid/src/com/subgraph/orchid/InternalCircuit.java b/orchid/src/com/subgraph/orchid/InternalCircuit.java new file mode 100644 index 00000000..39975f24 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/InternalCircuit.java @@ -0,0 +1,7 @@ +package com.subgraph.orchid; + +public interface InternalCircuit extends Circuit { + DirectoryCircuit cannibalizeToDirectory(Router target); + Circuit cannibalizeToIntroductionPoint(Router target); + HiddenServiceCircuit connectHiddenService(CircuitNode node); +} diff --git a/orchid/src/com/subgraph/orchid/KeyCertificate.java b/orchid/src/com/subgraph/orchid/KeyCertificate.java new file mode 100644 index 00000000..f15b47bc --- /dev/null +++ b/orchid/src/com/subgraph/orchid/KeyCertificate.java @@ -0,0 +1,78 @@ +package com.subgraph.orchid; + +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.Timestamp; + +/** + * This class represents a key certificate document as specified in + * dir-spec.txt (section 3.1). These documents are published by + * directory authorities and bind a long-term identity key to a + * more temporary signing key. 
+ */ +public interface KeyCertificate extends Document { + /** + * Return the network address of this directory authority + * or null if no address was specified in the certificate. + * + * @return The network address of the directory authority this certificate + * belongs to, or null if not available. + */ + IPv4Address getDirectoryAddress(); + + /** + * Return the port on which this directory authority answers + * directory requests or 0 if no port was specified in the certificate. + * + * @return The port of this directory authority listens on or 0 if + * no port was specified in the certificate. + */ + int getDirectoryPort(); + + /** + * Return fingerprint of the authority identity key as specified in + * the certificate. + * + * @return The authority identity key fingerprint. + */ + HexDigest getAuthorityFingerprint(); + + /** + * Return the authority identity public key from the certificate. + * + * @return The authority identity public key. + */ + TorPublicKey getAuthorityIdentityKey(); + + /** + * Return the authority signing public key from the certificate. + * + * @return The authority signing public key. + */ + TorPublicKey getAuthoritySigningKey(); + + /** + * Return the time when this document and corresponding keys were + * generated. + * + * @return The time this document was generated and published. + */ + Timestamp getKeyPublishedTime(); + + /** + * Return the time after which this document and signing key are + * no longer valid. + * + * @return The expiry time of this document and signing key. + */ + Timestamp getKeyExpiryTime(); + + /** + * Return true if the current time is past the key + * expiry time of this certificate. + * + * @return True if this certificate is currently expired. + */ + boolean isExpired(); +} diff --git a/orchid/src/com/subgraph/orchid/OpenFailedException.java b/orchid/src/com/subgraph/orchid/OpenFailedException.java new file mode 100644 index 00000000..4eb1fc47 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/OpenFailedException.java @@ -0,0 +1,13 @@ +package com.subgraph.orchid; + +public class OpenFailedException extends Exception { + + private static final long serialVersionUID = 1989001056577214666L; + + public OpenFailedException() { + } + + public OpenFailedException(String message) { + super(message); + } +} diff --git a/orchid/src/com/subgraph/orchid/RelayCell.java b/orchid/src/com/subgraph/orchid/RelayCell.java new file mode 100644 index 00000000..632b9cce --- /dev/null +++ b/orchid/src/com/subgraph/orchid/RelayCell.java @@ -0,0 +1,65 @@ +package com.subgraph.orchid; + +import java.nio.ByteBuffer; + + + +public interface RelayCell extends Cell { + + final static int LENGTH_OFFSET = 12; + final static int RECOGNIZED_OFFSET = 4; + final static int DIGEST_OFFSET = 8; + final static int HEADER_SIZE = 14; + + final static int RELAY_BEGIN = 1; + final static int RELAY_DATA = 2; + final static int RELAY_END = 3; + final static int RELAY_CONNECTED = 4; + final static int RELAY_SENDME = 5; + final static int RELAY_EXTEND = 6; + final static int RELAY_EXTENDED = 7; + final static int RELAY_TRUNCATE = 8; + final static int RELAY_TRUNCATED = 9; + final static int RELAY_DROP = 10; + final static int RELAY_RESOLVE = 11; + final static int RELAY_RESOLVED = 12; + final static int RELAY_BEGIN_DIR = 13; + final static int RELAY_EXTEND2 = 14; + final static int RELAY_EXTENDED2 = 15; + + final static int RELAY_COMMAND_ESTABLISH_INTRO = 32; + final static int RELAY_COMMAND_ESTABLISH_RENDEZVOUS = 33; + final static int RELAY_COMMAND_INTRODUCE1 = 34; + 
final static int RELAY_COMMAND_INTRODUCE2 = 35; + final static int RELAY_COMMAND_RENDEZVOUS1 = 36; + final static int RELAY_COMMAND_RENDEZVOUS2 = 37; + final static int RELAY_COMMAND_INTRO_ESTABLISHED = 38; + final static int RELAY_COMMAND_RENDEZVOUS_ESTABLISHED = 39; + final static int RELAY_COMMAND_INTRODUCE_ACK = 40; + + final static int REASON_MISC = 1; + final static int REASON_RESOLVEFAILED = 2; + final static int REASON_CONNECTREFUSED = 3; + final static int REASON_EXITPOLICY = 4; + final static int REASON_DESTROY = 5; + final static int REASON_DONE = 6; + final static int REASON_TIMEOUT = 7; + final static int REASON_NOROUTE = 8; + final static int REASON_HIBERNATING = 9; + final static int REASON_INTERNAL = 10; + final static int REASON_RESOURCELIMIT = 11; + final static int REASON_CONNRESET = 12; + final static int REASON_TORPROTOCOL = 13; + final static int REASON_NOTDIRECTORY = 14; + + int getStreamId(); + int getRelayCommand(); + /** + * Return the circuit node this cell was received from for outgoing cells or the destination circuit node + * for outgoing cells. + */ + CircuitNode getCircuitNode(); + ByteBuffer getPayloadBuffer(); + void setLength(); + void setDigest(byte[] digest); +} diff --git a/orchid/src/com/subgraph/orchid/Revision.java b/orchid/src/com/subgraph/orchid/Revision.java new file mode 100644 index 00000000..6dc6a5c9 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/Revision.java @@ -0,0 +1,35 @@ +package com.subgraph.orchid; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; + +public class Revision { + private final static String REVISION_FILE_PATH = "/build-revision"; + + public static String getBuildRevision() { + final InputStream input = tryResourceOpen(); + if(input == null) { + return ""; + } + try { + return readFirstLine(input); + } catch (IOException e) { + return ""; + } + } + + private static InputStream tryResourceOpen() { + return Revision.class.getResourceAsStream(REVISION_FILE_PATH); + } + + private static String readFirstLine(InputStream input) throws IOException { + try { + final BufferedReader reader = new BufferedReader(new InputStreamReader(input)); + return reader.readLine(); + } finally { + input.close(); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/Router.java b/orchid/src/com/subgraph/orchid/Router.java new file mode 100644 index 00000000..fcccd926 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/Router.java @@ -0,0 +1,47 @@ +package com.subgraph.orchid; + +import java.util.Set; + +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; + +public interface Router { + + String getNickname(); + String getCountryCode(); + IPv4Address getAddress(); + int getOnionPort(); + int getDirectoryPort(); + TorPublicKey getIdentityKey(); + HexDigest getIdentityHash(); + boolean isDescriptorDownloadable(); + + String getVersion(); + Descriptor getCurrentDescriptor(); + HexDigest getDescriptorDigest(); + HexDigest getMicrodescriptorDigest(); + + TorPublicKey getOnionKey(); + byte[] getNTorOnionKey(); + + boolean hasBandwidth(); + int getEstimatedBandwidth(); + int getMeasuredBandwidth(); + + Set getFamilyMembers(); + int getAverageBandwidth(); + int getBurstBandwidth(); + int getObservedBandwidth(); + boolean isHibernating(); + boolean isRunning(); + boolean isValid(); + boolean isBadExit(); + boolean isPossibleGuard(); + boolean isExit(); + boolean isFast(); + boolean isStable(); + 
boolean isHSDirectory(); + boolean exitPolicyAccepts(IPv4Address address, int port); + boolean exitPolicyAccepts(int port); +} diff --git a/orchid/src/com/subgraph/orchid/RouterDescriptor.java b/orchid/src/com/subgraph/orchid/RouterDescriptor.java new file mode 100644 index 00000000..89085e46 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/RouterDescriptor.java @@ -0,0 +1,164 @@ +package com.subgraph.orchid; + +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.Timestamp; +import com.subgraph.orchid.data.exitpolicy.ExitPolicy; + +/** + * Directory information about a single onion router. This interface + * provides access to the fields of a router descriptor document which + * has been published through to Tor directory system. + */ +public interface RouterDescriptor extends Descriptor { + /** + * Returns the nickname of this router. + * + * @return The nickname of this router. + */ + String getNickname(); + + + /** + * Return the port on which this router provides directory related + * HTTP connections, or 0 if this node does not provide directory + * services. + * + * @return The directory service port, or 0 if not a directory server. + */ + int getDirectoryPort(); + + /** + * Returns the volume of traffic in bytes per second that this router + * is willing to sustain over long periods. + * + * @return The average bandwidth of this router in bytes per second. + */ + int getAverageBandwidth(); + + /** + * Returns the volume of traffic in bytes per second that this router + * is willing to sustain in very short intervals. + * + * @return The burst bandwidth of this router in bytes per second. + */ + int getBurstBandwidth(); + + /** + * Returns the volume of traffic in bytes per second that this router + * is estimated to be able to sustain. + * + * @return The observed bandwidth capacity of this router in bytes per second. + */ + int getObservedBandwidth(); + + /** + * Return a human-readable string describing the system on which this router + * is running, including possibly the operating system version and Tor + * implementation version. + * + * @return A string describing the platform this router is running on. + */ + String getPlatform(); + + /** + * Return the time this descriptor was generated. + * + * @return The time this descriptor was generated. + */ + Timestamp getPublishedTime(); + + /** + * Return a fingerprint of the public key of this router. The fingerprint + * is an optional field, so this method may return null if the descriptor + * of the router did not include the 'fingerprint' field. + * + * @return The fingerprint of this router, or null if no fingerprint is available. + */ + HexDigest getFingerprint(); + + /** + * Return the number of seconds this router has been running. + * + * @return The number of seconds this router has been running. + */ + int getUptime(); + + /** + * Return the long-term identity and signing public key for this + * router. + * + * @return The long-term identity and signing public key for this router. + */ + TorPublicKey getIdentityKey(); + + /** + * Return a string which describes how to contact the server's administrator. + * This is an optional field, so this method will return null if the descriptor + * of this router did not include the 'contact' field. + * + * @return The contact information for this router, or null if not available. 
+ */ + String getContact(); + + /** + * Return true if this router is currently hibernating and not suitable for + * building new circuits. + * + * @return True if this router is currently hibernating. + */ + boolean isHibernating(); + + /** + * Returns true if this router stores and serves hidden service descriptors. + * + * @return True if this router is a hidden service directory. + */ + boolean isHiddenServiceDirectory(); + + /** + * Return true if this router is running a version of Tor which supports the + * newer enhanced DNS logic. If false, this router should be used for reverse + * hostname lookups. + * + * @return True if this router supports newer enhanced DNS logic. + */ + boolean supportsEventDNS(); + + /** + * Returns true if this router is a directory cache that provides extra-info + * documents. + * + * @return True if this router provides an extra-info document directory service. + */ + boolean cachesExtraInfo(); + + /** + * Return a digest of this router's extra-info document, or null if not + * available. This is an optional field and will only be present if the + * 'extra-info-digest' field was present in the original router descriptor. + * + * @return The digest of the router extra-info-document, or null if not available. + */ + HexDigest getExtraInfoDigest(); + + /** + * Return true if this router allows single-hop circuits to make exit connections. + * + * @return True if this router allows single-hop circuits to make exit connections. + */ + boolean allowsSingleHopExits(); + + /** + * Compare two router descriptors and return true if this router descriptor was published + * at a later time than the other descriptor. + * + * @param other Another router descriptor to compare. + * @return True if this descriptor was published later than other + */ + boolean isNewerThan(RouterDescriptor other); + + ExitPolicy getExitPolicy(); + + +} diff --git a/orchid/src/com/subgraph/orchid/RouterMicrodescriptor.java b/orchid/src/com/subgraph/orchid/RouterMicrodescriptor.java new file mode 100644 index 00000000..818585c6 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/RouterMicrodescriptor.java @@ -0,0 +1,6 @@ +package com.subgraph.orchid; + + +public interface RouterMicrodescriptor extends Descriptor { + +} diff --git a/orchid/src/com/subgraph/orchid/RouterStatus.java b/orchid/src/com/subgraph/orchid/RouterStatus.java new file mode 100644 index 00000000..87c1dd64 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/RouterStatus.java @@ -0,0 +1,24 @@ +package com.subgraph.orchid; + +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.Timestamp; +import com.subgraph.orchid.data.exitpolicy.ExitPorts; + +public interface RouterStatus { + String getNickname(); + HexDigest getIdentity(); + HexDigest getDescriptorDigest(); + HexDigest getMicrodescriptorDigest(); + Timestamp getPublicationTime(); + IPv4Address getAddress(); + int getRouterPort(); + boolean isDirectory(); + int getDirectoryPort(); + boolean hasFlag(String flag); + String getVersion(); + boolean hasBandwidth(); + int getEstimatedBandwidth(); + int getMeasuredBandwidth(); + ExitPorts getExitPorts(); +} diff --git a/orchid/src/com/subgraph/orchid/SocksPortListener.java b/orchid/src/com/subgraph/orchid/SocksPortListener.java new file mode 100644 index 00000000..dd8a4d5a --- /dev/null +++ b/orchid/src/com/subgraph/orchid/SocksPortListener.java @@ -0,0 +1,6 @@ +package com.subgraph.orchid; + +public interface SocksPortListener { + void addListeningPort(int 
port); + void stop(); +} diff --git a/orchid/src/com/subgraph/orchid/Stream.java b/orchid/src/com/subgraph/orchid/Stream.java new file mode 100644 index 00000000..2a4bc07e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/Stream.java @@ -0,0 +1,49 @@ +package com.subgraph.orchid; + +import java.io.InputStream; +import java.io.OutputStream; + +public interface Stream { + /** + * Returns the {@link Circuit} this stream belongs to. + * + * @return The {@link Circuit} this stream belongs to. + */ + Circuit getCircuit(); + + /** + * Returns the stream id value of this stream. + * + * @return The stream id value of this stream. + */ + int getStreamId(); + + + CircuitNode getTargetNode(); + + /** + * Close this stream. + */ + void close(); + + /** + * Returns an {@link InputStream} for sending data on this stream. + * + * @return An {@link InputStream} for transferring data on this stream. + */ + InputStream getInputStream(); + + /** + * Returns an {@link OutputStream} for receiving data from this stream. + * + * @return An {@link OutputStream} for receiving data from this stream. + */ + OutputStream getOutputStream(); + + /** + * If the circuit and stream level packaging windows are open for this stream + * this method returns immediately, otherwise it blocks until both windows are + * open or the stream is closed. + */ + void waitForSendWindow(); +} diff --git a/orchid/src/com/subgraph/orchid/StreamConnectFailedException.java b/orchid/src/com/subgraph/orchid/StreamConnectFailedException.java new file mode 100644 index 00000000..4c944a86 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/StreamConnectFailedException.java @@ -0,0 +1,35 @@ +package com.subgraph.orchid; + + +public class StreamConnectFailedException extends Exception { + + private static final long serialVersionUID = 8103571310659595097L; + private final int reason; + + public StreamConnectFailedException(int reason) { + this.reason = reason; + } + + public int getReason() { + return reason; + } + + public boolean isReasonRetryable() { + return isRetryableReason(reason); + } + + /* Copied from edge_reason_is_retriable() since this is not specified */ + private static boolean isRetryableReason(int reasonCode) { + switch(reasonCode) { + case RelayCell.REASON_HIBERNATING: + case RelayCell.REASON_RESOURCELIMIT: + case RelayCell.REASON_RESOLVEFAILED: + case RelayCell.REASON_EXITPOLICY: + case RelayCell.REASON_MISC: + case RelayCell.REASON_NOROUTE: + return true; + default: + return false; + } + } +} diff --git a/orchid/src/com/subgraph/orchid/Tor.java b/orchid/src/com/subgraph/orchid/Tor.java new file mode 100644 index 00000000..566ffe80 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/Tor.java @@ -0,0 +1,167 @@ +package com.subgraph.orchid; + +import java.lang.reflect.Proxy; +import java.nio.charset.Charset; +import java.util.logging.Logger; + +import com.subgraph.orchid.circuits.CircuitManagerImpl; +import com.subgraph.orchid.circuits.TorInitializationTracker; +import com.subgraph.orchid.config.TorConfigProxy; +import com.subgraph.orchid.connections.ConnectionCacheImpl; +import com.subgraph.orchid.directory.DirectoryImpl; +import com.subgraph.orchid.directory.downloader.DirectoryDownloaderImpl; +import com.subgraph.orchid.socks.SocksPortListenerImpl; + +/** + * The Tor class is a collection of static methods for instantiating + * various subsystem modules. 
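+ * <p>
+ * These factory methods are normally combined the way {@link TorClient} wires them
+ * together; a condensed sketch of that wiring (passing null selects the default
+ * directory store):
+ * <pre>{@code
+ * TorConfig config = Tor.createConfig();
+ * TorInitializationTracker tracker = Tor.createInitalizationTracker();
+ * Directory directory = Tor.createDirectory(config, null);
+ * ConnectionCache connectionCache = Tor.createConnectionCache(config, tracker);
+ * DirectoryDownloaderImpl downloader = Tor.createDirectoryDownloader(config, tracker);
+ * CircuitManager circuitManager = Tor.createCircuitManager(config, downloader, directory, connectionCache, tracker);
+ * SocksPortListener socksListener = Tor.createSocksPortListener(config, circuitManager);
+ * }</pre>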
+ */ +public class Tor { + private final static Logger logger = Logger.getLogger(Tor.class.getName()); + + public final static int BOOTSTRAP_STATUS_STARTING = 0; + public final static int BOOTSTRAP_STATUS_CONN_DIR = 5; + public final static int BOOTSTRAP_STATUS_HANDSHAKE_DIR = 10; + public final static int BOOTSTRAP_STATUS_ONEHOP_CREATE = 15; + public final static int BOOTSTRAP_STATUS_REQUESTING_STATUS = 20; + public final static int BOOTSTRAP_STATUS_LOADING_STATUS = 25; + public final static int BOOTSTRAP_STATUS_REQUESTING_KEYS = 35; + public final static int BOOTSTRAP_STATUS_LOADING_KEYS = 40; + public final static int BOOTSTRAP_STATUS_REQUESTING_DESCRIPTORS = 45; + public final static int BOOTSTRAP_STATUS_LOADING_DESCRIPTORS = 50; + public final static int BOOTSTRAP_STATUS_CONN_OR = 80; + public final static int BOOTSTRAP_STATUS_HANDSHAKE_OR = 85; + public final static int BOOTSTRAP_STATUS_CIRCUIT_CREATE = 90; + public final static int BOOTSTRAP_STATUS_DONE = 100; + + + private final static String implementation = "Orchid"; + private final static String version = "1.0.0"; + + private final static Charset defaultCharset = createDefaultCharset(); + + private static Charset createDefaultCharset() { + return Charset.forName("ISO-8859-1"); + } + + public static Charset getDefaultCharset() { + return defaultCharset; + } + + public static String getBuildRevision() { + return Revision.getBuildRevision(); + } + + public static String getImplementation() { + return implementation; + } + + public static String getFullVersion() { + final String revision = getBuildRevision(); + if(revision == null || revision.isEmpty()) { + return getVersion(); + } else { + return getVersion() + "." + revision; + } + } + + /** + * Return a string describing the version of this software. + * + * @return A string representation of the software version. + */ + public static String getVersion() { + return version; + } + + /** + * Determine if running on Android by inspecting java.runtime.name property. + * + * @return True if running on Android. + */ + public static boolean isAndroidRuntime() { + final String runtime = System.getProperty("java.runtime.name"); + return runtime != null && runtime.equals("Android Runtime"); + } + + /** + * Create and return a new TorConfig instance. + * + * @param logManager This is a required dependency. You must create a LogManager + * before calling this method to create a TorConfig + * @return A new TorConfig instance. + * @see TorConfig + */ + static public TorConfig createConfig() { + final TorConfig config = (TorConfig) Proxy.newProxyInstance(TorConfigProxy.class.getClassLoader(), new Class[] { TorConfig.class }, new TorConfigProxy()); + if(isAndroidRuntime()) { + logger.warning("Android Runtime detected, disabling V2 Link protocol"); + config.setHandshakeV2Enabled(false); + } + return config; + } + + static public TorInitializationTracker createInitalizationTracker() { + return new TorInitializationTracker(); + } + + /** + * Create and return a new Directory instance. + * + * @param logManager This is a required dependency. You must create a LogManager + * before creating a Directory. + * @param config This is a required dependency. You must create a TorConfig before + * calling this method to create a Directory + * @return A new Directory instance. 
+ * @see Directory + */ + static public Directory createDirectory(TorConfig config, DirectoryStore customDirectoryStore) { + return new DirectoryImpl(config, customDirectoryStore); + } + + static public ConnectionCache createConnectionCache(TorConfig config, TorInitializationTracker tracker) { + return new ConnectionCacheImpl(config, tracker); + } + /** + * Create and return a new CircuitManager instance. + * + * @return A new CircuitManager instance. + * @see CircuitManager + */ + static public CircuitManager createCircuitManager(TorConfig config, DirectoryDownloaderImpl directoryDownloader, Directory directory, ConnectionCache connectionCache, TorInitializationTracker tracker) { + return new CircuitManagerImpl(config, directoryDownloader, directory, connectionCache, tracker); + } + + /** + * Create and return a new SocksPortListener instance. + * + * @param logManager This is a required dependency. You must create a LogManager + * before calling this method to create a SocksPortListener. + * @param circuitManager This is a required dependency. You must create a CircuitManager + * before calling this method to create a SocksPortListener. + * @return A new SocksPortListener instance. + * @see SocksPortListener + */ + static public SocksPortListener createSocksPortListener(TorConfig config, CircuitManager circuitManager) { + return new SocksPortListenerImpl(config, circuitManager); + } + + /** + * Create and return a new DirectoryDownloader instance. + * + * @param logManager This is a required dependency. You must create a LogManager + * before calling this method to create a DirectoryDownloader. + + * @param directory This is a required dependency. You must create a Directory + * before calling this method to create a DirectoryDownloader + * + * @param circuitManager This is a required dependency. You must create a CircuitManager + * before calling this method to create a DirectoryDownloader. + * + * @return A new DirectoryDownloader instance. + * @see DirectoryDownloaderImpl + */ + static public DirectoryDownloaderImpl createDirectoryDownloader(TorConfig config, TorInitializationTracker initializationTracker) { + return new DirectoryDownloaderImpl(config, initializationTracker); + } +} diff --git a/orchid/src/com/subgraph/orchid/TorClient.java b/orchid/src/com/subgraph/orchid/TorClient.java new file mode 100644 index 00000000..c87ae135 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/TorClient.java @@ -0,0 +1,217 @@ +package com.subgraph.orchid; + +import java.security.NoSuchAlgorithmException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.logging.Level; +import java.util.logging.Logger; + +import javax.crypto.Cipher; +import javax.net.SocketFactory; + +import com.subgraph.orchid.circuits.TorInitializationTracker; +import com.subgraph.orchid.crypto.PRNGFixes; +import com.subgraph.orchid.dashboard.Dashboard; +import com.subgraph.orchid.directory.downloader.DirectoryDownloaderImpl; +import com.subgraph.orchid.sockets.OrchidSocketFactory; + +/** + * This class is the main entry-point for running a Tor proxy + * or client. 
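+ *
+ * <p>
+ * Minimal usage sketch (illustrative only; checked exceptions are omitted and
+ * "example.com" is a placeholder destination):
+ * <pre>
+ *   TorClient client = new TorClient();
+ *   client.start();
+ *   client.waitUntilReady(60000);                // block until bootstrap completes (60s timeout)
+ *   Stream stream = client.openExitStreamTo("example.com", 80);
+ *   // write the request via stream.getOutputStream(), read the reply via stream.getInputStream()
+ *   stream.close();
+ *   client.stop();
+ * </pre>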
+ */ +public class TorClient { + private final static Logger logger = Logger.getLogger(TorClient.class.getName()); + private final TorConfig config; + private final Directory directory; + private final TorInitializationTracker initializationTracker; + private final ConnectionCache connectionCache; + private final CircuitManager circuitManager; + private final SocksPortListener socksListener; + private final DirectoryDownloaderImpl directoryDownloader; + private final Dashboard dashboard; + + private boolean isStarted = false; + private boolean isStopped = false; + + private final CountDownLatch readyLatch; + + public TorClient() { + this(null); + } + + public TorClient(DirectoryStore customDirectoryStore) { + if(Tor.isAndroidRuntime()) { + PRNGFixes.apply(); + } + config = Tor.createConfig(); + directory = Tor.createDirectory(config, customDirectoryStore); + initializationTracker = Tor.createInitalizationTracker(); + initializationTracker.addListener(createReadyFlagInitializationListener()); + connectionCache = Tor.createConnectionCache(config, initializationTracker); + directoryDownloader = Tor.createDirectoryDownloader(config, initializationTracker); + circuitManager = Tor.createCircuitManager(config, directoryDownloader, directory, connectionCache, initializationTracker); + socksListener = Tor.createSocksPortListener(config, circuitManager); + readyLatch = new CountDownLatch(1); + dashboard = new Dashboard(); + dashboard.addRenderables(circuitManager, directoryDownloader, socksListener); + } + + public TorConfig getConfig() { + return config; + } + + public SocketFactory getSocketFactory() { + return new OrchidSocketFactory(this); + } + + /** + * Start running the Tor client service. + */ + public synchronized void start() { + if(isStarted) { + return; + } + if(isStopped) { + throw new IllegalStateException("Cannot restart a TorClient instance. 
Create a new instance instead."); + } + logger.info("Starting Orchid (version: "+ Tor.getFullVersion() +")"); + verifyUnlimitedStrengthPolicyInstalled(); + directoryDownloader.start(directory); + circuitManager.startBuildingCircuits(); + if(dashboard.isEnabledByProperty()) { + dashboard.startListening(); + } + isStarted = true; + } + + public synchronized void stop() { + if(!isStarted || isStopped) { + return; + } + try { + socksListener.stop(); + if(dashboard.isListening()) { + dashboard.stopListening(); + } + directoryDownloader.stop(); + circuitManager.stopBuildingCircuits(true); + directory.close(); + connectionCache.close(); + } catch (Exception e) { + logger.log(Level.WARNING, "Unexpected exception while shutting down TorClient instance: "+ e, e); + } finally { + isStopped = true; + } + } + + public Directory getDirectory() { + return directory; + } + + public ConnectionCache getConnectionCache() { + return connectionCache; + } + + public CircuitManager getCircuitManager() { + return circuitManager; + } + + public void waitUntilReady() throws InterruptedException { + readyLatch.await(); + } + + public void waitUntilReady(long timeout) throws InterruptedException, TimeoutException { + if(!readyLatch.await(timeout, TimeUnit.MILLISECONDS)) { + throw new TimeoutException(); + } + } + + public Stream openExitStreamTo(String hostname, int port) throws InterruptedException, TimeoutException, OpenFailedException { + ensureStarted(); + return circuitManager.openExitStreamTo(hostname, port); + } + + private synchronized void ensureStarted() { + if(!isStarted) { + throw new IllegalStateException("Must call start() first"); + } + } + + public void enableSocksListener(int port) { + socksListener.addListeningPort(port); + } + + public void enableSocksListener() { + enableSocksListener(9150); + } + + public void enableDashboard() { + if(!dashboard.isListening()) { + dashboard.startListening(); + } + } + + public void enableDashboard(int port) { + dashboard.setListeningPort(port); + enableDashboard(); + } + + public void disableDashboard() { + if(dashboard.isListening()) { + dashboard.stopListening(); + } + } + + public void addInitializationListener(TorInitializationListener listener) { + initializationTracker.addListener(listener); + } + + public void removeInitializationListener(TorInitializationListener listener) { + initializationTracker.removeListener(listener); + } + + private TorInitializationListener createReadyFlagInitializationListener() { + return new TorInitializationListener() { + public void initializationProgress(String message, int percent) {} + public void initializationCompleted() { + readyLatch.countDown(); + } + }; + } + + public static void main(String[] args) { + final TorClient client = new TorClient(); + client.addInitializationListener(createInitalizationListner()); + client.start(); + client.enableSocksListener(); + } + + private static TorInitializationListener createInitalizationListner() { + return new TorInitializationListener() { + + public void initializationProgress(String message, int percent) { + System.out.println(">>> [ "+ percent + "% ]: "+ message); + } + + public void initializationCompleted() { + System.out.println("Tor is ready to go!"); + } + }; + } + + private void verifyUnlimitedStrengthPolicyInstalled() { + try { + if(Cipher.getMaxAllowedKeyLength("AES") < 256) { + final String message = "Unlimited Strength Jurisdiction Policy Files are required but not installed."; + logger.severe(message); + throw new TorException(message); + } + } catch 
(NoSuchAlgorithmException e) { + logger.log(Level.SEVERE, "No AES provider found"); + throw new TorException(e); + } catch (NoSuchMethodError e) { + logger.info("Skipped check for Unlimited Strength Jurisdiction Policy Files"); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/TorConfig.java b/orchid/src/com/subgraph/orchid/TorConfig.java new file mode 100644 index 00000000..83971330 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/TorConfig.java @@ -0,0 +1,151 @@ +package com.subgraph.orchid; + +import java.io.File; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import com.subgraph.orchid.circuits.hs.HSDescriptorCookie; +import com.subgraph.orchid.config.TorConfigBridgeLine; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; + + +public interface TorConfig { + + @ConfigVar(type=ConfigVarType.PATH, defaultValue="~/.orchid") + File getDataDirectory(); + void setDataDirectory(File directory); + + @ConfigVar(type=ConfigVarType.INTERVAL, defaultValue="60 seconds") + long getCircuitBuildTimeout(); + void setCircuitBuildTimeout(long time, TimeUnit unit); + + @ConfigVar(type=ConfigVarType.INTERVAL, defaultValue="0") + long getCircuitStreamTimeout(); + void setCircuitStreamTimeout(long time, TimeUnit unit); + + @ConfigVar(type=ConfigVarType.INTERVAL, defaultValue="1 hour") + long getCircuitIdleTimeout(); + void setCircuitIdleTimeout(long time, TimeUnit unit); + + @ConfigVar(type=ConfigVarType.INTERVAL, defaultValue="30 seconds") + long getNewCircuitPeriod(); + void setNewCircuitPeriod(long time, TimeUnit unit); + + @ConfigVar(type=ConfigVarType.INTERVAL, defaultValue="10 minutes") + long getMaxCircuitDirtiness(); + void setMaxCircuitDirtiness(long time, TimeUnit unit); + + + @ConfigVar(type=ConfigVarType.INTEGER, defaultValue="32") + int getMaxClientCircuitsPending(); + void setMaxClientCircuitsPending(int value); + + @ConfigVar(type=ConfigVarType.BOOLEAN, defaultValue="true") + boolean getEnforceDistinctSubnets(); + void setEnforceDistinctSubnets(boolean value); + + @ConfigVar(type=ConfigVarType.INTERVAL, defaultValue="2 minutes") + long getSocksTimeout(); + void setSocksTimeout(long value); + + @ConfigVar(type=ConfigVarType.INTEGER, defaultValue="3") + int getNumEntryGuards(); + void setNumEntryGuards(int value); + + @ConfigVar(type=ConfigVarType.BOOLEAN, defaultValue="true") + boolean getUseEntryGuards(); + void setUseEntryGuards(boolean value); + + @ConfigVar(type=ConfigVarType.PORTLIST, defaultValue="21,22,706,1863,5050,5190,5222,5223,6523,6667,6697,8300") + List getLongLivedPorts(); + void setLongLivedPorts(List ports); + + @ConfigVar(type=ConfigVarType.STRINGLIST) + List getExcludeNodes(); + void setExcludeNodes(List nodes); + + @ConfigVar(type=ConfigVarType.STRINGLIST) + List getExcludeExitNodes(); + + void setExcludeExitNodes(List nodes); + + @ConfigVar(type=ConfigVarType.STRINGLIST) + List getExitNodes(); + void setExitNodes(List nodes); + + @ConfigVar(type=ConfigVarType.STRINGLIST) + List getEntryNodes(); + void setEntryNodes(List nodes); + + @ConfigVar(type=ConfigVarType.BOOLEAN, defaultValue="false") + boolean getStrictNodes(); + void setStrictNodes(boolean value); + + @ConfigVar(type=ConfigVarType.BOOLEAN, defaultValue="false") + boolean getFascistFirewall(); + void setFascistFirewall(boolean value); + + @ConfigVar(type=ConfigVarType.PORTLIST, 
defaultValue="80,443") + List getFirewallPorts(); + void setFirewallPorts(List ports); + + @ConfigVar(type=ConfigVarType.BOOLEAN, defaultValue="false") + boolean getSafeSocks(); + void setSafeSocks(boolean value); + + @ConfigVar(type=ConfigVarType.BOOLEAN, defaultValue="true") + boolean getSafeLogging(); + void setSafeLogging(boolean value); + + @ConfigVar(type=ConfigVarType.BOOLEAN, defaultValue="true") + boolean getWarnUnsafeSocks(); + void setWarnUnsafeSocks(boolean value); + + @ConfigVar(type=ConfigVarType.BOOLEAN, defaultValue="true") + boolean getClientRejectInternalAddress(); + void setClientRejectInternalAddress(boolean value); + + @ConfigVar(type=ConfigVarType.BOOLEAN, defaultValue="true") + boolean getHandshakeV3Enabled(); + void setHandshakeV3Enabled(boolean value); + + @ConfigVar(type=ConfigVarType.BOOLEAN, defaultValue="true") + boolean getHandshakeV2Enabled(); + void setHandshakeV2Enabled(boolean value); + + @ConfigVar(type=ConfigVarType.HS_AUTH) + HSDescriptorCookie getHidServAuth(String key); + void addHidServAuth(String key, String value); + + @ConfigVar(type=ConfigVarType.AUTOBOOL, defaultValue="auto") + AutoBoolValue getUseNTorHandshake(); + void setUseNTorHandshake(AutoBoolValue value); + + @ConfigVar(type=ConfigVarType.AUTOBOOL, defaultValue="auto") + AutoBoolValue getUseMicrodescriptors(); + void setUseMicrodescriptors(AutoBoolValue value); + + @ConfigVar(type=ConfigVarType.BOOLEAN, defaultValue="false") + boolean getUseBridges(); + void setUseBridges(boolean value); + + @ConfigVar(type=ConfigVarType.BRIDGE_LINE) + List getBridges(); + void addBridge(IPv4Address address, int port); + void addBridge(IPv4Address address, int port, HexDigest fingerprint); + + enum ConfigVarType { INTEGER, STRING, HS_AUTH, BOOLEAN, INTERVAL, PORTLIST, STRINGLIST, PATH, AUTOBOOL, BRIDGE_LINE }; + enum AutoBoolValue { TRUE, FALSE, AUTO } + + @Retention(RetentionPolicy.RUNTIME) + @Target(ElementType.METHOD) + @interface ConfigVar { + ConfigVarType type(); + String defaultValue() default ""; + } +} \ No newline at end of file diff --git a/orchid/src/com/subgraph/orchid/TorException.java b/orchid/src/com/subgraph/orchid/TorException.java new file mode 100644 index 00000000..fd3406c0 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/TorException.java @@ -0,0 +1,22 @@ +package com.subgraph.orchid; + +public class TorException extends RuntimeException { + + private static final long serialVersionUID = 2462760291055303580L; + + public TorException() { + super(); + } + + public TorException(String message) { + super(message); + } + + public TorException(String message, Throwable ex) { + super(message, ex); + } + + public TorException(Throwable ex) { + super(ex); + } +} diff --git a/orchid/src/com/subgraph/orchid/TorInitializationListener.java b/orchid/src/com/subgraph/orchid/TorInitializationListener.java new file mode 100644 index 00000000..e36d3e72 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/TorInitializationListener.java @@ -0,0 +1,6 @@ +package com.subgraph.orchid; + +public interface TorInitializationListener { + void initializationProgress(String message, int percent); + void initializationCompleted(); +} diff --git a/orchid/src/com/subgraph/orchid/TorParsingException.java b/orchid/src/com/subgraph/orchid/TorParsingException.java new file mode 100644 index 00000000..d55e08ce --- /dev/null +++ b/orchid/src/com/subgraph/orchid/TorParsingException.java @@ -0,0 +1,14 @@ +package com.subgraph.orchid; + + +public class TorParsingException extends TorException { + public 
TorParsingException(String string) { + super(string); + } + + public TorParsingException(String string, Throwable ex) { + super(string, ex); + } + + private static final long serialVersionUID = -4997757416476363399L; +} diff --git a/orchid/src/com/subgraph/orchid/VoteAuthorityEntry.java b/orchid/src/com/subgraph/orchid/VoteAuthorityEntry.java new file mode 100644 index 00000000..5693da6b --- /dev/null +++ b/orchid/src/com/subgraph/orchid/VoteAuthorityEntry.java @@ -0,0 +1,19 @@ +package com.subgraph.orchid; + +import java.util.List; + +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.directory.consensus.DirectorySignature; + +public interface VoteAuthorityEntry { + String getNickname(); + HexDigest getIdentity(); + String getHostname(); + IPv4Address getAddress(); + int getDirectoryPort(); + int getRouterPort(); + String getContact(); + HexDigest getVoteDigest(); + List getSignatures(); +} diff --git a/orchid/src/com/subgraph/orchid/circuits/CircuitBuildTask.java b/orchid/src/com/subgraph/orchid/circuits/CircuitBuildTask.java new file mode 100644 index 00000000..2d699a56 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/CircuitBuildTask.java @@ -0,0 +1,127 @@ +package com.subgraph.orchid.circuits; + +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.Connection; +import com.subgraph.orchid.ConnectionCache; +import com.subgraph.orchid.ConnectionFailedException; +import com.subgraph.orchid.ConnectionHandshakeException; +import com.subgraph.orchid.ConnectionTimeoutException; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.circuits.path.PathSelectionFailedException; + +public class CircuitBuildTask implements Runnable { + private final static Logger logger = Logger.getLogger(CircuitBuildTask.class.getName()); + private final CircuitCreationRequest creationRequest; + private final ConnectionCache connectionCache; + private final TorInitializationTracker initializationTracker; + private final CircuitImpl circuit; + private final CircuitExtender extender; + + private Connection connection = null; + + public CircuitBuildTask(CircuitCreationRequest request, ConnectionCache connectionCache, boolean ntorEnabled) { + this(request, connectionCache, ntorEnabled, null); + } + + public CircuitBuildTask(CircuitCreationRequest request, ConnectionCache connectionCache, boolean ntorEnabled, TorInitializationTracker initializationTracker) { + this.creationRequest = request; + this.connectionCache = connectionCache; + this.initializationTracker = initializationTracker; + this.circuit = request.getCircuit(); + this.extender = new CircuitExtender(request.getCircuit(), ntorEnabled); + } + + public void run() { + Router firstRouter = null; + try { + circuit.notifyCircuitBuildStart(); + creationRequest.choosePath(); + if(logger.isLoggable(Level.FINE)) { + logger.fine("Opening a new circuit to "+ pathToString(creationRequest)); + } + firstRouter = creationRequest.getPathElement(0); + openEntryNodeConnection(firstRouter); + buildCircuit(firstRouter); + circuit.notifyCircuitBuildCompleted(); + } catch (ConnectionTimeoutException e) { + connectionFailed("Timeout connecting to "+ firstRouter); + } catch (ConnectionFailedException e) { + connectionFailed("Connection failed to "+ firstRouter + " : " + e.getMessage()); + } catch (ConnectionHandshakeException e) { + 
connectionFailed("Handshake error connecting to "+ firstRouter + " : " + e.getMessage()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + circuitBuildFailed("Circuit building thread interrupted"); + } catch(PathSelectionFailedException e) { + circuitBuildFailed(e.getMessage()); + } catch (TorException e) { + circuitBuildFailed(e.getMessage()); + } catch(Exception e) { + circuitBuildFailed("Unexpected exception: "+ e); + logger.log(Level.WARNING, "Unexpected exception while building circuit: "+ e, e); + } + } + + private String pathToString(CircuitCreationRequest ccr) { + final StringBuilder sb = new StringBuilder(); + sb.append("["); + for(Router r: ccr.getPath()) { + if(sb.length() > 1) + sb.append(","); + sb.append(r.getNickname()); + } + sb.append("]"); + return sb.toString(); + } + + private void connectionFailed(String message) { + creationRequest.connectionFailed(message); + circuit.notifyCircuitBuildFailed(); + } + + private void circuitBuildFailed(String message) { + creationRequest.circuitBuildFailed(message); + circuit.notifyCircuitBuildFailed(); + if(connection != null) { + connection.removeCircuit(circuit); + } + } + + private void openEntryNodeConnection(Router firstRouter) throws ConnectionTimeoutException, ConnectionFailedException, ConnectionHandshakeException, InterruptedException { + connection = connectionCache.getConnectionTo(firstRouter, creationRequest.isDirectoryCircuit()); + circuit.bindToConnection(connection); + creationRequest.connectionCompleted(connection); + } + + private void buildCircuit(Router firstRouter) throws TorException { + notifyInitialization(); + final CircuitNode firstNode = extender.createFastTo(firstRouter); + creationRequest.nodeAdded(firstNode); + + for(int i = 1; i < creationRequest.getPathLength(); i++) { + final CircuitNode extendedNode = extender.extendTo(creationRequest.getPathElement(i)); + creationRequest.nodeAdded(extendedNode); + } + creationRequest.circuitBuildCompleted(circuit); + notifyDone(); + } + + private void notifyInitialization() { + if(initializationTracker != null) { + final int event = creationRequest.isDirectoryCircuit() ? 
+ Tor.BOOTSTRAP_STATUS_ONEHOP_CREATE : Tor.BOOTSTRAP_STATUS_CIRCUIT_CREATE; + initializationTracker.notifyEvent(event); + } + } + + private void notifyDone() { + if(initializationTracker != null && !creationRequest.isDirectoryCircuit()) { + initializationTracker.notifyEvent(Tor.BOOTSTRAP_STATUS_DONE); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/CircuitCreationRequest.java b/orchid/src/com/subgraph/orchid/circuits/CircuitCreationRequest.java new file mode 100644 index 00000000..59d0124b --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/CircuitCreationRequest.java @@ -0,0 +1,91 @@ +package com.subgraph.orchid.circuits; + +import java.util.Collections; +import java.util.List; + +import com.subgraph.orchid.Circuit; +import com.subgraph.orchid.CircuitBuildHandler; +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.Connection; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.circuits.path.CircuitPathChooser; +import com.subgraph.orchid.circuits.path.PathSelectionFailedException; + +public class CircuitCreationRequest implements CircuitBuildHandler { + private final CircuitImpl circuit; + private final CircuitPathChooser pathChooser; + private final CircuitBuildHandler buildHandler; + private final boolean isDirectoryCircuit; + + private List path; + + public CircuitCreationRequest(CircuitPathChooser pathChooser, Circuit circuit, CircuitBuildHandler buildHandler, boolean isDirectoryCircuit) { + this.pathChooser = pathChooser; + this.circuit = (CircuitImpl) circuit; + this.buildHandler = buildHandler; + this.path = Collections.emptyList(); + this.isDirectoryCircuit = isDirectoryCircuit; + } + + void choosePath() throws InterruptedException, PathSelectionFailedException { + if(!(circuit instanceof CircuitImpl)) { + throw new IllegalArgumentException(); + } + path = ((CircuitImpl)circuit).choosePath(pathChooser); + + } + + CircuitImpl getCircuit() { + return circuit; + } + + List getPath() { + return path; + } + + int getPathLength() { + return path.size(); + } + + Router getPathElement(int idx) { + return path.get(idx); + } + + CircuitBuildHandler getBuildHandler() { + return buildHandler; + } + + boolean isDirectoryCircuit() { + return isDirectoryCircuit; + } + + public void connectionCompleted(Connection connection) { + if(buildHandler != null) { + buildHandler.connectionCompleted(connection); + } + } + + public void connectionFailed(String reason) { + if(buildHandler != null) { + buildHandler.connectionFailed(reason); + } + } + + public void nodeAdded(CircuitNode node) { + if(buildHandler != null) { + buildHandler.nodeAdded(node); + } + } + + public void circuitBuildCompleted(Circuit circuit) { + if(buildHandler != null) { + buildHandler.circuitBuildCompleted(circuit); + } + } + + public void circuitBuildFailed(String reason) { + if(buildHandler != null) { + buildHandler.circuitBuildFailed(reason); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/CircuitCreationTask.java b/orchid/src/com/subgraph/orchid/circuits/CircuitCreationTask.java new file mode 100644 index 00000000..71b428a0 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/CircuitCreationTask.java @@ -0,0 +1,292 @@ +package com.subgraph.orchid.circuits; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Level; +import java.util.logging.Logger; + 
+import com.subgraph.orchid.Circuit; +import com.subgraph.orchid.CircuitBuildHandler; +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.Connection; +import com.subgraph.orchid.ConnectionCache; +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.ExitCircuit; +import com.subgraph.orchid.InternalCircuit; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.circuits.CircuitManagerImpl.CircuitFilter; +import com.subgraph.orchid.circuits.path.CircuitPathChooser; +import com.subgraph.orchid.data.exitpolicy.ExitTarget; + +public class CircuitCreationTask implements Runnable { + private final static Logger logger = Logger.getLogger(CircuitCreationTask.class.getName()); + private final static int MAX_CIRCUIT_DIRTINESS = 300; // seconds + private final static int MAX_PENDING_CIRCUITS = 4; + + private final TorConfig config; + private final Directory directory; + private final ConnectionCache connectionCache; + private final CircuitManagerImpl circuitManager; + private final TorInitializationTracker initializationTracker; + private final CircuitPathChooser pathChooser; + private final Executor executor; + private final CircuitBuildHandler buildHandler; + private final CircuitBuildHandler internalBuildHandler; + // To avoid obnoxiously printing a warning every second + private int notEnoughDirectoryInformationWarningCounter = 0; + + private final CircuitPredictor predictor; + + private final AtomicLong lastNewCircuit; + + CircuitCreationTask(TorConfig config, Directory directory, ConnectionCache connectionCache, CircuitPathChooser pathChooser, CircuitManagerImpl circuitManager, TorInitializationTracker initializationTracker) { + this.config = config; + this.directory = directory; + this.connectionCache = connectionCache; + this.circuitManager = circuitManager; + this.initializationTracker = initializationTracker; + this.pathChooser = pathChooser; + this.executor = Executors.newCachedThreadPool(); + this.buildHandler = createCircuitBuildHandler(); + this.internalBuildHandler = createInternalCircuitBuildHandler(); + this.predictor = new CircuitPredictor(); + this.lastNewCircuit = new AtomicLong(); + } + + CircuitPredictor getCircuitPredictor() { + return predictor; + } + + public void run() { + expireOldCircuits(); + assignPendingStreamsToActiveCircuits(); + checkExpiredPendingCircuits(); + checkCircuitsForCreation(); + } + + void predictPort(int port) { + predictor.addExitPortRequest(port); + } + + private void assignPendingStreamsToActiveCircuits() { + final List pendingExitStreams = circuitManager.getPendingExitStreams(); + if(pendingExitStreams.isEmpty()) + return; + + for(ExitCircuit c: circuitManager.getRandomlyOrderedListOfExitCircuits()) { + final Iterator it = pendingExitStreams.iterator(); + while(it.hasNext()) { + if(attemptHandleStreamRequest(c, it.next())) + it.remove(); + } + } + } + + private boolean attemptHandleStreamRequest(ExitCircuit c, StreamExitRequest request) { + if(c.canHandleExitTo(request)) { + if(request.reserveRequest()) { + launchExitStreamTask(c, request); + } + // else request is reserved meaning another circuit is already trying to handle it + return true; + } + return false; + } + + private void launchExitStreamTask(ExitCircuit circuit, StreamExitRequest exitRequest) { + final OpenExitStreamTask task = new OpenExitStreamTask(circuit, exitRequest); + executor.execute(task); + } + + private void expireOldCircuits() { + final Set circuits = circuitManager.getCircuitsByFilter(new 
CircuitFilter() { + + public boolean filter(Circuit circuit) { + return !circuit.isMarkedForClose() && circuit.getSecondsDirty() > MAX_CIRCUIT_DIRTINESS; + } + }); + for(Circuit c: circuits) { + logger.fine("Closing idle dirty circuit: "+ c); + ((CircuitImpl)c).markForClose(); + } + } + private void checkExpiredPendingCircuits() { + // TODO Auto-generated method stub + } + + private void checkCircuitsForCreation() { + + if(!directory.haveMinimumRouterInfo()) { + if(notEnoughDirectoryInformationWarningCounter % 20 == 0) + logger.info("Cannot build circuits because we don't have enough directory information"); + notEnoughDirectoryInformationWarningCounter++; + return; + } + + + if(lastNewCircuit.get() != 0) { + final long now = System.currentTimeMillis(); + if((now - lastNewCircuit.get()) < config.getNewCircuitPeriod()) { + // return; + } + } + + buildCircuitIfNeeded(); + maybeBuildInternalCircuit(); + } + + private void buildCircuitIfNeeded() { + final List pendingExitStreams = circuitManager.getPendingExitStreams(); + final List predictedPorts = predictor.getPredictedPortTargets(); + final List exitTargets = new ArrayList(); + for(StreamExitRequest streamRequest: pendingExitStreams) { + if(!streamRequest.isReserved() && countCircuitsSupportingTarget(streamRequest, false) == 0) { + exitTargets.add(streamRequest); + } + } + for(PredictedPortTarget ppt: predictedPorts) { + if(countCircuitsSupportingTarget(ppt, true) < 2) { + exitTargets.add(ppt); + } + } + buildCircuitToHandleExitTargets(exitTargets); + } + + private void maybeBuildInternalCircuit() { + final int needed = circuitManager.getNeededCleanCircuitCount(predictor.isInternalPredicted()); + + if(needed > 0) { + launchBuildTaskForInternalCircuit(); + } + } + + private void launchBuildTaskForInternalCircuit() { + logger.fine("Launching new internal circuit"); + final InternalCircuitImpl circuit = new InternalCircuitImpl(circuitManager); + final CircuitCreationRequest request = new CircuitCreationRequest(pathChooser, circuit, internalBuildHandler, false); + final CircuitBuildTask task = new CircuitBuildTask(request, connectionCache, circuitManager.isNtorEnabled()); + executor.execute(task); + circuitManager.incrementPendingInternalCircuitCount(); + } + + private int countCircuitsSupportingTarget(final ExitTarget target, final boolean needClean) { + final CircuitFilter filter = new CircuitFilter() { + public boolean filter(Circuit circuit) { + if(!(circuit instanceof ExitCircuit)) { + return false; + } + final ExitCircuit ec = (ExitCircuit) circuit; + final boolean pendingOrConnected = circuit.isPending() || circuit.isConnected(); + final boolean isCleanIfNeeded = !(needClean && !circuit.isClean()); + return pendingOrConnected && isCleanIfNeeded && ec.canHandleExitTo(target); + } + }; + return circuitManager.getCircuitsByFilter(filter).size(); + } + + private void buildCircuitToHandleExitTargets(List exitTargets) { + if(exitTargets.isEmpty()) { + return; + } + if(!directory.haveMinimumRouterInfo()) + return; + if(circuitManager.getPendingCircuitCount() >= MAX_PENDING_CIRCUITS) + return; + + if(logger.isLoggable(Level.FINE)) { + logger.fine("Building new circuit to handle "+ exitTargets.size() +" pending streams and predicted ports"); + } + + launchBuildTaskForTargets(exitTargets); + } + + private void launchBuildTaskForTargets(List exitTargets) { + final Router exitRouter = pathChooser.chooseExitNodeForTargets(exitTargets); + if(exitRouter == null) { + logger.warning("Failed to select suitable exit node for targets"); + return; + } + + 
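+ // Create an exit circuit ending at the chosen exit router and build it
+ // asynchronously on the shared executor via a CircuitBuildTask.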
final Circuit circuit = circuitManager.createNewExitCircuit(exitRouter); + final CircuitCreationRequest request = new CircuitCreationRequest(pathChooser, circuit, buildHandler, false); + final CircuitBuildTask task = new CircuitBuildTask(request, connectionCache, circuitManager.isNtorEnabled(), initializationTracker); + executor.execute(task); + } + + private CircuitBuildHandler createCircuitBuildHandler() { + return new CircuitBuildHandler() { + + public void circuitBuildCompleted(Circuit circuit) { + logger.fine("Circuit completed to: "+ circuit); + circuitOpenedHandler(circuit); + lastNewCircuit.set(System.currentTimeMillis()); + } + + public void circuitBuildFailed(String reason) { + logger.fine("Circuit build failed: "+ reason); + buildCircuitIfNeeded(); + } + + public void connectionCompleted(Connection connection) { + logger.finer("Circuit connection completed to "+ connection); + } + + public void connectionFailed(String reason) { + logger.fine("Circuit connection failed: "+ reason); + buildCircuitIfNeeded(); + } + + public void nodeAdded(CircuitNode node) { + logger.finer("Node added to circuit: "+ node); + } + }; + } + + private void circuitOpenedHandler(Circuit circuit) { + if(!(circuit instanceof ExitCircuit)) { + return; + } + final ExitCircuit ec = (ExitCircuit) circuit; + final List pendingExitStreams = circuitManager.getPendingExitStreams(); + for(StreamExitRequest req: pendingExitStreams) { + if(ec.canHandleExitTo(req) && req.reserveRequest()) { + launchExitStreamTask(ec, req); + } + } + } + + private CircuitBuildHandler createInternalCircuitBuildHandler() { + return new CircuitBuildHandler() { + + public void nodeAdded(CircuitNode node) { + logger.finer("Node added to internal circuit: "+ node); + } + + public void connectionFailed(String reason) { + logger.fine("Circuit connection failed: "+ reason); + circuitManager.decrementPendingInternalCircuitCount(); + } + + public void connectionCompleted(Connection connection) { + logger.finer("Circuit connection completed to "+ connection); + } + + public void circuitBuildFailed(String reason) { + logger.fine("Circuit build failed: "+ reason); + circuitManager.decrementPendingInternalCircuitCount(); + } + + public void circuitBuildCompleted(Circuit circuit) { + logger.fine("Internal circuit build completed: "+ circuit); + lastNewCircuit.set(System.currentTimeMillis()); + circuitManager.addCleanInternalCircuit((InternalCircuit) circuit); + } + }; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/CircuitExtender.java b/orchid/src/com/subgraph/orchid/circuits/CircuitExtender.java new file mode 100644 index 00000000..b6fd1e19 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/CircuitExtender.java @@ -0,0 +1,155 @@ +package com.subgraph.orchid.circuits; + +import java.util.logging.Logger; + +import com.subgraph.orchid.Cell; +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.circuits.cells.CellImpl; +import com.subgraph.orchid.circuits.cells.RelayCellImpl; +import com.subgraph.orchid.crypto.TorCreateFastKeyAgreement; +import com.subgraph.orchid.crypto.TorKeyAgreement; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.crypto.TorStreamCipher; + +public class CircuitExtender { + private final static Logger logger = Logger.getLogger(CircuitExtender.class.getName()); + + private final static int DH_BYTES = 1024 / 8; + private final static int 
PKCS1_OAEP_PADDING_OVERHEAD = 42; + private final static int CIPHER_KEY_LEN = TorStreamCipher.KEY_LEN; + final static int TAP_ONIONSKIN_LEN = PKCS1_OAEP_PADDING_OVERHEAD + CIPHER_KEY_LEN + DH_BYTES; + final static int TAP_ONIONSKIN_REPLY_LEN = DH_BYTES + TorMessageDigest.TOR_DIGEST_SIZE; + + + private final CircuitImpl circuit; + private final boolean ntorEnabled; + + + CircuitExtender(CircuitImpl circuit, boolean ntorEnabled) { + this.circuit = circuit; + this.ntorEnabled = ntorEnabled; + } + + + CircuitNode createFastTo(Router targetRouter) { + logger.fine("Creating 'fast' to "+ targetRouter); + final TorCreateFastKeyAgreement kex = new TorCreateFastKeyAgreement(); + sendCreateFastCell(kex); + return receiveAndProcessCreateFastResponse(targetRouter, kex); + } + + private void sendCreateFastCell(TorCreateFastKeyAgreement kex) { + final Cell cell = CellImpl.createCell(circuit.getCircuitId(), Cell.CREATE_FAST); + cell.putByteArray(kex.createOnionSkin()); + circuit.sendCell(cell); + } + + private CircuitNode receiveAndProcessCreateFastResponse(Router targetRouter, TorKeyAgreement kex) { + final Cell cell = circuit.receiveControlCellResponse(); + if(cell == null) { + throw new TorException("Timeout building circuit waiting for CREATE_FAST response from "+ targetRouter); + } + + return processCreatedFastCell(targetRouter, cell, kex); + } + + private CircuitNode processCreatedFastCell(Router targetRouter, Cell cell, TorKeyAgreement kex) { + final byte[] payload = new byte[TorMessageDigest.TOR_DIGEST_SIZE * 2]; + final byte[] keyMaterial = new byte[CircuitNodeCryptoState.KEY_MATERIAL_SIZE]; + final byte[] verifyHash = new byte[TorMessageDigest.TOR_DIGEST_SIZE]; + cell.getByteArray(payload); + if(!kex.deriveKeysFromHandshakeResponse(payload, keyMaterial, verifyHash)) { + // XXX + return null; + } + final CircuitNode node = CircuitNodeImpl.createFirstHop(targetRouter, keyMaterial, verifyHash); + circuit.appendNode(node); + return node; + } + + CircuitNode extendTo(Router targetRouter) { + if(circuit.getCircuitLength() == 0) { + throw new TorException("Cannot EXTEND an empty circuit"); + } + + if(useNtor(targetRouter)) { + final NTorCircuitExtender nce = new NTorCircuitExtender(this, targetRouter); + return nce.extendTo(); + } else { + final TapCircuitExtender tce = new TapCircuitExtender(this, targetRouter); + return tce.extendTo(); + } + } + + private boolean useNtor(Router targetRouter) { + return ntorEnabled && targetRouter.getNTorOnionKey() != null; + } + + private void logProtocolViolation(String sourceName, Router targetRouter) { + final String version = (targetRouter == null) ? "(none)" : targetRouter.getVersion(); + final String targetName = (targetRouter == null) ? 
"(none)" : targetRouter.getNickname(); + logger.warning("Protocol error extending circuit from ("+ sourceName +") to ("+ targetName +") [version: "+ version +"]"); + } + + private String nodeToName(CircuitNode node) { + if(node == null || node.getRouter() == null) { + return "(null)"; + } + final Router router = node.getRouter(); + return router.getNickname(); + } + + + public void sendRelayCell(RelayCell cell) { + circuit.sendRelayCell(cell); + } + + + public RelayCell receiveRelayResponse(int expectedCommand, Router extendTarget) { + final RelayCell cell = circuit.receiveRelayCell(); + if(cell == null) { + throw new TorException("Timeout building circuit"); + } + final int command = cell.getRelayCommand(); + if(command == RelayCell.RELAY_TRUNCATED) { + final int code = cell.getByte() & 0xFF; + final String msg = CellImpl.errorToDescription(code); + final String source = nodeToName(cell.getCircuitNode()); + if(code == Cell.ERROR_PROTOCOL) { + logProtocolViolation(source, extendTarget); + } + throw new TorException("Error from ("+ source +") while extending to ("+ extendTarget.getNickname() + "): "+ msg); + } else if(command != expectedCommand) { + final String expected = RelayCellImpl.commandToDescription(expectedCommand); + final String received = RelayCellImpl.commandToDescription(command); + throw new TorException("Received incorrect extend response, expecting "+ expected + " but received "+ received); + } else { + return cell; + } + } + + + public CircuitNode createNewNode(Router r, byte[] keyMaterial, byte[] verifyDigest) { + final CircuitNode node = CircuitNodeImpl.createNode(r, circuit.getFinalCircuitNode(), keyMaterial, verifyDigest); + logger.fine("Adding new circuit node for "+ r.getNickname()); + circuit.appendNode(node); + return node; + + } + + public RelayCell createRelayCell(int command) { + return new RelayCellImpl(circuit.getFinalCircuitNode(), circuit.getCircuitId(), 0, command, true); + } + + Router getFinalRouter() { + final CircuitNode node = circuit.getFinalCircuitNode(); + if(node != null) { + return node.getRouter(); + } else { + return null; + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/CircuitIO.java b/orchid/src/com/subgraph/orchid/circuits/CircuitIO.java new file mode 100644 index 00000000..e53cccac --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/CircuitIO.java @@ -0,0 +1,320 @@ +package com.subgraph.orchid.circuits; + +import java.io.IOException; +import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.subgraph.orchid.Cell; +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.Connection; +import com.subgraph.orchid.ConnectionIOException; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.circuits.cells.CellImpl; +import com.subgraph.orchid.circuits.cells.RelayCellImpl; +import com.subgraph.orchid.dashboard.DashboardRenderable; +import com.subgraph.orchid.dashboard.DashboardRenderer; + +public class CircuitIO implements DashboardRenderable { + private static final Logger logger = Logger.getLogger(CircuitIO.class.getName()); + private final static long CIRCUIT_BUILD_TIMEOUT_MS = 30 * 1000; + private final static long 
CIRCUIT_RELAY_RESPONSE_TIMEOUT = 20 * 1000; + + private final CircuitImpl circuit; + private final Connection connection; + private final int circuitId; + + private final BlockingQueue relayCellResponseQueue; + private final BlockingQueue controlCellResponseQueue; + private final Map streamMap; + private final Object relaySendLock = new Object(); + + private boolean isMarkedForClose; + private boolean isClosed; + + CircuitIO(CircuitImpl circuit, Connection connection, int circuitId) { + this.circuit = circuit; + this.connection = connection; + this.circuitId = circuitId; + + this.relayCellResponseQueue = new LinkedBlockingQueue(); + this.controlCellResponseQueue = new LinkedBlockingQueue(); + this.streamMap = new HashMap(); + } + + Connection getConnection() { + return connection; + } + + int getCircuitId() { + return circuitId; + } + + RelayCell dequeueRelayResponseCell() { + try { + final long timeout = getReceiveTimeout(); + return relayCellResponseQueue.poll(timeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } + } + + private RelayCell decryptRelayCell(Cell cell) { + for(CircuitNode node: circuit.getNodeList()) { + if(node.decryptBackwardCell(cell)) { + return RelayCellImpl.createFromCell(node, cell); + } + } + destroyCircuit(); + throw new TorException("Could not decrypt relay cell"); + } + + // Return null on timeout + Cell receiveControlCellResponse() { + try { + final long timeout = getReceiveTimeout(); + return controlCellResponseQueue.poll(timeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } + } + + + private long getReceiveTimeout() { + if(circuit.getStatus().isBuilding()) + return remainingBuildTime(); + else + return CIRCUIT_RELAY_RESPONSE_TIMEOUT; + } + + private long remainingBuildTime() { + final long elapsed = circuit.getStatus().getMillisecondsElapsedSinceCreated(); + if(elapsed == 0 || elapsed >= CIRCUIT_BUILD_TIMEOUT_MS) + return 0; + return CIRCUIT_BUILD_TIMEOUT_MS - elapsed; + } + + /* + * This is called by the cell reading thread in ConnectionImpl to deliver control cells + * associated with this circuit (CREATED, CREATED_FAST, or DESTROY). + */ + void deliverControlCell(Cell cell) { + if(cell.getCommand() == Cell.DESTROY) { + processDestroyCell(cell.getByte()); + } else { + controlCellResponseQueue.add(cell); + } + } + + private void processDestroyCell(int reason) { + logger.fine("DESTROY cell received ("+ CellImpl.errorToDescription(reason) +") on "+ circuit); + destroyCircuit(); + } + + /* This is called by the cell reading thread in ConnectionImpl to deliver RELAY cells. 
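+ * Each cell is decrypted by walking the circuit's node list (see decryptRelayCell) and is then
+ * either queued on relayCellResponseQueue for a caller blocked in dequeueRelayResponseCell(),
+ * or routed to the owning stream via processRelayDataCell().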
*/ + void deliverRelayCell(Cell cell) { + circuit.getStatus().updateDirtyTimestamp(); + final RelayCell relayCell = decryptRelayCell(cell); + logRelayCell("Dispatching: ", relayCell); + switch(relayCell.getRelayCommand()) { + case RelayCell.RELAY_EXTENDED: + case RelayCell.RELAY_EXTENDED2: + case RelayCell.RELAY_RESOLVED: + case RelayCell.RELAY_TRUNCATED: + case RelayCell.RELAY_COMMAND_RENDEZVOUS_ESTABLISHED: + case RelayCell.RELAY_COMMAND_INTRODUCE_ACK: + case RelayCell.RELAY_COMMAND_RENDEZVOUS2: + relayCellResponseQueue.add(relayCell); + break; + case RelayCell.RELAY_DATA: + case RelayCell.RELAY_END: + case RelayCell.RELAY_CONNECTED: + processRelayDataCell(relayCell); + break; + + case RelayCell.RELAY_SENDME: + if(relayCell.getStreamId() != 0) + processRelayDataCell(relayCell); + else + processCircuitSendme(relayCell); + break; + case RelayCell.RELAY_BEGIN: + case RelayCell.RELAY_BEGIN_DIR: + case RelayCell.RELAY_EXTEND: + case RelayCell.RELAY_RESOLVE: + case RelayCell.RELAY_TRUNCATE: + destroyCircuit(); + throw new TorException("Unexpected 'forward' direction relay cell type: "+ relayCell.getRelayCommand()); + } + } + + /* Runs in the context of the connection cell reading thread */ + private void processRelayDataCell(RelayCell cell) { + if(cell.getRelayCommand() == RelayCell.RELAY_DATA) { + cell.getCircuitNode().decrementDeliverWindow(); + if(cell.getCircuitNode().considerSendingSendme()) { + final RelayCell sendme = createRelayCell(RelayCell.RELAY_SENDME, 0, cell.getCircuitNode()); + sendRelayCellTo(sendme, sendme.getCircuitNode()); + } + } + + synchronized(streamMap) { + final StreamImpl stream = streamMap.get(cell.getStreamId()); + // It's not unusual for the stream to not be found. For example, if a RELAY_CONNECTED arrives after + // the client has stopped waiting for it, the stream will never be tracked and eventually the edge node + // will send a RELAY_END for this stream. 
+ if(stream != null) { + stream.addInputCell(cell); + } + } + } + + RelayCell createRelayCell(int relayCommand, int streamId, CircuitNode targetNode) { + return new RelayCellImpl(targetNode, circuitId, streamId, relayCommand); + } + + void sendRelayCellTo(RelayCell cell, CircuitNode targetNode) { + synchronized(relaySendLock) { + logRelayCell("Sending: ", cell); + cell.setLength(); + targetNode.updateForwardDigest(cell); + cell.setDigest(targetNode.getForwardDigestBytes()); + + for(CircuitNode node = targetNode; node != null; node = node.getPreviousNode()) + node.encryptForwardCell(cell); + + if(cell.getRelayCommand() == RelayCell.RELAY_DATA) + targetNode.waitForSendWindowAndDecrement(); + + sendCell(cell); + } + } + + + private void logRelayCell(String message, RelayCell cell) { + final Level level = getLogLevelForCell(cell); + if(!logger.isLoggable(level)) { + return; + } + logger.log(level, message + cell); + } + + private Level getLogLevelForCell(RelayCell cell) { + switch(cell.getRelayCommand()) { + case RelayCell.RELAY_DATA: + case RelayCell.RELAY_SENDME: + return Level.FINEST; + default: + return Level.FINER; + } + } + + void sendCell(Cell cell) { + final CircuitStatus status = circuit.getStatus(); + if(!(status.isConnected() || status.isBuilding())) + return; + try { + status.updateDirtyTimestamp(); + connection.sendCell(cell); + } catch (ConnectionIOException e) { + destroyCircuit(); + } + } + + void markForClose() { + synchronized (streamMap) { + if(isMarkedForClose) { + return; + } + isMarkedForClose = true; + if(streamMap.isEmpty()) { + closeCircuit(); + } + } + } + + boolean isMarkedForClose() { + return isMarkedForClose; + } + + private void closeCircuit() { + logger.fine("Closing circuit "+ circuit); + sendDestroyCell(Cell.ERROR_NONE); + connection.removeCircuit(circuit); + circuit.setStateDestroyed(); + isClosed = true; + } + + void sendDestroyCell(int reason) { + Cell destroy = CellImpl.createCell(circuitId, Cell.DESTROY); + destroy.putByte(reason); + try { + connection.sendCell(destroy); + } catch (ConnectionIOException e) { + logger.warning("Connection IO error sending DESTROY cell: "+ e.getMessage()); + } + } + + private void processCircuitSendme(RelayCell cell) { + cell.getCircuitNode().incrementSendWindow(); + } + + void destroyCircuit() { + synchronized(streamMap) { + if(isClosed) { + return; + } + circuit.setStateDestroyed(); + connection.removeCircuit(circuit); + final List tmpList = new ArrayList(streamMap.values()); + for(StreamImpl s: tmpList) { + s.close(); + } + isClosed = true; + } + } + + StreamImpl createNewStream(boolean autoclose) { + synchronized(streamMap) { + final int streamId = circuit.getStatus().nextStreamId(); + final StreamImpl stream = new StreamImpl(circuit, circuit.getFinalCircuitNode(), streamId, autoclose); + streamMap.put(streamId, stream); + return stream; + } + } + + void removeStream(StreamImpl stream) { + synchronized(streamMap) { + streamMap.remove(stream.getStreamId()); + if(streamMap.isEmpty() && isMarkedForClose) { + closeCircuit(); + } + } + } + + List getActiveStreams() { + synchronized (streamMap) { + return new ArrayList(streamMap.values()); + } + } + + public void dashboardRender(DashboardRenderer renderer, PrintWriter writer, int flags) throws IOException { + if((flags & DASHBOARD_STREAMS) == 0) { + return; + } + for(Stream s: getActiveStreams()) { + renderer.renderComponent(writer, flags, s); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/CircuitImpl.java 
b/orchid/src/com/subgraph/orchid/circuits/CircuitImpl.java new file mode 100644 index 00000000..7d2cf154 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/CircuitImpl.java @@ -0,0 +1,286 @@ +package com.subgraph.orchid.circuits; + +import java.io.IOException; +import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeoutException; +import java.util.logging.Logger; + +import com.subgraph.orchid.Cell; +import com.subgraph.orchid.Circuit; +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.Connection; +import com.subgraph.orchid.DirectoryCircuit; +import com.subgraph.orchid.ExitCircuit; +import com.subgraph.orchid.InternalCircuit; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.circuits.path.CircuitPathChooser; +import com.subgraph.orchid.circuits.path.PathSelectionFailedException; +import com.subgraph.orchid.dashboard.DashboardRenderable; +import com.subgraph.orchid.dashboard.DashboardRenderer; + +/** + * This class represents an established circuit through the Tor network. + * + */ +public abstract class CircuitImpl implements Circuit, DashboardRenderable { + protected final static Logger logger = Logger.getLogger(CircuitImpl.class.getName()); + + static ExitCircuit createExitCircuit(CircuitManagerImpl circuitManager, Router exitRouter) { + return new ExitCircuitImpl(circuitManager, exitRouter); + } + + static ExitCircuit createExitCircuitTo(CircuitManagerImpl circuitManager, List prechosenPath) { + return new ExitCircuitImpl(circuitManager, prechosenPath); + } + + static DirectoryCircuit createDirectoryCircuit(CircuitManagerImpl circuitManager) { + return new DirectoryCircuitImpl(circuitManager, null); + } + + static DirectoryCircuit createDirectoryCircuitTo(CircuitManagerImpl circuitManager, List prechosenPath) { + return new DirectoryCircuitImpl(circuitManager, prechosenPath); + } + + static InternalCircuit createInternalCircuitTo(CircuitManagerImpl circuitManager, List prechosenPath) { + return new InternalCircuitImpl(circuitManager, prechosenPath); + } + + private final CircuitManagerImpl circuitManager; + protected final List prechosenPath; + + private final List nodeList; + private final CircuitStatus status; + + private CircuitIO io; + + + + + + + protected CircuitImpl(CircuitManagerImpl circuitManager) { + this(circuitManager, null); + } + + protected CircuitImpl(CircuitManagerImpl circuitManager, List prechosenPath) { + nodeList = new ArrayList(); + this.circuitManager = circuitManager; + this.prechosenPath = prechosenPath; + status = new CircuitStatus(); + } + + List choosePath(CircuitPathChooser pathChooser) throws InterruptedException, PathSelectionFailedException { + if(prechosenPath != null) { + return new ArrayList(prechosenPath); + } else { + return choosePathForCircuit(pathChooser); + } + } + + protected abstract List choosePathForCircuit(CircuitPathChooser pathChooser) throws InterruptedException, PathSelectionFailedException; + + void bindToConnection(Connection connection) { + if(io != null) { + throw new IllegalStateException("Circuit already bound to a connection"); + } + final int id = connection.bindCircuit(this); + io = new CircuitIO(this, connection, id); + } + + public void markForClose() { + if(io != null) { + io.markForClose(); + } + } + + public boolean 
isMarkedForClose() { + if(io == null) { + return false; + } else { + return io.isMarkedForClose(); + } + } + + CircuitStatus getStatus() { + return status; + } + + public boolean isConnected() { + return status.isConnected(); + } + + public boolean isPending() { + return status.isBuilding(); + } + + public boolean isClean() { + return !status.isDirty(); + } + + public int getSecondsDirty() { + return (int) (status.getMillisecondsDirty() / 1000); + } + + void notifyCircuitBuildStart() { + if(!status.isUnconnected()) { + throw new IllegalStateException("Can only connect UNCONNECTED circuits"); + } + status.updateCreatedTimestamp(); + status.setStateBuilding(); + circuitManager.addActiveCircuit(this); + } + + void notifyCircuitBuildFailed() { + status.setStateFailed(); + circuitManager.removeActiveCircuit(this); + } + + void notifyCircuitBuildCompleted() { + status.setStateOpen(); + status.updateCreatedTimestamp(); + } + + public Connection getConnection() { + if(!isConnected()) + throw new TorException("Circuit is not connected."); + return io.getConnection(); + } + + public int getCircuitId() { + if(io == null) { + return 0; + } else { + return io.getCircuitId(); + } + } + + public void sendRelayCell(RelayCell cell) { + io.sendRelayCellTo(cell, cell.getCircuitNode()); + } + + public void sendRelayCellToFinalNode(RelayCell cell) { + io.sendRelayCellTo(cell, getFinalCircuitNode()); + } + + public void appendNode(CircuitNode node) { + nodeList.add(node); + } + + List getNodeList() { + return nodeList; + } + + int getCircuitLength() { + return nodeList.size(); + } + + public CircuitNode getFinalCircuitNode() { + if(nodeList.isEmpty()) + throw new TorException("getFinalCircuitNode() called on empty circuit"); + return nodeList.get( getCircuitLength() - 1); + } + + public RelayCell createRelayCell(int relayCommand, int streamId, CircuitNode targetNode) { + return io.createRelayCell(relayCommand, streamId, targetNode); + } + + public RelayCell receiveRelayCell() { + return io.dequeueRelayResponseCell(); + } + + void sendCell(Cell cell) { + io.sendCell(cell); + } + + Cell receiveControlCellResponse() { + return io.receiveControlCellResponse(); + } + + /* + * This is called by the cell reading thread in ConnectionImpl to deliver control cells + * associated with this circuit (CREATED or CREATED_FAST). + */ + public void deliverControlCell(Cell cell) { + io.deliverControlCell(cell); + } + + /* This is called by the cell reading thread in ConnectionImpl to deliver RELAY cells. 
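+ * Decryption, flow control and stream dispatch are all delegated to the CircuitIO
+ * instance created in bindToConnection().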
*/ + public void deliverRelayCell(Cell cell) { + io.deliverRelayCell(cell); + } + + protected StreamImpl createNewStream(boolean autoclose) { + return io.createNewStream(autoclose); + } + protected StreamImpl createNewStream() { + return createNewStream(false); + } + + void setStateDestroyed() { + status.setStateDestroyed(); + circuitManager.removeActiveCircuit(this); + } + + public void destroyCircuit() { + io.destroyCircuit(); + circuitManager.removeActiveCircuit(this); + } + + + public void removeStream(StreamImpl stream) { + io.removeStream(stream); + } + + protected Stream processStreamOpenException(Exception e) throws InterruptedException, TimeoutException, StreamConnectFailedException { + if(e instanceof InterruptedException) { + throw (InterruptedException) e; + } else if(e instanceof TimeoutException) { + throw(TimeoutException) e; + } else if(e instanceof StreamConnectFailedException) { + throw(StreamConnectFailedException) e; + } else { + throw new IllegalStateException(); + } + } + + protected abstract String getCircuitTypeLabel(); + + public String toString() { + return " Circuit ("+ getCircuitTypeLabel() + ") id="+ getCircuitId() +" state=" + status.getStateAsString() +" "+ pathToString(); + } + + + protected String pathToString() { + final StringBuilder sb = new StringBuilder(); + sb.append("["); + for(CircuitNode node: nodeList) { + if(sb.length() > 1) + sb.append(","); + sb.append(node.toString()); + } + sb.append("]"); + return sb.toString(); + } + + public List getActiveStreams() { + if(io == null) { + return Collections.emptyList(); + } else { + return io.getActiveStreams(); + } + } + + public void dashboardRender(DashboardRenderer renderer, PrintWriter writer, int flags) throws IOException { + if(io != null) { + writer.println(toString()); + renderer.renderComponent(writer, flags, io); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/CircuitManagerImpl.java b/orchid/src/com/subgraph/orchid/circuits/CircuitManagerImpl.java new file mode 100644 index 00000000..cac1142d --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/CircuitManagerImpl.java @@ -0,0 +1,395 @@ +package com.subgraph.orchid.circuits; + +import java.io.IOException; +import java.io.PrintWriter; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import com.subgraph.orchid.Circuit; +import com.subgraph.orchid.CircuitBuildHandler; +import com.subgraph.orchid.CircuitManager; +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.Connection; +import com.subgraph.orchid.ConnectionCache; +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.DirectoryCircuit; +import com.subgraph.orchid.ExitCircuit; +import com.subgraph.orchid.InternalCircuit; +import com.subgraph.orchid.OpenFailedException; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.circuits.guards.EntryGuards; +import com.subgraph.orchid.circuits.hs.HiddenServiceManager; +import com.subgraph.orchid.circuits.path.CircuitPathChooser; +import 
com.subgraph.orchid.crypto.TorRandom; +import com.subgraph.orchid.dashboard.DashboardRenderable; +import com.subgraph.orchid.dashboard.DashboardRenderer; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.directory.downloader.DirectoryDownloaderImpl; + +public class CircuitManagerImpl implements CircuitManager, DashboardRenderable { + private final static int OPEN_DIRECTORY_STREAM_RETRY_COUNT = 5; + private final static int OPEN_DIRECTORY_STREAM_TIMEOUT = 10 * 1000; + + interface CircuitFilter { + boolean filter(Circuit circuit); + } + + private final TorConfig config; + private final Directory directory; + private final ConnectionCache connectionCache; + private final Set activeCircuits; + private final Queue cleanInternalCircuits; + private int requestedInternalCircuitCount = 0; + private int pendingInternalCircuitCount = 0; + private final TorRandom random; + private final PendingExitStreams pendingExitStreams; + private final ScheduledExecutorService scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); + private final CircuitCreationTask circuitCreationTask; + private final TorInitializationTracker initializationTracker; + private final CircuitPathChooser pathChooser; + private final HiddenServiceManager hiddenServiceManager; + + public CircuitManagerImpl(TorConfig config, DirectoryDownloaderImpl directoryDownloader, Directory directory, ConnectionCache connectionCache, TorInitializationTracker initializationTracker) { + this.config = config; + this.directory = directory; + this.connectionCache = connectionCache; + this.pathChooser = CircuitPathChooser.create(config, directory); + if(config.getUseEntryGuards() || config.getUseBridges()) { + this.pathChooser.enableEntryGuards(new EntryGuards(config, connectionCache, directoryDownloader, directory)); + } + this.pendingExitStreams = new PendingExitStreams(config); + this.circuitCreationTask = new CircuitCreationTask(config, directory, connectionCache, pathChooser, this, initializationTracker); + this.activeCircuits = new HashSet(); + this.cleanInternalCircuits = new LinkedList(); + this.random = new TorRandom(); + + this.initializationTracker = initializationTracker; + this.hiddenServiceManager = new HiddenServiceManager(config, directory, this); + + directoryDownloader.setCircuitManager(this); + } + + public void startBuildingCircuits() { + scheduledExecutor.scheduleAtFixedRate(circuitCreationTask, 0, 1000, TimeUnit.MILLISECONDS); + } + + public synchronized void stopBuildingCircuits(boolean killCircuits) { + scheduledExecutor.shutdownNow(); + if(killCircuits) { + List circuits = new ArrayList(activeCircuits); + for(CircuitImpl c: circuits) { + c.destroyCircuit(); + } + } + } + + public ExitCircuit createNewExitCircuit(Router exitRouter) { + return CircuitImpl.createExitCircuit(this, exitRouter); + } + + void addActiveCircuit(CircuitImpl circuit) { + synchronized (activeCircuits) { + activeCircuits.add(circuit); + activeCircuits.notifyAll(); + } + } + + void removeActiveCircuit(CircuitImpl circuit) { + synchronized (activeCircuits) { + activeCircuits.remove(circuit); + } + } + + synchronized int getActiveCircuitCount() { + return activeCircuits.size(); + } + + Set getPendingCircuits() { + return getCircuitsByFilter(new CircuitFilter() { + public boolean filter(Circuit circuit) { + return circuit.isPending(); + } + }); + } + + synchronized int getPendingCircuitCount() { + return getPendingCircuits().size(); + } + + Set getCircuitsByFilter(CircuitFilter filter) { + final Set result = new HashSet(); + 
synchronized (activeCircuits) { + for(CircuitImpl c: activeCircuits) { + if(filter == null || filter.filter(c)) { + result.add(c); + } + } + } + return result; + } + + List getRandomlyOrderedListOfExitCircuits() { + final Set notDirectory = getCircuitsByFilter(new CircuitFilter() { + + public boolean filter(Circuit circuit) { + final boolean exitType = circuit instanceof ExitCircuit; + return exitType && !circuit.isMarkedForClose() && circuit.isConnected(); + } + }); + final ArrayList ac = new ArrayList(); + for(Circuit c: notDirectory) { + if(c instanceof ExitCircuit) { + ac.add((ExitCircuit) c); + } + } + final int sz = ac.size(); + for(int i = 0; i < sz; i++) { + final ExitCircuit tmp = ac.get(i); + final int swapIdx = random.nextInt(sz); + ac.set(i, ac.get(swapIdx)); + ac.set(swapIdx, tmp); + } + return ac; + } + + public Stream openExitStreamTo(String hostname, int port) + throws InterruptedException, TimeoutException, OpenFailedException { + if(hostname.endsWith(".onion")) { + return hiddenServiceManager.getStreamTo(hostname, port); + } + validateHostname(hostname); + circuitCreationTask.predictPort(port); + return pendingExitStreams.openExitStream(hostname, port); + } + + private void validateHostname(String hostname) throws OpenFailedException { + maybeRejectInternalAddress(hostname); + if(hostname.toLowerCase().endsWith(".onion")) { + throw new OpenFailedException("Hidden services not supported"); + } else if(hostname.toLowerCase().endsWith(".exit")) { + throw new OpenFailedException(".exit addresses are not supported"); + } + } + + private void maybeRejectInternalAddress(String hostname) throws OpenFailedException { + if(IPv4Address.isValidIPv4AddressString(hostname)) { + maybeRejectInternalAddress(IPv4Address.createFromString(hostname)); + } + } + + private void maybeRejectInternalAddress(IPv4Address address) throws OpenFailedException { + final InetAddress inetAddress = address.toInetAddress(); + if(inetAddress.isSiteLocalAddress() && config.getClientRejectInternalAddress()) { + throw new OpenFailedException("Rejecting stream target with internal address: "+ address); + } + } + public Stream openExitStreamTo(IPv4Address address, int port) + throws InterruptedException, TimeoutException, OpenFailedException { + maybeRejectInternalAddress(address); + circuitCreationTask.predictPort(port); + return pendingExitStreams.openExitStream(address, port); + } + + public List getPendingExitStreams() { + return pendingExitStreams.getUnreservedPendingRequests(); + } + + public Stream openDirectoryStream() throws OpenFailedException, InterruptedException, TimeoutException { + return openDirectoryStream(0); + } + + public Stream openDirectoryStream(int purpose) throws OpenFailedException, InterruptedException { + final int requestEventCode = purposeToEventCode(purpose, false); + final int loadingEventCode = purposeToEventCode(purpose, true); + + int failCount = 0; + while(failCount < OPEN_DIRECTORY_STREAM_RETRY_COUNT) { + final DirectoryCircuit circuit = openDirectoryCircuit(); + if(requestEventCode > 0) { + initializationTracker.notifyEvent(requestEventCode); + } + try { + final Stream stream = circuit.openDirectoryStream(OPEN_DIRECTORY_STREAM_TIMEOUT, true); + if(loadingEventCode > 0) { + initializationTracker.notifyEvent(loadingEventCode); + } + return stream; + } catch (StreamConnectFailedException e) { + circuit.markForClose(); + failCount += 1; + } catch (TimeoutException e) { + circuit.markForClose(); + } + } + throw new OpenFailedException("Retry count exceeded opening directory 
stream"); + } + + public DirectoryCircuit openDirectoryCircuit() throws OpenFailedException { + int failCount = 0; + while(failCount < OPEN_DIRECTORY_STREAM_RETRY_COUNT) { + final DirectoryCircuit circuit = CircuitImpl.createDirectoryCircuit(this); + if(tryOpenCircuit(circuit, true, true)) { + return circuit; + } + failCount += 1; + } + throw new OpenFailedException("Could not create circuit for directory stream"); + } + + private int purposeToEventCode(int purpose, boolean getLoadingEvent) { + switch(purpose) { + case DIRECTORY_PURPOSE_CONSENSUS: + return getLoadingEvent ? Tor.BOOTSTRAP_STATUS_LOADING_STATUS : Tor.BOOTSTRAP_STATUS_REQUESTING_STATUS; + case DIRECTORY_PURPOSE_CERTIFICATES: + return getLoadingEvent ? Tor.BOOTSTRAP_STATUS_LOADING_KEYS : Tor.BOOTSTRAP_STATUS_REQUESTING_KEYS; + case DIRECTORY_PURPOSE_DESCRIPTORS: + return getLoadingEvent ? Tor.BOOTSTRAP_STATUS_LOADING_DESCRIPTORS : Tor.BOOTSTRAP_STATUS_REQUESTING_DESCRIPTORS; + default: + return 0; + } + } + + private static class DirectoryCircuitResult implements CircuitBuildHandler { + + private boolean isFailed; + + public void connectionCompleted(Connection connection) {} + public void nodeAdded(CircuitNode node) {} + public void circuitBuildCompleted(Circuit circuit) {} + + public void connectionFailed(String reason) { + isFailed = true; + } + + public void circuitBuildFailed(String reason) { + isFailed = true; + } + + boolean isSuccessful() { + return !isFailed; + } + } + + public void dashboardRender(DashboardRenderer renderer, PrintWriter writer, int flags) throws IOException { + if((flags & DASHBOARD_CIRCUITS) == 0) { + return; + } + renderer.renderComponent(writer, flags, connectionCache); + renderer.renderComponent(writer, flags, circuitCreationTask.getCircuitPredictor()); + writer.println("[Circuit Manager]"); + writer.println(); + for(Circuit c: getCircuitsByFilter(null)) { + renderer.renderComponent(writer, flags, c); + } + } + + public InternalCircuit getCleanInternalCircuit() throws InterruptedException { + synchronized(cleanInternalCircuits) { + try { + requestedInternalCircuitCount += 1; + while(cleanInternalCircuits.isEmpty()) { + cleanInternalCircuits.wait(); + } + return cleanInternalCircuits.remove(); + } finally { + requestedInternalCircuitCount -= 1; + } + } + } + + int getNeededCleanCircuitCount(boolean isPredicted) { + synchronized (cleanInternalCircuits) { + final int predictedCount = (isPredicted) ? 
2 : 0; + final int needed = Math.max(requestedInternalCircuitCount, predictedCount) - (pendingInternalCircuitCount + cleanInternalCircuits.size()); + if(needed < 0) { + return 0; + } else { + return needed; + } + } + } + + void incrementPendingInternalCircuitCount() { + synchronized (cleanInternalCircuits) { + pendingInternalCircuitCount += 1; + } + } + + void decrementPendingInternalCircuitCount() { + synchronized (cleanInternalCircuits) { + pendingInternalCircuitCount -= 1; + } + } + + void addCleanInternalCircuit(InternalCircuit circuit) { + synchronized(cleanInternalCircuits) { + pendingInternalCircuitCount -= 1; + cleanInternalCircuits.add(circuit); + cleanInternalCircuits.notifyAll(); + } + } + + boolean isNtorEnabled() { + switch(config.getUseNTorHandshake()) { + case AUTO: + return isNtorEnabledInConsensus(); + case FALSE: + return false; + case TRUE: + return true; + default: + throw new IllegalArgumentException("getUseNTorHandshake() returned "+ config.getUseNTorHandshake()); + } + } + + boolean isNtorEnabledInConsensus() { + ConsensusDocument consensus = directory.getCurrentConsensusDocument(); + return (consensus != null) && (consensus.getUseNTorHandshake()); + } + + public DirectoryCircuit openDirectoryCircuitTo(List path) throws OpenFailedException { + final DirectoryCircuit circuit = CircuitImpl.createDirectoryCircuitTo(this, path); + if(!tryOpenCircuit(circuit, true, false)) { + throw new OpenFailedException("Could not create directory circuit for path"); + } + return circuit; + } + + public ExitCircuit openExitCircuitTo(List path) throws OpenFailedException { + final ExitCircuit circuit = CircuitImpl.createExitCircuitTo(this, path); + if(!tryOpenCircuit(circuit, false, false)) { + throw new OpenFailedException("Could not create exit circuit for path"); + } + return circuit; + } + + public InternalCircuit openInternalCircuitTo(List path) throws OpenFailedException { + final InternalCircuit circuit = CircuitImpl.createInternalCircuitTo(this, path); + if(!tryOpenCircuit(circuit, false, false)) { + throw new OpenFailedException("Could not create internal circuit for path"); + } + return circuit; + } + + private boolean tryOpenCircuit(Circuit circuit, boolean isDirectory, boolean trackInitialization) { + final DirectoryCircuitResult result = new DirectoryCircuitResult(); + final CircuitCreationRequest req = new CircuitCreationRequest(pathChooser, circuit, result, isDirectory); + final CircuitBuildTask task = new CircuitBuildTask(req, connectionCache, isNtorEnabled(), (trackInitialization) ? 
(initializationTracker) : (null)); + task.run(); + return result.isSuccessful(); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/CircuitNodeCryptoState.java b/orchid/src/com/subgraph/orchid/circuits/CircuitNodeCryptoState.java new file mode 100644 index 00000000..37cb58e9 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/CircuitNodeCryptoState.java @@ -0,0 +1,102 @@ +package com.subgraph.orchid.circuits; + +import com.subgraph.orchid.Cell; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.crypto.TorStreamCipher; +import com.subgraph.orchid.data.HexDigest; + +public class CircuitNodeCryptoState { + public final static int KEY_MATERIAL_SIZE = TorMessageDigest.TOR_DIGEST_SIZE * 2 + TorStreamCipher.KEY_LEN * 2; + + public static CircuitNodeCryptoState createFromKeyMaterial(byte[] keyMaterial, byte[] verifyDigest) { + return new CircuitNodeCryptoState(keyMaterial, verifyDigest); + } + + private final HexDigest checksumDigest; + private final TorMessageDigest forwardDigest; + private final TorMessageDigest backwardDigest; + private final TorStreamCipher forwardCipher; + private final TorStreamCipher backwardCipher; + + static private byte[] extractDigestBytes(byte[] keyMaterial, int offset) { + final byte[] digestBytes = new byte[TorMessageDigest.TOR_DIGEST_SIZE]; + System.arraycopy(keyMaterial, offset, digestBytes, 0, TorMessageDigest.TOR_DIGEST_SIZE); + return digestBytes; + } + + static private byte[] extractCipherKey(byte[] keyMaterial, int offset) { + final byte[] keyBytes = new byte[TorStreamCipher.KEY_LEN]; + System.arraycopy(keyMaterial, offset, keyBytes, 0, TorStreamCipher.KEY_LEN); + return keyBytes; + } + + private CircuitNodeCryptoState(byte[] keyMaterial, byte[] verifyDigest) { + checksumDigest = HexDigest.createFromDigestBytes(verifyDigest); + int offset = 0; + + forwardDigest = new TorMessageDigest(); + forwardDigest.update(extractDigestBytes(keyMaterial, offset)); + offset += TorMessageDigest.TOR_DIGEST_SIZE; + + backwardDigest = new TorMessageDigest(); + backwardDigest.update(extractDigestBytes(keyMaterial, offset)); + offset += TorMessageDigest.TOR_DIGEST_SIZE; + + forwardCipher = TorStreamCipher.createFromKeyBytes(extractCipherKey(keyMaterial, offset)); + offset += TorStreamCipher.KEY_LEN; + + backwardCipher = TorStreamCipher.createFromKeyBytes(extractCipherKey(keyMaterial, offset)); + } + + boolean verifyPacketDigest(HexDigest packetDigest) { + return checksumDigest.equals(packetDigest); + } + + void encryptForwardCell(Cell cell) { + forwardCipher.encrypt(cell.getCellBytes(), Cell.CELL_HEADER_LEN, Cell.CELL_PAYLOAD_LEN); + } + + boolean decryptBackwardCell(Cell cell) { + backwardCipher.encrypt(cell.getCellBytes(), Cell.CELL_HEADER_LEN, Cell.CELL_PAYLOAD_LEN); + return isRecognizedCell(cell); + } + + void updateForwardDigest(Cell cell) { + forwardDigest.update(cell.getCellBytes(), Cell.CELL_HEADER_LEN, Cell.CELL_PAYLOAD_LEN); + } + + byte[] getForwardDigestBytes() { + return forwardDigest.getDigestBytes(); + } + + private boolean isRecognizedCell(Cell cell) { + if(cell.getShortAt(RelayCell.RECOGNIZED_OFFSET) != 0) + return false; + + final byte[] digest = extractRelayDigest(cell); + final byte[] peek = backwardDigest.peekDigest(cell.getCellBytes(), Cell.CELL_HEADER_LEN, Cell.CELL_PAYLOAD_LEN); + for(int i = 0; i < 4; i++) + if(digest[i] != peek[i]) { + replaceRelayDigest(cell, digest); + return false; + } + backwardDigest.update(cell.getCellBytes(), Cell.CELL_HEADER_LEN, 
Cell.CELL_PAYLOAD_LEN); + replaceRelayDigest(cell, digest); + return true; + } + + private byte[] extractRelayDigest(Cell cell) { + final byte[] digest = new byte[4]; + for(int i = 0; i < 4; i++) { + digest[i] = (byte) cell.getByteAt(i + RelayCell.DIGEST_OFFSET); + cell.putByteAt(i + RelayCell.DIGEST_OFFSET, 0); + } + return digest; + } + + private void replaceRelayDigest(Cell cell, byte[] digest) { + for(int i = 0; i < 4; i++) + cell.putByteAt(i + RelayCell.DIGEST_OFFSET, digest[i] & 0xFF); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/CircuitNodeImpl.java b/orchid/src/com/subgraph/orchid/circuits/CircuitNodeImpl.java new file mode 100644 index 00000000..c2e990f6 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/CircuitNodeImpl.java @@ -0,0 +1,121 @@ +package com.subgraph.orchid.circuits; + +import com.subgraph.orchid.Cell; +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorException; + +public class CircuitNodeImpl implements CircuitNode { + + public static CircuitNode createAnonymous(CircuitNode previous, byte[] keyMaterial, byte[] verifyDigest) { + return createNode(null, previous, keyMaterial, verifyDigest); + } + + public static CircuitNode createFirstHop(Router r, byte[] keyMaterial, byte[] verifyDigest) { + return createNode(r, null, keyMaterial, verifyDigest); + } + + public static CircuitNode createNode(Router r, CircuitNode previous, byte[] keyMaterial, byte[] verifyDigest) { + final CircuitNodeCryptoState cs = CircuitNodeCryptoState.createFromKeyMaterial(keyMaterial, verifyDigest); + return new CircuitNodeImpl(r, previous, cs); + } + + private final static int CIRCWINDOW_START = 1000; + private final static int CIRCWINDOW_INCREMENT = 100; + + private final Router router; + private final CircuitNodeCryptoState cryptoState; + private final CircuitNode previousNode; + + private final Object windowLock; + private int packageWindow; + private int deliverWindow; + + private CircuitNodeImpl(Router router, CircuitNode previous, CircuitNodeCryptoState cryptoState) { + previousNode = previous; + this.router = router; + this.cryptoState = cryptoState; + windowLock = new Object(); + packageWindow = CIRCWINDOW_START; + deliverWindow = CIRCWINDOW_START; + } + + public Router getRouter() { + return router; + } + + public CircuitNode getPreviousNode() { + return previousNode; + } + + public void encryptForwardCell(RelayCell cell) { + cryptoState.encryptForwardCell(cell); + } + + public boolean decryptBackwardCell(Cell cell) { + return cryptoState.decryptBackwardCell(cell); + } + + public void updateForwardDigest(RelayCell cell) { + cryptoState.updateForwardDigest(cell); + } + + public byte[] getForwardDigestBytes() { + return cryptoState.getForwardDigestBytes(); + } + + public String toString() { + if(router != null) { + return "|"+ router.getNickname() + "|"; + } else { + return "|()|"; + } + } + + public void decrementDeliverWindow() { + synchronized(windowLock) { + deliverWindow--; + } + } + + public boolean considerSendingSendme() { + synchronized(windowLock) { + if(deliverWindow <= (CIRCWINDOW_START - CIRCWINDOW_INCREMENT)) { + deliverWindow += CIRCWINDOW_INCREMENT; + return true; + } + return false; + } + } + + public void waitForSendWindow() { + waitForSendWindow(false); + } + + public void waitForSendWindowAndDecrement() { + waitForSendWindow(true); + } + + private void waitForSendWindow(boolean decrement) { + synchronized(windowLock) { + while(packageWindow == 0) { + 
try { + windowLock.wait(); + } catch (InterruptedException e) { + throw new TorException("Thread interrupted while waiting for circuit send window"); + } + } + if(decrement) + packageWindow--; + } + } + + public void incrementSendWindow() { + synchronized(windowLock) { + packageWindow += CIRCWINDOW_INCREMENT; + windowLock.notifyAll(); + } + + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/CircuitPredictor.java b/orchid/src/com/subgraph/orchid/circuits/CircuitPredictor.java new file mode 100644 index 00000000..21ed2ac6 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/CircuitPredictor.java @@ -0,0 +1,99 @@ +package com.subgraph.orchid.circuits; + +import java.io.IOException; +import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import com.subgraph.orchid.dashboard.DashboardRenderable; +import com.subgraph.orchid.dashboard.DashboardRenderer; + +public class CircuitPredictor implements DashboardRenderable { + + private final static Integer INTERNAL_CIRCUIT_PORT_VALUE = 0; + private final static long TIMEOUT_MS = 60 * 60 * 1000; // One hour + + private final Map portsSeen; + + public CircuitPredictor() { + portsSeen = new HashMap(); + addExitPortRequest(80); + addInternalRequest(); + } + + void addExitPortRequest(int port) { + synchronized (portsSeen) { + portsSeen.put(port, System.currentTimeMillis()); + } + } + + void addInternalRequest() { + addExitPortRequest(INTERNAL_CIRCUIT_PORT_VALUE); + } + + + private boolean isEntryExpired(Entry e, long now) { + return (now - e.getValue()) > TIMEOUT_MS; + } + + private void removeExpiredPorts() { + final long now = System.currentTimeMillis(); + final Iterator> it = portsSeen.entrySet().iterator(); + while(it.hasNext()) { + if(isEntryExpired(it.next(), now)) { + it.remove(); + } + } + } + + boolean isInternalPredicted() { + synchronized (portsSeen) { + removeExpiredPorts(); + return portsSeen.containsKey(INTERNAL_CIRCUIT_PORT_VALUE); + } + } + + Set getPredictedPorts() { + synchronized (portsSeen) { + removeExpiredPorts(); + final Set result = new HashSet(portsSeen.keySet()); + result.remove(INTERNAL_CIRCUIT_PORT_VALUE); + return result; + } + } + + List getPredictedPortTargets() { + final List targets = new ArrayList(); + for(int p: getPredictedPorts()) { + targets.add(new PredictedPortTarget(p)); + } + return targets; + } + + public void dashboardRender(DashboardRenderer renderer, PrintWriter writer, int flags) + throws IOException { + + if((flags & DASHBOARD_PREDICTED_PORTS) == 0) { + return; + } + writer.println("[Predicted Ports] "); + for(int port : portsSeen.keySet()) { + writer.write(" "+ port); + Long lastSeen = portsSeen.get(port); + if(lastSeen != null) { + long now = System.currentTimeMillis(); + long ms = now - lastSeen; + writer.write(" (last seen "+ TimeUnit.MINUTES.convert(ms, TimeUnit.MILLISECONDS) +" minutes ago)"); + } + writer.println(); + } + writer.println(); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/CircuitStatus.java b/orchid/src/com/subgraph/orchid/circuits/CircuitStatus.java new file mode 100644 index 00000000..a9a994f3 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/CircuitStatus.java @@ -0,0 +1,115 @@ +package com.subgraph.orchid.circuits; + +import com.subgraph.orchid.crypto.TorRandom; + +public class CircuitStatus { + + enum CircuitState { + 
UNCONNECTED("Unconnected"), + BUILDING("Building"), + FAILED("Failed"), + OPEN("Open"), + DESTROYED("Destroyed"); + String name; + CircuitState(String name) { this.name = name; } + public String toString() { return name; } + } + + private long timestampCreated; + private long timestampDirty; + private int currentStreamId; + private Object streamIdLock = new Object(); + private volatile CircuitState state = CircuitState.UNCONNECTED; + + CircuitStatus() { + initializeCurrentStreamId(); + } + + private void initializeCurrentStreamId() { + final TorRandom random = new TorRandom(); + currentStreamId = random.nextInt(0xFFFF) + 1; + } + + synchronized void updateCreatedTimestamp() { + timestampCreated = System.currentTimeMillis(); + timestampDirty = 0; + } + + synchronized void updateDirtyTimestamp() { + if(timestampDirty == 0 && state != CircuitState.BUILDING) { + timestampDirty = System.currentTimeMillis(); + } + } + + synchronized long getMillisecondsElapsedSinceCreated() { + return millisecondsElapsedSince(timestampCreated); + } + + synchronized long getMillisecondsDirty() { + return millisecondsElapsedSince(timestampDirty); + } + + private static long millisecondsElapsedSince(long then) { + if(then == 0) { + return 0; + } + final long now = System.currentTimeMillis(); + return now - then; + } + + synchronized boolean isDirty() { + return timestampDirty != 0; + } + + void setStateBuilding() { + state = CircuitState.BUILDING; + } + + void setStateFailed() { + state = CircuitState.FAILED; + } + + void setStateOpen() { + state = CircuitState.OPEN; + } + + void setStateDestroyed() { + state = CircuitState.DESTROYED; + } + + boolean isBuilding() { + return state == CircuitState.BUILDING; + } + + boolean isConnected() { + return state == CircuitState.OPEN; + } + + boolean isUnconnected() { + return state == CircuitState.UNCONNECTED; + } + + String getStateAsString() { + if(state == CircuitState.OPEN) { + return state.toString() + " ["+ getDirtyString() + "]"; + } + return state.toString(); + } + + private String getDirtyString() { + if(!isDirty()) { + return "Clean"; + } else { + return "Dirty "+ (getMillisecondsDirty() / 1000) +"s"; + } + } + int nextStreamId() { + synchronized(streamIdLock) { + currentStreamId++; + if(currentStreamId > 0xFFFF) + currentStreamId = 1; + return currentStreamId; + } + } + +} diff --git a/orchid/src/com/subgraph/orchid/circuits/DirectoryCircuitImpl.java b/orchid/src/com/subgraph/orchid/circuits/DirectoryCircuitImpl.java new file mode 100644 index 00000000..646930fb --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/DirectoryCircuitImpl.java @@ -0,0 +1,42 @@ +package com.subgraph.orchid.circuits; + +import java.util.List; +import java.util.concurrent.TimeoutException; + +import com.subgraph.orchid.DirectoryCircuit; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; +import com.subgraph.orchid.circuits.path.CircuitPathChooser; +import com.subgraph.orchid.circuits.path.PathSelectionFailedException; + +public class DirectoryCircuitImpl extends CircuitImpl implements DirectoryCircuit { + + protected DirectoryCircuitImpl(CircuitManagerImpl circuitManager, List prechosenPath) { + super(circuitManager, prechosenPath); + } + + public Stream openDirectoryStream(long timeout, boolean autoclose) throws InterruptedException, TimeoutException, StreamConnectFailedException { + final StreamImpl stream = createNewStream(autoclose); + try { + stream.openDirectory(timeout); + return stream; + } catch 
(Exception e) { + removeStream(stream); + return processStreamOpenException(e); + } + } + + @Override + protected List choosePathForCircuit(CircuitPathChooser pathChooser) throws InterruptedException, PathSelectionFailedException { + if(prechosenPath != null) { + return prechosenPath; + } + return pathChooser.chooseDirectoryPath(); + } + + @Override + protected String getCircuitTypeLabel() { + return "Directory"; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/ExitCircuitImpl.java b/orchid/src/com/subgraph/orchid/circuits/ExitCircuitImpl.java new file mode 100644 index 00000000..e431ddf3 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/ExitCircuitImpl.java @@ -0,0 +1,87 @@ +package com.subgraph.orchid.circuits; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeoutException; + +import com.subgraph.orchid.ExitCircuit; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; +import com.subgraph.orchid.circuits.path.CircuitPathChooser; +import com.subgraph.orchid.circuits.path.PathSelectionFailedException; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.exitpolicy.ExitTarget; + +public class ExitCircuitImpl extends CircuitImpl implements ExitCircuit { + + private final Router exitRouter; + private final Set failedExitRequests; + + ExitCircuitImpl(CircuitManagerImpl circuitManager, List prechosenPath) { + super(circuitManager, prechosenPath); + this.exitRouter = prechosenPath.get(prechosenPath.size() - 1); + this.failedExitRequests = new HashSet(); + } + + ExitCircuitImpl(CircuitManagerImpl circuitManager, Router exitRouter) { + super(circuitManager); + this.exitRouter = exitRouter; + this.failedExitRequests = new HashSet(); + } + + public Stream openExitStream(IPv4Address address, int port, long timeout) throws InterruptedException, TimeoutException, StreamConnectFailedException { + return openExitStream(address.toString(), port, timeout); + } + + public Stream openExitStream(String target, int port, long timeout) throws InterruptedException, TimeoutException, StreamConnectFailedException { + final StreamImpl stream = createNewStream(); + try { + stream.openExit(target, port, timeout); + return stream; + } catch (Exception e) { + removeStream(stream); + return processStreamOpenException(e); + } + } + + public void recordFailedExitTarget(ExitTarget target) { + synchronized(failedExitRequests) { + failedExitRequests.add(target); + } + } + + public boolean canHandleExitTo(ExitTarget target) { + synchronized(failedExitRequests) { + if(failedExitRequests.contains(target)) { + return false; + } + } + + if(isMarkedForClose()) { + return false; + } + + if(target.isAddressTarget()) { + return exitRouter.exitPolicyAccepts(target.getAddress(), target.getPort()); + } else { + return exitRouter.exitPolicyAccepts(target.getPort()); + } + } + + public boolean canHandleExitToPort(int port) { + return exitRouter.exitPolicyAccepts(port); + } + + + @Override + protected List choosePathForCircuit(CircuitPathChooser pathChooser) throws InterruptedException, PathSelectionFailedException { + return pathChooser.choosePathWithExit(exitRouter); + } + + @Override + protected String getCircuitTypeLabel() { + return "Exit"; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/InternalCircuitImpl.java b/orchid/src/com/subgraph/orchid/circuits/InternalCircuitImpl.java new file mode 100644 index 00000000..06bcbe04 --- /dev/null +++ 
b/orchid/src/com/subgraph/orchid/circuits/InternalCircuitImpl.java @@ -0,0 +1,118 @@ +package com.subgraph.orchid.circuits; + +import java.util.List; +import java.util.concurrent.TimeoutException; + +import com.subgraph.orchid.Circuit; +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.DirectoryCircuit; +import com.subgraph.orchid.HiddenServiceCircuit; +import com.subgraph.orchid.InternalCircuit; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; +import com.subgraph.orchid.circuits.path.CircuitPathChooser; +import com.subgraph.orchid.circuits.path.PathSelectionFailedException; + +public class InternalCircuitImpl extends CircuitImpl implements InternalCircuit, DirectoryCircuit, HiddenServiceCircuit { + + private enum InternalType { UNUSED, HS_INTRODUCTION, HS_DIRECTORY, HS_CIRCUIT } + + private InternalType type; + private boolean ntorEnabled; + + InternalCircuitImpl(CircuitManagerImpl circuitManager, List prechosenPath) { + super(circuitManager, prechosenPath); + this.type = InternalType.UNUSED; + this.ntorEnabled = circuitManager.isNtorEnabled(); + } + + protected InternalCircuitImpl(CircuitManagerImpl circuitManager) { + this(circuitManager, null); + } + + @Override + protected List choosePathForCircuit(CircuitPathChooser pathChooser) + throws InterruptedException, PathSelectionFailedException { + return pathChooser.chooseInternalPath(); + } + + + public Circuit cannibalizeToIntroductionPoint(Router target) { + cannibalizeTo(target); + type = InternalType.HS_INTRODUCTION; + return this; + } + + private void cannibalizeTo(Router target) { + if(type != InternalType.UNUSED) { + throw new IllegalStateException("Cannot cannibalize internal circuit with type "+ type); + + } + final CircuitExtender extender = new CircuitExtender(this, ntorEnabled); + extender.extendTo(target); + } + + public Stream openDirectoryStream(long timeout, boolean autoclose) throws InterruptedException, TimeoutException, StreamConnectFailedException { + if(type != InternalType.HS_DIRECTORY) { + throw new IllegalStateException("Cannot open directory stream on internal circuit with type "+ type); + } + final StreamImpl stream = createNewStream(); + try { + stream.openDirectory(timeout); + return stream; + } catch (Exception e) { + removeStream(stream); + return processStreamOpenException(e); + } + } + + + public DirectoryCircuit cannibalizeToDirectory(Router target) { + cannibalizeTo(target); + type = InternalType.HS_DIRECTORY; + return this; + } + + + public HiddenServiceCircuit connectHiddenService(CircuitNode node) { + if(type != InternalType.UNUSED) { + throw new IllegalStateException("Cannot connect hidden service from internal circuit type "+ type); + } + appendNode(node); + type = InternalType.HS_CIRCUIT; + return this; + } + + public Stream openStream(int port, long timeout) + throws InterruptedException, TimeoutException, StreamConnectFailedException { + if(type != InternalType.HS_CIRCUIT) { + throw new IllegalStateException("Cannot open stream to hidden service from internal circuit type "+ type); + } + final StreamImpl stream = createNewStream(); + try { + stream.openExit("", port, timeout); + return stream; + } catch (Exception e) { + removeStream(stream); + return processStreamOpenException(e); + } + } + + + @Override + protected String getCircuitTypeLabel() { + switch(type) { + case HS_CIRCUIT: + return "Hidden Service"; + case HS_DIRECTORY: + return "HS Directory"; + case HS_INTRODUCTION: + return "HS 
Introduction"; + case UNUSED: + return "Internal"; + default: + return "(null)"; + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/NTorCircuitExtender.java b/orchid/src/com/subgraph/orchid/circuits/NTorCircuitExtender.java new file mode 100644 index 00000000..45c705c1 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/NTorCircuitExtender.java @@ -0,0 +1,114 @@ +package com.subgraph.orchid.circuits; + +import java.util.logging.Logger; + +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.crypto.TorNTorKeyAgreement; + +public class NTorCircuitExtender { + private final static Logger logger = Logger.getLogger(NTorCircuitExtender.class.getName()); + + private final CircuitExtender extender; + private final Router router; + private final TorNTorKeyAgreement kex; + + public NTorCircuitExtender(CircuitExtender extender, Router router) { + this.extender = extender; + this.router = router; + this.kex = new TorNTorKeyAgreement(router.getIdentityHash(), router.getNTorOnionKey()); + } + + CircuitNode extendTo() { + final byte[] onion = kex.createOnionSkin(); + if(finalRouterSupportsExtend2()) { + logger.fine("Extending circuit to "+ router.getNickname() + " with NTor inside RELAY_EXTEND2"); + return extendWithExtend2(onion); + } else { + logger.fine("Extending circuit to "+ router.getNickname() + " with NTor inside RELAY_EXTEND"); + return extendWithTunneledExtend(onion); + } + } + + private CircuitNode extendWithExtend2(byte[] onion) { + final RelayCell cell = createExtend2Cell(onion); + extender.sendRelayCell(cell); + final RelayCell response = extender.receiveRelayResponse(RelayCell.RELAY_EXTENDED2, router); + return processExtended2(response); + } + + private CircuitNode extendWithTunneledExtend(byte[] onion) { + final RelayCell cell = createExtendCell(onion, kex.getNtorCreateMagic()); + extender.sendRelayCell(cell); + final RelayCell response = extender.receiveRelayResponse(RelayCell.RELAY_EXTENDED, router); + return processExtended(response); + } + + private boolean finalRouterSupportsExtend2() { + return extender.getFinalRouter().getNTorOnionKey() != null; + } + + private RelayCell createExtend2Cell(byte[] ntorOnionskin) { + final RelayCell cell = extender.createRelayCell(RelayCell.RELAY_EXTEND2); + + cell.putByte(2); + + cell.putByte(0); + cell.putByte(6); + cell.putByteArray(router.getAddress().getAddressDataBytes()); + cell.putShort(router.getOnionPort()); + + cell.putByte(2); + cell.putByte(20); + cell.putByteArray(router.getIdentityHash().getRawBytes()); + + cell.putShort(0x0002); + cell.putShort(ntorOnionskin.length); + cell.putByteArray(ntorOnionskin); + return cell; + } + + private RelayCell createExtendCell(byte[] ntorOnionskin, byte[] ntorMagic) { + final RelayCell cell = extender.createRelayCell(RelayCell.RELAY_EXTEND); + cell.putByteArray(router.getAddress().getAddressDataBytes()); + cell.putShort(router.getOnionPort()); + final int paddingLength = CircuitExtender.TAP_ONIONSKIN_LEN - (ntorOnionskin.length + ntorMagic.length); + final byte[] padding = new byte[paddingLength]; + cell.putByteArray(ntorMagic); + cell.putByteArray(ntorOnionskin); + cell.putByteArray(padding); + cell.putByteArray(router.getIdentityHash().getRawBytes()); + return cell; + } + + private CircuitNode processExtended(RelayCell cell) { + byte[] payload = new byte[CircuitExtender.TAP_ONIONSKIN_REPLY_LEN]; + 
cell.getByteArray(payload); + + return processPayload(payload); + } + + + private CircuitNode processExtended2(RelayCell cell) { + final int payloadLength = cell.getShort(); + if(payloadLength > cell.cellBytesRemaining()) { + throw new TorException("Incorrect payload length value in RELAY_EXTENED2 cell"); + } + byte[] payload = new byte[payloadLength]; + cell.getByteArray(payload); + + return processPayload(payload); + } + + private CircuitNode processPayload(byte[] payload) { + final byte[] keyMaterial = new byte[CircuitNodeCryptoState.KEY_MATERIAL_SIZE]; + final byte[] verifyDigest = new byte[TorMessageDigest.TOR_DIGEST_SIZE]; + if(!kex.deriveKeysFromHandshakeResponse(payload, keyMaterial, verifyDigest)) { + return null; + } + return extender.createNewNode(router, keyMaterial, verifyDigest); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/OpenExitStreamTask.java b/orchid/src/com/subgraph/orchid/circuits/OpenExitStreamTask.java new file mode 100644 index 00000000..3ea3d17e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/OpenExitStreamTask.java @@ -0,0 +1,50 @@ +package com.subgraph.orchid.circuits; + +import java.util.concurrent.TimeoutException; +import java.util.logging.Logger; + +import com.subgraph.orchid.ExitCircuit; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; + +public class OpenExitStreamTask implements Runnable { + private final static Logger logger = Logger.getLogger(OpenExitStreamTask.class.getName()); + private final ExitCircuit circuit; + private final StreamExitRequest exitRequest; + + OpenExitStreamTask(ExitCircuit circuit, StreamExitRequest exitRequest) { + this.circuit = circuit; + this.exitRequest = exitRequest; + } + + public void run() { + logger.fine("Attempting to open stream to "+ exitRequest); + try { + exitRequest.setCompletedSuccessfully(tryOpenExitStream()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + exitRequest.setInterrupted(); + } catch (TimeoutException e) { + circuit.markForClose(); + exitRequest.setCompletedTimeout(); + } catch (StreamConnectFailedException e) { + if(!e.isReasonRetryable()) { + exitRequest.setExitFailed(); + circuit.recordFailedExitTarget(exitRequest); + } else { + circuit.markForClose(); + exitRequest.setStreamOpenFailure(e.getReason()); + } + + } + } + + private Stream tryOpenExitStream() throws InterruptedException, TimeoutException, StreamConnectFailedException { + if(exitRequest.isAddressTarget()) { + return circuit.openExitStream(exitRequest.getAddress(), exitRequest.getPort(), exitRequest.getStreamTimeout()); + } else { + return circuit.openExitStream(exitRequest.getHostname(), exitRequest.getPort(), exitRequest.getStreamTimeout()); + } + } + +} diff --git a/orchid/src/com/subgraph/orchid/circuits/PendingExitStreams.java b/orchid/src/com/subgraph/orchid/circuits/PendingExitStreams.java new file mode 100644 index 00000000..f10fb264 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/PendingExitStreams.java @@ -0,0 +1,77 @@ +package com.subgraph.orchid.circuits; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeoutException; + +import com.subgraph.orchid.OpenFailedException; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.data.IPv4Address; + +public class PendingExitStreams { + + private final Set pendingRequests; + private 
final Object lock = new Object(); + private final TorConfig config; + + PendingExitStreams(TorConfig config) { + this.config = config; + pendingRequests = new HashSet(); + } + + Stream openExitStream(IPv4Address address, int port) throws InterruptedException, OpenFailedException { + final StreamExitRequest request = new StreamExitRequest(lock, address, port); + return openExitStreamByRequest(request); + } + + Stream openExitStream(String hostname, int port) throws InterruptedException, OpenFailedException { + final StreamExitRequest request = new StreamExitRequest(lock, hostname, port); + return openExitStreamByRequest(request); + } + + private Stream openExitStreamByRequest(StreamExitRequest request) throws InterruptedException, OpenFailedException { + if(config.getCircuitStreamTimeout() != 0) { + request.setStreamTimeout(config.getCircuitStreamTimeout()); + } + + synchronized(lock) { + pendingRequests.add(request); + try { + return handleRequest(request); + } finally { + pendingRequests.remove(request); + } + } + } + + private Stream handleRequest(StreamExitRequest request) throws InterruptedException, OpenFailedException { + while(true) { + while(!request.isCompleted()) { + lock.wait(); + } + try { + return request.getStream(); + } catch (TimeoutException e) { + request.resetForRetry(); + } catch (StreamConnectFailedException e) { + request.resetForRetry(); + } + } + } + + List getUnreservedPendingRequests() { + final List result = new ArrayList(); + synchronized (lock) { + for(StreamExitRequest request: pendingRequests) { + if(!request.isReserved()) { + result.add(request); + } + } + } + return result; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/PredictedPortTarget.java b/orchid/src/com/subgraph/orchid/circuits/PredictedPortTarget.java new file mode 100644 index 00000000..62a1ac71 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/PredictedPortTarget.java @@ -0,0 +1,29 @@ +package com.subgraph.orchid.circuits; + +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.exitpolicy.ExitTarget; + +public class PredictedPortTarget implements ExitTarget { + + final int port; + + public PredictedPortTarget(int port) { + this.port = port; + } + + public boolean isAddressTarget() { + return false; + } + + public IPv4Address getAddress() { + return new IPv4Address(0); + } + + public String getHostname() { + return ""; + } + + public int getPort() { + return port; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/StreamExitRequest.java b/orchid/src/com/subgraph/orchid/circuits/StreamExitRequest.java new file mode 100644 index 00000000..b2913c6a --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/StreamExitRequest.java @@ -0,0 +1,170 @@ +package com.subgraph.orchid.circuits; + +import java.util.concurrent.TimeoutException; + +import com.subgraph.orchid.OpenFailedException; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.exitpolicy.ExitTarget; +import com.subgraph.orchid.misc.GuardedBy; + +public class StreamExitRequest implements ExitTarget { + + private enum CompletionStatus {NOT_COMPLETED, SUCCESS, TIMEOUT, STREAM_OPEN_FAILURE, EXIT_FAILURE, INTERRUPTED}; + + private final boolean isAddress; + private final IPv4Address address; + private final String hostname; + private final int port; + private final Object requestCompletionLock; + + @GuardedBy("requestCompletionLock") private CompletionStatus 
completionStatus; + @GuardedBy("requestCompletionLock") private Stream stream; + @GuardedBy("requestCompletionLock") private int streamOpenFailReason; + + @GuardedBy("this") private boolean isReserved; + @GuardedBy("this") private int retryCount; + @GuardedBy("this") private long specificTimeout; + + StreamExitRequest(Object requestCompletionLock, IPv4Address address, int port) { + this(requestCompletionLock, true, "", address, port); + } + + StreamExitRequest(Object requestCompletionLock, String hostname, int port) { + this(requestCompletionLock, false, hostname, null, port); + } + + private StreamExitRequest(Object requestCompletionLock, boolean isAddress, String hostname, IPv4Address address, int port) { + this.requestCompletionLock = requestCompletionLock; + this.isAddress = isAddress; + this.hostname = hostname; + this.address = address; + this.port = port; + this.completionStatus = CompletionStatus.NOT_COMPLETED; + } + + public boolean isAddressTarget() { + return isAddress; + } + + public IPv4Address getAddress() { + return address; + } + + public String getHostname() { + return hostname; + } + + public int getPort() { + return port; + } + + public synchronized void setStreamTimeout(long timeout) { + specificTimeout = timeout; + } + + public synchronized long getStreamTimeout() { + if(specificTimeout > 0) { + return specificTimeout; + } else if(retryCount < 2) { + return 10 * 1000; + } else { + return 15 * 1000; + } + } + + void setCompletedTimeout() { + synchronized (requestCompletionLock) { + newStatus(CompletionStatus.TIMEOUT); + } + } + + void setExitFailed() { + synchronized (requestCompletionLock) { + newStatus(CompletionStatus.EXIT_FAILURE); + } + } + + void setStreamOpenFailure(int reason) { + synchronized (requestCompletionLock) { + streamOpenFailReason = reason; + newStatus(CompletionStatus.STREAM_OPEN_FAILURE); + } + } + + void setCompletedSuccessfully(Stream stream) { + synchronized (requestCompletionLock) { + this.stream = stream; + newStatus(CompletionStatus.SUCCESS); + } + } + + void setInterrupted() { + synchronized (requestCompletionLock) { + newStatus(CompletionStatus.INTERRUPTED); + } + } + + private void newStatus(CompletionStatus newStatus) { + if(completionStatus != CompletionStatus.NOT_COMPLETED) { + throw new IllegalStateException("Attempt to set completion state to " + newStatus +" while status is "+ completionStatus); + } + completionStatus = newStatus; + requestCompletionLock.notifyAll(); + } + + + Stream getStream() throws OpenFailedException, TimeoutException, StreamConnectFailedException, InterruptedException { + synchronized(requestCompletionLock) { + switch(completionStatus) { + case NOT_COMPLETED: + throw new IllegalStateException("Request not completed"); + case EXIT_FAILURE: + throw new OpenFailedException("Failure at exit node"); + case TIMEOUT: + throw new TimeoutException(); + case STREAM_OPEN_FAILURE: + throw new StreamConnectFailedException(streamOpenFailReason); + case INTERRUPTED: + throw new InterruptedException(); + case SUCCESS: + return stream; + default: + throw new IllegalStateException("Unknown completion status"); + } + } + } + + synchronized void resetForRetry() { + synchronized (requestCompletionLock) { + streamOpenFailReason = 0; + completionStatus = CompletionStatus.NOT_COMPLETED; + } + retryCount += 1; + isReserved = false; + } + + boolean isCompleted() { + synchronized (requestCompletionLock) { + return completionStatus != CompletionStatus.NOT_COMPLETED; + } + } + + synchronized boolean reserveRequest() { + if(isReserved) 
return false; + isReserved = true; + return true; + } + + synchronized boolean isReserved() { + return isReserved; + } + + public String toString() { + if(isAddress) + return address + ":"+ port; + else + return hostname + ":"+ port; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/StreamImpl.java b/orchid/src/com/subgraph/orchid/circuits/StreamImpl.java new file mode 100644 index 00000000..461409b0 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/StreamImpl.java @@ -0,0 +1,219 @@ +package com.subgraph.orchid.circuits; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.PrintWriter; +import java.util.concurrent.TimeoutException; +import java.util.logging.Logger; + +import com.subgraph.orchid.Circuit; +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.circuits.cells.RelayCellImpl; +import com.subgraph.orchid.dashboard.DashboardRenderable; +import com.subgraph.orchid.dashboard.DashboardRenderer; + +public class StreamImpl implements Stream, DashboardRenderable { + private final static Logger logger = Logger.getLogger(StreamImpl.class.getName()); + + private final static int STREAMWINDOW_START = 500; + private final static int STREAMWINDOW_INCREMENT = 50; + private final static int STREAMWINDOW_MAX_UNFLUSHED = 10; + + private final CircuitImpl circuit; + + private final int streamId; + private final boolean autoclose; + + private final CircuitNode targetNode; + private final TorInputStream inputStream; + private final TorOutputStream outputStream; + + private boolean isClosed; + private boolean relayEndReceived; + private int relayEndReason; + private boolean relayConnectedReceived; + private final Object waitConnectLock = new Object(); + private final Object windowLock = new Object(); + private int packageWindow; + private int deliverWindow; + + private String streamTarget = ""; + + StreamImpl(CircuitImpl circuit, CircuitNode targetNode, int streamId, boolean autoclose) { + this.circuit = circuit; + this.targetNode = targetNode; + this.streamId = streamId; + this.autoclose = autoclose; + this.inputStream = new TorInputStream(this); + this.outputStream = new TorOutputStream(this); + packageWindow = STREAMWINDOW_START; + deliverWindow = STREAMWINDOW_START; + } + + void addInputCell(RelayCell cell) { + if(isClosed) + return; + if(cell.getRelayCommand() == RelayCell.RELAY_END) { + synchronized(waitConnectLock) { + relayEndReason = cell.getByte(); + relayEndReceived = true; + inputStream.addEndCell(cell); + waitConnectLock.notifyAll(); + } + } else if(cell.getRelayCommand() == RelayCell.RELAY_CONNECTED) { + synchronized(waitConnectLock) { + relayConnectedReceived = true; + waitConnectLock.notifyAll(); + } + } else if(cell.getRelayCommand() == RelayCell.RELAY_SENDME) { + synchronized(windowLock) { + packageWindow += STREAMWINDOW_INCREMENT; + windowLock.notifyAll(); + } + } + else { + inputStream.addInputCell(cell); + synchronized(windowLock) { + deliverWindow--; + if(deliverWindow < 0) + throw new TorException("Stream has negative delivery window"); + } + considerSendingSendme(); + } + } + + private void considerSendingSendme() { + synchronized(windowLock) { + if(deliverWindow > (STREAMWINDOW_START - STREAMWINDOW_INCREMENT)) + return; + + if(inputStream.unflushedCellCount() >= STREAMWINDOW_MAX_UNFLUSHED) + return; + + final RelayCell 
sendme = circuit.createRelayCell(RelayCell.RELAY_SENDME, streamId, targetNode); + circuit.sendRelayCell(sendme); + deliverWindow += STREAMWINDOW_INCREMENT; + } + } + + public int getStreamId() { + return streamId; + } + + public Circuit getCircuit() { + return circuit; + } + + public CircuitNode getTargetNode() { + return targetNode; + } + + public void close() { + if(isClosed) + return; + + logger.fine("Closing stream "+ this); + + isClosed = true; + inputStream.close(); + outputStream.close(); + circuit.removeStream(this); + if(autoclose) { + circuit.markForClose(); + } + + if(!relayEndReceived) { + final RelayCell cell = new RelayCellImpl(circuit.getFinalCircuitNode(), circuit.getCircuitId(), streamId, RelayCell.RELAY_END); + cell.putByte(RelayCell.REASON_DONE); + circuit.sendRelayCellToFinalNode(cell); + } + } + + public void openDirectory(long timeout) throws InterruptedException, TimeoutException, StreamConnectFailedException { + streamTarget = "[Directory]"; + final RelayCell cell = new RelayCellImpl(circuit.getFinalCircuitNode(), circuit.getCircuitId(), streamId, RelayCell.RELAY_BEGIN_DIR); + circuit.sendRelayCellToFinalNode(cell); + waitForRelayConnected(timeout); + } + + void openExit(String target, int port, long timeout) throws InterruptedException, TimeoutException, StreamConnectFailedException { + streamTarget = target + ":"+ port; + final RelayCell cell = new RelayCellImpl(circuit.getFinalCircuitNode(), circuit.getCircuitId(), streamId, RelayCell.RELAY_BEGIN); + cell.putString(target + ":"+ port); + circuit.sendRelayCellToFinalNode(cell); + waitForRelayConnected(timeout); + } + + private void waitForRelayConnected(long timeout) throws InterruptedException, TimeoutException, StreamConnectFailedException { + final long start = System.currentTimeMillis(); + long elapsed = 0; + synchronized(waitConnectLock) { + while(!relayConnectedReceived) { + + if(relayEndReceived) { + throw new StreamConnectFailedException(relayEndReason); + } + + if(elapsed >= timeout) { + throw new TimeoutException(); + } + + waitConnectLock.wait(timeout - elapsed); + + elapsed = System.currentTimeMillis() - start; + } + } + } + + public InputStream getInputStream() { + return inputStream; + } + + public OutputStream getOutputStream() { + return outputStream; + } + + public void waitForSendWindowAndDecrement() { + waitForSendWindow(true); + } + + public void waitForSendWindow() { + waitForSendWindow(false); + } + + public void waitForSendWindow(boolean decrement) { + synchronized(windowLock) { + while(packageWindow == 0) { + try { + windowLock.wait(); + } catch (InterruptedException e) { + throw new TorException("Thread interrupted while waiting for stream package window"); + } + } + if(decrement) + packageWindow--; + } + targetNode.waitForSendWindow(); + } + + public String toString() { + return "[Stream stream_id="+ streamId + " circuit="+ circuit +" target="+ streamTarget +"]"; + } + + public void dashboardRender(DashboardRenderer renderer, PrintWriter writer, int flags) throws IOException { + writer.print(" "); + writer.print("[Stream stream_id="+ streamId + " cid="+ circuit.getCircuitId()); + if(relayConnectedReceived) { + writer.print(" sent="+outputStream.getBytesSent() + " recv="+ inputStream.getBytesReceived()); + } else { + writer.print(" (waiting connect)"); + } + writer.print(" target="+ streamTarget); + writer.println("]"); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/TapCircuitExtender.java b/orchid/src/com/subgraph/orchid/circuits/TapCircuitExtender.java new file mode 100644 
index 00000000..078fe4de --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/TapCircuitExtender.java @@ -0,0 +1,55 @@ +package com.subgraph.orchid.circuits; + +import java.util.logging.Logger; + +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.crypto.TorTapKeyAgreement; + +public class TapCircuitExtender { + private final static Logger logger = Logger.getLogger(TapCircuitExtender.class.getName()); + + private final CircuitExtender extender; + private final TorTapKeyAgreement kex; + private final Router router; + + public TapCircuitExtender(CircuitExtender extender, Router router) { + this.extender = extender; + this.router = router; + this.kex = new TorTapKeyAgreement(router.getOnionKey()); + } + + public CircuitNode extendTo() { + logger.fine("Extending to "+ router.getNickname() + " with TAP"); + final RelayCell cell = createRelayExtendCell(); + extender.sendRelayCell(cell); + final RelayCell response = extender.receiveRelayResponse(RelayCell.RELAY_EXTENDED, router); + if(response == null) { + return null; + } + return processExtendResponse(response); + } + + private CircuitNode processExtendResponse(RelayCell response) { + final byte[] handshakeResponse = new byte[TorTapKeyAgreement.DH_LEN + TorMessageDigest.TOR_DIGEST_SIZE]; + response.getByteArray(handshakeResponse); + + final byte[] keyMaterial = new byte[CircuitNodeCryptoState.KEY_MATERIAL_SIZE]; + final byte[] verifyDigest = new byte[TorMessageDigest.TOR_DIGEST_SIZE]; + if(!kex.deriveKeysFromHandshakeResponse(handshakeResponse, keyMaterial, verifyDigest)) { + return null; + } + return extender.createNewNode(router, keyMaterial, verifyDigest); + } + + private RelayCell createRelayExtendCell() { + final RelayCell cell = extender.createRelayCell(RelayCell.RELAY_EXTEND); + cell.putByteArray(router.getAddress().getAddressDataBytes()); + cell.putShort(router.getOnionPort()); + cell.putByteArray(kex.createOnionSkin()); + cell.putByteArray(router.getIdentityHash().getRawBytes()); + return cell; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/TorInitializationTracker.java b/orchid/src/com/subgraph/orchid/circuits/TorInitializationTracker.java new file mode 100644 index 00000000..4ad70dac --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/TorInitializationTracker.java @@ -0,0 +1,103 @@ +package com.subgraph.orchid.circuits; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.TorInitializationListener; + +public class TorInitializationTracker { + private final static Logger logger = Logger.getLogger(TorInitializationTracker.class.getName()); + private final static Map messageMap = new HashMap(); + + static { + messageMap.put(Tor.BOOTSTRAP_STATUS_STARTING, "Starting"); + messageMap.put(Tor.BOOTSTRAP_STATUS_CONN_DIR, "Connecting to directory server"); + messageMap.put(Tor.BOOTSTRAP_STATUS_HANDSHAKE_DIR, "Finishing handshake with directory server"); + messageMap.put(Tor.BOOTSTRAP_STATUS_ONEHOP_CREATE, "Establishing an encrypted directory connection"); + messageMap.put(Tor.BOOTSTRAP_STATUS_REQUESTING_STATUS, "Asking for networkstatus consensus"); + messageMap.put(Tor.BOOTSTRAP_STATUS_LOADING_STATUS, "Loading networkstatus consensus"); + messageMap.put(Tor.BOOTSTRAP_STATUS_REQUESTING_KEYS, 
"Asking for authority key certs"); + messageMap.put(Tor.BOOTSTRAP_STATUS_LOADING_KEYS, "Loading authority key certs"); + messageMap.put(Tor.BOOTSTRAP_STATUS_REQUESTING_DESCRIPTORS, "Asking for relay descriptors"); + messageMap.put(Tor.BOOTSTRAP_STATUS_LOADING_DESCRIPTORS, "Loading relay descriptors"); + messageMap.put(Tor.BOOTSTRAP_STATUS_CONN_OR, "Connecting to the Tor network"); + messageMap.put(Tor.BOOTSTRAP_STATUS_HANDSHAKE_OR, "Finished Handshake with first hop"); + messageMap.put(Tor.BOOTSTRAP_STATUS_CIRCUIT_CREATE, "Establishing a Tor circuit"); + messageMap.put(Tor.BOOTSTRAP_STATUS_DONE, "Done"); + } + + private final List listeners = new ArrayList(); + + private final Object stateLock = new Object(); + private int bootstrapState = Tor.BOOTSTRAP_STATUS_STARTING; + + + public void addListener(TorInitializationListener listener) { + synchronized(listeners) { + if(!listeners.contains(listener)) { + listeners.add(listener); + } + } + } + + public void removeListener(TorInitializationListener listener) { + synchronized(listeners) { + listeners.remove(listener); + } + } + + public int getBootstrapState() { + return bootstrapState; + } + + public void start() { + synchronized (stateLock) { + bootstrapState = Tor.BOOTSTRAP_STATUS_STARTING; + notifyListeners(Tor.BOOTSTRAP_STATUS_STARTING); + } + } + + public void notifyEvent(int eventCode) { + synchronized(stateLock) { + if(eventCode <= bootstrapState || eventCode > 100) { + return; + } + bootstrapState = eventCode; + notifyListeners(eventCode); + } + } + + private void notifyListeners(int code) { + final String message = getMessageForCode(code); + for(TorInitializationListener listener: getListeners()) { + try { + listener.initializationProgress(message, code); + if(code >= 100) { + listener.initializationCompleted(); + } + } catch(Exception e) { + logger.log(Level.SEVERE, "Exception occurred in TorInitializationListener callback: "+ e.getMessage(), e); + } + } + } + + private String getMessageForCode(int code) { + if(messageMap.containsKey(code)) { + return messageMap.get(code); + } else { + return "Unknown state"; + } + } + + private List getListeners() { + synchronized (listeners) { + return new ArrayList(listeners); + } + } + +} diff --git a/orchid/src/com/subgraph/orchid/circuits/TorInputStream.java b/orchid/src/com/subgraph/orchid/circuits/TorInputStream.java new file mode 100644 index 00000000..a54d7739 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/TorInputStream.java @@ -0,0 +1,228 @@ +package com.subgraph.orchid.circuits; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.LinkedList; +import java.util.Queue; + +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.circuits.cells.RelayCellImpl; +import com.subgraph.orchid.misc.GuardedBy; +import com.subgraph.orchid.misc.ThreadSafe; + +@ThreadSafe +public class TorInputStream extends InputStream { + + private final static RelayCell CLOSE_SENTINEL = new RelayCellImpl(null, 0, 0, 0); + private final static ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0); + + private final Stream stream; + + private final Object lock = new Object(); + + /** Queue of RelayCells that have been received on this stream */ + @GuardedBy("lock") private final Queue incomingCells; + + /** Number of unread data bytes in current buffer and in RELAY_DATA cells on queue */ + @GuardedBy("lock") private int availableBytes; + + /** Total number of data bytes received in RELAY_DATA cells on this 
stream */ + @GuardedBy("lock") private long bytesReceived; + + /** Bytes of data from the RELAY_DATA cell currently being consumed */ + @GuardedBy("lock") private ByteBuffer currentBuffer; + + /** Set when a RELAY_END cell is received */ + @GuardedBy("lock") private boolean isEOF; + + /** Set when close() is called on this stream */ + @GuardedBy("lock") private boolean isClosed; + + TorInputStream(Stream stream) { + this.stream = stream; + this.incomingCells = new LinkedList(); + this.currentBuffer = EMPTY_BUFFER; + } + + long getBytesReceived() { + synchronized (lock) { + return bytesReceived; + } + } + + @Override + public int read() throws IOException { + synchronized (lock) { + if(isClosed) { + throw new IOException("Stream closed"); + } + refillBufferIfNeeded(); + if(isEOF) { + return -1; + } + availableBytes -= 1; + return currentBuffer.get() & 0xFF; + } + } + + + public int read(byte[] b) throws IOException { + return read(b, 0, b.length); + } + + public synchronized int read(byte[] b, int off, int len) throws IOException { + synchronized (lock) { + if(isClosed) { + throw new IOException("Stream closed"); + } + + checkReadArguments(b, off, len); + + if(len == 0) { + return 0; + } + + refillBufferIfNeeded(); + if(isEOF) { + return -1; + } + + int bytesRead = 0; + int bytesRemaining = len; + + while(bytesRemaining > 0 && !isEOF) { + refillBufferIfNeeded(); + bytesRead += readFromCurrentBuffer(b, off + bytesRead, len - bytesRead); + bytesRemaining = len - bytesRead; + if(availableBytes == 0) { + return bytesRead; + } + } + return bytesRead; + } + } + + @GuardedBy("lock") + private int readFromCurrentBuffer(byte[] b, int off, int len) { + final int readLength = (currentBuffer.remaining() >= len) ? (len) : (currentBuffer.remaining()); + currentBuffer.get(b, off, readLength); + availableBytes -= readLength; + return readLength; + } + + private void checkReadArguments(byte[] b, int off, int len) { + if(b == null) { + throw new NullPointerException(); + } + if( (off < 0) || (off >= b.length) || (len < 0) || + ((off + len) > b.length) || ((off + len) < 0)) { + throw new IndexOutOfBoundsException(); + } + } + + public int available() { + synchronized(lock) { + return availableBytes; + } + } + + public void close() { + synchronized (lock) { + if(isClosed) { + return; + } + isClosed = true; + + incomingCells.add(CLOSE_SENTINEL); + lock.notifyAll(); + } + stream.close(); + } + + void addEndCell(RelayCell cell) { + synchronized (lock) { + if(isClosed) { + return; + } + incomingCells.add(cell); + lock.notifyAll(); + } + } + + void addInputCell(RelayCell cell) { + synchronized (lock) { + if(isClosed) { + return; + } + incomingCells.add(cell); + bytesReceived += cell.cellBytesRemaining(); + availableBytes += cell.cellBytesRemaining(); + lock.notifyAll(); + } + } + + @GuardedBy("lock") + // When this method (or fillBuffer()) returns either isEOF is set or currentBuffer has at least one byte to read + private void refillBufferIfNeeded() throws IOException { + if(!isEOF) { + if(currentBuffer.hasRemaining()) { + return; + } + fillBuffer(); + } + } + + @GuardedBy("lock") + private void fillBuffer() throws IOException { + while(true) { + processIncomingCell(getNextCell()); + if(isEOF || currentBuffer.hasRemaining()) { + return; + } + } + } + + @GuardedBy("lock") + private void processIncomingCell(RelayCell nextCell) throws IOException { + if(isClosed || nextCell == CLOSE_SENTINEL) { + throw new IOException("Input stream closed"); + } + + switch(nextCell.getRelayCommand()) { + case RelayCell.RELAY_DATA: + 
currentBuffer = nextCell.getPayloadBuffer(); + break; + case RelayCell.RELAY_END: + currentBuffer = EMPTY_BUFFER; + isEOF = true; + break; + default: + throw new IOException("Unexpected RelayCell command type in TorInputStream queue: "+ nextCell.getRelayCommand()); + } + } + + @GuardedBy("lock") + private RelayCell getNextCell() throws IOException { + try { + while(incomingCells.isEmpty()) { + lock.wait(); + } + return incomingCells.remove(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IOException("Read interrupted"); + } + } + + int unflushedCellCount() { + synchronized (lock) { + return incomingCells.size(); + } + } + + public String toString() { + return "TorInputStream stream="+ stream.getStreamId() +" node="+ stream.getTargetNode(); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/TorOutputStream.java b/orchid/src/com/subgraph/orchid/circuits/TorOutputStream.java new file mode 100644 index 00000000..83a54a4d --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/TorOutputStream.java @@ -0,0 +1,85 @@ +package com.subgraph.orchid.circuits; + +import java.io.IOException; +import java.io.OutputStream; + +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.circuits.cells.RelayCellImpl; + +public class TorOutputStream extends OutputStream { + + private final StreamImpl stream; + private RelayCell currentOutputCell; + private volatile boolean isClosed; + private long bytesSent; + + TorOutputStream(StreamImpl stream) { + this.stream = stream; + this.bytesSent = 0; + } + + private void flushCurrentOutputCell() { + if(currentOutputCell != null && currentOutputCell.cellBytesConsumed() > RelayCell.HEADER_SIZE) { + stream.waitForSendWindowAndDecrement(); + stream.getCircuit().sendRelayCell(currentOutputCell); + bytesSent += (currentOutputCell.cellBytesConsumed() - RelayCell.HEADER_SIZE); + } + + currentOutputCell = new RelayCellImpl(stream.getTargetNode(), stream.getCircuit().getCircuitId(), + stream.getStreamId(), RelayCell.RELAY_DATA); + } + + long getBytesSent() { + return bytesSent; + } + + @Override + public synchronized void write(int b) throws IOException { + checkOpen(); + if(currentOutputCell == null || currentOutputCell.cellBytesRemaining() == 0) + flushCurrentOutputCell(); + currentOutputCell.putByte(b); + } + + public synchronized void write(byte[] data, int offset, int length) throws IOException { + checkOpen(); + if(currentOutputCell == null || currentOutputCell.cellBytesRemaining() == 0) + flushCurrentOutputCell(); + + while(length > 0) { + if(length < currentOutputCell.cellBytesRemaining()) { + currentOutputCell.putByteArray(data, offset, length); + return; + } + final int writeCount = currentOutputCell.cellBytesRemaining(); + currentOutputCell.putByteArray(data, offset, writeCount); + flushCurrentOutputCell(); + offset += writeCount; + length -= writeCount; + } + } + + private void checkOpen() throws IOException { + if(isClosed) + throw new IOException("Output stream is closed"); + } + + public synchronized void flush() { + if(isClosed) + return; + flushCurrentOutputCell(); + } + + public synchronized void close() { + if(isClosed) + return; + flush(); + isClosed = true; + currentOutputCell = null; + stream.close(); + } + + public String toString() { + return "TorOutputStream stream="+ stream.getStreamId() +" node="+ stream.getTargetNode(); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/cells/CellImpl.java b/orchid/src/com/subgraph/orchid/circuits/cells/CellImpl.java new file mode 100644 index 
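TorOutputStream above fills one RELAY_DATA cell at a time and flushes it as soon as it is full (or on flush()/close()). As a rough illustration of the resulting framing, the sketch below splits a write into cell-sized payloads; the 512-byte cell size and the 14-byte combined header (3-byte cell header plus the 11-byte relay header described in RelayCellImpl below) are assumed constants, not taken from this excerpt.

    import java.util.ArrayList;
    import java.util.List;

    final class RelayDataChunker {
        static final int CELL_LEN = 512;          // assumed Tor cell size
        static final int RELAY_HEADER_SIZE = 14;  // assumed RelayCell.HEADER_SIZE
        static final int MAX_DATA = CELL_LEN - RELAY_HEADER_SIZE;

        /** Splits a buffer into the payloads of the RELAY_DATA cells that would carry it. */
        static List<byte[]> chunk(byte[] data) {
            List<byte[]> cells = new ArrayList<byte[]>();
            int offset = 0;
            while (offset < data.length) {
                int n = Math.min(MAX_DATA, data.length - offset);
                byte[] payload = new byte[n];
                System.arraycopy(data, offset, payload, 0, n);
                cells.add(payload);
                offset += n;
            }
            return cells;
        }
    }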
00000000..7c7496a8 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/cells/CellImpl.java @@ -0,0 +1,215 @@ +package com.subgraph.orchid.circuits.cells; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; + +import com.subgraph.orchid.Cell; + +public class CellImpl implements Cell { + + public static CellImpl createCell(int circuitId, int command) { + return new CellImpl(circuitId, command); + } + + public static CellImpl createVarCell(int circuitId, int command, int payloadLength) { + return new CellImpl(circuitId, command, payloadLength); + } + + public static CellImpl readFromInputStream(InputStream input) throws IOException { + final ByteBuffer header = readHeaderFromInputStream(input); + final int circuitId = header.getShort() & 0xFFFF; + final int command = header.get() & 0xFF; + + if(command == VERSIONS || command > 127) { + return readVarCell(circuitId, command, input); + } + + final CellImpl cell = new CellImpl(circuitId, command); + readAll(input, cell.getCellBytes(), CELL_HEADER_LEN, CELL_PAYLOAD_LEN); + + return cell; + } + + private static ByteBuffer readHeaderFromInputStream(InputStream input) throws IOException { + final byte[] cellHeader = new byte[CELL_HEADER_LEN]; + readAll(input, cellHeader); + return ByteBuffer.wrap(cellHeader); + } + + private static CellImpl readVarCell(int circuitId, int command, InputStream input) throws IOException { + final byte[] lengthField = new byte[2]; + readAll(input, lengthField); + final int length = ((lengthField[0] & 0xFF) << 8) | (lengthField[1] & 0xFF); + CellImpl cell = new CellImpl(circuitId, command, length); + readAll(input, cell.getCellBytes(), CELL_VAR_HEADER_LEN, length); + return cell; + } + + private static void readAll(InputStream input, byte[] buffer) throws IOException { + readAll(input, buffer, 0, buffer.length); + } + + private static void readAll(InputStream input, byte[] buffer, int offset, int length) throws IOException { + int bytesRead = 0; + while(bytesRead < length) { + final int n = input.read(buffer, offset + bytesRead, length - bytesRead); + if(n == -1) + throw new EOFException(); + bytesRead += n; + } + } + + private final int circuitId; + private final int command; + protected final ByteBuffer cellBuffer; + + /* Variable length cell constructor (ie: VERSIONS cells only) */ + private CellImpl(int circuitId, int command, int payloadLength) { + this.circuitId = circuitId; + this.command = command; + this.cellBuffer = ByteBuffer.wrap(new byte[CELL_VAR_HEADER_LEN + payloadLength]); + cellBuffer.putShort((short)circuitId); + cellBuffer.put((byte)command); + cellBuffer.putShort((short) payloadLength); + cellBuffer.mark(); + } + + /* Fixed length cell constructor */ + protected CellImpl(int circuitId, int command) { + this.circuitId = circuitId; + this.command = command; + this.cellBuffer = ByteBuffer.wrap(new byte[CELL_LEN]); + cellBuffer.putShort((short) circuitId); + cellBuffer.put((byte) command); + cellBuffer.mark(); + } + + protected CellImpl(byte[] rawCell) { + this.cellBuffer = ByteBuffer.wrap(rawCell); + this.circuitId = cellBuffer.getShort() & 0xFFFF; + this.command = cellBuffer.get() & 0xFF; + cellBuffer.mark(); + } + + public int getCircuitId() { + return circuitId; + } + + public int getCommand() { + return command; + } + + public void resetToPayload() { + cellBuffer.reset(); + } + + public int getByte() { + return cellBuffer.get() & 0xFF; + } + + public int getByteAt(int index) { + return cellBuffer.get(index) & 0xFF; + } + + 
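The readFromInputStream() logic above distinguishes fixed-length cells from variable-length ones. A compact sketch of that framing, assuming Tor's usual values for the constants (509-byte fixed payload, VERSIONS command 7) since the Cell interface itself is not part of this excerpt:

    import java.io.DataInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    final class CellFramingSketch {
        static final int CELL_PAYLOAD_LEN = 509; // assumed Cell.CELL_PAYLOAD_LEN
        static final int VERSIONS = 7;           // assumed Cell.VERSIONS

        /** Reads one cell and returns its payload bytes. */
        static byte[] readCellPayload(InputStream in) throws IOException {
            DataInputStream data = new DataInputStream(in);
            int circuitId = data.readUnsignedShort();   // 2-byte circuit id (unused here)
            int command = data.readUnsignedByte();      // 1-byte command
            int payloadLength = (command == VERSIONS || command > 127)
                    ? data.readUnsignedShort()          // variable-length cell: 2-byte length
                    : CELL_PAYLOAD_LEN;                 // fixed-length cell
            byte[] payload = new byte[payloadLength];
            data.readFully(payload);
            return payload;
        }
    }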
public int getShort() { + return cellBuffer.getShort() & 0xFFFF; + } + + public int getInt() { + return cellBuffer.getInt(); + } + + public int getShortAt(int index) { + return cellBuffer.getShort(index) & 0xFFFF; + } + + public void getByteArray(byte[] buffer) { + cellBuffer.get(buffer); + } + + public int cellBytesConsumed() { + return cellBuffer.position(); + } + + public int cellBytesRemaining() { + return cellBuffer.remaining(); + } + + public void putByte(int value) { + cellBuffer.put((byte) value); + } + + public void putByteAt(int index, int value) { + cellBuffer.put(index, (byte) value); + } + + public void putShort(int value) { + cellBuffer.putShort((short) value); + } + + public void putShortAt(int index, int value) { + cellBuffer.putShort(index, (short) value); + } + + public void putInt(int value) { + cellBuffer.putInt(value); + } + + public void putString(String string) { + final byte[] bytes = new byte[string.length() + 1]; + for(int i = 0; i < string.length(); i++) + bytes[i] = (byte) string.charAt(i); + putByteArray(bytes); + } + + public void putByteArray(byte[] data) { + cellBuffer.put(data); + } + + public void putByteArray(byte[] data, int offset, int length) { + cellBuffer.put(data, offset, length); + } + + public byte[] getCellBytes() { + return cellBuffer.array(); + } + + public String toString() { + return "Cell: circuit_id="+ circuitId +" command="+ command +" payload_len="+ cellBuffer.position(); + } + + public static String errorToDescription(int errorCode) { + switch(errorCode) { + case ERROR_NONE: + return "No error reason given"; + case ERROR_PROTOCOL: + return "Tor protocol violation"; + case ERROR_INTERNAL: + return "Internal error"; + case ERROR_REQUESTED: + return "Response to a TRUNCATE command sent from client"; + case ERROR_HIBERNATING: + return "Not currently operating; trying to save bandwidth."; + case ERROR_RESOURCELIMIT: + return "Out of memory, sockets, or circuit IDs."; + case ERROR_CONNECTFAILED: + return "Unable to reach server."; + case ERROR_OR_IDENTITY: + return "Connected to server, but its OR identity was not as expected."; + case ERROR_OR_CONN_CLOSED: + return "The OR connection that was carrying this circuit died."; + case ERROR_FINISHED: + return "The circuit has expired for being dirty or old."; + case ERROR_TIMEOUT: + return "Circuit construction took too long."; + case ERROR_DESTROYED: + return "The circuit was destroyed without client TRUNCATE"; + case ERROR_NOSUCHSERVICE: + return "Request for unknown hidden service"; + default: + return "Error code "+ errorCode; + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/cells/RelayCellImpl.java b/orchid/src/com/subgraph/orchid/circuits/cells/RelayCellImpl.java new file mode 100644 index 00000000..ccb09068 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/cells/RelayCellImpl.java @@ -0,0 +1,180 @@ +package com.subgraph.orchid.circuits.cells; + +import java.nio.ByteBuffer; + +import com.subgraph.orchid.Cell; +import com.subgraph.orchid.CircuitNode; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.TorException; + +public class RelayCellImpl extends CellImpl implements RelayCell { + + public static RelayCell createFromCell(CircuitNode node, Cell cell) { + if(cell.getCommand() != Cell.RELAY) + throw new TorException("Attempted to create RelayCell from Cell type: "+ cell.getCommand()); + return new RelayCellImpl(node, cell.getCellBytes()); + } + + private final int streamId; + private final int relayCommand; + private final CircuitNode circuitNode; + 
private final boolean isOutgoing; + + /* + * The payload of each unencrypted RELAY cell consists of: + * Relay command [1 byte] + * 'Recognized' [2 bytes] + * StreamID [2 bytes] + * Digest [4 bytes] + * Length [2 bytes] + * Data [CELL_LEN-14 bytes] + */ + + public RelayCellImpl(CircuitNode node, int circuit, int stream, int relayCommand) { + this(node, circuit, stream, relayCommand, false); + } + + public RelayCellImpl(CircuitNode node, int circuit, int stream, int relayCommand, boolean isRelayEarly) { + super(circuit, (isRelayEarly) ? (Cell.RELAY_EARLY) : (Cell.RELAY)); + this.circuitNode = node; + this.relayCommand = relayCommand; + this.streamId = stream; + this.isOutgoing = true; + putByte(relayCommand); // Command + putShort(0); // 'Recognized' + putShort(stream); // Stream + putInt(0); // Digest + putShort(0); // Length + } + + private RelayCellImpl(CircuitNode node, byte[] rawCell) { + super(rawCell); + this.circuitNode = node; + this.relayCommand = getByte(); + getShort(); + this.streamId = getShort(); + this.isOutgoing = false; + getInt(); + int payloadLength = getShort(); + cellBuffer.mark(); // End of header + if(RelayCell.HEADER_SIZE + payloadLength > rawCell.length) + throw new TorException("Header length field exceeds total size of cell"); + cellBuffer.limit(RelayCell.HEADER_SIZE + payloadLength); + } + + public int getStreamId() { + return streamId; + } + + public int getRelayCommand() { + return relayCommand; + } + + public void setLength() { + putShortAt(LENGTH_OFFSET, (short) (cellBytesConsumed() - HEADER_SIZE)); + } + + public void setDigest(byte[] digest) { + for(int i = 0; i < 4; i++) + putByteAt(DIGEST_OFFSET + i, digest[i]); + } + + public ByteBuffer getPayloadBuffer() { + final ByteBuffer dup = cellBuffer.duplicate(); + dup.reset(); + return dup.slice(); + } + + public CircuitNode getCircuitNode() { + return circuitNode; + } + + public String toString() { + if(isOutgoing) + return "["+ commandToDescription(relayCommand) +" stream="+ streamId +" payload_len="+ (cellBytesConsumed() - HEADER_SIZE) +" dest="+ circuitNode +"]"; + else + return "["+ commandToString() + " stream="+ streamId + " payload_len="+ cellBuffer.remaining() +" source="+ circuitNode + "]"; + } + + public String commandToString() { + if(relayCommand == RELAY_TRUNCATED) { + final int code = getByteAt(HEADER_SIZE); + return commandToDescription(relayCommand) + " ("+ CellImpl.errorToDescription(code) +")"; + } else if(relayCommand == RELAY_END) { + final int code = getByteAt(HEADER_SIZE); + return commandToDescription(relayCommand) +" ("+ reasonToDescription(code) +")"; + } + else + return commandToDescription(relayCommand); + } + + public static String reasonToDescription(int reasonCode) { + switch(reasonCode) { + case REASON_MISC: + return "Unlisted reason"; + case REASON_RESOLVEFAILED: + return "Couldn't look up hostname"; + case REASON_CONNECTREFUSED: + return "Remote host refused connection"; + case REASON_EXITPOLICY: + return "OR refuses to connect to host or port"; + case REASON_DESTROY: + return "Circuit is being destroyed"; + case REASON_DONE: + return "Anonymized TCP connection was closed"; + case REASON_TIMEOUT: + return "Connection timed out, or OR timed out while connecting"; + case REASON_HIBERNATING: + return "OR is temporarily hibernating"; + case REASON_INTERNAL: + return "Internal error at the OR"; + case REASON_RESOURCELIMIT: + return "OR has no resources to fulfill request"; + case REASON_CONNRESET: + return "Connection was unexpectedly reset"; + case REASON_TORPROTOCOL: + return 
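The layout comment above (command 1 byte, 'Recognized' 2, StreamID 2, Digest 4, Length 2) is exactly what the outgoing RelayCellImpl constructor writes before any data bytes. A standalone sketch of that 11-byte relay header:

    import java.nio.ByteBuffer;

    final class RelayHeaderSketch {
        static ByteBuffer encodeHeader(int relayCommand, int streamId) {
            ByteBuffer b = ByteBuffer.allocate(11);
            b.put((byte) relayCommand);   // Relay command [1 byte]
            b.putShort((short) 0);        // 'Recognized'  [2 bytes], zero before encryption
            b.putShort((short) streamId); // StreamID      [2 bytes]
            b.putInt(0);                  // Digest        [4 bytes], filled in by setDigest()
            b.putShort((short) 0);        // Length        [2 bytes], filled in by setLength()
            b.flip();
            return b;
        }
    }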
"Tor protocol violation"; + case REASON_NOTDIRECTORY: + return "Client sent RELAY_BEGIN_DIR to a non-directory server."; + default: + return "Reason code "+ reasonCode; + } + } + + public static String commandToDescription(int command) { + switch(command) { + case RELAY_BEGIN: + return "RELAY_BEGIN"; + case RELAY_DATA: + return "RELAY_DATA"; + case RELAY_END: + return "RELAY_END"; + case RELAY_CONNECTED: + return "RELAY_CONNECTED"; + case RELAY_SENDME: + return "RELAY_SENDME"; + case RELAY_EXTEND: + return "RELAY_EXTEND"; + case RELAY_EXTENDED: + return "RELAY_EXTENDED"; + case RELAY_TRUNCATE: + return "RELAY_TRUNCATE"; + case RELAY_TRUNCATED: + return "RELAY_TRUNCATED"; + case RELAY_DROP: + return "RELAY_DROP"; + case RELAY_RESOLVE: + return "RELAY_RESOLVE"; + case RELAY_RESOLVED: + return "RELAY_RESOLVED"; + case RELAY_BEGIN_DIR: + return "RELAY_BEGIN_DIR"; + case RELAY_EXTEND2: + return "RELAY_EXTEND2"; + case RELAY_EXTENDED2: + return "RELAY_EXTENDED2"; + default: + return "Relay command = "+ command; + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/guards/BridgeRouterImpl.java b/orchid/src/com/subgraph/orchid/circuits/guards/BridgeRouterImpl.java new file mode 100644 index 00000000..f487ef2b --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/guards/BridgeRouterImpl.java @@ -0,0 +1,226 @@ +package com.subgraph.orchid.circuits.guards; + +import java.util.Collections; +import java.util.Set; + +import com.subgraph.orchid.BridgeRouter; +import com.subgraph.orchid.Descriptor; +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.geoip.CountryCodeService; + +public class BridgeRouterImpl implements BridgeRouter { + private final IPv4Address address; + private final int port; + + private HexDigest identity; + private Descriptor descriptor; + + private volatile String cachedCountryCode; + + BridgeRouterImpl(IPv4Address address, int port) { + this.address = address; + this.port = port; + } + + public IPv4Address getAddress() { + return address; + } + + public HexDigest getIdentity() { + return identity; + } + + public void setIdentity(HexDigest identity) { + this.identity = identity; + } + + public void setDescriptor(RouterDescriptor descriptor) { + this.descriptor = descriptor; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((address == null) ? 
0 : address.hashCode()); + result = prime * result + port; + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + BridgeRouterImpl other = (BridgeRouterImpl) obj; + if (address == null) { + if (other.address != null) { + return false; + } + } else if (!address.equals(other.address)) { + return false; + } + if (port != other.port) { + return false; + } + return true; + } + + public String getNickname() { + return toString(); + } + + public String getCountryCode() { + String cc = cachedCountryCode; + if(cc == null) { + cc = CountryCodeService.getInstance().getCountryCodeForAddress(getAddress()); + cachedCountryCode = cc; + } + return cc; + } + + public int getOnionPort() { + return port; + } + + public int getDirectoryPort() { + return 0; + } + + public TorPublicKey getIdentityKey() { + return null; + } + + public HexDigest getIdentityHash() { + return identity; + } + + public boolean isDescriptorDownloadable() { + return false; + } + + public String getVersion() { + return ""; + } + + public Descriptor getCurrentDescriptor() { + return descriptor; + } + + public HexDigest getDescriptorDigest() { + return null; + } + + public HexDigest getMicrodescriptorDigest() { + return null; + } + + public TorPublicKey getOnionKey() { + if(descriptor != null) { + return descriptor.getOnionKey(); + } else { + return null; + } + } + + public byte[] getNTorOnionKey() { + if(descriptor != null) { + return descriptor.getNTorOnionKey(); + } else { + return null; + } + } + + public boolean hasBandwidth() { + return false; + } + + public int getEstimatedBandwidth() { + return 0; + } + + public int getMeasuredBandwidth() { + return 0; + } + + public Set getFamilyMembers() { + if(descriptor != null) { + return descriptor.getFamilyMembers(); + } else { + return Collections.emptySet(); + } + } + + public int getAverageBandwidth() { + return 0; + } + + public int getBurstBandwidth() { + return 0; + } + + public int getObservedBandwidth() { + return 0; + } + + public boolean isHibernating() { + if(descriptor instanceof RouterDescriptor) { + return ((RouterDescriptor)descriptor).isHibernating(); + } else { + return false; + } + } + + public boolean isRunning() { + return true; + } + + public boolean isValid() { + return true; + } + + public boolean isBadExit() { + return false; + } + + public boolean isPossibleGuard() { + return true; + } + + public boolean isExit() { + return false; + } + + public boolean isFast() { + return true; + } + + public boolean isStable() { + return true; + } + + public boolean isHSDirectory() { + return false; + } + + public boolean exitPolicyAccepts(IPv4Address address, int port) { + return false; + } + + public boolean exitPolicyAccepts(int port) { + return false; + } + + public String toString() { + return "[Bridge "+ address + ":"+ port + "]"; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/guards/Bridges.java b/orchid/src/com/subgraph/orchid/circuits/guards/Bridges.java new file mode 100644 index 00000000..d11cbe99 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/guards/Bridges.java @@ -0,0 +1,163 @@ +package com.subgraph.orchid.circuits.guards; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Logger; + +import com.subgraph.orchid.BridgeRouter; 
+import com.subgraph.orchid.DirectoryDownloader; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.config.TorConfigBridgeLine; +import com.subgraph.orchid.crypto.TorRandom; +import com.subgraph.orchid.directory.downloader.DirectoryRequestFailedException; + +public class Bridges { + private static final Logger logger = Logger.getLogger(Bridges.class.getName()); + + private class DescriptorDownloader implements Runnable { + + private final BridgeRouterImpl target; + + DescriptorDownloader(BridgeRouterImpl target) { + this.target = target; + } + + public void run() { + try { + downloadDescriptor(); + } finally { + decrementOutstandingTasks(); + } + } + + private void downloadDescriptor() { + logger.fine("Downloading descriptor for bridge: "+ target); + try { + final RouterDescriptor descriptor = directoryDownloader.downloadBridgeDescriptor(target); + if(descriptor != null) { + logger.fine("Descriptor received for bridge "+ target +". Adding to list of usable bridges"); + target.setDescriptor(descriptor); + synchronized(lock) { + bridgeRouters.add(target); + lock.notifyAll(); + } + } + } catch (DirectoryRequestFailedException e) { + logger.warning("Failed to download descriptor for bridge: "+ e.getMessage()); + } + } + + private void decrementOutstandingTasks() { + if(outstandingDownloadTasks.decrementAndGet() == 0) { + logger.fine("Initial descriptor fetch complete"); + synchronized(lock) { + bridgesInitialized = true; + lock.notifyAll(); + } + } + } + } + + private final TorConfig config; + private final DirectoryDownloader directoryDownloader; + + private final Set bridgeRouters; + private final TorRandom random; + private final Object lock; + + /** Initialization started */ + private boolean bridgesInitializing; + /** Initialization completed */ + private boolean bridgesInitialized; + + private AtomicInteger outstandingDownloadTasks; + + Bridges(TorConfig config, DirectoryDownloader directoryDownloader) { + this.config = config; + this.directoryDownloader = directoryDownloader; + this.bridgeRouters = new HashSet(); + this.random = new TorRandom(); + this.lock = new Object(); + this.outstandingDownloadTasks = new AtomicInteger(); + } + + BridgeRouter chooseRandomBridge(Set excluded) throws InterruptedException { + + synchronized(lock) { + if(!bridgesInitialized && !bridgesInitializing) { + initializeBridges(); + } + while(!bridgesInitialized && !hasCandidates(excluded)) { + lock.wait(); + } + final List candidates = getCandidates(excluded); + if(candidates.isEmpty()) { + logger.warning("Bridges enabled but no usable bridges configured"); + return null; + } + return candidates.get(random.nextInt(candidates.size())); + } + } + + private boolean hasCandidates(Set excluded) { + return !(getCandidates(excluded).isEmpty()); + } + + private List getCandidates(Set excluded) { + if(bridgeRouters.isEmpty()) { + return Collections.emptyList(); + } + final List candidates = new ArrayList(bridgeRouters.size()); + for(BridgeRouter br: bridgeRouters) { + if(!excluded.contains(br)) { + candidates.add(br); + } + } + return candidates; + } + + private void initializeBridges() { + logger.fine("Initializing bridges..."); + synchronized(lock) { + if(bridgesInitializing || bridgesInitialized) { + return; + } + if(directoryDownloader == null) { + throw new IllegalStateException("Cannot download bridge descriptors because DirectoryDownload instance not initialized"); + } + bridgesInitializing = true; + 
startAllDownloadTasks(); + } + } + + private List createDownloadTasks() { + final List tasks = new ArrayList(); + for(TorConfigBridgeLine line: config.getBridges()) { + tasks.add(new DescriptorDownloader(createBridgeFromLine(line))); + } + return tasks; + } + + private void startAllDownloadTasks() { + final List tasks = createDownloadTasks(); + outstandingDownloadTasks.set(tasks.size()); + for(Runnable r: tasks) { + final Thread thread = new Thread(r); + thread.start(); + } + } + + private BridgeRouterImpl createBridgeFromLine(TorConfigBridgeLine line) { + final BridgeRouterImpl bridge = new BridgeRouterImpl(line.getAddress(), line.getPort()); + if(line.getFingerprint() != null) { + bridge.setIdentity(line.getFingerprint()); + } + return bridge; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/guards/EntryGuards.java b/orchid/src/com/subgraph/orchid/circuits/guards/EntryGuards.java new file mode 100644 index 00000000..958470a4 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/guards/EntryGuards.java @@ -0,0 +1,305 @@ +package com.subgraph.orchid.circuits.guards; + +import java.util.ArrayList; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import com.subgraph.orchid.ConnectionCache; +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.DirectoryDownloader; +import com.subgraph.orchid.GuardEntry; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.circuits.path.CircuitNodeChooser; +import com.subgraph.orchid.circuits.path.CircuitNodeChooser.WeightRule; +import com.subgraph.orchid.circuits.path.RouterFilter; +import com.subgraph.orchid.crypto.TorRandom; + +public class EntryGuards { + private final static Logger logger = Logger.getLogger(EntryGuards.class.getName()); + + private final static int MIN_USABLE_GUARDS = 2; + private final static int NUM_ENTRY_GUARDS = 3; + + private final TorConfig config; + private final TorRandom random; + private final CircuitNodeChooser nodeChooser; + private final ConnectionCache connectionCache; + private final Directory directory; + private final Set pendingProbes; + + private final Bridges bridges; + private final Object lock; + private final Executor executor; + + public EntryGuards(TorConfig config, ConnectionCache connectionCache, DirectoryDownloader directoryDownloader, Directory directory) { + this.config = config; + this.random = new TorRandom(); + this.nodeChooser = new CircuitNodeChooser(config, directory); + this.connectionCache = connectionCache; + this.directory = directory; + this.pendingProbes = new HashSet(); + this.bridges = new Bridges(config, directoryDownloader); + this.lock = new Object(); + this.executor = Executors.newCachedThreadPool(); + } + + public boolean isUsingBridges() { + return config.getUseBridges(); + } + + public Router chooseRandomGuard(Set excluded) throws InterruptedException { + if(config.getUseBridges()) { + return bridges.chooseRandomBridge(excluded); + } + + /* + * path-spec 5. + * + * When choosing the first hop of a circuit, Tor chooses at random from among the first + * NumEntryGuards (default 3) usable guards on the list. If there are not at least 2 + * usable guards on the list, Tor adds routers until there are, or until there are no + * more usable routers to add. 
+ */ + + final List usableGuards = getMinimumUsableGuards(excluded, MIN_USABLE_GUARDS); + final int n = Math.min(usableGuards.size(), NUM_ENTRY_GUARDS); + return usableGuards.get(random.nextInt(n)); + } + + private List getMinimumUsableGuards(Set excluded, int minSize) throws InterruptedException { + synchronized(lock) { + testStatusOfAllGuards(); + while(true) { + List usableGuards = getUsableGuardRouters(excluded); + if(usableGuards.size() >= minSize) { + return usableGuards; + } else { + maybeChooseNew(usableGuards.size(), minSize, getExcludedForChooseNew(excluded, usableGuards)); + } + lock.wait(5000); + } + } + } + + void probeConnectionSucceeded(GuardEntry entry) { + synchronized (lock) { + pendingProbes.remove(entry); + if(entry.isAdded()) { + retestProbeSucceeded(entry); + } else { + initialProbeSucceeded(entry); + } + } + } + + void probeConnectionFailed(GuardEntry entry) { + synchronized (lock) { + pendingProbes.remove(entry); + if(entry.isAdded()) { + retestProbeFailed(entry); + } + lock.notifyAll(); + } + } + + /* all methods below called holding 'lock' */ + + private void retestProbeSucceeded(GuardEntry entry) { + entry.clearDownSince(); + } + + private void initialProbeSucceeded(GuardEntry entry) { + logger.fine("Probe connection to "+ entry.getRouterForEntry() + " succeeded. Adding it as a new entry guard."); + directory.addGuardEntry(entry); + retestAllUnreachable(); + } + + private void retestProbeFailed(GuardEntry entry) { + entry.markAsDown(); + } + + /* + * path-spec 5. + * + * Additionally, Tor retries unreachable guards the first time it adds a new + * guard to the list, since it is possible that the old guards were only marked + * as unreachable because the network was unreachable or down. + + */ + private void retestAllUnreachable() { + for(GuardEntry e: directory.getGuardEntries()) { + if(e.getDownSince() != null) { + launchEntryProbe(e); + } + } + } + + private void testStatusOfAllGuards() { + for(GuardEntry entry: directory.getGuardEntries()) { + if(isPermanentlyUnlisted(entry) || isExpired(entry)) { + directory.removeGuardEntry(entry); + } else if(needsUnreachableTest(entry)) { + launchEntryProbe(entry); + } + } + } + + private List getUsableGuardRouters(Set excluded) { + List usableRouters = new ArrayList(); + for(GuardEntry entry: directory.getGuardEntries()) { + addRouterIfUsableAndNotExcluded(entry, excluded, usableRouters); + } + return usableRouters; + } + + private void addRouterIfUsableAndNotExcluded(GuardEntry entry, Set excluded, List routers) { + if(entry.testCurrentlyUsable() && entry.getDownSince() == null) { + final Router r = entry.getRouterForEntry(); + if(r != null && !excluded.contains(r)) { + routers.add(r); + } + } + } + + private Set getExcludedForChooseNew(Set excluded, List usable) { + final Set set = new HashSet(); + set.addAll(excluded); + set.addAll(usable); + addPendingInitialConnections(set); + return set; + } + + private void addPendingInitialConnections(Set routerSet) { + for(GuardEntry entry: pendingProbes) { + if(!entry.isAdded()) { + Router r = entry.getRouterForEntry(); + if(r != null) { + routerSet.add(r); + } + } + } + } + + private void maybeChooseNew(int usableSize, int minSize, Set excluded) { + int sz = usableSize + countPendingInitialProbes(); + while(sz < minSize) { + Router newGuard = chooseNewGuard(excluded); + if(newGuard == null) { + logger.warning("Need to add entry guards but no suitable guard routers are available"); + return; + } + logger.fine("Testing "+ newGuard + " as a new guard since we only have "+ 
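The rule quoted from path-spec 5 above reduces to a short selection function: make sure at least MIN_USABLE_GUARDS are usable, then pick uniformly among the first NUM_ENTRY_GUARDS of them, which is what chooseRandomGuard() does. A stripped-down restatement:

    import java.util.List;
    import java.util.Random;

    final class GuardChoiceSketch {
        private static final int MIN_USABLE_GUARDS = 2;
        private static final int NUM_ENTRY_GUARDS = 3;
        private final Random random = new Random();

        /** guards is the ordered guard list with unusable and excluded entries already removed. */
        String chooseGuard(List<String> guards) {
            if (guards.size() < MIN_USABLE_GUARDS) {
                return null; // caller would probe and add new guards until enough are usable
            }
            int n = Math.min(guards.size(), NUM_ENTRY_GUARDS);
            return guards.get(random.nextInt(n));
        }
    }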
usableSize + " usable guards"); + final GuardEntry entry = directory.createGuardEntryFor(newGuard); + launchEntryProbe(entry); + sz += 1; + } + } + + private int countPendingInitialProbes() { + int count = 0; + for(GuardEntry entry: pendingProbes) { + if(!entry.isAdded()) { + count += 1; + } + } + return count; + } + + private Router chooseNewGuard(final Set excluded) { + return nodeChooser.chooseRandomNode(WeightRule.WEIGHT_FOR_GUARD, new RouterFilter() { + public boolean filter(Router router) { + return router.isValid() && router.isPossibleGuard() && router.isRunning() && !excluded.contains(router); + } + }); + } + + private void launchEntryProbe(GuardEntry entry) { + if(!entry.testCurrentlyUsable() || pendingProbes.contains(entry)) { + return; + } + pendingProbes.add(entry); + executor.execute(new GuardProbeTask(connectionCache, this, entry)); + } + + /* + * path-spec 5. + * + * If the guard is excluded because of its status in the networkstatuses for + * over 30 days, Tor removes it from the list entirely, preserving order. + */ + private boolean isPermanentlyUnlisted(GuardEntry entry) { + final Date unlistedSince = entry.getUnlistedSince(); + if(unlistedSince == null || pendingProbes.contains(entry)) { + return false; + } + final Date now = new Date(); + final long unlistedTime = now.getTime() - unlistedSince.getTime(); + return unlistedTime > THIRTY_DAYS; + } + + /* + * Expire guards after 60 days since creation time. + */ + private boolean isExpired(GuardEntry entry) { + final Date createdAt = entry.getCreatedTime(); + final Date now = new Date(); + final long createdAgo = now.getTime() - createdAt.getTime(); + return createdAgo > SIXTY_DAYS; + } + + private boolean needsUnreachableTest(GuardEntry entry) { + final Date downSince = entry.getDownSince(); + if(downSince == null || !entry.testCurrentlyUsable()) { + return false; + } + final Date now = new Date(); + final Date lastConnect = entry.getLastConnectAttempt(); + final long timeDown = now.getTime() - downSince.getTime(); + final long timeSinceLastRetest = (lastConnect == null) ? timeDown : (now.getTime() - lastConnect.getTime()); + + return timeSinceLastRetest > getRetestInterval(timeDown); + } + + private final static long ONE_HOUR = hoursToMs(1); + private final static long FOUR_HOURS = hoursToMs(4); + private final static long SIX_HOURS = hoursToMs(6); + private final static long EIGHTEEN_HOURS = hoursToMs(18); + private final static long THIRTYSIX_HOURS = hoursToMs(36); + private final static long THREE_DAYS = daysToMs(3); + private final static long SEVEN_DAYS = daysToMs(7); + private final static long THIRTY_DAYS = daysToMs(30); + private final static long SIXTY_DAYS = daysToMs(60); + + private static long hoursToMs(long n) { + return TimeUnit.MILLISECONDS.convert(n, TimeUnit.HOURS); + } + private static long daysToMs(long n) { + return TimeUnit.MILLISECONDS.convert(n, TimeUnit.DAYS); + } + /* + * path-spec 5. + * + * If Tor fails to connect to an otherwise usable guard, it retries + * periodically: every hour for six hours, every 4 hours for 3 days, every + * 18 hours for a week, and every 36 hours thereafter. 
+ */ + + private long getRetestInterval(long timeDown) { + if(timeDown < SIX_HOURS) { + return ONE_HOUR; + } else if(timeDown < THREE_DAYS) { + return FOUR_HOURS; + } else if(timeDown < SEVEN_DAYS) { + return EIGHTEEN_HOURS; + } else { + return THIRTYSIX_HOURS; + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/guards/GuardProbeTask.java b/orchid/src/com/subgraph/orchid/circuits/guards/GuardProbeTask.java new file mode 100644 index 00000000..553638e9 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/guards/GuardProbeTask.java @@ -0,0 +1,42 @@ +package com.subgraph.orchid.circuits.guards; + +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.subgraph.orchid.ConnectionCache; +import com.subgraph.orchid.ConnectionIOException; +import com.subgraph.orchid.GuardEntry; +import com.subgraph.orchid.Router; + +public class GuardProbeTask implements Runnable{ + private final static Logger logger = Logger.getLogger(GuardProbeTask.class.getName()); + private final ConnectionCache connectionCache; + private final EntryGuards entryGuards; + private final GuardEntry entry; + + public GuardProbeTask(ConnectionCache connectionCache, EntryGuards entryGuards, GuardEntry entry) { + this.connectionCache = connectionCache; + this.entryGuards = entryGuards; + this.entry = entry; + } + + public void run() { + final Router router = entry.getRouterForEntry(); + if(router == null) { + entryGuards.probeConnectionFailed(entry); + return; + } + try { + connectionCache.getConnectionTo(router, false); + entryGuards.probeConnectionSucceeded(entry); + return; + } catch (ConnectionIOException e) { + logger.fine("IO exception probing entry guard "+ router + " : "+ e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch(Exception e) { + logger.log(Level.WARNING, "Unexpected exception probing entry guard: "+ e, e); + } + entryGuards.probeConnectionFailed(entry); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/HSAuthentication.java b/orchid/src/com/subgraph/orchid/circuits/hs/HSAuthentication.java new file mode 100644 index 00000000..0eba30ef --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/HSAuthentication.java @@ -0,0 +1,120 @@ +package com.subgraph.orchid.circuits.hs; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.circuits.hs.HSDescriptorCookie.CookieType; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.crypto.TorStreamCipher; + +public class HSAuthentication { + private final static int BASIC_ID_LENGTH = 4; + private final HSDescriptorCookie cookie; + + public HSAuthentication(HSDescriptorCookie cookie) { + this.cookie = cookie; + } + + public byte[] decryptIntroductionPoints(byte[] content) throws HSAuthenticationException { + final ByteBuffer buffer = ByteBuffer.wrap(content); + final int firstByte = buffer.get() & 0xFF; + if(firstByte == 1) { + return decryptIntroductionPointsWithBasicAuth(buffer); + } else if(firstByte == 2) { + return decryptIntroductionPointsWithStealthAuth(buffer); + } else { + throw new HSAuthenticationException("Introduction points section begins with unrecognized byte ("+ firstByte +")"); + } + } + + private static class BasicAuthEntry { + final byte[] id; + final byte[] skey; + BasicAuthEntry(byte[] id, byte[] skey) { + this.id = id; + this.skey = skey; + } + } + + private BasicAuthEntry createEntry(ByteBuffer bb) 
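The retest schedule implemented by getRetestInterval() above, restated as a small standalone check: a guard that has been down for a given time is retried after the printed interval (hourly for the first six hours, then every 4 hours up to three days, every 18 hours up to a week, every 36 hours thereafter).

    import java.util.concurrent.TimeUnit;

    final class RetestIntervalSketch {
        static long retestIntervalMs(long timeDownMs) {
            long hour = TimeUnit.HOURS.toMillis(1);
            long day = TimeUnit.DAYS.toMillis(1);
            if (timeDownMs < 6 * hour) return hour;
            if (timeDownMs < 3 * day)  return 4 * hour;
            if (timeDownMs < 7 * day)  return 18 * hour;
            return 36 * hour;
        }

        public static void main(String[] args) {
            long[] samplesHours = { 1, 12, 96, 200 };
            for (long h : samplesHours) {
                long interval = retestIntervalMs(TimeUnit.HOURS.toMillis(h));
                System.out.println("down " + h + "h -> retest every "
                        + TimeUnit.MILLISECONDS.toHours(interval) + "h");
            }
        }
    }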
{ + final byte[] id = new byte[BASIC_ID_LENGTH]; + final byte[] skey = new byte[TorStreamCipher.KEY_LEN]; + bb.get(id); + bb.get(skey); + return new BasicAuthEntry(id, skey); + } + + private byte[] decryptIntroductionPointsWithBasicAuth(ByteBuffer buffer) throws HSAuthenticationException { + if(cookie == null || cookie.getType() != CookieType.COOKIE_BASIC) { + throw new TorParsingException("Introduction points encrypted with 'basic' authentication and no cookie available to decrypt"); + } + + final List entries = readBasicEntries(buffer); + final byte[] iv = readAuthIV(buffer); + final byte[] id = generateAuthId(iv); + final byte[] k = findKeyInAuthEntries(entries, id); + + return decryptRemaining(buffer, k, iv); + } + + private List readBasicEntries(ByteBuffer b) { + final int blockCount = b.get() & 0xFF; + final int entryCount = blockCount * 16; + final List entries = new ArrayList(entryCount); + for(int i = 0; i < entryCount; i++) { + entries.add( createEntry(b) ); + } + return entries; + } + + + private byte[] readAuthIV(ByteBuffer b) { + final byte[] iv = new byte[16]; + b.get(iv); + return iv; + } + + private byte[] generateAuthId(byte[] iv) { + final TorMessageDigest md = new TorMessageDigest(); + md.update(cookie.getValue()); + md.update(iv); + final byte[] digest = md.getDigestBytes(); + final byte[] id = new byte[BASIC_ID_LENGTH]; + System.arraycopy(digest, 0, id, 0, BASIC_ID_LENGTH); + return id; + } + + private byte[] findKeyInAuthEntries(List entries, byte[] id) throws HSAuthenticationException { + for(BasicAuthEntry e: entries) { + if(Arrays.equals(id, e.id)) { + return decryptAuthEntry(e); + } + } + throw new HSAuthenticationException("Could not find matching cookie id for basic authentication"); + } + + private byte[] decryptAuthEntry(BasicAuthEntry entry) throws HSAuthenticationException { + TorStreamCipher cipher = TorStreamCipher.createFromKeyBytes(cookie.getValue()); + cipher.encrypt(entry.skey); + return entry.skey; + } + + private byte[] decryptRemaining(ByteBuffer buffer, byte[] key, byte[] iv) { + TorStreamCipher streamCipher = TorStreamCipher.createFromKeyBytesWithIV(key, iv); + final byte[] remaining = new byte[buffer.remaining()]; + buffer.get(remaining); + streamCipher.encrypt(remaining); + return remaining; + } + + private byte[] decryptIntroductionPointsWithStealthAuth(ByteBuffer buffer) { + if(cookie == null || cookie.getType() != CookieType.COOKIE_STEALTH) { + throw new TorParsingException("Introduction points encrypted with 'stealth' authentication and no cookie available to descrypt"); + } + final byte[] iv = readAuthIV(buffer); + return decryptRemaining(buffer, cookie.getValue(), iv); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/HSAuthenticationException.java b/orchid/src/com/subgraph/orchid/circuits/hs/HSAuthenticationException.java new file mode 100644 index 00000000..890fb8d1 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/HSAuthenticationException.java @@ -0,0 +1,14 @@ +package com.subgraph.orchid.circuits.hs; + +public class HSAuthenticationException extends Exception { + + private static final long serialVersionUID = 1L; + + HSAuthenticationException(String message) { + super(message); + } + + HSAuthenticationException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptor.java b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptor.java new file mode 100644 index 00000000..9d4e7cc0 --- /dev/null +++ 
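The generateAuthId() method above derives a 4-byte client identifier from the descriptor cookie and the 16-byte IV and then searches the basic-auth entry list for it. A sketch of that derivation using the JDK directly, under the assumption (not visible in this excerpt) that TorMessageDigest is SHA-1 based:

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.Arrays;

    final class BasicAuthIdSketch {
        /** First four bytes of SHA-1(descriptorCookie || iv), as in generateAuthId(). */
        static byte[] computeClientId(byte[] descriptorCookie, byte[] iv)
                throws NoSuchAlgorithmException {
            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
            sha1.update(descriptorCookie);
            sha1.update(iv);
            return Arrays.copyOf(sha1.digest(), 4); // BASIC_ID_LENGTH
        }
    }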
b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptor.java @@ -0,0 +1,109 @@ +package com.subgraph.orchid.circuits.hs; + +import java.util.ArrayList; +import java.util.List; + +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.crypto.TorRandom; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.Timestamp; + +public class HSDescriptor { + private final static long MS_24_HOURS = (24 * 60 * 60 * 1000); + private final HiddenService hiddenService; + private HexDigest descriptorId; + private Timestamp publicationTime; + private HexDigest secretIdPart; + private TorPublicKey permanentKey; + private int[] protocolVersions; + private List introductionPoints; + + public HSDescriptor(HiddenService hiddenService) { + this.hiddenService = hiddenService; + introductionPoints = new ArrayList(); + } + + HiddenService getHiddenService() { + return hiddenService; + } + + void setPublicationTime(Timestamp ts) { + this.publicationTime = ts; + } + + void setSecretIdPart(HexDigest secretIdPart) { + this.secretIdPart = secretIdPart; + } + + void setDescriptorId(HexDigest descriptorId) { + this.descriptorId = descriptorId; + } + + void setPermanentKey(TorPublicKey permanentKey) { + this.permanentKey = permanentKey; + } + + void setProtocolVersions(int[] protocolVersions) { + this.protocolVersions = protocolVersions; + } + + void addIntroductionPoint(IntroductionPoint ip) { + introductionPoints.add(ip); + } + + HexDigest getDescriptorId() { + return descriptorId; + } + + int getVersion() { + return 2; + } + + TorPublicKey getPermanentKey() { + return permanentKey; + } + + HexDigest getSecretIdPart() { + return secretIdPart; + } + + Timestamp getPublicationTime() { + return publicationTime; + } + + int[] getProtocolVersions() { + return protocolVersions; + } + + boolean isExpired() { + final long now = System.currentTimeMillis(); + final long then = publicationTime.getTime(); + return (now - then) > MS_24_HOURS; + } + + List getIntroductionPoints() { + return new ArrayList(introductionPoints); + } + + List getShuffledIntroductionPoints() { + return shuffle(getIntroductionPoints()); + } + + private List shuffle(List list) { + final TorRandom r = new TorRandom(); + final int sz = list.size(); + for(int i = 0; i < sz; i++) { + swap(list, i, r.nextInt(sz)); + } + return list; + } + + private void swap(List list, int a, int b) { + if(a == b) { + return; + } + final IntroductionPoint tmp = list.get(a); + list.set(a, list.get(b)); + list.set(b, tmp); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorCookie.java b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorCookie.java new file mode 100644 index 00000000..b0a38965 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorCookie.java @@ -0,0 +1,33 @@ +package com.subgraph.orchid.circuits.hs; + +public class HSDescriptorCookie { + + public enum CookieType { COOKIE_BASIC, COOKIE_STEALTH }; + + private final CookieType type; + private final byte[] value; + + public HSDescriptorCookie(CookieType type, byte[] value) { + this.type = type; + this.value = value; + } + + public byte getAuthTypeByte() { + switch(type) { + case COOKIE_BASIC: + return 1; + case COOKIE_STEALTH: + return 2; + default: + throw new IllegalStateException(); + } + } + + public CookieType getType() { + return type; + } + + public byte[] getValue() { + return value; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorDirectory.java 
b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorDirectory.java new file mode 100644 index 00000000..bec763ad --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorDirectory.java @@ -0,0 +1,28 @@ +package com.subgraph.orchid.circuits.hs; + +import com.subgraph.orchid.Router; +import com.subgraph.orchid.data.HexDigest; + +public class HSDescriptorDirectory { + + private final HexDigest descriptorId; + private final Router directory; + + HSDescriptorDirectory(HexDigest descriptorId, Router directory) { + this.descriptorId = descriptorId; + this.directory = directory; + } + + Router getDirectory() { + return directory; + } + + HexDigest getDescriptorId() { + return descriptorId; + } + + public String toString() { + return descriptorId + " : " + directory; + } + +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorDownloader.java b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorDownloader.java new file mode 100644 index 00000000..6a9d59ee --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorDownloader.java @@ -0,0 +1,135 @@ +package com.subgraph.orchid.circuits.hs; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.concurrent.TimeoutException; +import java.util.logging.Logger; + +import com.subgraph.orchid.DirectoryCircuit; +import com.subgraph.orchid.InternalCircuit; +import com.subgraph.orchid.OpenFailedException; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.circuits.CircuitManagerImpl; +import com.subgraph.orchid.directory.DocumentFieldParserImpl; +import com.subgraph.orchid.directory.downloader.DirectoryRequestFailedException; +import com.subgraph.orchid.directory.downloader.HttpConnection; +import com.subgraph.orchid.directory.parsing.DocumentParsingResultHandler; + +public class HSDescriptorDownloader { + private final static Logger logger = Logger.getLogger(HSDescriptorDirectory.class.getName()); + + private final HiddenService hiddenService; + private final CircuitManagerImpl circuitManager; + private final List directories; + + public HSDescriptorDownloader(HiddenService hiddenService, CircuitManagerImpl circuitManager, List directories) { + this.hiddenService = hiddenService; + this.circuitManager = circuitManager; + this.directories = directories; + } + + + public HSDescriptor downloadDescriptor() { + for(HSDescriptorDirectory d: directories) { + HSDescriptor descriptor = downloadDescriptorFrom(d); + if(descriptor != null) { + return descriptor; + } + } + // All directories failed + return null; + } + + private HSDescriptor downloadDescriptorFrom(HSDescriptorDirectory dd) { + logger.fine("Downloading descriptor from "+ dd.getDirectory()); + + Stream stream = null; + try { + stream = openHSDirectoryStream(dd.getDirectory()); + HttpConnection http = new HttpConnection(stream); + http.sendGetRequest("/tor/rendezvous2/"+ dd.getDescriptorId().toBase32()); + http.readResponse(); + if(http.getStatusCode() == 200) { + return readDocument(dd, http.getMessageBody()); + } else { + logger.fine("HS descriptor download for "+ hiddenService.getOnionAddressForLogging() + " failed with status "+ http.getStatusCode()); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } catch (TimeoutException e) { + logger.fine("Timeout downloading HS descriptor from "+ dd.getDirectory()); + 
e.printStackTrace(); + return null; + } catch (IOException e) { + logger.info("IOException downloading HS descriptor from "+ dd.getDirectory() +" : "+ e); + return null; + } catch (OpenFailedException e) { + logger.info("Failed to open stream to HS directory "+ dd.getDirectory() +" : "+ e.getMessage()); + return null; + } catch (DirectoryRequestFailedException e) { + logger.info("Directory request to HS directory "+ dd.getDirectory() + " failed "+ e.getMessage()); + return null; + } finally { + if(stream != null) { + stream.close(); + stream.getCircuit().markForClose(); + } + } + + return null; + + } + + private Stream openHSDirectoryStream(Router directory) throws TimeoutException, InterruptedException, OpenFailedException { + + final InternalCircuit circuit = circuitManager.getCleanInternalCircuit(); + + try { + final DirectoryCircuit dc = circuit.cannibalizeToDirectory(directory); + return dc.openDirectoryStream(10000, true); + } catch (StreamConnectFailedException e) { + circuit.markForClose(); + throw new OpenFailedException("Failed to open directory stream"); + } catch (TorException e) { + circuit.markForClose(); + throw new OpenFailedException("Failed to extend circuit to HS directory: "+ e.getMessage()); + } + } + + private HSDescriptor readDocument(HSDescriptorDirectory dd, ByteBuffer body) { + DocumentFieldParserImpl fieldParser = new DocumentFieldParserImpl(body); + HSDescriptorParser parser = new HSDescriptorParser(hiddenService, fieldParser, hiddenService.getAuthenticationCookie()); + DescriptorParseResult result = new DescriptorParseResult(dd); + parser.parse(result); + return result.getDescriptor(); + } + + private static class DescriptorParseResult implements DocumentParsingResultHandler { + HSDescriptorDirectory dd; + HSDescriptor descriptor; + + public DescriptorParseResult(HSDescriptorDirectory dd) { + this.dd = dd; + } + + HSDescriptor getDescriptor() { + return descriptor; + } + public void documentParsed(HSDescriptor document) { + this.descriptor = document; + } + + public void documentInvalid(HSDescriptor document, String message) { + logger.info("Invalid HS descriptor document received from "+ dd.getDirectory() + " for descriptor "+ dd.getDescriptorId()); + } + + public void parsingError(String message) { + logger.info("Failed to parse HS descriptor document received from "+ dd.getDirectory() + " for descriptor "+ dd.getDescriptorId() + " : " + message); + } + } +} \ No newline at end of file diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorKeyword.java b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorKeyword.java new file mode 100644 index 00000000..c883e86c --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorKeyword.java @@ -0,0 +1,38 @@ +package com.subgraph.orchid.circuits.hs; + +public enum HSDescriptorKeyword { + RENDEZVOUS_SERVICE_DESCRIPTOR("rendezvous-service-descriptor", 1), + VERSION("version", 1), + PERMANENT_KEY("permanent-key", 0), + SECRET_ID_PART("secret-id-part", 1), + PUBLICATION_TIME("publication-time", 2), + PROTOCOL_VERSIONS("protocol-versions", 2), + INTRODUCTION_POINTS("introduction-points", 0), + SIGNATURE("signature", 0), + UNKNOWN_KEYWORD("KEYWORD NOT FOUND", 0); + + private final String keyword; + private final int argumentCount; + + HSDescriptorKeyword(String keyword, int argumentCount) { + this.keyword = keyword; + this.argumentCount = argumentCount; + } + + String getKeyword() { + return keyword; + } + + int getArgumentCount() { + return argumentCount; + } + + static HSDescriptorKeyword 
findKeyword(String keyword) { + for(HSDescriptorKeyword k: values()) { + if(k.getKeyword().equals(keyword)) { + return k; + } + } + return UNKNOWN_KEYWORD; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorParser.java b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorParser.java new file mode 100644 index 00000000..08046ed7 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/HSDescriptorParser.java @@ -0,0 +1,160 @@ +package com.subgraph.orchid.circuits.hs; + +import java.nio.ByteBuffer; +import java.util.logging.Logger; + +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.crypto.TorSignature; +import com.subgraph.orchid.directory.DocumentFieldParserImpl; +import com.subgraph.orchid.directory.parsing.BasicDocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.DocumentObject; +import com.subgraph.orchid.directory.parsing.DocumentParser; +import com.subgraph.orchid.directory.parsing.DocumentParsingHandler; +import com.subgraph.orchid.directory.parsing.DocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentParsingResultHandler; +import com.subgraph.orchid.encoders.Base64; + +public class HSDescriptorParser implements DocumentParser{ + private static final Logger logger = Logger.getLogger(HSDescriptor.class.getName()); + + private final DocumentFieldParser fieldParser; + private final HSDescriptor descriptor; + private final HSAuthentication authentication; + + private DocumentParsingResultHandler resultHandler; + + public HSDescriptorParser(HiddenService hiddenService, DocumentFieldParser fieldParser) { + this(hiddenService, fieldParser, null); + } + + public HSDescriptorParser(HiddenService hiddenService, DocumentFieldParser fieldParser, HSDescriptorCookie cookie) { + this.fieldParser = fieldParser; + this.fieldParser.setHandler(createParsingHandler()); + this.descriptor = new HSDescriptor(hiddenService); + this.authentication = new HSAuthentication(cookie); + } + + private DocumentParsingHandler createParsingHandler() { + return new DocumentParsingHandler() { + + public void parseKeywordLine() { + processKeywordLine(); + } + + public void endOfDocument() { + } + }; + } + + public boolean parse(DocumentParsingResultHandler resultHandler) { + this.resultHandler = resultHandler; + fieldParser.startSignedEntity(); + try { + fieldParser.processDocument(); + return true; + } catch(TorParsingException e) { + resultHandler.parsingError(e.getMessage()); + return false; + } + } + + + public DocumentParsingResult parse() { + final BasicDocumentParsingResult result = new BasicDocumentParsingResult(); + parse(result); + return result; + } + + private void processKeywordLine() { + final HSDescriptorKeyword keyword = HSDescriptorKeyword.findKeyword(fieldParser.getCurrentKeyword()); + if(!keyword.equals(HSDescriptorKeyword.UNKNOWN_KEYWORD)) { + processKeyword(keyword); + } + } + + private void processKeyword(HSDescriptorKeyword keyword) { + switch(keyword) { + case RENDEZVOUS_SERVICE_DESCRIPTOR: + descriptor.setDescriptorId(fieldParser.parseBase32Digest()); + break; + case VERSION: + if(fieldParser.parseInteger() != 2) { + throw new TorParsingException("Unexpected Descriptor version"); + } + break; + + case PERMANENT_KEY: + descriptor.setPermanentKey(fieldParser.parsePublicKey()); + break; + + case SECRET_ID_PART: + descriptor.setSecretIdPart(fieldParser.parseBase32Digest()); + break; + + case PUBLICATION_TIME: + 
descriptor.setPublicationTime(fieldParser.parseTimestamp()); + break; + + case PROTOCOL_VERSIONS: + descriptor.setProtocolVersions(fieldParser.parseIntegerList()); + break; + + case INTRODUCTION_POINTS: + processIntroductionPoints(); + break; + + case SIGNATURE: + processSignature(); + break; + case UNKNOWN_KEYWORD: + break; + } + } + + private void processIntroductionPoints() { + final DocumentObject ob = fieldParser.parseObject(); + final ByteBuffer buffer = createIntroductionPointBuffer(ob); + final IntroductionPointParser parser = new IntroductionPointParser(new DocumentFieldParserImpl(buffer)); + parser.parse(new DocumentParsingResultHandler() { + + public void documentParsed(IntroductionPoint document) { + logger.fine("adding intro point "+ document.getIdentity()); + descriptor.addIntroductionPoint(document); + } + + public void documentInvalid(IntroductionPoint document, String message) { + logger.info("Invalid introduction point received"); + } + + public void parsingError(String message) { + logger.info("Error parsing introduction points: "+ message); + } + }); + } + + private ByteBuffer createIntroductionPointBuffer(DocumentObject ob) { + final byte[] content = Base64.decode(ob.getContent(false)); + if(content[0] == 'i') { + return ByteBuffer.wrap(content); + } else { + try { + byte[] decrypted = authentication.decryptIntroductionPoints(content); + return ByteBuffer.wrap(decrypted); + } catch (HSAuthenticationException e) { + throw new TorParsingException("Failed to decrypt introduction points: "+ e.getMessage()); + } + } + } + + private void processSignature() { + fieldParser.endSignedEntity(); + final TorSignature signature = fieldParser.parseSignature(); + if(!fieldParser.verifySignedEntity(descriptor.getPermanentKey(), signature)) { + resultHandler.documentInvalid(descriptor, "Signature verification failed"); + fieldParser.logWarn("Signature failed for descriptor: "+ descriptor.getDescriptorId().toBase32()); + return; + } + resultHandler.documentParsed(descriptor); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/HSDirectories.java b/orchid/src/com/subgraph/orchid/circuits/hs/HSDirectories.java new file mode 100644 index 00000000..54292b3b --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/HSDirectories.java @@ -0,0 +1,116 @@ +package com.subgraph.orchid.circuits.hs; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; + +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.crypto.TorRandom; +import com.subgraph.orchid.data.HexDigest; + +public class HSDirectories { + private final static int DIR_CLUSTER_SZ = 3; + private final Directory directory; + private final TorRandom random; + private ConsensusDocument currentConsensus; + private List hsDirectories; + + HSDirectories(Directory directory) { + this.directory = directory; + this.hsDirectories = new ArrayList(); + this.random = new TorRandom(); + } + + List getDirectoriesForHiddenService(HiddenService hs) { + final List dirs = new ArrayList(2 * DIR_CLUSTER_SZ); + for(HexDigest id: hs.getAllCurrentDescriptorIds()) { + for(Router r: getDirectoriesForDescriptorId(id)) { + dirs.add(new HSDescriptorDirectory(id, r)); + } + } + return dirs; + } + + private List getDirectoriesForDescriptorId(HexDigest descriptorId) { + final String hexId = descriptorId.toString(); + refreshFromDirectory(); + final int idx = getIndexForDescriptorId(hexId); + return 
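+ // HSDir selection works on a hash ring: routers carrying the HSDir flag are kept sorted by
+ // identity digest, the first router whose digest sorts after the descriptor ID is located
+ // (wrapping to the start of the list when none does), and that router plus the following
+ // DIR_CLUSTER_SZ - 1 entries (again wrapping around) are the directories responsible for the
+ // descriptor; the resulting list is shuffled before download attempts are made.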
selectDirectoriesAtIndex(idx); + } + + private int getIndexForDescriptorId(String hexId) { + for(int i = 0; i < hsDirectories.size(); i++) { + String routerId = getHexIdForIndex(i); + if(routerId.compareTo(hexId) > 0) { + return i; + } + } + return 0; + } + + private String getHexIdForIndex(int idx) { + final Router r = hsDirectories.get(idx); + return r.getIdentityHash().toString(); + } + + private List selectDirectoriesAtIndex(int idx) { + if(idx < 0 || idx >= hsDirectories.size()) { + throw new IllegalArgumentException("idx = "+ idx); + } + if(hsDirectories.size() < DIR_CLUSTER_SZ) { + throw new IllegalStateException(); + } + final List dirs = new ArrayList(DIR_CLUSTER_SZ); + for(int i = 0; i < DIR_CLUSTER_SZ; i++) { + dirs.add(hsDirectories.get(idx)); + idx += 1; + if(idx == hsDirectories.size()) { + idx = 0; + } + } + randomShuffle(dirs); + return dirs; + } + + + + private void refreshFromDirectory() { + ConsensusDocument consensus = directory.getCurrentConsensusDocument(); + if(currentConsensus == consensus) { + return; + } + currentConsensus = consensus; + hsDirectories.clear(); + for(Router r: directory.getAllRouters()) { + if(r.isHSDirectory()) { + hsDirectories.add(r); + } + } + + Collections.sort(hsDirectories, new Comparator() { + public int compare(Router r1, Router r2) { + final String s1 = r1.getIdentityHash().toString(); + final String s2 = r2.getIdentityHash().toString(); + return s1.compareTo(s2); + } + }); + } + + private void randomShuffle(List dirs) { + for(int i = 0; i < dirs.size(); i++) { + swap(dirs, i, random.nextInt(dirs.size())); + } + } + + private void swap(List dirs, int idx1, int idx2) { + if(idx1 != idx2) { + final Router r1 = dirs.get(idx1); + final Router r2 = dirs.get(idx2); + dirs.set(idx1, r2); + dirs.set(idx2, r1); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/HiddenService.java b/orchid/src/com/subgraph/orchid/circuits/hs/HiddenService.java new file mode 100644 index 00000000..738590b7 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/HiddenService.java @@ -0,0 +1,139 @@ +package com.subgraph.orchid.circuits.hs; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import com.subgraph.orchid.HiddenServiceCircuit; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.circuits.hs.HSDescriptorCookie.CookieType; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.data.Base32; +import com.subgraph.orchid.data.HexDigest; + +public class HiddenService { + + private final TorConfig config; + private final byte[] permanentId; + + private HSDescriptor descriptor; + private HiddenServiceCircuit circuit; + + static byte[] decodeOnion(String onionAddress) { + final int idx = onionAddress.indexOf(".onion"); + if(idx == -1) { + return Base32.base32Decode(onionAddress); + } else { + return Base32.base32Decode(onionAddress.substring(0, idx)); + } + } + + + HiddenService(TorConfig config, byte[] permanentId) { + this.config = config; + this.permanentId = permanentId; + } + + String getOnionAddressForLogging() { + if(config.getSafeLogging()) { + return "[scrubbed]"; + } else { + return getOnionAddress(); + } + } + + String getOnionAddress() { + return Base32.base32Encode(permanentId) + ".onion"; + } + + boolean hasCurrentDescriptor() { + return (descriptor != null && !descriptor.isExpired()); + } + + HSDescriptor getDescriptor() { + return descriptor; + } + + void setDescriptor(HSDescriptor descriptor) { + this.descriptor = descriptor; + } + + HiddenServiceCircuit 
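+ // Descriptor IDs are computed further down in this class following rend-spec v2:
+ //   descriptor-id   = H(permanent-id | secret-id-part)
+ //   secret-id-part  = H(time-period | descriptor-cookie | replica)
+ // where time-period = (current-time + permanent-id-byte-0 * 86400 / 256) / 86400, encoded as
+ // four network-order bytes, and the stealth-auth cookie is only mixed in when one is configured.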
getCircuit() { + return circuit; + } + + void setCircuit(HiddenServiceCircuit circuit) { + this.circuit = circuit; + } + + HSDescriptorCookie getAuthenticationCookie() { + return config.getHidServAuth(getOnionAddress()); + } + + List getAllCurrentDescriptorIds() { + final List ids = new ArrayList(); + ids.add(getCurrentDescriptorId(0)); + ids.add(getCurrentDescriptorId(1)); + return ids; + } + + HexDigest getCurrentDescriptorId(int replica) { + final TorMessageDigest digest = new TorMessageDigest(); + digest.update(permanentId); + digest.update(getCurrentSecretId(replica)); + return digest.getHexDigest(); + } + + byte[] getCurrentSecretId(int replica) { + final TorMessageDigest digest = new TorMessageDigest(); + digest.update(getCurrentTimePeriod()); + final HSDescriptorCookie cookie = getAuthenticationCookie(); + if(cookie != null && cookie.getType() == CookieType.COOKIE_STEALTH) { + digest.update(cookie.getValue()); + } + digest.update(new byte[] { (byte) replica }); + return digest.getDigestBytes(); + } + + byte[] getCurrentTimePeriod() { + final long now = System.currentTimeMillis() / 1000; + final int idByte = permanentId[0] & 0xFF; + return calculateTimePeriod(now, idByte); + } + + static byte[] calculateTimePeriod(long currentTime, int idByte) { + final long t = (currentTime + (idByte * 86400L / 256)) / 86400L; + return toNetworkBytes(t); + } + + static byte[] toNetworkBytes(long value) { + final byte[] result = new byte[4]; + for(int i = 3; i >= 0; i--) { + result[i] = (byte) (value & 0xFF); + value >>= 8; + } + return result; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + Arrays.hashCode(permanentId); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + HiddenService other = (HiddenService) obj; + if (!Arrays.equals(permanentId, other.permanentId)) + return false; + return true; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/HiddenServiceManager.java b/orchid/src/com/subgraph/orchid/circuits/hs/HiddenServiceManager.java new file mode 100644 index 00000000..955901d4 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/HiddenServiceManager.java @@ -0,0 +1,121 @@ +package com.subgraph.orchid.circuits.hs; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeoutException; +import java.util.logging.Logger; + +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.HiddenServiceCircuit; +import com.subgraph.orchid.OpenFailedException; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.circuits.CircuitManagerImpl; + +public class HiddenServiceManager { + private final static int RENDEZVOUS_RETRY_COUNT = 5; + private final static int HS_STREAM_TIMEOUT = 20000; + + private final static Logger logger = Logger.getLogger(HiddenServiceManager.class.getName()); + + private final Map hiddenServices; + private final TorConfig config; + private final Directory directory; + private final HSDirectories hsDirectories; + private final CircuitManagerImpl circuitManager; + + public HiddenServiceManager(TorConfig config, Directory directory, CircuitManagerImpl circuitManager) { + this.config = config; + this.directory = directory; + this.hiddenServices = new 
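+ // High level flow: getStreamTo() maps a .onion name to a cached HiddenService entry, downloads
+ // (or reuses) its descriptor from the responsible HS directories, opens a rendezvous circuit
+ // (retrying up to RENDEZVOUS_RETRY_COUNT times) and finally opens a stream over that circuit
+ // with the HS_STREAM_TIMEOUT timeout.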
HashMap(); + this.hsDirectories = new HSDirectories(directory); + this.circuitManager = circuitManager; + } + + public Stream getStreamTo(String onion, int port) throws OpenFailedException, InterruptedException, TimeoutException { + final HiddenService hs = getHiddenServiceForOnion(onion); + final HiddenServiceCircuit circuit = getCircuitTo(hs); + + try { + return circuit.openStream(port, HS_STREAM_TIMEOUT); + } catch (StreamConnectFailedException e) { + throw new OpenFailedException("Failed to open stream to hidden service "+ hs.getOnionAddressForLogging() + " reason "+ e.getReason()); + } + } + + private synchronized HiddenServiceCircuit getCircuitTo(HiddenService hs) throws OpenFailedException { + if(hs.getCircuit() == null) { + final HiddenServiceCircuit c = openCircuitTo(hs); + if(c == null) { + throw new OpenFailedException("Failed to open circuit to "+ hs.getOnionAddressForLogging()); + } + hs.setCircuit(c); + } + return hs.getCircuit(); + } + + private HiddenServiceCircuit openCircuitTo(HiddenService hs) throws OpenFailedException { + HSDescriptor descriptor = getDescriptorFor(hs); + + for(int i = 0; i < RENDEZVOUS_RETRY_COUNT; i++) { + final HiddenServiceCircuit c = openRendezvousCircuit(hs, descriptor); + if(c != null) { + return c; + } + } + throw new OpenFailedException("Failed to open circuit to "+ hs.getOnionAddressForLogging()); + } + + HSDescriptor getDescriptorFor(HiddenService hs) throws OpenFailedException { + if(hs.hasCurrentDescriptor()) { + return hs.getDescriptor(); + } + final HSDescriptor descriptor = downloadDescriptorFor(hs); + if(descriptor == null) { + final String msg = "Failed to download HS descriptor for "+ hs.getOnionAddressForLogging(); + logger.info(msg); + throw new OpenFailedException(msg); + } + hs.setDescriptor(descriptor); + return descriptor; + } + + private HSDescriptor downloadDescriptorFor(HiddenService hs) { + logger.fine("Downloading HS descriptor for "+ hs.getOnionAddressForLogging()); + final List dirs = hsDirectories.getDirectoriesForHiddenService(hs); + final HSDescriptorDownloader downloader = new HSDescriptorDownloader(hs, circuitManager, dirs); + return downloader.downloadDescriptor(); + } + + HiddenService getHiddenServiceForOnion(String onion) throws OpenFailedException { + final String key = onion.endsWith(".onion") ? onion.substring(0, onion.length() - 6) : onion; + synchronized(hiddenServices) { + if(!hiddenServices.containsKey(key)) { + hiddenServices.put(key, createHiddenServiceFor(key)); + } + return hiddenServices.get(key); + } + } + + private HiddenService createHiddenServiceFor(String key) throws OpenFailedException { + try { + byte[] decoded = HiddenService.decodeOnion(key); + return new HiddenService(config, decoded); + } catch (TorException e) { + final String target = config.getSafeLogging() ? 
"[scrubbed]" : (key + ".onion"); + throw new OpenFailedException("Failed to decode onion address "+ target + " : "+ e.getMessage()); + } + } + + private HiddenServiceCircuit openRendezvousCircuit(HiddenService hs, HSDescriptor descriptor) { + final RendezvousCircuitBuilder builder = new RendezvousCircuitBuilder(directory, circuitManager, hs, descriptor); + try { + return builder.call(); + } catch (Exception e) { + return null; + } + } +} \ No newline at end of file diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionPoint.java b/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionPoint.java new file mode 100644 index 00000000..df204819 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionPoint.java @@ -0,0 +1,58 @@ +package com.subgraph.orchid.circuits.hs; + +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; + +public class IntroductionPoint { + + private HexDigest identity; + private IPv4Address address; + private int onionPort; + private TorPublicKey onionKey; + private TorPublicKey serviceKey; + + IntroductionPoint(HexDigest identity) { + this.identity = identity; + } + + void setAddress(IPv4Address address) { + this.address = address; + } + + void setOnionPort(int onionPort) { + this.onionPort = onionPort; + } + + void setOnionKey(TorPublicKey onionKey) { + this.onionKey = onionKey; + } + + void setServiceKey(TorPublicKey serviceKey) { + this.serviceKey = serviceKey; + } + + boolean isValidDocument() { + return identity != null && address != null && onionPort != 0 && onionKey != null && serviceKey != null; + } + + public HexDigest getIdentity() { + return identity; + } + + public IPv4Address getAddress() { + return address; + } + + public int getPort() { + return onionPort; + } + + public TorPublicKey getOnionKey() { + return onionKey; + } + + public TorPublicKey getServiceKey() { + return serviceKey; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionPointKeyword.java b/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionPointKeyword.java new file mode 100644 index 00000000..97ad5943 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionPointKeyword.java @@ -0,0 +1,37 @@ +package com.subgraph.orchid.circuits.hs; + +public enum IntroductionPointKeyword { + SERVICE_AUTHENTICATION("service-authentication", 2), + INTRODUCTION_POINT("introduction-point", 1), + IP_ADDRESS("ip-address", 1), + ONION_PORT("onion-port", 1), + ONION_KEY("onion-key", 0), + SERVICE_KEY("service-key", 0), + INTRO_AUTHENTICATION("intro-authentication", 2), + UNKNOWN_KEYWORD("KEYWORD NOT FOUND", 0); + + private final String keyword; + private final int argumentCount; + + IntroductionPointKeyword(String keyword, int argumentCount) { + this.keyword = keyword; + this.argumentCount = argumentCount; + } + + String getKeyword() { + return keyword; + } + + int getArgumentCount() { + return argumentCount; + } + + static IntroductionPointKeyword findKeyword(String keyword) { + for(IntroductionPointKeyword k: values()) { + if(k.getKeyword().equals(keyword)) { + return k; + } + } + return UNKNOWN_KEYWORD; + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionPointParser.java b/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionPointParser.java new file mode 100644 index 00000000..59d3a792 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionPointParser.java @@ -0,0 +1,118 @@ +package 
com.subgraph.orchid.circuits.hs; + +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.directory.parsing.BasicDocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.DocumentParser; +import com.subgraph.orchid.directory.parsing.DocumentParsingHandler; +import com.subgraph.orchid.directory.parsing.DocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentParsingResultHandler; + +public class IntroductionPointParser implements DocumentParser{ + + private final DocumentFieldParser fieldParser; + + private DocumentParsingResultHandler resultHandler; + private IntroductionPoint currentIntroductionPoint; + + public IntroductionPointParser(DocumentFieldParser fieldParser) { + this.fieldParser = fieldParser; + this.fieldParser.setHandler(createParsingHandler()); + } + + public boolean parse(DocumentParsingResultHandler resultHandler) { + this.resultHandler = resultHandler; + try { + fieldParser.processDocument(); + return true; + } catch(TorParsingException e) { + resultHandler.parsingError(e.getMessage()); + return false; + } + } + + public DocumentParsingResult parse() { + final BasicDocumentParsingResult result = new BasicDocumentParsingResult(); + parse(result); + return result; + } + + private DocumentParsingHandler createParsingHandler() { + return new DocumentParsingHandler() { + public void parseKeywordLine() { + processKeywordLine(); + } + + public void endOfDocument() { + validateAndReportIntroductionPoint(currentIntroductionPoint); + } + }; + } + + private void resetIntroductionPoint(HexDigest identity) { + validateAndReportIntroductionPoint(currentIntroductionPoint); + currentIntroductionPoint = new IntroductionPoint(identity); + } + + private void validateAndReportIntroductionPoint(IntroductionPoint introductionPoint) { + if(introductionPoint == null) { + return; + } + + if(introductionPoint.isValidDocument()) { + resultHandler.documentParsed(introductionPoint); + } else { + resultHandler.documentInvalid(introductionPoint, "Invalid introduction point"); + } + } + + + private void processKeywordLine() { + final IntroductionPointKeyword keyword = IntroductionPointKeyword.findKeyword(fieldParser.getCurrentKeyword()); + if(!keyword.equals(IntroductionPointKeyword.UNKNOWN_KEYWORD)) { + processKeyword(keyword); + } + } + + private void processKeyword(IntroductionPointKeyword keyword) { + switch(keyword) { + case INTRO_AUTHENTICATION: + break; + + case INTRODUCTION_POINT: + resetIntroductionPoint(fieldParser.parseBase32Digest()); + break; + + case IP_ADDRESS: + if(currentIntroductionPoint != null) { + currentIntroductionPoint.setAddress(fieldParser.parseAddress()); + } + break; + + case ONION_KEY: + if(currentIntroductionPoint != null) { + currentIntroductionPoint.setOnionKey(fieldParser.parsePublicKey()); + } + break; + + case ONION_PORT: + if(currentIntroductionPoint != null) { + currentIntroductionPoint.setOnionPort(fieldParser.parsePort()); + } + break; + + case SERVICE_KEY: + if(currentIntroductionPoint != null) { + currentIntroductionPoint.setServiceKey(fieldParser.parsePublicKey()); + } + break; + + case SERVICE_AUTHENTICATION: + break; + + case UNKNOWN_KEYWORD: + break; + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionProcessor.java b/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionProcessor.java new file mode 100644 index 00000000..d182f155 --- /dev/null +++ 
b/orchid/src/com/subgraph/orchid/circuits/hs/IntroductionProcessor.java @@ -0,0 +1,105 @@ +package com.subgraph.orchid.circuits.hs; + +import java.nio.ByteBuffer; +import java.util.logging.Logger; + +import com.subgraph.orchid.Cell; +import com.subgraph.orchid.Circuit; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.crypto.HybridEncryption; +import com.subgraph.orchid.crypto.TorPublicKey; + +public class IntroductionProcessor { + private final static Logger logger = Logger.getLogger(IntroductionProcessor.class.getName()); + private final static int INTRODUCTION_PROTOCOL_VERSION = 3; + + private final HiddenService hiddenService; + private final Circuit introductionCircuit; + private final IntroductionPoint introductionPoint; + + protected IntroductionProcessor(HiddenService hiddenService, Circuit introductionCircuit, IntroductionPoint introductionPoint) { + this.hiddenService = hiddenService; + this.introductionCircuit = introductionCircuit; + this.introductionPoint = introductionPoint; + } + + TorPublicKey getServiceKey() { + return introductionPoint.getServiceKey(); + } + + boolean sendIntroduce(TorPublicKey permanentKey, byte[] publicKeyBytes, byte[] rendezvousCookie, Router rendezvousRouter) { + final RelayCell introduceCell = introductionCircuit.createRelayCell(RelayCell.RELAY_COMMAND_INTRODUCE1, 0, introductionCircuit.getFinalCircuitNode()); + + final byte[] payload = createIntroductionPayload(rendezvousRouter, publicKeyBytes, rendezvousCookie, permanentKey); + final TorPublicKey serviceKey = introductionPoint.getServiceKey(); + introduceCell.putByteArray(serviceKey.getFingerprint().getRawBytes()); + introduceCell.putByteArray(payload); + introductionCircuit.sendRelayCell(introduceCell); + + final RelayCell response = introductionCircuit.receiveRelayCell(); + if(response == null) { + logger.fine("Timeout waiting for response to INTRODUCE1 cell"); + return false; + } else if(response.getRelayCommand() != RelayCell.RELAY_COMMAND_INTRODUCE_ACK) { + logger.info("Unexpected relay cell type received waiting for response to INTRODUCE1 cell: "+ response.getRelayCommand()); + return false; + } else if(response.cellBytesRemaining() == 0) { + return true; + } else { + logger.info("INTRODUCE_ACK indicates that introduction was not forwarded: "+ response.getByte()); + return false; + } + } + + void markCircuitForClose() { + introductionCircuit.markForClose(); + } + + private byte[] createIntroductionPayload(Router rendezvousRouter, byte[] publicKeyBytes, byte[] rendezvousCookie, TorPublicKey encryptionKey) { + final ByteBuffer buffer = createIntroductionBuffer((int) (System.currentTimeMillis() / 1000), rendezvousRouter, rendezvousCookie, publicKeyBytes); + return encryptIntroductionBuffer(buffer, encryptionKey); + } + + private ByteBuffer createIntroductionBuffer(int timestamp, Router rr, byte[] cookie, byte[] dhPublic) { + final ByteBuffer buffer = ByteBuffer.allocate(Cell.CELL_LEN); + final byte[] rpAddress = rr.getAddress().getAddressDataBytes(); + final short rpPort = (short) rr.getOnionPort(); + final byte[] rpIdentity = rr.getIdentityHash().getRawBytes(); + final byte[] rpOnionKey = rr.getOnionKey().getRawBytes(); + + buffer.put((byte) INTRODUCTION_PROTOCOL_VERSION); // VER Version byte: set to 3. 
[1 octet] + addAuthentication(buffer); + //buffer.put((byte) 0); // AUTHT The auth type that is used [1 octet] + buffer.putInt(timestamp); // TS A timestamp [4 octets] + buffer.put(rpAddress); // IP Rendezvous point's address [4 octets] + buffer.putShort(rpPort); // PORT Rendezvous point's OR port [2 octets] + buffer.put(rpIdentity); // ID Rendezvous point identity ID [20 octets] + buffer.putShort((short) rpOnionKey.length); // KLEN Length of onion key [2 octets] + buffer.put(rpOnionKey); // KEY Rendezvous point onion key [KLEN octets] + buffer.put(cookie); // RC Rendezvous cookie [20 octets] + buffer.put(dhPublic); // g^x Diffie-Hellman data, part 1 [128 octets] + + return buffer; + } + + private void addAuthentication(ByteBuffer buffer) { + HSDescriptorCookie cookie = hiddenService.getAuthenticationCookie(); + if(cookie == null) { + buffer.put((byte) 0); + } else { + buffer.put(cookie.getAuthTypeByte()); + buffer.putShort((short) cookie.getValue().length); + buffer.put(cookie.getValue()); + } + } + + private byte[] encryptIntroductionBuffer(ByteBuffer buffer, TorPublicKey key) { + final int len = buffer.position(); + final byte[] payload = new byte[len]; + buffer.flip(); + buffer.get(payload); + final HybridEncryption enc = new HybridEncryption(); + return enc.encrypt(payload, key); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/RendezvousCircuitBuilder.java b/orchid/src/com/subgraph/orchid/circuits/hs/RendezvousCircuitBuilder.java new file mode 100644 index 00000000..089fb227 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/RendezvousCircuitBuilder.java @@ -0,0 +1,99 @@ +package com.subgraph.orchid.circuits.hs; + +import java.util.concurrent.Callable; +import java.util.logging.Logger; + +import com.subgraph.orchid.Circuit; +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.HiddenServiceCircuit; +import com.subgraph.orchid.InternalCircuit; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.circuits.CircuitManagerImpl; +import com.subgraph.orchid.crypto.TorTapKeyAgreement; + +public class RendezvousCircuitBuilder implements Callable{ + private final Logger logger = Logger.getLogger(RendezvousCircuitBuilder.class.getName()); + + private final Directory directory; + + private final CircuitManagerImpl circuitManager; + private final HiddenService hiddenService; + private final HSDescriptor serviceDescriptor; + + public RendezvousCircuitBuilder(Directory directory, CircuitManagerImpl circuitManager, HiddenService hiddenService, HSDescriptor descriptor) { + this.directory = directory; + this.circuitManager = circuitManager; + this.hiddenService = hiddenService; + this.serviceDescriptor = descriptor; + } + + public HiddenServiceCircuit call() throws Exception { + + logger.fine("Opening rendezvous circuit for "+ logServiceName()); + + final InternalCircuit rendezvous = circuitManager.getCleanInternalCircuit(); + logger.fine("Establishing rendezvous for "+ logServiceName()); + RendezvousProcessor rp = new RendezvousProcessor(rendezvous); + if(!rp.establishRendezvous()) { + rendezvous.markForClose(); + return null; + } + logger.fine("Opening introduction circuit for "+ logServiceName()); + final IntroductionProcessor introductionProcessor = openIntroduction(); + if(introductionProcessor == null) { + logger.info("Failed to open connection to any introduction point"); + rendezvous.markForClose(); + return null; + } + logger.fine("Sending introduce cell for "+ logServiceName()); + final 
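+ // The sequence below mirrors the v2 rendezvous protocol: a random cookie is registered at the
+ // rendezvous point with ESTABLISH_RENDEZVOUS, an INTRODUCE1 cell (rendezvous point, cookie and
+ // the client's TAP DH public value, hybrid-encrypted to the service key) is sent over the
+ // introduction circuit, and the hidden service answers with RENDEZVOUS2 carrying its own DH
+ // public value and handshake digest, from which the shared circuit keys are derived.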
TorTapKeyAgreement kex = new TorTapKeyAgreement(); + final boolean icResult = introductionProcessor.sendIntroduce(introductionProcessor.getServiceKey(), kex.getPublicKeyBytes(), rp.getCookie(), rp.getRendezvousRouter()); + introductionProcessor.markCircuitForClose(); + if(!icResult) { + rendezvous.markForClose(); + return null; + } + logger.fine("Processing RV2 for "+ logServiceName()); + HiddenServiceCircuit hsc = rp.processRendezvous2(kex); + if(hsc == null) { + rendezvous.markForClose(); + } + + logger.fine("Rendezvous circuit opened for "+ logServiceName()); + + return hsc; + } + + private String logServiceName() { + return hiddenService.getOnionAddressForLogging(); + } + + private IntroductionProcessor openIntroduction() { + for(IntroductionPoint ip: serviceDescriptor.getShuffledIntroductionPoints()) { + final Circuit circuit = attemptOpenIntroductionCircuit(ip); + if(circuit != null) { + return new IntroductionProcessor(hiddenService, circuit, ip); + } + } + return null; + } + + private Circuit attemptOpenIntroductionCircuit(IntroductionPoint ip) { + final Router r = directory.getRouterByIdentity(ip.getIdentity()); + if(r == null) { + return null; + } + + try { + final InternalCircuit circuit = circuitManager.getCleanInternalCircuit(); + return circuit.cannibalizeToIntroductionPoint(r); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } catch (TorException e) { + logger.fine("cannibalizeTo() failed : "+ e.getMessage()); + return null; + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/hs/RendezvousProcessor.java b/orchid/src/com/subgraph/orchid/circuits/hs/RendezvousProcessor.java new file mode 100644 index 00000000..9584fae6 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/hs/RendezvousProcessor.java @@ -0,0 +1,96 @@ +package com.subgraph.orchid.circuits.hs; + +import java.math.BigInteger; +import java.util.logging.Logger; + +import com.subgraph.orchid.Cell; +import com.subgraph.orchid.HiddenServiceCircuit; +import com.subgraph.orchid.InternalCircuit; +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.circuits.CircuitNodeCryptoState; +import com.subgraph.orchid.circuits.CircuitNodeImpl; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.crypto.TorRandom; +import com.subgraph.orchid.crypto.TorTapKeyAgreement; +import com.subgraph.orchid.data.HexDigest; + +public class RendezvousProcessor { + private final static Logger logger = Logger.getLogger(RendezvousProcessor.class.getName()); + + private final static int RENDEZVOUS_COOKIE_LEN = 20; + private final static TorRandom random = new TorRandom(); + + private final InternalCircuit circuit; + private final byte[] cookie; + + protected RendezvousProcessor(InternalCircuit circuit) { + this.circuit = circuit; + this.cookie = random.getBytes(RENDEZVOUS_COOKIE_LEN); + } + + boolean establishRendezvous() { + final RelayCell cell = circuit.createRelayCell(RelayCell.RELAY_COMMAND_ESTABLISH_RENDEZVOUS, 0, circuit.getFinalCircuitNode()); + cell.putByteArray(cookie); + circuit.sendRelayCell(cell); + final RelayCell response = circuit.receiveRelayCell(); + if(response == null) { + logger.info("Timeout waiting for Rendezvous establish response"); + return false; + } else if(response.getRelayCommand() != RelayCell.RELAY_COMMAND_RENDEZVOUS_ESTABLISHED) { + logger.info("Response received from Rendezvous establish was not expected acknowledgement, Relay Command: "+ response.getRelayCommand()); + return 
false; + } else { + return true; + } + } + + HiddenServiceCircuit processRendezvous2(TorTapKeyAgreement kex) { + final RelayCell cell = circuit.receiveRelayCell(); + if(cell == null) { + logger.info("Timeout waiting for RENDEZVOUS2"); + return null; + } else if (cell.getRelayCommand() != RelayCell.RELAY_COMMAND_RENDEZVOUS2) { + logger.info("Unexpected Relay cell type received while waiting for RENDEZVOUS2: "+ cell.getRelayCommand()); + return null; + } + final BigInteger peerPublic = readPeerPublic(cell); + final HexDigest handshakeDigest = readHandshakeDigest(cell); + if(peerPublic == null || handshakeDigest == null) { + return null; + } + final byte[] verifyHash = new byte[TorMessageDigest.TOR_DIGEST_SIZE]; + final byte[] keyMaterial = new byte[CircuitNodeCryptoState.KEY_MATERIAL_SIZE]; + if(!kex.deriveKeysFromDHPublicAndHash(peerPublic, handshakeDigest.getRawBytes(), keyMaterial, verifyHash)) { + logger.info("Error deriving session keys while extending to hidden service"); + return null; + } + return circuit.connectHiddenService(CircuitNodeImpl.createAnonymous(circuit.getFinalCircuitNode(), keyMaterial, verifyHash)); + } + + private BigInteger readPeerPublic(Cell cell) { + final byte[] dhPublic = new byte[TorTapKeyAgreement.DH_LEN]; + cell.getByteArray(dhPublic); + final BigInteger peerPublic = new BigInteger(1, dhPublic); + if(!TorTapKeyAgreement.isValidPublicValue(peerPublic)) { + logger.warning("Illegal DH public value received: "+ peerPublic); + return null; + } + return peerPublic; + } + + HexDigest readHandshakeDigest(Cell cell) { + final byte[] digestBytes = new byte[TorMessageDigest.TOR_DIGEST_SIZE]; + cell.getByteArray(digestBytes); + return HexDigest.createFromDigestBytes(digestBytes); + } + + + byte[] getCookie() { + return cookie; + } + + Router getRendezvousRouter() { + return circuit.getFinalCircuitNode().getRouter(); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/path/BandwidthWeightedRouters.java b/orchid/src/com/subgraph/orchid/circuits/path/BandwidthWeightedRouters.java new file mode 100644 index 00000000..1521d8a0 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/path/BandwidthWeightedRouters.java @@ -0,0 +1,184 @@ +package com.subgraph.orchid.circuits.path; + +import java.util.ArrayList; +import java.util.List; + +import com.subgraph.orchid.Router; +import com.subgraph.orchid.crypto.TorRandom; + +public class BandwidthWeightedRouters { + private static class WeightedRouter { + private final Router router; + private boolean isUnknown; + private double weightedBandwidth; + private long scaledBandwidth; + + WeightedRouter(Router router, double bw) { + this.router = router; + this.weightedBandwidth = bw; + } + + void scaleBandwidth(double scaleFactor) { + scaledBandwidth = Math.round(weightedBandwidth * scaleFactor); + } + } + + private final static long MAX_SCALE = Long.MAX_VALUE / 4; + private final static double EPSILON = 0.1; + private final List weightedRouters = new ArrayList(); + private final TorRandom random = new TorRandom(); + + private double totalExitBw; + private double totalNonExitBw; + private double totalGuardBw; + + private boolean isScaled; + private int unknownCount; + + void addRouter(Router router, double weightedBandwidth) { + weightedRouters.add(new WeightedRouter(router, weightedBandwidth)); + adjustTotals(router, weightedBandwidth); + isScaled = false; + } + + + boolean isTotalBandwidthZero() { + return getTotalBandwidth() < EPSILON; + } + + double getTotalBandwidth() { + return totalExitBw + totalNonExitBw; + } + + double 
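+ // Selection is bandwidth-weighted: every router's (possibly weight-adjusted) bandwidth is
+ // scaled into a fixed-point long bounded by MAX_SCALE, a uniform random value below the scaled
+ // total is drawn, and the list is walked until the running sum passes it. Routers that did not
+ // report a bandwidth are assigned 40000/20000 (fast/slow) when nothing else is known, otherwise
+ // the average of the known values.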
getTotalGuardBandwidth() { + return totalGuardBw; + } + + + double getTotalExitBandwidth() { + return totalExitBw; + } + + private void adjustTotals(Router router, double bw) { + if(router.isExit()) { + totalExitBw += bw; + } else { + totalNonExitBw += bw; + } + if(router.isPossibleGuard()) { + totalGuardBw += bw; + } + } + + void addRouterUnknown(Router router) { + final WeightedRouter wr = new WeightedRouter(router, 0); + wr.isUnknown = true; + weightedRouters.add(wr); + unknownCount += 1; + } + + int getRouterCount() { + return weightedRouters.size(); + } + + int getUnknownCount() { + return unknownCount; + } + + void fixUnknownValues() { + if(unknownCount == 0) { + return; + } + if(isTotalBandwidthZero()) { + fixUnknownValues(40000, 20000); + } else { + final int knownCount = weightedRouters.size() - unknownCount; + final long average = (long) (getTotalBandwidth() / knownCount); + fixUnknownValues(average, average); + } + } + + private void fixUnknownValues(long fastBw, long slowBw) { + for(WeightedRouter wr: weightedRouters) { + if(wr.isUnknown) { + long bw = wr.router.isFast() ? fastBw : slowBw; + wr.weightedBandwidth = bw; + wr.isUnknown = false; + adjustTotals(wr.router, bw); + } + } + unknownCount = 0; + isScaled = false; + } + + Router chooseRandomRouterByWeight() { + final long total = getScaledTotal(); + if(total == 0) { + if(weightedRouters.size() == 0) { + return null; + } + final int idx = random.nextInt(weightedRouters.size()); + return weightedRouters.get(idx).router; + } + return chooseFirstElementAboveRandom(random.nextLong(total)); + } + + void adjustWeights(double exitWeight, double guardWeight) { + for(WeightedRouter wr: weightedRouters) { + Router r = wr.router; + if(r.isExit() && r.isPossibleGuard()) { + wr.weightedBandwidth *= (exitWeight * guardWeight); + } else if(r.isPossibleGuard()) { + wr.weightedBandwidth *= guardWeight; + } else if(r.isExit()) { + wr.weightedBandwidth *= exitWeight; + } + } + scaleRouterWeights(); + } + + private Router chooseFirstElementAboveRandom(long randomValue) { + long sum = 0; + Router chosen = null; + for(WeightedRouter wr: weightedRouters) { + sum += wr.scaledBandwidth; + if(sum > randomValue) { + chosen = wr.router; + /* Don't return early to avoid leaking timing information about choice */ + randomValue = Long.MAX_VALUE; + } + } + if(chosen == null) { + return weightedRouters.get(weightedRouters.size() - 1).router; + } + return chosen; + } + + private double getWeightedTotal() { + double total = 0.0; + for(WeightedRouter wr: weightedRouters) { + total += wr.weightedBandwidth; + } + return total; + } + + private void scaleRouterWeights() { + final double scaleFactor = MAX_SCALE / getWeightedTotal(); + for(WeightedRouter wr: weightedRouters) { + wr.scaleBandwidth(scaleFactor); + } + isScaled = true; + } + + private long getScaledTotal() { + if(!isScaled) { + scaleRouterWeights(); + } + long total = 0; + for(WeightedRouter wr: weightedRouters) { + total += wr.scaledBandwidth; + } + return total; + } +} + \ No newline at end of file diff --git a/orchid/src/com/subgraph/orchid/circuits/path/CircuitNodeChooser.java b/orchid/src/com/subgraph/orchid/circuits/path/CircuitNodeChooser.java new file mode 100644 index 00000000..30047d94 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/path/CircuitNodeChooser.java @@ -0,0 +1,186 @@ +package com.subgraph.orchid.circuits.path; + +import java.util.ArrayList; +import java.util.List; +import java.util.logging.Logger; + +import com.subgraph.orchid.ConsensusDocument; +import 
com.subgraph.orchid.Directory; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.crypto.TorRandom; + +public class CircuitNodeChooser { + private final static Logger logger = Logger.getLogger(CircuitNodeChooser.class.getName()); + + public enum WeightRule { WEIGHT_FOR_DIR, WEIGHT_FOR_EXIT, WEIGHT_FOR_MID, WEIGHT_FOR_GUARD, NO_WEIGHTING}; + private final Directory directory; + private final TorRandom random = new TorRandom(); + + private final TorConfigNodeFilter configNodeFilter; + + + public CircuitNodeChooser(TorConfig config, Directory directory) { + this.directory = directory; + this.configNodeFilter = new TorConfigNodeFilter(config); + } + + /** + * + * @param candidates + * @return The chosen exit router or 'null' if no suitable router is available + */ + public Router chooseExitNode(List candidates) { + final List filteredCandidates = configNodeFilter.filterExitCandidates(candidates); + return chooseByBandwidth(filteredCandidates, WeightRule.WEIGHT_FOR_EXIT); + } + + public Router chooseDirectory() { + final RouterFilter filter = new RouterFilter() { + public boolean filter(Router router) { + return router.getDirectoryPort() != 0; + } + }; + final List candidates = getFilteredRouters(filter, false); + final Router choice = chooseByBandwidth(candidates, WeightRule.WEIGHT_FOR_DIR); + if(choice == null) { + return directory.getRandomDirectoryAuthority(); + } else { + return choice; + } + } + + /** + * + * @param rule + * @param routerFilter + * @return The chosen router or 'null' if no suitable router is available. + */ + public Router chooseRandomNode(WeightRule rule, RouterFilter routerFilter) { + final List candidates = getFilteredRouters(routerFilter, true); + final Router choice = chooseByBandwidth(candidates, rule); + if(choice == null) { + // try again with more permissive flags + return null; + } + return choice; + } + + private List getFilteredRouters(RouterFilter rf, boolean needDescriptor) { + final List routers = new ArrayList(); + for(Router r: getUsableRouters(needDescriptor)) { + if(rf.filter(r)) { + routers.add(r); + } + } + return routers; + } + + List getUsableRouters(boolean needDescriptor) { + final List routers = new ArrayList(); + for(Router r: directory.getAllRouters()) { + if(r.isRunning() && + r.isValid() && + !r.isHibernating() && + !(needDescriptor && r.getCurrentDescriptor() == null)) { + + routers.add(r); + } + } + + return routers; + } + + private Router chooseByBandwidth(List candidates, WeightRule rule) { + final Router choice = chooseNodeByBandwidthWeights(candidates, rule); + if(choice != null) { + return choice; + } else { + return chooseNodeByBandwidth(candidates, rule); + } + } + + private Router chooseNodeByBandwidthWeights(List candidates, WeightRule rule) { + final ConsensusDocument consensus = directory.getCurrentConsensusDocument(); + if(consensus == null) { + return null; + } + final BandwidthWeightedRouters bwr = computeWeightedBandwidths(candidates, consensus, rule); + return bwr.chooseRandomRouterByWeight(); + } + + + private BandwidthWeightedRouters computeWeightedBandwidths(List candidates, ConsensusDocument consensus, WeightRule rule) { + final CircuitNodeChooserWeightParameters wp = CircuitNodeChooserWeightParameters.create(consensus, rule); + if(!wp.isValid()) { + logger.warning("Got invalid bandwidth weights. 
Falling back to old selection method"); + return null; + } + final BandwidthWeightedRouters weightedRouters = new BandwidthWeightedRouters(); + for(Router r: candidates) { + double wbw = wp.calculateWeightedBandwidth(r); + weightedRouters.addRouter(r, wbw); + } + return weightedRouters; + } + + private Router chooseNodeByBandwidth(List routers, WeightRule rule) { + final BandwidthWeightedRouters bwr = new BandwidthWeightedRouters(); + for(Router r: routers) { + long bw = getRouterBandwidthBytes(r); + if(bw == -1) { + bwr.addRouterUnknown(r); + } else { + bwr.addRouter(r, bw); + } + } + bwr.fixUnknownValues(); + if(bwr.isTotalBandwidthZero()) { + if(routers.size() == 0) { + return null; + } + + final int idx = random.nextInt(routers.size()); + return routers.get(idx); + } + + computeFinalWeights(bwr, rule); + return bwr.chooseRandomRouterByWeight(); + } + + + private final static double EPSILON = 0.1; + + private void computeFinalWeights(BandwidthWeightedRouters bwr, WeightRule rule) { + final double exitWeight = calculateWeight(rule == WeightRule.WEIGHT_FOR_EXIT, + bwr.getTotalExitBandwidth(), bwr.getTotalBandwidth()); + final double guardWeight = calculateWeight(rule == WeightRule.WEIGHT_FOR_GUARD, + bwr.getTotalGuardBandwidth(), bwr.getTotalBandwidth()); + + bwr.adjustWeights(exitWeight, guardWeight); + } + + private double calculateWeight(boolean matchesRule, double totalByType, double total) { + if(matchesRule || totalByType < EPSILON) { + return 1.0; + } + final double result = 1.0 - (total / (3.0 * totalByType)); + if(result <= 0.0) { + return 0.0; + } else { + return result; + } + } + + private long getRouterBandwidthBytes(Router r) { + if(!r.hasBandwidth()) { + return -1; + } else { + return kbToBytes(r.getEstimatedBandwidth()); + } + } + + private long kbToBytes(long bw) { + return (bw > (Long.MAX_VALUE / 1000) ? 
Long.MAX_VALUE : bw * 1000); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/path/CircuitNodeChooserWeightParameters.java b/orchid/src/com/subgraph/orchid/circuits/path/CircuitNodeChooserWeightParameters.java new file mode 100644 index 00000000..49125ea6 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/path/CircuitNodeChooserWeightParameters.java @@ -0,0 +1,149 @@ +package com.subgraph.orchid.circuits.path; + +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.Router; + +class CircuitNodeChooserWeightParameters { + private final static int VAR_WG = 0; + private final static int VAR_WM = 1; + private final static int VAR_WE = 2; + private final static int VAR_WD = 3; + private final static int VAR_WGB = 4; + private final static int VAR_WMB = 5; + private final static int VAR_WEB = 6; + private final static int VAR_WDB = 7; + private final static int VAR_COUNT = 8; + + private final static String ZERO = "zero"; + private final static String ONE = "one"; + + static CircuitNodeChooserWeightParameters create(ConsensusDocument consensus, CircuitNodeChooser.WeightRule rule) { + final double[] vars = new double[VAR_COUNT]; + final long scale = consensus.getWeightScaleParameter(); + final String[] tags = getTagsForWeightRule(rule); + if(!populateVars(consensus, scale, tags, vars)) { + return new CircuitNodeChooserWeightParameters(new double[VAR_COUNT], false); + } else { + return new CircuitNodeChooserWeightParameters(vars, true); + } + } + + static boolean populateVars(ConsensusDocument consensus, long scale, String[] tags, double[] vars) { + for(int i = 0; i < VAR_COUNT; i++) { + vars[i] = tagToVarValue(consensus, scale, tags[i]); + if(vars[i] < 0.0) { + return false; + } else { + vars[i] /= scale; + } + } + return true; + } + + static double tagToVarValue(ConsensusDocument consensus, long scale, String tag) { + if(tag.equals(ZERO)) { + return 0.0; + } else if (tag.equals(ONE)) { + return 1.0; + } else { + return consensus.getBandwidthWeight(tag); + } + } + + static String[] getTagsForWeightRule(CircuitNodeChooser.WeightRule rule) { + switch(rule) { + case WEIGHT_FOR_GUARD: + return new String[] { + "Wgg", "Wgm", ZERO, "Wgd", + "Wgb", "Wmb", "Web", "Wdb"}; + + case WEIGHT_FOR_MID: + return new String[] { + "Wmg", "Wmm", "Wme", "Wmd", + "Wgb", "Wmb", "Web", "Wdb"}; + + case WEIGHT_FOR_EXIT: + return new String[] { + "Wee", "Wem", "Wed", "Weg", + "Wgb", "Wmb", "Web", "Wdb"}; + + case WEIGHT_FOR_DIR: + return new String[] { + "Wbe", "Wbm", "Wbd", "Wbg", + ONE, ONE, ONE, ONE }; + + case NO_WEIGHTING: + return new String[] { + ONE, ONE, ONE, ONE, + ONE, ONE, ONE, ONE }; + default: + throw new IllegalArgumentException("Unhandled WeightRule type: "+ rule); + } + } + + private final double[] vars; + private final boolean isValid; + + private CircuitNodeChooserWeightParameters(double[] vars, boolean isValid) { + this.vars = vars; + this.isValid = isValid; + } + + boolean isValid() { + return isValid; + } + + double getWg() { + return vars[VAR_WG]; + } + + double getWm() { + return vars[VAR_WM]; + } + + double getWe() { + return vars[VAR_WE]; + } + + double getWd() { + return vars[VAR_WD]; + } + + double getWgb() { + return vars[VAR_WGB]; + } + double getWmb() { + return vars[VAR_WMB]; + } + double getWeb() { + return vars[VAR_WEB]; + } + double getWdb() { + return vars[VAR_WDB]; + } + + double calculateWeightedBandwidth(Router router) { + final long bw = kbToBytes(router.getEstimatedBandwidth()); + final double w = calculateWeight( + router.isExit() && 
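+ // The Wxx values are taken from the consensus bandwidth-weights line and divided by the
+ // consensus weight scale parameter; calculateWeight() picks the weight by flag combination
+ // (guard+exit -> Wd, guard -> Wg, exit -> We, otherwise Wm) and additionally multiplies in the
+ // corresponding *b directory weight when the router publishes a directory port.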
!router.isBadExit(), + router.isPossibleGuard(), + router.getDirectoryPort() != 0); + return (w * bw) + 0.5; + } + + long kbToBytes(long kb) { + return (kb > (Long.MAX_VALUE / 1000) ? Long.MAX_VALUE : kb * 1000); + } + + private double calculateWeight(boolean isExit, boolean isGuard, boolean isDir) { + if(isGuard && isExit) { + return (isDir) ? getWdb() * getWd() : getWd(); + } else if (isGuard) { + return (isDir) ? getWgb() * getWg() : getWg(); + } else if (isExit) { + return (isDir) ? getWeb() * getWe() : getWe(); + } else { // middle + return (isDir) ? getWmb() * getWm() : getWm(); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/path/CircuitPathChooser.java b/orchid/src/com/subgraph/orchid/circuits/path/CircuitPathChooser.java new file mode 100644 index 00000000..5bae0b64 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/path/CircuitPathChooser.java @@ -0,0 +1,202 @@ +package com.subgraph.orchid.circuits.path; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.circuits.guards.EntryGuards; +import com.subgraph.orchid.circuits.path.CircuitNodeChooser.WeightRule; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.exitpolicy.ExitTarget; + +public class CircuitPathChooser { + + public static CircuitPathChooser create(TorConfig config, Directory directory) { + return new CircuitPathChooser(config, directory, new CircuitNodeChooser(config, directory)); + } + + private final Directory directory; + private final CircuitNodeChooser nodeChooser; + + private EntryGuards entryGuards; + private boolean useEntryGuards; + + CircuitPathChooser(TorConfig config, Directory directory, CircuitNodeChooser nodeChooser) { + this.directory = directory; + this.nodeChooser = nodeChooser; + this.entryGuards = null; + this.useEntryGuards = false; + } + + public void enableEntryGuards(EntryGuards entryGuards) { + this.entryGuards = entryGuards; + this.useEntryGuards = true; + } + + public List chooseDirectoryPath() throws InterruptedException { + if(useEntryGuards && entryGuards.isUsingBridges()) { + final Set empty = Collections.emptySet(); + final Router bridge = entryGuards.chooseRandomGuard(empty); + if(bridge == null) { + throw new IllegalStateException("Failed to choose bridge for directory request"); + } + return Arrays.asList(bridge); + } + final Router dir = nodeChooser.chooseDirectory(); + return Arrays.asList(dir); + } + + public List chooseInternalPath() throws InterruptedException, PathSelectionFailedException { + final Set excluded = Collections.emptySet(); + final Router finalRouter = chooseMiddleNode(excluded); + return choosePathWithFinal(finalRouter); + } + + public List choosePathWithExit(Router exitRouter) throws InterruptedException, PathSelectionFailedException { + return choosePathWithFinal(exitRouter); + } + + public List choosePathWithFinal(Router finalRouter) throws InterruptedException, PathSelectionFailedException { + final Set excluded = new HashSet(); + excludeChosenRouterAndRelated(finalRouter, excluded); + + final Router middleRouter = chooseMiddleNode(excluded); + if(middleRouter == null) { + throw new PathSelectionFailedException("Failed to select suitable middle node"); + } + excludeChosenRouterAndRelated(middleRouter, excluded); + + final Router 
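+ // Paths are assembled back to front: the exit/final hop is fixed first, then a middle node and
+ // finally an entry node (or configured entry guard) are picked, with each step excluding routers
+ // that share a /16 with, or are mutually listed in the family of, a hop already chosen.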
entryRouter = chooseEntryNode(excluded); + if(entryRouter == null) { + throw new PathSelectionFailedException("Failed to select suitable entry node"); + } + return Arrays.asList(entryRouter, middleRouter, finalRouter); + } + + public Router chooseEntryNode(final Set excludedRouters) throws InterruptedException { + if(useEntryGuards) { + return entryGuards.chooseRandomGuard(excludedRouters); + } + + return nodeChooser.chooseRandomNode(WeightRule.WEIGHT_FOR_GUARD, new RouterFilter() { + public boolean filter(Router router) { + return router.isPossibleGuard() && !excludedRouters.contains(router); + } + }); + } + + Router chooseMiddleNode(final Set excludedRouters) { + return nodeChooser.chooseRandomNode(WeightRule.WEIGHT_FOR_MID, new RouterFilter() { + public boolean filter(Router router) { + return router.isFast() && !excludedRouters.contains(router); + } + }); + } + + public Router chooseExitNodeForTargets(List targets) { + final List routers = filterForExitTargets( + getUsableExitRouters(), targets); + return nodeChooser.chooseExitNode(routers); + } + + private List getUsableExitRouters() { + final List result = new ArrayList(); + for(Router r: nodeChooser.getUsableRouters(true)) { + if(r.isExit() && !r.isBadExit()) { + result.add(r); + } + } + return result; + } + + private void excludeChosenRouterAndRelated(Router router, Set excludedRouters) { + excludedRouters.add(router); + for(Router r: directory.getAllRouters()) { + if(areInSameSlash16(router, r)) { + excludedRouters.add(r); + } + } + + for(String s: router.getFamilyMembers()) { + Router r = directory.getRouterByName(s); + if(r != null) { + // Is mutual? + if(isFamilyMember(r.getFamilyMembers(), router)) { + excludedRouters.add(r); + } + } + } + } + + private boolean isFamilyMember(Collection familyMemberNames, Router r) { + for(String s: familyMemberNames) { + Router member = directory.getRouterByName(s); + if(member != null && member.equals(r)) { + return true; + } + } + return false; + } + + // Are routers r1 and r2 in the same /16 network + private boolean areInSameSlash16(Router r1, Router r2) { + final IPv4Address a1 = r1.getAddress(); + final IPv4Address a2 = r2.getAddress(); + final int mask = 0xFFFF0000; + return (a1.getAddressData() & mask) == (a2.getAddressData() & mask); + } + + private List filterForExitTargets(List routers, List exitTargets) { + int bestSupport = 0; + if(exitTargets.isEmpty()) { + return routers; + } + + final int[] nSupport = new int[routers.size()]; + + for(int i = 0; i < routers.size(); i++) { + final Router r = routers.get(i); + nSupport[i] = countTargetSupport(r, exitTargets); + if(nSupport[i] > bestSupport) { + bestSupport = nSupport[i]; + } + } + + if(bestSupport == 0) { + return routers; + } + + final List results = new ArrayList(); + for(int i = 0; i < routers.size(); i++) { + if(nSupport[i] == bestSupport) { + results.add(routers.get(i)); + } + } + return results; + } + + private int countTargetSupport(Router router, List targets) { + int count = 0; + for(ExitTarget t: targets) { + if(routerSupportsTarget(router, t)) { + count += 1; + } + } + return count; + } + + private boolean routerSupportsTarget(Router router, ExitTarget target) { + if(target.isAddressTarget()) { + return router.exitPolicyAccepts(target.getAddress(), target.getPort()); + } else { + return router.exitPolicyAccepts(target.getPort()); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/path/ConfigNodeFilter.java b/orchid/src/com/subgraph/orchid/circuits/path/ConfigNodeFilter.java new file mode 100644 index 
00000000..bb5d7524 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/path/ConfigNodeFilter.java @@ -0,0 +1,201 @@ +package com.subgraph.orchid.circuits.path; + +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import com.subgraph.orchid.Router; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; + +/** + * Implements configuration options: + * + * ExcludeNodes,ExcludeExitNodes,ExitNodes,EntryNodes + * + */ +public class ConfigNodeFilter implements RouterFilter { + + private final static Pattern NETMASK_PATTERN = Pattern.compile("^(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)/(\\d+)$"); + private final static Pattern ADDRESS_BITS_PATTERN = Pattern.compile("^(\\d+\\.\\d+\\.\\d+\\.\\d+)/(\\d+)$"); + + private final static Pattern IDENTITY_PATTERN = Pattern.compile("^[A-Fa-f0-9]{40}$"); + private final static Pattern COUNTRYCODE_PATTERN = Pattern.compile("^\\{([A-Za-z]{2})\\}$"); + private final static Pattern ROUTERNAME_PATTERN = Pattern.compile("^\\w{1,19}$"); + + static class MaskFilter implements RouterFilter { + + private final int network; + private final int bits; + private final int mask; + + + static int createMask(final int maskBitCount) { + return maskBitCount == 0 ? 0 : (1 << 31) >> (maskBitCount - 1); + } + + MaskFilter(IPv4Address network, int bits) { + this.bits = bits; + this.mask = createMask(bits); + this.network = network.getAddressData() & mask; + } + + public boolean filter(Router router) { + final int routerAddress = router.getAddress().getAddressData(); + return (routerAddress & mask) == network; + } + + public String toString() { + IPv4Address a = new IPv4Address(network); + return a.toString() + "/" + bits; + + } + } + + static class IdentityFilter implements RouterFilter { + private final HexDigest identity; + IdentityFilter(HexDigest identity) { + this.identity = identity; + } + public boolean filter(Router router) { + return router.getIdentityHash().equals(identity); + } + } + + static class NameFilter implements RouterFilter { + private final String name; + NameFilter(String name) { + this.name = name; + } + public boolean filter(Router router) { + return name.equals(router.getNickname()); + } + } + + static class CountryCodeFilter implements RouterFilter { + private final String countryCode; + public CountryCodeFilter(String countryCode) { + this.countryCode = countryCode; + } + public boolean filter(Router router) { + return countryCode.equalsIgnoreCase(router.getCountryCode()); + } + } + + static boolean isAddressString(String s) { + Matcher matcher = NETMASK_PATTERN.matcher(s); + if(!matcher.matches()) { + return false; + } + try { + for(int i = 1; i < 5; i++) { + if(!isValidOctetString(matcher.group(i))) { + return false; + } + } + return isValidMaskValue(matcher.group(5)); + } catch (NumberFormatException e) { + return false; + } + } + + private static boolean isValidOctetString(String s) { + int n = Integer.parseInt(s); + return n >= 0 && n <= 255; + } + + private static boolean isValidMaskValue(String s) { + int n = Integer.parseInt(s); + return n > 0 && n <= 32; + } + + static boolean isIdentityString(String s) { + return IDENTITY_PATTERN.matcher(s).matches(); + } + + static boolean isCountryCodeString(String s) { + return COUNTRYCODE_PATTERN.matcher(s).matches(); + } + + static boolean isNameString(String s) { + return ROUTERNAME_PATTERN.matcher(s).matches(); + } + + static RouterFilter createFilterFor(String s) { + if(isAddressString(s)) { + return 
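+ // Each accepted string form maps to one filter, e.g. "1.2.3.0/24" -> MaskFilter,
+ // "{de}" -> CountryCodeFilter, a 40-character hex fingerprint -> IdentityFilter,
+ // and a plain nickname -> NameFilter; anything else yields null and is skipped
+ // by createFromStrings() below.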
createAddressFilter(s); + } else if(isCountryCodeString(s)) { + return createCountryCodeFilter(s); + } else if(isIdentityString(s)) { + return createIdentityFilter(s); + } else if (isNameString(s)) { + return createNameFilter(s); + } else { + return null; + } + } + + private static RouterFilter createAddressFilter(String s) { + final Matcher matcher = ADDRESS_BITS_PATTERN.matcher(s); + if(!matcher.matches()) { + throw new IllegalArgumentException(); + } + final IPv4Address network = IPv4Address.createFromString(matcher.group(1)); + final int bits = Integer.parseInt(matcher.group(2)); + return new MaskFilter(network, bits); + } + + private static RouterFilter createIdentityFilter(String s) { + if(!isIdentityString(s)) { + throw new IllegalArgumentException(); + } + final HexDigest identity = HexDigest.createFromString(s); + return new IdentityFilter(identity); + } + + private static RouterFilter createCountryCodeFilter(String s) { + final Matcher matcher = COUNTRYCODE_PATTERN.matcher(s); + if(!matcher.matches()) { + throw new IllegalArgumentException(); + } + return new CountryCodeFilter(matcher.group(1)); + } + + private static RouterFilter createNameFilter(String s) { + if(!isNameString(s)) { + throw new IllegalArgumentException(); + } + return new NameFilter(s); + } + + static ConfigNodeFilter createFromStrings(List stringList) { + final List filters = new ArrayList(); + for(String s: stringList) { + RouterFilter f = createFilterFor(s); + if(f != null) { + filters.add(f); + } + } + return new ConfigNodeFilter(filters); + } + + private final List filterList; + + private ConfigNodeFilter(List filterList) { + this.filterList = filterList; + } + + public boolean filter(Router router) { + for(RouterFilter f: filterList) { + if(f.filter(router)) { + return true; + } + } + return false; + } + + boolean isEmpty() { + return filterList.isEmpty(); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/path/PathSelectionFailedException.java b/orchid/src/com/subgraph/orchid/circuits/path/PathSelectionFailedException.java new file mode 100644 index 00000000..a669ac2c --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/path/PathSelectionFailedException.java @@ -0,0 +1,11 @@ +package com.subgraph.orchid.circuits.path; + +public class PathSelectionFailedException extends Exception { + private static final long serialVersionUID = -8855252756021674268L; + + public PathSelectionFailedException() {} + + public PathSelectionFailedException(String message) { + super(message); + } +} diff --git a/orchid/src/com/subgraph/orchid/circuits/path/RouterFilter.java b/orchid/src/com/subgraph/orchid/circuits/path/RouterFilter.java new file mode 100644 index 00000000..ae883017 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/path/RouterFilter.java @@ -0,0 +1,7 @@ +package com.subgraph.orchid.circuits.path; + +import com.subgraph.orchid.Router; + +public interface RouterFilter { + boolean filter(Router router); +} diff --git a/orchid/src/com/subgraph/orchid/circuits/path/TorConfigNodeFilter.java b/orchid/src/com/subgraph/orchid/circuits/path/TorConfigNodeFilter.java new file mode 100644 index 00000000..800509bb --- /dev/null +++ b/orchid/src/com/subgraph/orchid/circuits/path/TorConfigNodeFilter.java @@ -0,0 +1,70 @@ +package com.subgraph.orchid.circuits.path; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorConfig; + +public class TorConfigNodeFilter { + + /* + * Even though 
these are exactly the configuration file variable names, they are only + * used here as keys into a Map + */ + private final static String EXCLUDE_NODES_FILTER = "ExcludeNodes"; + private final static String EXCLUDE_EXIT_NODES_FILTER = "ExcludeExitNodes"; + private final static String ENTRY_NODES_FILTER = "EntryNodes"; + private final static String EXIT_NODES_FILTER = "ExitNodes"; + + private final Map filters; + + TorConfigNodeFilter(TorConfig config) { + this.filters = new HashMap(); + addFilter(filters, EXCLUDE_NODES_FILTER, config.getExcludeNodes()); + addFilter(filters, EXCLUDE_EXIT_NODES_FILTER, config.getExcludeExitNodes()); + addFilter(filters, ENTRY_NODES_FILTER, config.getEntryNodes()); + addFilter(filters, EXIT_NODES_FILTER, config.getExitNodes()); + } + + private static void addFilter(Map filters, String name, List filterStrings) { + if(filterStrings == null || filterStrings.isEmpty()) { + return; + } + filters.put(name, ConfigNodeFilter.createFromStrings(filterStrings)); + } + + List filterExitCandidates(List candidates) { + final List filtered = new ArrayList(); + for(Router r: candidates) { + if(isExitNodeIncluded(r)) { + filtered.add(r); + } + } + return filtered; + } + + boolean isExitNodeIncluded(Router exitRouter) { + return isIncludedByFilter(exitRouter, EXIT_NODES_FILTER) && + !(isExcludedByFilter(exitRouter, EXCLUDE_EXIT_NODES_FILTER) || + isExcludedByFilter(exitRouter, EXCLUDE_NODES_FILTER)); + } + + boolean isIncludedByFilter(Router r, String filterName) { + final ConfigNodeFilter f = filters.get(filterName); + if(f == null || f.isEmpty()) { + return true; + } + return f.filter(r); + } + + boolean isExcludedByFilter(Router r, String filterName) { + final ConfigNodeFilter f = filters.get(filterName); + if(f == null || f.isEmpty()) { + return false; + } + return f.filter(r); + } +} diff --git a/orchid/src/com/subgraph/orchid/config/TorConfigBridgeLine.java b/orchid/src/com/subgraph/orchid/config/TorConfigBridgeLine.java new file mode 100644 index 00000000..ef25d31f --- /dev/null +++ b/orchid/src/com/subgraph/orchid/config/TorConfigBridgeLine.java @@ -0,0 +1,29 @@ +package com.subgraph.orchid.config; + +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; + +public class TorConfigBridgeLine { + + private final IPv4Address address; + private final int port; + private final HexDigest fingerprint; + + TorConfigBridgeLine(IPv4Address address, int port, HexDigest fingerprint) { + this.address = address; + this.port = port; + this.fingerprint = fingerprint; + } + + public IPv4Address getAddress() { + return address; + } + + public int getPort() { + return port; + } + + public HexDigest getFingerprint() { + return fingerprint; + } +} diff --git a/orchid/src/com/subgraph/orchid/config/TorConfigHSAuth.java b/orchid/src/com/subgraph/orchid/config/TorConfigHSAuth.java new file mode 100644 index 00000000..612c527c --- /dev/null +++ b/orchid/src/com/subgraph/orchid/config/TorConfigHSAuth.java @@ -0,0 +1,57 @@ +package com.subgraph.orchid.config; + +import java.util.HashMap; +import java.util.Map; + +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.circuits.hs.HSDescriptorCookie; +import com.subgraph.orchid.circuits.hs.HSDescriptorCookie.CookieType; +import com.subgraph.orchid.data.Base32; +import com.subgraph.orchid.encoders.Base64; + +public class TorConfigHSAuth { + + private final Map map = new HashMap(); + + void add(String key, String b64Value) { + final HSDescriptorCookie cookie = createFromBase64(b64Value); + final 
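+ // The key is a hidden service address: validateKey() strips an optional ".onion"
+ // suffix and requires the rest to base32-decode to exactly 10 bytes. The value is
+ // the 22-character base64 descriptor cookie, whose trailing flag nibble selects
+ // basic (0) or stealth (1) authorization in createFromBase64().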
String k = validateKey(key); + map.put(k, cookie); + } + + private String validateKey(String key) { + final String k = (key.endsWith(".onion")) ? key.substring(0, (key.length() - 6)) : key; + try { + byte[] decoded = Base32.base32Decode(k); + if(decoded.length != 10) { + throw new IllegalArgumentException(); + } + return k; + } catch (TorException e) { + throw new IllegalArgumentException(e.getMessage()); + } + } + + HSDescriptorCookie get(String key) { + return map.get(validateKey(key)); + } + + private HSDescriptorCookie createFromBase64(String b64) { + if(b64.length() != 22) { + throw new IllegalArgumentException(); + } + final byte[] decoded = Base64.decode(b64 + "A="); + final byte lastByte = decoded[decoded.length - 1]; + final int flag = (lastByte & 0xFF) >> 4; + final byte[] cookie = new byte[decoded.length - 1]; + System.arraycopy(decoded, 0, cookie, 0, cookie.length); + switch(flag) { + case 0: + return new HSDescriptorCookie(CookieType.COOKIE_BASIC, cookie); + case 1: + return new HSDescriptorCookie(CookieType.COOKIE_STEALTH, cookie); + default: + throw new TorException("Illegal cookie descriptor with flag value: "+ flag); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/config/TorConfigInterval.java b/orchid/src/com/subgraph/orchid/config/TorConfigInterval.java new file mode 100644 index 00000000..ecb3b89e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/config/TorConfigInterval.java @@ -0,0 +1,111 @@ +package com.subgraph.orchid.config; + +import java.util.concurrent.TimeUnit; + +public class TorConfigInterval { + + public static TorConfigInterval createFrom(String s) { + final String[] ss = s.split(" "); + final long n = Long.parseLong(ss[0]); + if(ss.length == 1) { + return new TorConfigInterval(n, TimeUnit.SECONDS); + } else { + return createForValueAndUnit(n, ss[1]); + } + } + + private static TorConfigInterval createForValueAndUnit(long value, String unitName) { + if(stringMatchesUnit(unitName, "week")) { + return new TorConfigInterval(value * 7, TimeUnit.DAYS); + } else { + final TimeUnit unit = stringToUnit(unitName); + return new TorConfigInterval(value, unit); + } + } + + private static TimeUnit stringToUnit(String s) { + if(stringMatchesUnit(s, "day")) { + return TimeUnit.DAYS; + } else if(stringMatchesUnit(s, "hour")) { + return TimeUnit.HOURS; + } else if(stringMatchesUnit(s, "minute")) { + return TimeUnit.MINUTES; + } else if(stringMatchesUnit(s, "second")) { + return TimeUnit.SECONDS; + } else if(stringMatchesUnit(s, "millisecond")) { + return TimeUnit.MILLISECONDS; + } else { + throw new IllegalArgumentException(); + } + } + + private static boolean stringMatchesUnit(String s, String unitType) { + if(s == null) { + return false; + } else { + return s.equalsIgnoreCase(unitType) || s.equalsIgnoreCase(unitType + "s"); + } + } + + private final TimeUnit timeUnit; + private final long value; + + + public TorConfigInterval(long value, TimeUnit timeUnit) { + this.timeUnit = getTimeUnitFor(value, timeUnit); + this.value = getValueFor(value, timeUnit); + + } + + public long getMilliseconds() { + return TimeUnit.MILLISECONDS.convert(value, timeUnit); + } + + private static TimeUnit getTimeUnitFor(long value, TimeUnit timeUnit) { + if(timeUnit == TimeUnit.NANOSECONDS || timeUnit == TimeUnit.MICROSECONDS) { + return TimeUnit.MILLISECONDS; + } else { + return timeUnit; + } + } + + private static long getValueFor(long value, TimeUnit timeUnit) { + if(timeUnit == TimeUnit.NANOSECONDS || timeUnit == TimeUnit.MICROSECONDS) { + return TimeUnit.MILLISECONDS.convert(value, 
timeUnit); + } else { + return value; + } + } + + public String toString() { + if(timeUnit == TimeUnit.DAYS && (value % 7 == 0)) { + final long weeks = value / 7; + return (weeks == 1) ? "1 week" : (weeks + " weeks"); + } + final StringBuilder sb = new StringBuilder(); + sb.append(value); + sb.append(" "); + sb.append(unitToString(timeUnit)); + if(value != 1) { + sb.append("s"); + } + return sb.toString(); + } + + private static String unitToString(TimeUnit unit) { + switch(unit) { + case MILLISECONDS: + return "millisecond"; + case SECONDS: + return "second"; + case MINUTES: + return "minute"; + case HOURS: + return "hour"; + case DAYS: + return "day"; + default: + throw new IllegalArgumentException(); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/config/TorConfigParser.java b/orchid/src/com/subgraph/orchid/config/TorConfigParser.java new file mode 100644 index 00000000..c44a066f --- /dev/null +++ b/orchid/src/com/subgraph/orchid/config/TorConfigParser.java @@ -0,0 +1,75 @@ +package com.subgraph.orchid.config; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.TorConfig.AutoBoolValue; +import com.subgraph.orchid.TorConfig.ConfigVarType; + +public class TorConfigParser { + + public Object parseValue(String value, ConfigVarType type) { + switch(type) { + case BOOLEAN: + return Boolean.parseBoolean(value); + case INTEGER: + return Integer.parseInt(value); + case INTERVAL: + return parseIntervalValue(value); + case PATH: + return parseFileValue(value); + case PORTLIST: + return parseIntegerList(value); + case STRING: + return value; + case STRINGLIST: + return parseCSV(value); + case AUTOBOOL: + return parseAutoBool(value); + case HS_AUTH: + default: + throw new IllegalArgumentException(); + } + } + + private File parseFileValue(String value) { + if(value.startsWith("~/")) { + final File home = new File(System.getProperty("user.home")); + return new File(home, value.substring(2)); + } + return new File(value); + } + private TorConfigInterval parseIntervalValue(String value) { + return TorConfigInterval.createFrom(value); + } + + private List parseIntegerList(String value) { + final List list = new ArrayList(); + for(String s: value.split(",")) { + list.add(Integer.parseInt(s)); + } + return list; + } + + private List parseCSV(String value) { + final List list = new ArrayList(); + for(String s: value.split(",")) { + list.add(s); + } + return list; + } + + private TorConfig.AutoBoolValue parseAutoBool(String value) { + if("auto".equalsIgnoreCase(value)) { + return AutoBoolValue.AUTO; + } else if("true".equalsIgnoreCase(value)) { + return AutoBoolValue.TRUE; + } else if("false".equalsIgnoreCase(value)) { + return AutoBoolValue.FALSE; + } else { + throw new IllegalArgumentException("Could not parse AutoBool value "+ value); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/config/TorConfigProxy.java b/orchid/src/com/subgraph/orchid/config/TorConfigProxy.java new file mode 100644 index 00000000..9e732293 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/config/TorConfigProxy.java @@ -0,0 +1,199 @@ +package com.subgraph.orchid.config; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.TorConfig.ConfigVar; +import 
com.subgraph.orchid.TorConfig.ConfigVarType; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; + +public class TorConfigProxy implements InvocationHandler { + + private final Map configValues; + private final List bridges; + private final TorConfigParser parser; + + public TorConfigProxy() { + this.configValues = new HashMap(); + this.bridges = new ArrayList(); + this.configValues.put("Bridges", bridges); + this.parser = new TorConfigParser(); + } + + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + if(method.getName().startsWith("set")) { + invokeSetMethod(method, args); + return null; + } else if(method.getName().startsWith("get")) { + if(args == null) { + return invokeGetMethod(method); + } else { + return invokeGetMethodWithArgs(method, args); + } + } else if(method.getName().startsWith("add")) { + invokeAddMethod(method, args); + return null; + } else { + throw new IllegalArgumentException(); + } + } + + void invokeSetMethod(Method method, Object[] args) { + final String name = getVariableNameForMethod(method); + final ConfigVar annotation = getAnnotationForVariable(name); + if(annotation != null && annotation.type() == ConfigVarType.INTERVAL) { + setIntervalValue(name, args); + } else { + configValues.put(name, args[0]); + } + } + + private void setIntervalValue(String varName, Object[] args) { + if(!(args[0] instanceof Long && args[1] instanceof TimeUnit)) { + throw new IllegalArgumentException(); + } + final long time = (Long) args[0]; + final TimeUnit unit = (TimeUnit) args[1]; + final TorConfigInterval interval = new TorConfigInterval(time, unit); + configValues.put(varName, interval); + } + + + private Object invokeGetMethodWithArgs(Method method, Object[] args) { + final String varName = getVariableNameForMethod(method); + if(getVariableType(varName) == ConfigVarType.HS_AUTH) { + return invokeHSAuthGet(varName, args); + } else { + throw new IllegalArgumentException(); + } + } + + private Object invokeGetMethod(Method method) { + final String varName = getVariableNameForMethod(method); + final Object value = getVariableValue(varName); + + if(value instanceof TorConfigInterval) { + final TorConfigInterval interval = (TorConfigInterval) value; + return interval.getMilliseconds(); + } else { + return value; + } + } + + private Object invokeHSAuthGet(String varName, Object[] args) { + if(!(args[0] instanceof String)) { + throw new IllegalArgumentException(); + } + final TorConfigHSAuth hsAuth = getHSAuth(varName); + return hsAuth.get((String) args[0]); + } + + private void invokeAddMethod(Method method, Object[] args) { + final String name = getVariableNameForMethod(method); + final ConfigVarType type = getVariableType(name); + switch(type) { + case HS_AUTH: + invokeHSAuthAdd(name, args); + break; + + case BRIDGE_LINE: + invokeBridgeAdd(args); + break; + + default: + throw new UnsupportedOperationException("addX configuration methods only supported for HS_AUTH or BRIDGE_LINE type"); + } + } + + private void invokeBridgeAdd(Object[] args) { + if(args.length >= 2 && (args[0] instanceof IPv4Address) && (args[1] instanceof Integer)) { + if(args.length == 2) { + bridges.add(new TorConfigBridgeLine((IPv4Address)args[0], (Integer)args[1], null)); + return; + } else if(args.length == 3 && (args[2] instanceof HexDigest)) { + bridges.add(new TorConfigBridgeLine((IPv4Address) args[0], (Integer) args[1], (HexDigest) args[2])); + return; + } + } + throw new IllegalArgumentException(); + } + + private void 
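+ // Expects the two String arguments of the corresponding add*() configuration
+ // method: the onion address key and its base64 descriptor cookie value.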
invokeHSAuthAdd(String name, Object[] args) { + if(!(args.length == 2 && (args[0] instanceof String) && (args[1] instanceof String))) { + throw new IllegalArgumentException(); + } + final TorConfigHSAuth hsAuth = getHSAuth(name); + hsAuth.add((String)args[0], (String)args[1]); + } + + private TorConfigHSAuth getHSAuth(String keyName) { + if(!configValues.containsKey(keyName)) { + configValues.put(keyName, new TorConfigHSAuth()); + } + return (TorConfigHSAuth) configValues.get(keyName); + } + + private Object getVariableValue(String varName) { + if(configValues.containsKey(varName)) { + return configValues.get(varName); + } else { + return getDefaultVariableValue(varName); + } + } + + private Object getDefaultVariableValue(String varName) { + final String defaultValue = getDefaultValueString(varName); + final ConfigVarType type = getVariableType(varName); + if(defaultValue == null || type == null) { + return null; + } + return parser.parseValue(defaultValue, type); + } + + private String getDefaultValueString(String varName) { + final ConfigVar var = getAnnotationForVariable(varName); + if(var == null) { + return null; + } else { + return var.defaultValue(); + } + } + + private ConfigVarType getVariableType(String varName) { + if("Bridge".equals(varName)) { + return ConfigVarType.BRIDGE_LINE; + } + + final ConfigVar var = getAnnotationForVariable(varName); + if(var == null) { + return null; + } else { + return var.type(); + } + } + + private String getVariableNameForMethod(Method method) { + final String methodName = method.getName(); + if(methodName.startsWith("get") || methodName.startsWith("set") || methodName.startsWith("add")) { + return methodName.substring(3); + } + throw new IllegalArgumentException(); + } + + private ConfigVar getAnnotationForVariable(String varName) { + final String getName = "get"+ varName; + for(Method m: TorConfig.class.getDeclaredMethods()) { + if(getName.equals(m.getName())) { + return m.getAnnotation(TorConfig.ConfigVar.class); + } + } + return null; + } +} diff --git a/orchid/src/com/subgraph/orchid/connections/ConnectionCacheImpl.java b/orchid/src/com/subgraph/orchid/connections/ConnectionCacheImpl.java new file mode 100644 index 00000000..bc3aa752 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/connections/ConnectionCacheImpl.java @@ -0,0 +1,196 @@ +package com.subgraph.orchid.connections; + +import java.io.IOException; +import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import javax.net.ssl.SSLSocket; + +import com.subgraph.orchid.Connection; +import com.subgraph.orchid.ConnectionCache; +import com.subgraph.orchid.ConnectionFailedException; +import com.subgraph.orchid.ConnectionHandshakeException; +import com.subgraph.orchid.ConnectionTimeoutException; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.circuits.TorInitializationTracker; +import com.subgraph.orchid.dashboard.DashboardRenderable; +import com.subgraph.orchid.dashboard.DashboardRenderer; + +public class ConnectionCacheImpl implements 
ConnectionCache, DashboardRenderable { + private final static Logger logger = Logger.getLogger(ConnectionCacheImpl.class.getName()); + + private class ConnectionTask implements Callable { + + private final Router router; + private final boolean isDirectoryConnection; + + ConnectionTask(Router router, boolean isDirectoryConnection) { + this.router = router; + this.isDirectoryConnection = isDirectoryConnection; + } + + public ConnectionImpl call() throws Exception { + final SSLSocket socket = factory.createSocket(); + final ConnectionImpl conn = new ConnectionImpl(config, socket, router, initializationTracker, isDirectoryConnection); + conn.connect(); + return conn; + } + } + + private class CloseIdleConnectionCheckTask implements Runnable { + public void run() { + for(Future f: activeConnections.values()) { + if(f.isDone()) { + try { + final ConnectionImpl c = f.get(); + c.idleCloseCheck(); + } catch (Exception e) { } + } + } + } + } + + private final ConcurrentMap> activeConnections = new ConcurrentHashMap>(); + private final ConnectionSocketFactory factory = new ConnectionSocketFactory(); + private final ScheduledExecutorService scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); + + private final TorConfig config; + private final TorInitializationTracker initializationTracker; + private volatile boolean isClosed; + + + public ConnectionCacheImpl(TorConfig config, TorInitializationTracker tracker) { + this.config = config; + this.initializationTracker = tracker; + scheduledExecutor.scheduleAtFixedRate(new CloseIdleConnectionCheckTask(), 5000, 5000, TimeUnit.MILLISECONDS); + } + + public void close() { + if(isClosed) { + return; + } + isClosed = true; + for(Future f: activeConnections.values()) { + if(f.isDone()) { + try { + ConnectionImpl conn = f.get(); + conn.closeSocket(); + } catch (InterruptedException e) { + logger.warning("Unexpected interruption while closing connection"); + } catch (ExecutionException e) { + logger.warning("Exception closing connection: "+ e.getCause()); + } + } else { + f.cancel(true); + } + } + activeConnections.clear(); + scheduledExecutor.shutdownNow(); + } + + public Connection getConnectionTo(Router router, boolean isDirectoryConnection) throws InterruptedException, ConnectionTimeoutException, ConnectionFailedException, ConnectionHandshakeException { + if(isClosed) { + throw new IllegalStateException("ConnectionCache has been closed"); + } + logger.fine("Get connection to "+ router.getAddress() + " "+ router.getOnionPort() + " " + router.getNickname()); + while(true) { + Future f = getFutureFor(router, isDirectoryConnection); + try { + Connection c = f.get(); + if(c.isClosed()) { + activeConnections.remove(router, f); + } else { + return c; + } + } catch (CancellationException e) { + activeConnections.remove(router, f); + } catch (ExecutionException e) { + activeConnections.remove(router, f); + final Throwable t = e.getCause(); + if(t instanceof ConnectionTimeoutException) { + throw (ConnectionTimeoutException) t; + } else if(t instanceof ConnectionFailedException) { + throw (ConnectionFailedException) t; + } else if(t instanceof ConnectionHandshakeException) { + throw (ConnectionHandshakeException) t; + } + throw new RuntimeException("Unexpected exception: "+ e, e); + } + } + } + + private Future getFutureFor(Router router, boolean isDirectoryConnection) { + Future f = activeConnections.get(router); + if(f != null) { + return f; + } + return createFutureForIfAbsent(router, isDirectoryConnection); + } + + private Future 
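+ // FutureTask-based memoizer: putIfAbsent() publishes the task so at most one
+ // connection attempt per router runs; if another thread registered a Future
+ // first that one is returned, otherwise the new task is run on the calling thread.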
createFutureForIfAbsent(Router router, boolean isDirectoryConnection) { + final Callable task = new ConnectionTask(router, isDirectoryConnection); + final FutureTask futureTask = new FutureTask(task); + + final Future f = activeConnections.putIfAbsent(router, futureTask); + if(f != null) { + return f; + } + + futureTask.run(); + return futureTask; + } + + public void dashboardRender(DashboardRenderer renderer, PrintWriter writer, int flags) throws IOException { + if((flags & DASHBOARD_CONNECTIONS) == 0) { + return; + } + printDashboardBanner(writer, flags); + for(Connection c: getActiveConnections()) { + if(!c.isClosed()) { + renderer.renderComponent(writer, flags, c); + } + } + writer.println(); + } + + private void printDashboardBanner(PrintWriter writer, int flags) { + final boolean verbose = (flags & DASHBOARD_CONNECTIONS_VERBOSE) != 0; + if(verbose) { + writer.println("[Connection Cache (verbose)]"); + } else { + writer.println("[Connection Cache]"); + } + writer.println(); + } + + List getActiveConnections() { + final List cs = new ArrayList(); + for(Future future: activeConnections.values()) { + addConnectionFromFuture(future, cs); + } + return cs; + } + + private void addConnectionFromFuture(Future future, List connectionList) { + try { + if(future.isDone() && !future.isCancelled()) { + connectionList.add(future.get()); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (ExecutionException e) { } + } +} diff --git a/orchid/src/com/subgraph/orchid/connections/ConnectionHandshake.java b/orchid/src/com/subgraph/orchid/connections/ConnectionHandshake.java new file mode 100644 index 00000000..052bf8f2 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/connections/ConnectionHandshake.java @@ -0,0 +1,158 @@ +package com.subgraph.orchid.connections; + +import java.io.IOException; +import java.security.PublicKey; +import java.security.interfaces.RSAPublicKey; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.List; +import java.util.logging.Logger; + +import javax.net.ssl.SSLSocket; + +import com.subgraph.orchid.BridgeRouter; +import com.subgraph.orchid.Cell; +import com.subgraph.orchid.ConnectionHandshakeException; +import com.subgraph.orchid.ConnectionIOException; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.circuits.cells.CellImpl; +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.data.IPv4Address; + +public abstract class ConnectionHandshake { + private final static Logger logger = Logger.getLogger(ConnectionHandshake.class.getName()); + + static ConnectionHandshake createHandshake(TorConfig config, ConnectionImpl connection, SSLSocket socket) throws ConnectionHandshakeException { + if(config.getHandshakeV3Enabled() && ConnectionHandshakeV3.sessionSupportsHandshake(socket.getSession())) { + return new ConnectionHandshakeV3(connection, socket); + } else if(config.getHandshakeV2Enabled()) { + return new ConnectionHandshakeV2(connection, socket); + } else { + throw new ConnectionHandshakeException("No valid handshake type available for this connection"); + } + + } + + protected final ConnectionImpl connection; + protected final SSLSocket socket; + + protected final List remoteVersions; + private int remoteTimestamp; + private IPv4Address myAddress; + private final List remoteAddresses; + + ConnectionHandshake(ConnectionImpl connection, SSLSocket socket) { + this.connection = connection; + this.socket = socket; + 
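+ // Populated while the handshake runs: receiveVersions() collects the link
+ // protocol versions offered by the peer, and processNetInfo() records the peer's
+ // own addresses (and the address it sees for us) from its NETINFO cell.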
this.remoteVersions = new ArrayList(); + this.remoteAddresses = new ArrayList(); + } + + abstract void runHandshake() throws IOException, InterruptedException, ConnectionIOException; + + int getRemoteTimestamp() { + return remoteTimestamp; + } + + IPv4Address getMyAddress() { + return myAddress; + } + + protected Cell expectCell(Integer... expectedTypes) throws ConnectionHandshakeException { + try { + final Cell c = connection.readConnectionControlCell(); + for(int t: expectedTypes) { + if(c.getCommand() == t) { + return c; + } + } + final List expected = Arrays.asList(expectedTypes); + throw new ConnectionHandshakeException("Expecting Cell command "+ expected + " and got [ "+ c.getCommand() +" ] instead"); + } catch (ConnectionIOException e) { + throw new ConnectionHandshakeException("Connection exception while performing handshake "+ e); + } + } + + protected void sendVersions(int... versions) throws ConnectionIOException { + final Cell cell = CellImpl.createVarCell(0, Cell.VERSIONS, versions.length * 2); + for(int v: versions) { + cell.putShort(v); + } + connection.sendCell(cell); + } + + protected void receiveVersions() throws ConnectionHandshakeException { + final Cell c = expectCell(Cell.VERSIONS); + while(c.cellBytesRemaining() >= 2) { + remoteVersions.add(c.getShort()); + } + } + + protected void sendNetinfo() throws ConnectionIOException { + final Cell cell = CellImpl.createCell(0, Cell.NETINFO); + putTimestamp(cell); + putIPv4Address(cell, connection.getRouter().getAddress()); + putMyAddresses(cell); + connection.sendCell(cell); + } + + private void putTimestamp(Cell cell) { + final Date now = new Date(); + cell.putInt((int) (now.getTime() / 1000)); + } + + private void putIPv4Address(Cell cell, IPv4Address address) { + final byte[] data = address.getAddressDataBytes(); + cell.putByte(Cell.ADDRESS_TYPE_IPV4); + cell.putByte(data.length); + cell.putByteArray(data); + } + + private void putMyAddresses(Cell cell) { + cell.putByte(1); + putIPv4Address(cell, new IPv4Address(0)); + } + + protected void recvNetinfo() throws ConnectionHandshakeException { + processNetInfo(expectCell(Cell.NETINFO)); + } + + protected void processNetInfo(Cell netinfoCell) { + remoteTimestamp = netinfoCell.getInt(); + myAddress = readAddress(netinfoCell); + final int addressCount = netinfoCell.getByte(); + for(int i = 0; i < addressCount; i++) { + IPv4Address addr = readAddress(netinfoCell); + if(addr != null) { + remoteAddresses.add(addr); + } + } + } + + private IPv4Address readAddress(Cell cell) { + final int type = cell.getByte(); + final int len = cell.getByte(); + if(type == Cell.ADDRESS_TYPE_IPV4 && len == 4) { + return new IPv4Address(cell.getInt()); + } + final byte[] buffer = new byte[len]; + cell.getByteArray(buffer); + return null; + } + + protected void verifyIdentityKey(PublicKey publicKey) throws ConnectionHandshakeException { + if(!(publicKey instanceof RSAPublicKey)) { + throw new ConnectionHandshakeException("Identity certificate public key is not an RSA key as expected"); + } + final TorPublicKey identityKey = new TorPublicKey((RSAPublicKey)publicKey); + final Router router = connection.getRouter(); + if((router instanceof BridgeRouter) && (router.getIdentityHash() == null)) { + logger.info("Setting Bridge fingerprint from connection handshake for "+ router); + ((BridgeRouter) router).setIdentity(identityKey.getFingerprint()); + } else if(!identityKey.getFingerprint().equals(router.getIdentityHash())) { + throw new ConnectionHandshakeException("Router identity does not match certificate 
key"); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/connections/ConnectionHandshakeV2.java b/orchid/src/com/subgraph/orchid/connections/ConnectionHandshakeV2.java new file mode 100644 index 00000000..130a287b --- /dev/null +++ b/orchid/src/com/subgraph/orchid/connections/ConnectionHandshakeV2.java @@ -0,0 +1,90 @@ +package com.subgraph.orchid.connections; + +import java.io.IOException; +import java.security.GeneralSecurityException; +import java.security.PublicKey; + +import javax.net.ssl.HandshakeCompletedEvent; +import javax.net.ssl.HandshakeCompletedListener; +import javax.net.ssl.SSLPeerUnverifiedException; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocket; +import javax.security.cert.CertificateException; +import javax.security.cert.X509Certificate; + +import com.subgraph.orchid.ConnectionHandshakeException; +import com.subgraph.orchid.ConnectionIOException; + +/** + * This class performs a Version 2 handshake as described in section 2 of + * tor-spec.txt. The handshake is considered complete after VERSIONS and + * NETINFO cells have been exchanged between the two sides. + */ +public class ConnectionHandshakeV2 extends ConnectionHandshake { + + private static class HandshakeFinishedMonitor implements HandshakeCompletedListener { + final Object lock = new Object(); + boolean isFinished; + + public void handshakeCompleted(HandshakeCompletedEvent event) { + synchronized(lock) { + this.isFinished = true; + lock.notifyAll(); + } + } + + public void waitFinished() throws InterruptedException { + synchronized(lock) { + while(!isFinished) { + lock.wait(); + } + } + } + } + + ConnectionHandshakeV2(ConnectionImpl connection, SSLSocket socket) { + super(connection, socket); + } + + void runHandshake() throws IOException, InterruptedException, ConnectionIOException { + // Swap in V1-only ciphers for second handshake as a workaround for: + // + // https://trac.torproject.org/projects/tor/ticket/4591 + // + socket.setEnabledCipherSuites(ConnectionSocketFactory.V1_CIPHERS_ONLY); + + final HandshakeFinishedMonitor monitor = new HandshakeFinishedMonitor(); + socket.addHandshakeCompletedListener(monitor); + socket.startHandshake(); + monitor.waitFinished(); + socket.removeHandshakeCompletedListener(monitor); + + verifyIdentityKey(getIdentityKey()); + sendVersions(2); + receiveVersions(); + sendNetinfo(); + recvNetinfo(); + } + + private PublicKey getIdentityKey() throws ConnectionHandshakeException { + final X509Certificate identityCertificate = getIdentityCertificateFromSession(socket.getSession()); + return identityCertificate.getPublicKey(); + } + + private X509Certificate getIdentityCertificateFromSession(SSLSession session) throws ConnectionHandshakeException { + try { + X509Certificate[] chain = session.getPeerCertificateChain(); + if(chain.length != 2) { + throw new ConnectionHandshakeException("Expecting 2 certificate chain from router and received chain length "+ chain.length); + } + chain[0].verify(chain[1].getPublicKey()); + return chain[1]; + } catch (SSLPeerUnverifiedException e) { + throw new ConnectionHandshakeException("No certificates received from router"); + } catch (GeneralSecurityException e) { + throw new ConnectionHandshakeException("Incorrect signature on certificate chain"); + } catch (CertificateException e) { + throw new ConnectionHandshakeException("Malformed certificate received"); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/connections/ConnectionHandshakeV3.java b/orchid/src/com/subgraph/orchid/connections/ConnectionHandshakeV3.java new 
file mode 100644 index 00000000..ca2ff75b --- /dev/null +++ b/orchid/src/com/subgraph/orchid/connections/ConnectionHandshakeV3.java @@ -0,0 +1,195 @@ +package com.subgraph.orchid.connections; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.math.BigInteger; +import java.security.GeneralSecurityException; +import java.security.Principal; +import java.security.PublicKey; +import java.security.cert.CertificateException; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.security.interfaces.RSAPublicKey; + +import javax.net.ssl.SSLPeerUnverifiedException; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocket; + +import com.subgraph.orchid.Cell; +import com.subgraph.orchid.ConnectionHandshakeException; +import com.subgraph.orchid.ConnectionIOException; + +public class ConnectionHandshakeV3 extends ConnectionHandshake { + + private X509Certificate linkCertificate; + private X509Certificate identityCertificate; + + ConnectionHandshakeV3(ConnectionImpl connection, SSLSocket socket) { + super(connection, socket); + } + + void runHandshake() throws IOException, InterruptedException, ConnectionIOException { + sendVersions(3); + receiveVersions(); + recvCerts(); + recvAuthChallengeAndNetinfo(); + verifyCertificates(); + sendNetinfo(); + } + + void recvCerts() throws ConnectionHandshakeException { + final Cell cell = expectCell(Cell.CERTS); + final int ncerts = cell.getByte(); + if(ncerts != 2) { + throw new ConnectionHandshakeException("Expecting 2 certificates and got "+ ncerts); + } + + linkCertificate = null; + identityCertificate = null; + + for(int i = 0; i < ncerts; i++) { + int type = cell.getByte(); + if(type == 1) { + linkCertificate = testAndReadCertificate(cell, linkCertificate, "Link (type = 1)"); + } else if(type == 2) { + identityCertificate = testAndReadCertificate(cell, identityCertificate, "Identity (type = 2)"); + } else { + throw new ConnectionHandshakeException("Unexpected certificate type = "+ type + " in CERTS cell"); + } + } + + } + + RSAPublicKey getConnectionPublicKey() { + try { + javax.security.cert.X509Certificate[] chain = socket.getSession().getPeerCertificateChain(); + return (RSAPublicKey) chain[0].getPublicKey(); + } catch (SSLPeerUnverifiedException e) { + return null; + } + } + + + private X509Certificate testAndReadCertificate(Cell cell, X509Certificate currentValue, String type) throws ConnectionHandshakeException { + if(currentValue == null) { + return readCertificateFromCell(cell); + } else { + throw new ConnectionHandshakeException("Duplicate "+ type + " certificates in CERTS cell"); + } + } + + private X509Certificate readCertificateFromCell(Cell cell) { + try { + final CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509"); + final int clen = cell.getShort(); + final byte[] certificateBuffer = new byte[clen]; + cell.getByteArray(certificateBuffer); + final ByteArrayInputStream bis = new ByteArrayInputStream(certificateBuffer); + return (X509Certificate) certificateFactory.generateCertificate(bis); + } catch (CertificateException e) { + return null; + } + + } + + void verifyCertificates() throws ConnectionHandshakeException { + PublicKey publicKey = identityCertificate.getPublicKey(); + verifyIdentityKey(publicKey); + RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; + + if(rsaPublicKey.getModulus().bitLength() != 1024) { + throw new ConnectionHandshakeException("Invalid RSA modulus length in router identity key"); + } + + try { + 
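+ // Both certificates delivered in the CERTS cell must be currently valid and
+ // signed by the router's identity key; any failure aborts the handshake.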
identityCertificate.checkValidity(); + identityCertificate.verify(rsaPublicKey); + linkCertificate.checkValidity(); + linkCertificate.verify(rsaPublicKey); + } catch (GeneralSecurityException e) { + throw new ConnectionHandshakeException("Router presented invalid certificate chain in CERTS cell"); + } + + RSAPublicKey rsa2 = (RSAPublicKey) linkCertificate.getPublicKey(); + if(!getConnectionPublicKey().getModulus().equals(rsa2.getModulus())) { + throw new ConnectionHandshakeException("Link certificate in CERTS cell does not match connection certificate"); + } + } + + void recvAuthChallengeAndNetinfo() throws ConnectionHandshakeException { + final Cell cell = expectCell(Cell.AUTH_CHALLENGE, Cell.NETINFO); + if(cell.getCommand() == Cell.NETINFO) { + processNetInfo(cell); + return; + } + final Cell netinfo = expectCell(Cell.NETINFO); + processNetInfo(netinfo); + } + + public static boolean sessionSupportsHandshake(SSLSession session) { + javax.security.cert.X509Certificate cert = getConnectionCertificateFromSession(session); + if(cert == null) { + return false; + } + return isSelfSigned(cert) || testDName(cert.getSubjectDN()) || + testDName(cert.getIssuerDN()) || testModulusLength(cert); + } + + static private javax.security.cert.X509Certificate getConnectionCertificateFromSession(SSLSession session) { + try { + final javax.security.cert.X509Certificate[] chain = session.getPeerCertificateChain(); + return chain[0]; + } catch (SSLPeerUnverifiedException e) { + return null; + } + } + + static private boolean isSelfSigned(javax.security.cert.X509Certificate certificate) { + try { + certificate.verify(certificate.getPublicKey()); + return true; + } catch (Exception e) { + return false; + } + } + + /* + * * Some component other than "commonName" is set in the subject or + * issuer DN of the certificate. + * + * * The commonName of the subject or issuer of the certificate ends + * with a suffix other than ".net". + */ + static private boolean testDName(Principal dn) { + final String dname = dn.getName(); + if(dname.indexOf(",") >= 0) { + return true; + } + return !getCN(dname).endsWith(".net"); + } + + /* + * * The certificate's public key modulus is longer than 1024 bits. 
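+ *
+ * Certificates generated for the older v1/v2 handshakes have none of these
+ * properties, so (per section 2 of tor-spec.txt) any one of them indicates that
+ * the responder supports the v3 in-protocol handshake.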
+ */ + static private boolean testModulusLength(javax.security.cert.X509Certificate cert) { + if(!(cert.getPublicKey() instanceof RSAPublicKey)) { + return false; + } + final RSAPublicKey rsaPublicKey = (RSAPublicKey) cert.getPublicKey(); + final BigInteger modulus = rsaPublicKey.getModulus(); + return modulus.bitLength() > 1024; + } + + static private String getCN(String dname) { + final int idx = dname.indexOf("CN="); + if(idx == -1) { + return ""; + } + final int comma = dname.indexOf(',', idx); + if(comma == -1) { + return dname.substring(idx); + } else { + return dname.substring(idx, comma); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/connections/ConnectionImpl.java b/orchid/src/com/subgraph/orchid/connections/ConnectionImpl.java new file mode 100644 index 00000000..3ca3b729 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/connections/ConnectionImpl.java @@ -0,0 +1,342 @@ +package com.subgraph.orchid.connections; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.PrintWriter; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.net.SocketTimeoutException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Level; +import java.util.logging.Logger; + +import javax.net.ssl.SSLSocket; + +import com.subgraph.orchid.Cell; +import com.subgraph.orchid.Circuit; +import com.subgraph.orchid.Connection; +import com.subgraph.orchid.ConnectionFailedException; +import com.subgraph.orchid.ConnectionHandshakeException; +import com.subgraph.orchid.ConnectionIOException; +import com.subgraph.orchid.ConnectionTimeoutException; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.circuits.TorInitializationTracker; +import com.subgraph.orchid.circuits.cells.CellImpl; +import com.subgraph.orchid.crypto.TorRandom; +import com.subgraph.orchid.dashboard.DashboardRenderable; +import com.subgraph.orchid.dashboard.DashboardRenderer; + +/** + * This class represents a transport link between two onion routers or + * between an onion proxy and an entry router. 
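+ * Multiple circuits are multiplexed over the single TLS link: each circuit is
+ * registered in circuitMap under its circuit id, and a dedicated reader thread
+ * dispatches incoming RELAY and control cells to the circuit that owns them.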
+ * + */ +public class ConnectionImpl implements Connection, DashboardRenderable { + private final static Logger logger = Logger.getLogger(ConnectionImpl.class.getName()); + private final static int CONNECTION_IDLE_TIMEOUT = 5 * 60 * 1000; // 5 minutes + private final static int DEFAULT_CONNECT_TIMEOUT = 5000; + private final static Cell connectionClosedSentinel = CellImpl.createCell(0, 0); + + private final TorConfig config; + private final SSLSocket socket; + private InputStream input; + private OutputStream output; + private final Router router; + private final Map circuitMap; + private final BlockingQueue connectionControlCells; + private final TorInitializationTracker initializationTracker; + private final boolean isDirectoryConnection; + + private int currentId = 1; + private boolean isConnected; + private volatile boolean isClosed; + private final Thread readCellsThread; + private final Object connectLock = new Object(); + private final AtomicLong lastActivity = new AtomicLong(); + + + public ConnectionImpl(TorConfig config, SSLSocket socket, Router router, TorInitializationTracker tracker, boolean isDirectoryConnection) { + this.config = config; + this.socket = socket; + this.router = router; + this.circuitMap = new HashMap(); + this.readCellsThread = new Thread(createReadCellsRunnable()); + this.readCellsThread.setDaemon(true); + this.connectionControlCells = new LinkedBlockingQueue(); + this.initializationTracker = tracker; + this.isDirectoryConnection = isDirectoryConnection; + initializeCurrentCircuitId(); + } + + private void initializeCurrentCircuitId() { + final TorRandom random = new TorRandom(); + currentId = random.nextInt(0xFFFF) + 1; + } + + public Router getRouter() { + return router; + } + + public boolean isClosed() { + return isClosed; + } + + public int bindCircuit(Circuit circuit) { + synchronized(circuitMap) { + while(circuitMap.containsKey(currentId)) + incrementNextId(); + final int id = currentId; + incrementNextId(); + circuitMap.put(id, circuit); + return id; + } + } + + private void incrementNextId() { + currentId++; + if(currentId > 0xFFFF) + currentId = 1; + } + + void connect() throws ConnectionFailedException, ConnectionTimeoutException, ConnectionHandshakeException { + synchronized (connectLock) { + if(isConnected) { + return; + } + try { + doConnect(); + } catch (SocketTimeoutException e) { + throw new ConnectionTimeoutException(); + } catch (IOException e) { + throw new ConnectionFailedException(e.getClass().getName() + " : "+ e.getMessage()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new ConnectionHandshakeException("Handshake interrupted"); + } catch (ConnectionHandshakeException e) { + throw e; + } catch (ConnectionIOException e) { + throw new ConnectionFailedException(e.getMessage()); + } + isConnected = true; + } + } + + private void doConnect() throws IOException, InterruptedException, ConnectionIOException { + connectSocket(); + final ConnectionHandshake handshake = ConnectionHandshake.createHandshake(config, this, socket); + input = socket.getInputStream(); + output = socket.getOutputStream(); + readCellsThread.start(); + handshake.runHandshake(); + updateLastActivity(); + } + + private void connectSocket() throws IOException { + if(initializationTracker != null) { + if(isDirectoryConnection) { + initializationTracker.notifyEvent(Tor.BOOTSTRAP_STATUS_CONN_DIR); + } else { + initializationTracker.notifyEvent(Tor.BOOTSTRAP_STATUS_CONN_OR); + } + } + + socket.connect(routerToSocketAddress(router), 
DEFAULT_CONNECT_TIMEOUT); + + if(initializationTracker != null) { + if(isDirectoryConnection) { + initializationTracker.notifyEvent(Tor.BOOTSTRAP_STATUS_HANDSHAKE_DIR); + } else { + initializationTracker.notifyEvent(Tor.BOOTSTRAP_STATUS_HANDSHAKE_OR); + } + } + } + + private SocketAddress routerToSocketAddress(Router router) { + final InetAddress address = router.getAddress().toInetAddress(); + return new InetSocketAddress(address, router.getOnionPort()); + } + + public void sendCell(Cell cell) throws ConnectionIOException { + if(!socket.isConnected()) { + throw new ConnectionIOException("Cannot send cell because connection is not connected"); + } + updateLastActivity(); + synchronized(output) { + try { + output.write(cell.getCellBytes()); + } catch (IOException e) { + logger.fine("IOException writing cell to connection "+ e.getMessage()); + closeSocket(); + throw new ConnectionIOException(e.getClass().getName() + " : "+ e.getMessage()); + } + } + } + + private Cell recvCell() throws ConnectionIOException { + try { + return CellImpl.readFromInputStream(input); + } catch(EOFException e) { + closeSocket(); + throw new ConnectionIOException(); + } catch (IOException e) { + if(!isClosed) { + logger.fine("IOException reading cell from connection "+ this + " : "+ e.getMessage()); + closeSocket(); + } + throw new ConnectionIOException(e.getClass().getName() + " " + e.getMessage()); + } + } + + void closeSocket() { + try { + logger.fine("Closing connection to "+ this); + isClosed = true; + socket.close(); + isConnected = false; + } catch (IOException e) { + logger.warning("Error closing socket: "+ e.getMessage()); + } + } + + private Runnable createReadCellsRunnable() { + return new Runnable() { + public void run() { + try { + readCellsLoop(); + } catch(Exception e) { + logger.log(Level.WARNING, "Unhandled exception processing incoming cells on connection "+ e, e); + } + } + }; + } + + private void readCellsLoop() { + while(!Thread.interrupted()) { + try { + processCell( recvCell() ); + } catch(ConnectionIOException e) { + connectionControlCells.add(connectionClosedSentinel); + notifyCircuitsLinkClosed(); + return; + } catch(TorException e) { + logger.log(Level.WARNING, "Unhandled Tor exception reading and processing cells: "+ e.getMessage(), e); + } + } + } + + private void notifyCircuitsLinkClosed() { + + } + + Cell readConnectionControlCell() throws ConnectionIOException { + try { + return connectionControlCells.take(); + } catch (InterruptedException e) { + closeSocket(); + throw new ConnectionIOException(); + } + } + + private void processCell(Cell cell) { + updateLastActivity(); + final int command = cell.getCommand(); + + if(command == Cell.RELAY) { + processRelayCell(cell); + return; + } + + switch(command) { + case Cell.NETINFO: + case Cell.VERSIONS: + case Cell.CERTS: + case Cell.AUTH_CHALLENGE: + connectionControlCells.add(cell); + break; + + case Cell.CREATED: + case Cell.CREATED_FAST: + case Cell.DESTROY: + processControlCell(cell); + break; + default: + // Ignore everything else + break; + } + } + + private void processRelayCell(Cell cell) { + synchronized(circuitMap) { + final Circuit circuit = circuitMap.get(cell.getCircuitId()); + if(circuit == null) { + logger.warning("Could not deliver relay cell for circuit id = "+ cell.getCircuitId() +" on connection "+ this +". 
Circuit not found"); + return; + } + circuit.deliverRelayCell(cell); + } + } + + private void processControlCell(Cell cell) { + synchronized(circuitMap) { + final Circuit circuit = circuitMap.get(cell.getCircuitId()); + if(circuit != null) { + circuit.deliverControlCell(cell); + } + } + } + + void idleCloseCheck() { + synchronized (circuitMap) { + final boolean needClose = (!isClosed && circuitMap.isEmpty() && getIdleMilliseconds() > CONNECTION_IDLE_TIMEOUT); + if(needClose) { + logger.fine("Closing connection to "+ this +" on idle timeout"); + closeSocket(); + } + } + } + + private void updateLastActivity() { + lastActivity.set(System.currentTimeMillis()); + } + + private long getIdleMilliseconds() { + if(lastActivity.get() == 0) { + return 0; + } + return System.currentTimeMillis() - lastActivity.get(); + } + + public void removeCircuit(Circuit circuit) { + synchronized(circuitMap) { + circuitMap.remove(circuit.getCircuitId()); + } + } + + public String toString() { + return "!" + router.getNickname() + "!"; + } + + public void dashboardRender(DashboardRenderer renderer, PrintWriter writer, int flags) throws IOException { + final int circuitCount; + synchronized (circuitMap) { + circuitCount = circuitMap.size(); + } + if(circuitCount == 0 && (flags & DASHBOARD_CONNECTIONS_VERBOSE) == 0) { + return; + } + writer.print(" [Connection router="+ router.getNickname()); + writer.print(" circuits="+ circuitCount); + writer.print(" idle="+ (getIdleMilliseconds()/1000) + "s"); + writer.println("]"); + } +} diff --git a/orchid/src/com/subgraph/orchid/connections/ConnectionSocketFactory.java b/orchid/src/com/subgraph/orchid/connections/ConnectionSocketFactory.java new file mode 100644 index 00000000..9ffdf6c9 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/connections/ConnectionSocketFactory.java @@ -0,0 +1,76 @@ +package com.subgraph.orchid.connections; + +import java.io.IOException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocket; +import javax.net.ssl.SSLSocketFactory; +import javax.net.ssl.TrustManager; +import javax.net.ssl.X509TrustManager; + +import com.subgraph.orchid.TorException; + +public class ConnectionSocketFactory { + static final String[] V1_CIPHERS_ONLY = { + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA", + "SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA", + }; + + private static final String[] MANDATORY_CIPHERS = { + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA", + "SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA", + "SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA"}; + + private static final TrustManager[] NULL_TRUST = { + new X509TrustManager() { + private final X509Certificate[] empty = {}; + public void checkClientTrusted(X509Certificate[] chain, String authType) + throws CertificateException { + } + + public void checkServerTrusted(X509Certificate[] chain, String authType) + throws CertificateException { + } + + public X509Certificate[] getAcceptedIssuers() { + return empty; + } + } + }; + + private static SSLContext createSSLContext() { + System.setProperty("sun.security.ssl.allowUnsafeRenegotiation", "true"); + try { + final SSLContext sslContext = SSLContext.getInstance("TLS"); + sslContext.init(null, NULL_TRUST, null); + return sslContext; + } catch (NoSuchAlgorithmException e) { + throw new TorException(e); + } catch (KeyManagementException e) { + throw 
new TorException(e); + } + } + + private final SSLSocketFactory socketFactory; + + ConnectionSocketFactory() { + socketFactory = createSSLContext().getSocketFactory(); + } + + SSLSocket createSocket() { + try { + final SSLSocket socket = (SSLSocket) socketFactory.createSocket(); + socket.setEnabledCipherSuites(MANDATORY_CIPHERS); + socket.setUseClientMode(true); + return socket; + } catch (IOException e) { + throw new TorException(e); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/crypto/ASN1Parser.java b/orchid/src/com/subgraph/orchid/crypto/ASN1Parser.java new file mode 100644 index 00000000..0c60837e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/ASN1Parser.java @@ -0,0 +1,146 @@ +package com.subgraph.orchid.crypto; + +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +/** + * A very minimal ASN.1 BER parser which only supports the ASN.1 object types needed + * for parsing encoded RSA public keys. + */ +public class ASN1Parser { + + private final static int ASN1_TAG_SEQUENCE = 16; + private final static int ASN1_TAG_INTEGER = 2; + private final static int ASN1_TAG_BITSTRING = 3; + + static interface ASN1Object {}; + + static class ASN1Sequence implements ASN1Object { + private final List items; + + ASN1Sequence(List items) { + this.items = items; + } + + List getItems() { + return items; + } + } + + static class ASN1Integer implements ASN1Object { + final BigInteger value; + ASN1Integer(BigInteger value) { + this.value = value; + } + BigInteger getValue() { + return value; + } + } + + + static class ASN1BitString implements ASN1Object { + final byte[] bytes; + + ASN1BitString(byte[] bytes) { + this.bytes = bytes; + } + + byte[] getBytes() { + return bytes; + } + } + + /* For object types we don't handle, just stuff the bytes into here */ + static class ASN1Blob extends ASN1BitString { + ASN1Blob(byte[] bytes) { + super(bytes); + } + } + + ASN1Object parseASN1(ByteBuffer data) { + final int typeOctet = data.get() & 0xFF; + final int tag = typeOctet & 0x1F; + final ByteBuffer objectBuffer = getObjectBuffer(data); + + switch(tag) { + case ASN1_TAG_SEQUENCE: + return parseASN1Sequence(objectBuffer); + case ASN1_TAG_INTEGER: + return parseASN1Integer(objectBuffer); + case ASN1_TAG_BITSTRING: + return parseASN1BitString(objectBuffer); + default: + return createBlob(objectBuffer); + } + + } + + /* + * Read 'length' from data buffer, create a new buffer as a slice() which + * contains 'length' bytes of data following length field and return this + * buffer. Increment position pointer of data buffer to skip over these bytes. 
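+ * Only the definite length forms are accepted here: parseASN1Length() handles the short form and
+ * parseASN1LengthLong() the long form (at most three length octets); the indefinite form is rejected
+ * with an IllegalArgumentException.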
+ */ + ByteBuffer getObjectBuffer(ByteBuffer data) { + final int length = parseASN1Length(data); + if(length > data.remaining()) { + throw new IllegalArgumentException(); + } + final ByteBuffer objectBuffer = data.slice(); + objectBuffer.limit(length); + data.position(data.position() + length); + return objectBuffer; + } + + int parseASN1Length(ByteBuffer data) { + final int firstOctet = data.get() & 0xFF; + if(firstOctet < 0x80) { + return firstOctet; + } + return parseASN1LengthLong(firstOctet & 0x7F, data); + } + + int parseASN1LengthLong(int lengthOctets, ByteBuffer data) { + if(lengthOctets == 0 || lengthOctets > 3) { + // indefinite form or too long + throw new IllegalArgumentException(); + } + int length = 0; + for(int i = 0; i < lengthOctets; i++) { + length <<= 8; + length |= (data.get() & 0xFF); + } + return length; + } + + ASN1Sequence parseASN1Sequence(ByteBuffer data) { + final List obs = new ArrayList(); + while(data.hasRemaining()) { + obs.add(parseASN1(data)); + } + return new ASN1Sequence(obs); + } + + ASN1Integer parseASN1Integer(ByteBuffer data) { + return new ASN1Integer(new BigInteger(getRemainingBytes(data))); + } + + ASN1BitString parseASN1BitString(ByteBuffer data) { + final int unusedBits = data.get() & 0xFF; + if(unusedBits != 0) { + throw new IllegalArgumentException(); + } + return new ASN1BitString(getRemainingBytes(data)); + } + + ASN1Blob createBlob(ByteBuffer data) { + return new ASN1Blob(getRemainingBytes(data)); + } + + private byte[] getRemainingBytes(ByteBuffer data) { + final byte[] bs = new byte[data.remaining()]; + data.get(bs); + return bs; + } +} diff --git a/orchid/src/com/subgraph/orchid/crypto/Curve25519.java b/orchid/src/com/subgraph/orchid/crypto/Curve25519.java new file mode 100644 index 00000000..8cdb49c5 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/Curve25519.java @@ -0,0 +1,469 @@ +package com.subgraph.orchid.crypto; + +// +// Copyright (c) 2011, Neil Alexander T. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with +// or without modification, are permitted provided that the following +// conditions are met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +/* + * https://github.com/neilalexander/jnacl/blob/master/crypto/curve25519.java + */ +public class Curve25519 +{ + final int CRYPTO_BYTES = 32; + final int CRYPTO_SCALARBYTES = 32; + + static byte[] basev = { 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + static int[] minusp = { 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128 }; + + public static int crypto_scalarmult_base(byte[] q, byte[] n) + { + byte[] basevp = basev; + return crypto_scalarmult(q, n, basevp); + } + + static void add(int[] outv, int outvoffset, int[] a, int aoffset, int[] b, int boffset) + { + int u = 0; + + for (int j = 0; j < 31; ++j) + { + u += a[aoffset + j] + b[boffset + j]; + outv[outvoffset + j] = u & 255; + u >>>= 8; + } + + u += a[aoffset + 31] + b[boffset + 31]; + outv[outvoffset + 31] = u; + } + + static void sub(int[] outv, int outvoffset, int[] a, int aoffset, int[] b, int boffset) + { + int u = 218; + + for (int j = 0; j < 31; ++j) + { + u += a[aoffset + j] + 65280 - b[boffset + j]; + outv[outvoffset + j] = u & 255; + u >>>= 8; + } + + u += a[aoffset + 31] - b[boffset + 31]; + outv[outvoffset + 31] = u; + } + + static void squeeze(int[] a, int aoffset) + { + int u = 0; + + for (int j = 0; j < 31; ++j) + { + u += a[aoffset + j]; + a[aoffset + j] = u & 255; + u >>>= 8; + } + + u += a[aoffset + 31]; + a[aoffset + 31] = u & 127; + u = 19 * (u >>> 7); + + for (int j = 0; j < 31; ++j) + { + u += a[aoffset + j]; + a[aoffset + j] = u & 255; + u >>>= 8; + } + + u += a[aoffset + 31]; + a[aoffset + 31] = u; + } + + static void freeze(int[] a, int aoffset) + { + int[] aorig = new int[32]; + + for (int j = 0; j < 32; ++j) + aorig[j] = a[aoffset + j]; + + int[] minuspp = minusp; + + add(a, 0, a, 0, minuspp, 0); + + int negative = (int) (-((a[aoffset + 31] >>> 7) & 1)); + + for (int j = 0; j < 32; ++j) + a[aoffset + j] ^= negative & (aorig[j] ^ a[aoffset + j]); + } + + static void mult(int[] outv, int outvoffset, int[] a, int aoffset, int[] b, int boffset) + { + int j; + + for (int i = 0; i < 32; ++i) + { + int u = 0; + + for (j = 0; j <= i; ++j) + u += a[aoffset + j] * b[boffset + i - j]; + + for (j = i + 1; j < 32; ++j) + u += 38 * a[aoffset + j] * b[boffset + i + 32 - j]; + + outv[outvoffset + i] = u; + } + + squeeze(outv, outvoffset); + } + + static void mult121665(int[] outv, int[] a) + { + int j; + int u = 0; + + for (j = 0; j < 31; ++j) + { + u += 121665 * a[j]; + outv[j] = u & 255; + u >>>= 8; + } + + u += 121665 * a[31]; + outv[31] = u & 127; + u = 19 * (u >>> 7); + + for (j = 0; j < 31; ++j) + { + u += outv[j]; + outv[j] = u & 255; + u >>>= 8; + } + + u += outv[j]; + outv[j] = u; + } + + static void square(int[] outv, int outvoffset, int[] a, int aoffset) + { + int j; + + for (int i = 0; i < 32; ++i) + { + int u = 0; + + for (j = 0; j < i - j; ++j) + u += a[aoffset + j] * a[aoffset + i - j]; + + for (j = i + 1; j < i + 32 - j; ++j) + u += 38 * a[aoffset + j] * a[aoffset + i + 32 - j]; + + u *= 2; + + if ((i & 1) == 0) + { + u += a[aoffset + i / 2] * a[aoffset + i / 2]; + u += 38 * a[aoffset + i / 2 + 16] * a[aoffset + i / 2 + 16]; + } + + outv[outvoffset + i] = u; + } + + squeeze(outv, outvoffset); + } + + static void select(int[] p, int[] q, int[] r, int[] s, int b) + { + int bminus1 = b - 1; + + for (int j = 0; j < 64; ++j) + { + int t = bminus1 & (r[j] ^ s[j]); + p[j] = s[j] ^ t; + q[j] = r[j] ^ t; + } + } + + static void mainloop(int[] work, byte[] e) + { + int[] xzm1 = new int[64]; + int[] xzm = 
new int[64]; + int[] xzmb = new int[64]; + int[] xzm1b = new int[64]; + int[] xznb = new int[64]; + int[] xzn1b = new int[64]; + int[] a0 = new int[64]; + int[] a1 = new int[64]; + int[] b0 = new int[64]; + int[] b1 = new int[64]; + int[] c1 = new int[64]; + int[] r = new int[32]; + int[] s = new int[32]; + int[] t = new int[32]; + int[] u = new int[32]; + + for (int j = 0; j < 32; ++j) + xzm1[j] = work[j]; + + xzm1[32] = 1; + + for (int j = 33; j < 64; ++j) + xzm1[j] = 0; + + xzm[0] = 1; + + for (int j = 1; j < 64; ++j) + xzm[j] = 0; + + int[] xzmbp = xzmb, a0p = a0, xzm1bp = xzm1b; + int[] a1p = a1, b0p = b0, b1p = b1, c1p = c1; + int[] xznbp = xznb, up = u, xzn1bp = xzn1b; + int[] workp = work, sp = s, rp = r; + + for (int pos = 254; pos >= 0; --pos) + { + int b = ((int) ((e[pos / 8] & 0xFF) >>> (pos & 7))); + b &= 1; + select(xzmb, xzm1b, xzm, xzm1, b); + add(a0, 0, xzmb, 0, xzmbp, 32); + sub(a0p, 32, xzmb, 0, xzmbp, 32); + add(a1, 0, xzm1b, 0, xzm1bp, 32); + sub(a1p, 32, xzm1b, 0, xzm1bp, 32); + square(b0p, 0, a0p, 0); + square(b0p, 32, a0p, 32); + mult(b1p, 0, a1p, 0, a0p, 32); + mult(b1p, 32, a1p, 32, a0p, 0); + add(c1, 0, b1, 0, b1p, 32); + sub(c1p, 32, b1, 0, b1p, 32); + square(rp, 0, c1p, 32); + sub(sp, 0, b0, 0, b0p, 32); + mult121665(t, s); + add(u, 0, t, 0, b0p, 0); + mult(xznbp, 0, b0p, 0, b0p, 32); + mult(xznbp, 32, sp, 0, up, 0); + square(xzn1bp, 0, c1p, 0); + mult(xzn1bp, 32, rp, 0, workp, 0); + select(xzm, xzm1, xznb, xzn1b, b); + } + + for (int j = 0; j < 64; ++j) + work[j] = xzm[j]; + } + + static void recip(int[] outv, int outvoffset, int[] z, int zoffset) + { + int[] z2 = new int[32]; + int[] z9 = new int[32]; + int[] z11 = new int[32]; + int[] z2_5_0 = new int[32]; + int[] z2_10_0 = new int[32]; + int[] z2_20_0 = new int[32]; + int[] z2_50_0 = new int[32]; + int[] z2_100_0 = new int[32]; + int[] t0 = new int[32]; + int[] t1 = new int[32]; + + /* 2 */ + int[] z2p = z2; + square(z2p, 0, z, zoffset); + + /* 4 */ + square(t1, 0, z2, 0); + + /* 8 */ + square(t0, 0, t1, 0); + + /* 9 */ + int[] z9p = z9, t0p = t0; + mult(z9p, 0, t0p, 0, z, zoffset); + + /* 11 */ + mult(z11, 0, z9, 0, z2, 0); + + /* 22 */ + square(t0, 0, z11, 0); + + /* 2^5 - 2^0 = 31 */ + mult(z2_5_0, 0, t0, 0, z9, 0); + + /* 2^6 - 2^1 */ + square(t0, 0, z2_5_0, 0); + + /* 2^7 - 2^2 */ + square(t1, 0, t0, 0); + + /* 2^8 - 2^3 */ + square(t0, 0, t1, 0); + + /* 2^9 - 2^4 */ + square(t1, 0, t0, 0); + + /* 2^10 - 2^5 */ + square(t0, 0, t1, 0); + + /* 2^10 - 2^0 */ + mult(z2_10_0, 0, t0, 0, z2_5_0, 0); + + /* 2^11 - 2^1 */ + square(t0, 0, z2_10_0, 0); + + /* 2^12 - 2^2 */ + square(t1, 0, t0, 0); + + /* 2^20 - 2^10 */ + for (int i = 2; i < 10; i += 2) + { + square(t0, 0, t1, 0); + square(t1, 0, t0, 0); + } + + /* 2^20 - 2^0 */ + mult(z2_20_0, 0, t1, 0, z2_10_0, 0); + + /* 2^21 - 2^1 */ + square(t0, 0, z2_20_0, 0); + + /* 2^22 - 2^2 */ + square(t1, 0, t0, 0); + + /* 2^40 - 2^20 */ + for (int i = 2; i < 20; i += 2) + { + square(t0, 0, t1, 0); + square(t1, 0, t0, 0); + } + + /* 2^40 - 2^0 */ + mult(t0, 0, t1, 0, z2_20_0, 0); + + /* 2^41 - 2^1 */ + square(t1, 0, t0, 0); + + /* 2^42 - 2^2 */ + square(t0, 0, t1, 0); + + /* 2^50 - 2^10 */ + for (int i = 2; i < 10; i += 2) + { + square(t1, 0, t0, 0); + square(t0, 0, t1, 0); + } + + /* 2^50 - 2^0 */ + mult(z2_50_0, 0, t0, 0, z2_10_0, 0); + + /* 2^51 - 2^1 */ + square(t0, 0, z2_50_0, 0); + + /* 2^52 - 2^2 */ + square(t1, 0, t0, 0); + + /* 2^100 - 2^50 */ + for (int i = 2; i < 50; i += 2) + { + square(t0, 0, t1, 0); + square(t1, 0, t0, 0); + } + + /* 2^100 - 2^0 */ + 
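+ /* (note: this addition chain is building z^(2^255 - 21) = z^(p - 2) = 1/z mod p, with p = 2^255 - 19) */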
mult(z2_100_0, 0, t1, 0, z2_50_0, 0); + + /* 2^101 - 2^1 */ + square(t1, 0, z2_100_0, 0); + + /* 2^102 - 2^2 */ + square(t0, 0, t1, 0); + + /* 2^200 - 2^100 */ + for (int i = 2; i < 100; i += 2) + { + square(t1, 0, t0, 0); + square(t0, 0, t1, 0); + } + + /* 2^200 - 2^0 */ + mult(t1, 0, t0, 0, z2_100_0, 0); + + /* 2^201 - 2^1 */ + square(t0, 0, t1, 0); + + /* 2^202 - 2^2 */ + square(t1, 0, t0, 0); + + /* 2^250 - 2^50 */ + for (int i = 2; i < 50; i += 2) + { + square(t0, 0, t1, 0); + square(t1, 0, t0, 0); + } + + /* 2^250 - 2^0 */ + mult(t0, 0, t1, 0, z2_50_0, 0); + + /* 2^251 - 2^1 */ + square(t1, 0, t0, 0); + + /* 2^252 - 2^2 */ + square(t0, 0, t1, 0); + + /* 2^253 - 2^3 */ + square(t1, 0, t0, 0); + + /* 2^254 - 2^4 */ + square(t0, 0, t1, 0); + + /* 2^255 - 2^5 */ + square(t1, 0, t0, 0); + + /* 2^255 - 21 */ + int[] t1p = t1, z11p = z11; + mult(outv, outvoffset, t1p, 0, z11p, 0); + } + + public static int crypto_scalarmult(byte[] q, byte[] n, byte[] p) + { + int[] work = new int[96]; + byte[] e = new byte[32]; + + for (int i = 0; i < 32; ++i) + e[i] = n[i]; + + e[0] &= 248; + e[31] &= 127; + e[31] |= 64; + + for (int i = 0; i < 32; ++i) + work[i] = p[i] & 0xFF; + + mainloop(work, e); + + recip(work, 32, work, 32); + mult(work, 64, work, 0, work, 32); + freeze(work, 64); + + for (int i = 0; i < 32; ++i) + q[i] = (byte) work[64 + i]; + + return 0; + } +} diff --git a/orchid/src/com/subgraph/orchid/crypto/HybridEncryption.java b/orchid/src/com/subgraph/orchid/crypto/HybridEncryption.java new file mode 100644 index 00000000..33a394c9 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/HybridEncryption.java @@ -0,0 +1,150 @@ +package com.subgraph.orchid.crypto; + +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; + +import javax.crypto.BadPaddingException; +import javax.crypto.Cipher; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; + +import com.subgraph.orchid.TorException; + +/** + * The HybridEncryption class implements the "hybrid encryption" scheme + * as described in section 0.3 of the main Tor specification (tor-spec.txt). + */ +public class HybridEncryption { + + private final static int PK_ENC_LEN = 128; + private final static int PK_PAD_LEN = 42; + private final static int PK_DATA_LEN = PK_ENC_LEN - PK_PAD_LEN; // 86 bytes + private final static int PK_DATA_LEN_WITH_KEY = PK_DATA_LEN - TorStreamCipher.KEY_LEN; // 70 bytes + /* + * The "hybrid encryption" of a byte sequence M with a public key PK is + * computed as follows: + * + * 1. If M is less than PK_ENC_LEN-PK_PAD_LEN (86), pad and encrypt M with PK. + * 2. Otherwise, generate a KEY_LEN byte random key K. + * Let M1 = the first PK_ENC_LEN-PK_PAD_LEN-KEY_LEN (70) bytes of M, + * and let M2 = the rest of M. + * Pad and encrypt K|M1 with PK. Encrypt M2 with our stream cipher, + * using the key K. Concatenate these encrypted values. + */ + final private Cipher cipher; + + /** + * Create a new HybridEncryption instance which can be used for performing + * "hybrid encryption" operations as described in the main Tor specification (tor-spec.txt). 
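+ * The RSA step uses OAEP padding ("RSA/ECB/OAEPWithSHA1AndMGF1Padding"), which accounts for the
+ * 42 byte PK_PAD_LEN overhead (2 * SHA-1 length + 2) assumed above.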
+ */ + public HybridEncryption() { + try { + cipher = Cipher.getInstance("RSA/ECB/OAEPWithSHA1AndMGF1Padding"); + } catch (NoSuchAlgorithmException e) { + throw new TorException(e); + } catch (NoSuchPaddingException e) { + throw new TorException(e); + } + } + + /** + * Encrypt the entire contents of the byte array data with the given TorPublicKey + * according to the "hybrid encryption" scheme described in the main Tor specification (tor-spec.txt). + * + * @param data The bytes to be encrypted. + * @param publicKey The public key to use for encryption. + * @return A new array containing the encrypted data. + */ + public byte[] encrypt(byte[] data, TorPublicKey publicKey) { + if(data.length < PK_DATA_LEN) + return encryptSimple(data, publicKey); + + // RSA( K | M1 ) --> C1 + TorStreamCipher randomKeyCipher = TorStreamCipher.createWithRandomKey(); + final byte[] kAndM1 = new byte[PK_DATA_LEN]; + System.arraycopy(randomKeyCipher.getKeyBytes(), 0, kAndM1, 0, TorStreamCipher.KEY_LEN); + System.arraycopy(data, 0, kAndM1, TorStreamCipher.KEY_LEN, PK_DATA_LEN_WITH_KEY); + final byte[] c1 = encryptSimple(kAndM1, publicKey); + + // AES_CTR(M2) --> C2 + final byte[] c2 = new byte[data.length - PK_DATA_LEN_WITH_KEY]; + System.arraycopy(data, PK_DATA_LEN_WITH_KEY, c2, 0, c2.length); + randomKeyCipher.encrypt(c2); + //final byte[] c2 = randomKeyCipher.doFinal(data, PK_DATA_LEN_WITH_KEY, data.length - PK_DATA_LEN_WITH_KEY); + + // C1 | C2 + final byte[] output = new byte[c1.length + c2.length]; + System.arraycopy(c1, 0, output, 0, c1.length); + System.arraycopy(c2, 0, output, c1.length, c2.length); + return output; + } + + private byte[] encryptSimple(byte[] data, TorPublicKey publicKey) { + try { + cipher.init(Cipher.ENCRYPT_MODE, publicKey.getRSAPublicKey()); + return cipher.doFinal(data); + } catch (InvalidKeyException e) { + throw new TorException(e); + } catch (IllegalBlockSizeException e) { + throw new TorException(e); + } catch (BadPaddingException e) { + throw new TorException(e); + } + } + + /** + * Decrypt the contents of the byte array data with the given TorPrivateKey + * according to the "hybrid encryption" scheme described in the main Tor specification (tor-spec.txt). + * + * @param data Encrypted data to decrypt. + * @param privateKey The private key to use to decrypt the data. + * @return A new byte array containing the decrypted data. 
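+ * @throws TorException If data is shorter than PK_ENC_LEN (128) bytes.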
+ */ + + public byte[] decrypt(byte[] data, TorPrivateKey privateKey) { + if(data.length < PK_ENC_LEN) + throw new TorException("Message is too short"); + + if(data.length == PK_ENC_LEN) + return decryptSimple(data, privateKey); + + // ( C1 | C2 ) --> C1, C2 + final byte[] c1 = new byte[PK_ENC_LEN]; + final byte[] c2 = new byte[data.length - PK_ENC_LEN]; + System.arraycopy(data, 0, c1, 0, PK_ENC_LEN); + System.arraycopy(data, PK_ENC_LEN, c2, 0, c2.length); + + // RSA( C1 ) --> ( K | M1 ) --> K, M1 + final byte[] kAndM1 = decryptSimple(c1, privateKey); + final byte[] streamKey = new byte[TorStreamCipher.KEY_LEN]; + final int m1Length = kAndM1.length - TorStreamCipher.KEY_LEN; + final byte[] m1 = new byte[m1Length]; + System.arraycopy(kAndM1, 0, streamKey, 0, TorStreamCipher.KEY_LEN); + System.arraycopy(kAndM1, TorStreamCipher.KEY_LEN, m1, 0, m1Length); + + // AES_CTR( C2 ) --> M2 + final TorStreamCipher streamCipher = TorStreamCipher.createFromKeyBytes(streamKey); + streamCipher.encrypt(c2); + final byte[] m2 = c2; + + final byte[] output = new byte[m1.length + m2.length]; + System.arraycopy(m1, 0, output, 0, m1.length); + System.arraycopy(m2, 0, output, m1.length, m2.length); + return output; + } + + private byte[] decryptSimple(byte[] data, TorPrivateKey privateKey) { + try { + cipher.init(Cipher.DECRYPT_MODE, privateKey.getRSAPrivateKey()); + return cipher.doFinal(data); + } catch (InvalidKeyException e) { + throw new TorException(e); + } catch (IllegalBlockSizeException e) { + throw new TorException(e); + } catch (BadPaddingException e) { + throw new TorException(e); + } + } + + +} diff --git a/orchid/src/com/subgraph/orchid/crypto/PRNGFixes.java b/orchid/src/com/subgraph/orchid/crypto/PRNGFixes.java new file mode 100644 index 00000000..b49ffb5d --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/PRNGFixes.java @@ -0,0 +1,360 @@ +package com.subgraph.orchid.crypto; + +/* + * This software is provided 'as-is', without any express or implied + * warranty. In no event will Google be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, as long as the origin is not misrepresented. + */ + +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.UnsupportedEncodingException; +import java.security.NoSuchAlgorithmException; +import java.security.Provider; +import java.security.SecureRandom; +import java.security.SecureRandomSpi; +import java.security.Security; +import java.util.logging.Logger; + +/** + * Fixes for the output of the default PRNG having low entropy. + * + * The fixes need to be applied via {@link #apply()} before any use of Java + * Cryptography Architecture primitives. A good place to invoke them is in the + * application's {@code onCreate}. + */ +public final class PRNGFixes { + + private final static Logger logger = Logger.getLogger(PRNGFixes.class.getName()); + + private static final int VERSION_CODE_JELLY_BEAN = 16; + private static final int VERSION_CODE_JELLY_BEAN_MR2 = 18; + private static final byte[] BUILD_FINGERPRINT_AND_DEVICE_SERIAL = + getBuildFingerprintAndDeviceSerial(); + + /** Hidden constructor to prevent instantiation. 
*/ + private PRNGFixes() {} + + /** + * Applies all fixes. + * + * @throws SecurityException if a fix is needed but could not be applied. + */ + public static void apply() { + applyOpenSSLFix(); + installLinuxPRNGSecureRandom(); + } + + /** + * Applies the fix for OpenSSL PRNG having low entropy. Does nothing if the + * fix is not needed. + * + * @throws SecurityException if the fix is needed but could not be applied. + */ + private static void applyOpenSSLFix() throws SecurityException { + int sdkVersion = getSdkVersion(); + if ((sdkVersion < VERSION_CODE_JELLY_BEAN) + || (sdkVersion > VERSION_CODE_JELLY_BEAN_MR2)) { + // No need to apply the fix + return; + } + + try { + // Mix in the device- and invocation-specific seed. + Class.forName("org.apache.harmony.xnet.provider.jsse.NativeCrypto") + .getMethod("RAND_seed", byte[].class) + .invoke(null, generateSeed()); + + // Mix output of Linux PRNG into OpenSSL's PRNG + int bytesRead = (Integer) Class.forName( + "org.apache.harmony.xnet.provider.jsse.NativeCrypto") + .getMethod("RAND_load_file", String.class, long.class) + .invoke(null, "/dev/urandom", 1024); + if (bytesRead != 1024) { + throw new IOException( + "Unexpected number of bytes read from Linux PRNG: " + + bytesRead); + } + } catch (Exception e) { + throw new SecurityException("Failed to seed OpenSSL PRNG", e); + } + } + + /** + * Installs a Linux PRNG-backed {@code SecureRandom} implementation as the + * default. Does nothing if the implementation is already the default or if + * there is not need to install the implementation. + * + * @throws SecurityException if the fix is needed but could not be applied. + */ + private static void installLinuxPRNGSecureRandom() + throws SecurityException { + if (getSdkVersion() > VERSION_CODE_JELLY_BEAN_MR2) { + // No need to apply the fix + return; + } + + // Install a Linux PRNG-based SecureRandom implementation as the + // default, if not yet installed. + Provider[] secureRandomProviders = + Security.getProviders("SecureRandom.SHA1PRNG"); + if ((secureRandomProviders == null) + || (secureRandomProviders.length < 1) + || (!LinuxPRNGSecureRandomProvider.class.equals( + secureRandomProviders[0].getClass()))) { + Security.insertProviderAt(new LinuxPRNGSecureRandomProvider(), 1); + } + + // Assert that new SecureRandom() and + // SecureRandom.getInstance("SHA1PRNG") return a SecureRandom backed + // by the Linux PRNG-based SecureRandom implementation. + SecureRandom rng1 = new SecureRandom(); + if (!LinuxPRNGSecureRandomProvider.class.equals( + rng1.getProvider().getClass())) { + throw new SecurityException( + "new SecureRandom() backed by wrong Provider: " + + rng1.getProvider().getClass()); + } + + SecureRandom rng2; + try { + rng2 = SecureRandom.getInstance("SHA1PRNG"); + } catch (NoSuchAlgorithmException e) { + throw new SecurityException("SHA1PRNG not available", e); + } + if (!LinuxPRNGSecureRandomProvider.class.equals( + rng2.getProvider().getClass())) { + throw new SecurityException( + "SecureRandom.getInstance(\"SHA1PRNG\") backed by wrong" + + " Provider: " + rng2.getProvider().getClass()); + } + } + + /** + * {@code Provider} of {@code SecureRandom} engines which pass through + * all requests to the Linux PRNG. 
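+ * Installed at the highest provider position by installLinuxPRNGSecureRandom() so that both
+ * {@code new SecureRandom()} and {@code SecureRandom.getInstance("SHA1PRNG")} resolve to the
+ * /dev/urandom backed engine below.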
+ */ + private static class LinuxPRNGSecureRandomProvider extends Provider { + + private static final long serialVersionUID = 1L; + + public LinuxPRNGSecureRandomProvider() { + super("LinuxPRNG", + 1.0, + "A Linux-specific random number provider that uses" + + " /dev/urandom"); + // Although /dev/urandom is not a SHA-1 PRNG, some apps + // explicitly request a SHA1PRNG SecureRandom and we thus need to + // prevent them from getting the default implementation whose output + // may have low entropy. + put("SecureRandom.SHA1PRNG", LinuxPRNGSecureRandom.class.getName()); + put("SecureRandom.SHA1PRNG ImplementedIn", "Software"); + } + } + + /** + * {@link SecureRandomSpi} which passes all requests to the Linux PRNG + * ({@code /dev/urandom}). + */ + public static class LinuxPRNGSecureRandom extends SecureRandomSpi { + + /* + * IMPLEMENTATION NOTE: Requests to generate bytes and to mix in a seed + * are passed through to the Linux PRNG (/dev/urandom). Instances of + * this class seed themselves by mixing in the current time, PID, UID, + * build fingerprint, and hardware serial number (where available) into + * Linux PRNG. + * + * Concurrency: Read requests to the underlying Linux PRNG are + * serialized (on sLock) to ensure that multiple threads do not get + * duplicated PRNG output. + */ + + + private static final long serialVersionUID = 1L; + + private static final File URANDOM_FILE = new File("/dev/urandom"); + + private static final Object sLock = new Object(); + + /** + * Input stream for reading from Linux PRNG or {@code null} if not yet + * opened. + * + * @GuardedBy("sLock") + */ + private static DataInputStream sUrandomIn; + + /** + * Output stream for writing to Linux PRNG or {@code null} if not yet + * opened. + * + * @GuardedBy("sLock") + */ + private static OutputStream sUrandomOut; + + /** + * Whether this engine instance has been seeded. This is needed because + * each instance needs to seed itself if the client does not explicitly + * seed it. + */ + private boolean mSeeded; + + @Override + protected void engineSetSeed(byte[] bytes) { + try { + OutputStream out; + synchronized (sLock) { + out = getUrandomOutputStream(); + } + out.write(bytes); + out.flush(); + } catch (IOException e) { + // On a small fraction of devices /dev/urandom is not writable. + // Log and ignore. + logger.warning("Failed to mix seed into " + URANDOM_FILE); + } finally { + mSeeded = true; + } + } + + @Override + protected void engineNextBytes(byte[] bytes) { + if (!mSeeded) { + // Mix in the device- and invocation-specific seed. + engineSetSeed(generateSeed()); + } + + try { + DataInputStream in; + synchronized (sLock) { + in = getUrandomInputStream(); + } + synchronized (in) { + in.readFully(bytes); + } + } catch (IOException e) { + throw new SecurityException( + "Failed to read from " + URANDOM_FILE, e); + } + } + + @Override + protected byte[] engineGenerateSeed(int size) { + byte[] seed = new byte[size]; + engineNextBytes(seed); + return seed; + } + + private DataInputStream getUrandomInputStream() { + synchronized (sLock) { + if (sUrandomIn == null) { + // NOTE: Consider inserting a BufferedInputStream between + // DataInputStream and FileInputStream if you need higher + // PRNG output performance and can live with future PRNG + // output being pulled into this process prematurely. 
+ try { + sUrandomIn = new DataInputStream( + new FileInputStream(URANDOM_FILE)); + } catch (IOException e) { + throw new SecurityException("Failed to open " + + URANDOM_FILE + " for reading", e); + } + } + return sUrandomIn; + } + } + + private OutputStream getUrandomOutputStream() throws IOException { + synchronized (sLock) { + if (sUrandomOut == null) { + sUrandomOut = new FileOutputStream(URANDOM_FILE); + } + return sUrandomOut; + } + } + } + + /** + * Generates a device- and invocation-specific seed to be mixed into the + * Linux PRNG. + */ + private static byte[] generateSeed() { + try { + ByteArrayOutputStream seedBuffer = new ByteArrayOutputStream(); + DataOutputStream seedBufferOut = + new DataOutputStream(seedBuffer); + seedBufferOut.writeLong(System.currentTimeMillis()); + seedBufferOut.writeLong(System.nanoTime()); + //seedBufferOut.writeInt(Process.myPid()); + //seedBufferOut.writeInt(Process.myUid()); + seedBufferOut.write(BUILD_FINGERPRINT_AND_DEVICE_SERIAL); + seedBufferOut.close(); + return seedBuffer.toByteArray(); + } catch (IOException e) { + throw new SecurityException("Failed to generate seed", e); + } + } + + /** + * Gets the hardware serial number of this device. + * + * @return serial number or {@code null} if not available. + */ + private static String getDeviceSerialNumber() { + // We're using the Reflection API because Build.SERIAL is only available + // since API Level 9 (Gingerbread, Android 2.3). + try { + return (String) Class.forName("android.os.Build").getField("SERIAL").get(null); + //return (String) Build.class.getField("SERIAL").get(null); + } catch (Exception ignored) { + return null; + } + } + + private static int getSdkVersion() { + try { + return Class.forName("android.os.Build").getField("VERSION").getClass().getField("SDK_INT").getInt(null); + } catch (Exception e) { + logger.warning("Could not get Build.VERSION.SDK_INT value : "+ e); + return 0; + } + } + + private static String getBuildFingerprint() { + try { + return (String) Class.forName("android.os.Build").getField("FINGERPRINT").get(null); + } catch (Exception e) { + logger.warning("Could not get BUILD.FINGERPRINT value : "+ e); + return ""; + } + } + + private static byte[] getBuildFingerprintAndDeviceSerial() { + StringBuilder result = new StringBuilder(); + // String fingerprint = Build.FINGERPRINT; + String fingerprint = getBuildFingerprint(); + if (fingerprint != null) { + result.append(fingerprint); + } + String serial = getDeviceSerialNumber(); + if (serial != null) { + result.append(serial); + } + try { + return result.toString().getBytes("UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException("UTF-8 encoding not supported"); + } + } +} \ No newline at end of file diff --git a/orchid/src/com/subgraph/orchid/crypto/RSAKeyEncoder.java b/orchid/src/com/subgraph/orchid/crypto/RSAKeyEncoder.java new file mode 100644 index 00000000..28eb474a --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/RSAKeyEncoder.java @@ -0,0 +1,131 @@ +package com.subgraph.orchid.crypto; + +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.security.GeneralSecurityException; +import java.security.InvalidKeyException; +import java.security.KeyFactory; +import java.security.interfaces.RSAPublicKey; +import java.security.spec.RSAPublicKeySpec; +import java.util.List; + +import com.subgraph.orchid.crypto.ASN1Parser.ASN1BitString; +import com.subgraph.orchid.crypto.ASN1Parser.ASN1Integer; +import com.subgraph.orchid.crypto.ASN1Parser.ASN1Object; +import 
com.subgraph.orchid.crypto.ASN1Parser.ASN1Sequence; +import com.subgraph.orchid.encoders.Base64; + +public class RSAKeyEncoder { + private final static String HEADER = "-----BEGIN RSA PUBLIC KEY-----"; + private final static String FOOTER = "-----END RSA PUBLIC KEY-----"; + + private final ASN1Parser asn1Parser = new ASN1Parser(); + + /** + * Parse a PKCS1 PEM encoded RSA public key into the modulus/exponent components + * and construct a new RSAPublicKey + * + * @param pem The PEM encoded string to parse. + * @return a new RSAPublicKey + * + * @throws GeneralSecurityException If an error occurs while parsing the pem argument or creating the RSA key. + */ + public RSAPublicKey parsePEMPublicKey(String pem) throws GeneralSecurityException { + try { + byte[] bs = decodeAsciiArmoredPEM(pem); + ByteBuffer data = ByteBuffer.wrap(bs); + final ASN1Object ob = asn1Parser.parseASN1(data); + final List seq = asn1ObjectToSequence(ob, 2); + final BigInteger modulus = asn1ObjectToBigInt(seq.get(0)); + final BigInteger exponent = asn1ObjectToBigInt(seq.get(1)); + return createKeyFromModulusAndExponent(modulus, exponent); + } catch (IllegalArgumentException e) { + throw new InvalidKeyException(); + } + } + + private RSAPublicKey createKeyFromModulusAndExponent(BigInteger modulus, BigInteger exponent) throws GeneralSecurityException { + RSAPublicKeySpec spec = new RSAPublicKeySpec(modulus, exponent); + KeyFactory fac = KeyFactory.getInstance("RSA"); + return (RSAPublicKey) fac.generatePublic(spec); + } + + /** + * Return the PKCS1 encoded representation of the specified RSAPublicKey. Since + * the primary encoding format for RSA public keys is X.509 SubjectPublicKeyInfo, + * this needs to be converted to PKCS1 by extracting the needed field. + * + * @param publicKey The RSA public key to encode. 
+ * @return The PKCS1 encoded representation of the publicKey argument + */ + public byte[] getPKCS1Encoded(RSAPublicKey publicKey) { + return extractPKCS1KeyFromSubjectPublicKeyInfo(publicKey.getEncoded()); + } + + /* + * SubjectPublicKeyInfo encoding looks like this: + * + * SEQUENCE { + * SEQUENCE { + * OBJECT IDENTIFIER rsaEncryption (1 2 840 113549 1 1 1) + * NULL + * } + * BIT STRING (encapsulating) { <-- contains PKCS1 encoded key + * SEQUENCE { + * INTEGER (modulus) + * INTEGER (exponent) + * } + * } + * } + * + * See: http://www.jensign.com/JavaScience/dotnet/JKeyNet/index.html + */ + private byte[] extractPKCS1KeyFromSubjectPublicKeyInfo(byte[] input) { + final ASN1Object ob = asn1Parser.parseASN1(ByteBuffer.wrap(input)); + final List seq = asn1ObjectToSequence(ob, 2); + return asn1ObjectToBitString(seq.get(1)); + } + + private BigInteger asn1ObjectToBigInt(ASN1Object ob) { + if(!(ob instanceof ASN1Integer)) { + throw new IllegalArgumentException(); + } + final ASN1Integer n = (ASN1Integer) ob; + return n.getValue(); + } + + + private List asn1ObjectToSequence(ASN1Object ob, int expectedSize) { + if(ob instanceof ASN1Sequence) { + final ASN1Sequence seq = (ASN1Sequence) ob; + if(seq.getItems().size() != expectedSize) { + throw new IllegalArgumentException(); + } + return seq.getItems(); + } + throw new IllegalArgumentException(); + } + + private byte[] asn1ObjectToBitString(ASN1Object ob) { + if(!(ob instanceof ASN1BitString)) { + throw new IllegalArgumentException(); + } + final ASN1BitString bitstring = (ASN1BitString) ob; + return bitstring.getBytes(); + } + + private byte[] decodeAsciiArmoredPEM(String pem) { + final String trimmed = removeDelimiters(pem); + return Base64.decode(trimmed); + } + + private String removeDelimiters(String pem) { + final int headerIdx = pem.indexOf(HEADER); + final int footerIdx = pem.indexOf(FOOTER); + if(headerIdx == -1 || footerIdx == -1 || footerIdx <= headerIdx) { + throw new IllegalArgumentException("PEM object not formatted with expected header and footer"); + } + return pem.substring(headerIdx + HEADER.length(), footerIdx); + } + +} diff --git a/orchid/src/com/subgraph/orchid/crypto/TorCreateFastKeyAgreement.java b/orchid/src/com/subgraph/orchid/crypto/TorCreateFastKeyAgreement.java new file mode 100644 index 00000000..7563b2af --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorCreateFastKeyAgreement.java @@ -0,0 +1,54 @@ +package com.subgraph.orchid.crypto; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +public class TorCreateFastKeyAgreement implements TorKeyAgreement { + + private final byte[] xValue; + private byte[] yValue; + + public TorCreateFastKeyAgreement() { + final TorRandom random = new TorRandom(); + xValue = random.getBytes(TorMessageDigest.TOR_DIGEST_SIZE); + } + + public byte[] getPublicValue() { + return Arrays.copyOf(xValue, xValue.length); + } + + public void setOtherValue(byte[] yValue) { + if(yValue == null || yValue.length != TorMessageDigest.TOR_DIGEST_SIZE) { + throw new IllegalArgumentException(); + } + this.yValue = Arrays.copyOf(yValue, yValue.length); + } + + public byte[] getDerivedValue() { + if(yValue == null) { + throw new IllegalStateException("Must call setOtherValue() first"); + } + final byte[] result = new byte[2 * TorMessageDigest.TOR_DIGEST_SIZE]; + System.arraycopy(xValue, 0, result, 0, TorMessageDigest.TOR_DIGEST_SIZE); + System.arraycopy(yValue, 0, result, TorMessageDigest.TOR_DIGEST_SIZE, TorMessageDigest.TOR_DIGEST_SIZE); + return result; + } + + public byte[] 
createOnionSkin() { + return getPublicValue(); + } + + public boolean deriveKeysFromHandshakeResponse(byte[] handshakeResponse, + byte[] keyMaterialOut, byte[] verifyHashOut) { + final ByteBuffer bb = ByteBuffer.wrap(handshakeResponse); + final byte[] peerValue = new byte[TorMessageDigest.TOR_DIGEST_SIZE]; + final byte[] keyHash = new byte[TorMessageDigest.TOR_DIGEST_SIZE]; + bb.get(peerValue); + bb.get(keyHash); + setOtherValue(peerValue); + final byte[] seed = getDerivedValue(); + final TorKeyDerivation kdf = new TorKeyDerivation(seed); + kdf.deriveKeys(keyMaterialOut, verifyHashOut); + return Arrays.equals(verifyHashOut, keyHash); + } +} diff --git a/orchid/src/com/subgraph/orchid/crypto/TorKeyAgreement.java b/orchid/src/com/subgraph/orchid/crypto/TorKeyAgreement.java new file mode 100644 index 00000000..c9467aa1 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorKeyAgreement.java @@ -0,0 +1,6 @@ +package com.subgraph.orchid.crypto; + +public interface TorKeyAgreement { + byte[] createOnionSkin(); + boolean deriveKeysFromHandshakeResponse(byte[] handshakeResponse, byte[] keyMaterialOut, byte[] verifyHashOut); +} diff --git a/orchid/src/com/subgraph/orchid/crypto/TorKeyDerivation.java b/orchid/src/com/subgraph/orchid/crypto/TorKeyDerivation.java new file mode 100644 index 00000000..e965984a --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorKeyDerivation.java @@ -0,0 +1,40 @@ +package com.subgraph.orchid.crypto; + +import java.nio.ByteBuffer; + +public class TorKeyDerivation { + + private final byte[] kdfBuffer; + private int round; + + public TorKeyDerivation(byte[] seed) { + this.kdfBuffer = new byte[seed.length + 1]; + System.arraycopy(seed, 0, kdfBuffer, 0, seed.length); + } + public void deriveKeys(byte[] keyMaterialOut, byte[] verifyHashOut) { + final ByteBuffer keyData = deriveKeys(keyMaterialOut.length + verifyHashOut.length); + keyData.get(verifyHashOut); + keyData.get(keyMaterialOut); + } + + public ByteBuffer deriveKeys(int length) { + final ByteBuffer outputBuffer = ByteBuffer.allocate(length); + round = 0; + while(outputBuffer.hasRemaining()) { + byte[] bs = calculateRoundData(); + int n = Math.min(outputBuffer.remaining(), bs.length); + outputBuffer.put(bs, 0, n); + } + + outputBuffer.flip(); + return outputBuffer; + } + + private byte[] calculateRoundData() { + final TorMessageDigest md = new TorMessageDigest(); + kdfBuffer[kdfBuffer.length - 1] = (byte) round; + round += 1; + md.update(kdfBuffer); + return md.getDigestBytes(); + } +} diff --git a/orchid/src/com/subgraph/orchid/crypto/TorMessageDigest.java b/orchid/src/com/subgraph/orchid/crypto/TorMessageDigest.java new file mode 100644 index 00000000..d6857877 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorMessageDigest.java @@ -0,0 +1,128 @@ +package com.subgraph.orchid.crypto; + +import java.io.UnsupportedEncodingException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.data.HexDigest; + +/** + * This class wraps the default cryptographic message digest algorithm + * used in Tor (SHA-1). 
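+ * Passing true to the constructor selects the 256 bit (SHA-256) variant instead.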
+ */ +public class TorMessageDigest { + + + public static final int TOR_DIGEST_SIZE = 20; + public static final int TOR_DIGEST256_SIZE = 32; + + private static final String TOR_DIGEST_ALGORITHM = "SHA-1"; + private static final String TOR_DIGEST256_ALGORITHM = "SHA-256"; + + private final MessageDigest digestInstance; + private final boolean isDigest256; + + public TorMessageDigest(boolean isDigest256) { + digestInstance = createDigestInstance(isDigest256); + this.isDigest256 = isDigest256; + } + + public TorMessageDigest() { + this(false); + } + + private MessageDigest createDigestInstance(boolean isDigest256) { + try { + final String algorithm = (isDigest256) ? TOR_DIGEST256_ALGORITHM : TOR_DIGEST_ALGORITHM; + return MessageDigest.getInstance(algorithm); + } catch (NoSuchAlgorithmException e) { + throw new TorException(e); + } + } + + /** + * Return true if this is a 256 bit digest instance. + * + * @return true if this is a 256 bit digest instance. + */ + public boolean isDigest256() { + return isDigest256; + } + + /** + * Return the digest value of all data processed up until this point. + * @return The digest value as an array of TOR_DIGEST_SIZE or TOR_DIGEST256_SIZE bytes. + */ + public byte[] getDigestBytes() { + try { + // Make a clone because #digest() will reset the MessageDigest instance + // and we want to be able to use this class for running digests on circuits + final MessageDigest clone = (MessageDigest) digestInstance.clone(); + return clone.digest(); + } catch (CloneNotSupportedException e) { + throw new TorException(e); + } + } + + /** + * Return what the digest for the current running hash would be IF we + * added data, but don't really add the data to the digest + * calculation. + */ + public byte[] peekDigest(byte[] data, int offset, int length) { + try { + final MessageDigest clone = (MessageDigest) digestInstance.clone(); + clone.update(data, offset, length); + return clone.digest(); + } catch (CloneNotSupportedException e) { + throw new TorException(e); + } + } + + /** + * Calculate the digest value of all data processed up until this point and convert + * the digest into a HexDigest object. + * @return A new HexDigest object representing the current digest value. + * @see HexDigest + */ + public HexDigest getHexDigest() { + return HexDigest.createFromDigestBytes(getDigestBytes()); + } + + /** + * Add the entire contents of the byte array input to the current digest calculation. + * + * @param input An array of input bytes to process. + */ + public void update(byte[] input) { + digestInstance.update(input); + } + + /** + * Add length bytes of the contents of the byte array input beginning at + * offset into the array to the current digest calculation. + * + * @param input An array of input bytes to process. + * @param offset The offset into the input array to begin processing. + * @param length A count of how many bytes of the input array to process. + */ + public void update(byte[] input, int offset, int length) { + digestInstance.update(input, offset, length); + } + + /** + * Convert the String input into an array of bytes using the ISO-8859-1 encoding + * and add these bytes to the current digest calculation. + * + * @param input A string to process. 
+ */ + public void update(String input) { + try { + digestInstance.update(input.getBytes("ISO-8859-1")); + } catch (UnsupportedEncodingException e) { + throw new TorException(e); + } + } + +} diff --git a/orchid/src/com/subgraph/orchid/crypto/TorNTorKeyAgreement.java b/orchid/src/com/subgraph/orchid/crypto/TorNTorKeyAgreement.java new file mode 100644 index 00000000..fab6414e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorNTorKeyAgreement.java @@ -0,0 +1,160 @@ +package com.subgraph.orchid.crypto; + +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.misc.Utils; + +public class TorNTorKeyAgreement implements TorKeyAgreement { + public final static int CURVE25519_PUBKEY_LEN = 32; + final static int CURVE25519_OUTPUT_LEN = 32; + final static int DIGEST256_LEN = 32; + final static int DIGEST_LEN = 20; + final static int KEY_LEN = 16; + final static int NTOR_ONIONSKIN_LEN = 2 * CURVE25519_PUBKEY_LEN + DIGEST_LEN; + final static String PROTOID = "ntor-curve25519-sha256-1"; + final static String SERVER_STR = "Server"; + final static int SECRET_INPUT_LEN = CURVE25519_PUBKEY_LEN * 3 + CURVE25519_OUTPUT_LEN * 2 + DIGEST_LEN + PROTOID.length(); + final static int AUTH_INPUT_LEN = DIGEST256_LEN + DIGEST_LEN + (CURVE25519_PUBKEY_LEN * 3) + PROTOID.length() + SERVER_STR.length(); + final static Charset cs = Charset.forName("ISO-8859-1"); + + private final TorRandom random = new TorRandom(); + private final HexDigest peerIdentity; + private final byte[] peerNTorOnionKey; /* pubkey_B */ + private final byte[] secretKey_x; + private final byte[] publicKey_X; + + public TorNTorKeyAgreement(HexDigest peerIdentity, byte[] peerNTorOnionKey) { + this.peerIdentity = peerIdentity; + this.peerNTorOnionKey = peerNTorOnionKey; + this.secretKey_x = generateSecretKey(); + this.publicKey_X = getPublicKeyForPrivate(secretKey_x); + } + + + public byte[] createOnionSkin() { + final ByteBuffer buffer = makeBuffer(NTOR_ONIONSKIN_LEN); + buffer.put(peerIdentity.getRawBytes()); + buffer.put(peerNTorOnionKey); + buffer.put(publicKey_X); + return buffer.array(); + } + + private ByteBuffer makeBuffer(int sz) { + final byte[] array = new byte[sz]; + return ByteBuffer.wrap(array); + } + + byte[] generateSecretKey() { + final byte[]key = random.getBytes(32); + key[0] &= 248; + key[31] &= 127; + key[31] |= 64; + return key; + } + + byte[] getPublicKeyForPrivate(byte[] secretKey) { + final byte[] pub = new byte[32]; + Curve25519.crypto_scalarmult_base(pub, secretKey); + return pub; + } + + private boolean isBad; + + public boolean deriveKeysFromHandshakeResponse(byte[] handshakeResponse, byte[] keyMaterialOut, byte[] verifyHashOut) { + isBad = false; + + final ByteBuffer hr = ByteBuffer.wrap(handshakeResponse); + byte[] serverPub = new byte[CURVE25519_PUBKEY_LEN]; + byte[] authCandidate = new byte[DIGEST256_LEN]; + hr.get(serverPub); + hr.get(authCandidate); + + final byte[] secretInput = buildSecretInput(serverPub); + final byte[] verify = tweak("verify", secretInput); + final byte[] authInput = buildAuthInput(verify, serverPub); + final byte[] auth = tweak("mac", authInput); + isBad |= !Utils.constantTimeArrayEquals(auth, authCandidate); + final byte[] seed = tweak("key_extract", secretInput); + + final TorRFC5869KeyDerivation kdf = new TorRFC5869KeyDerivation(seed); + 
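+ // Expand the shared seed into key material and the verify hash using the RFC5869-style
+ // HKDF expansion (HMAC-SHA256), as required by the ntor key derivation.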
kdf.deriveKeys(keyMaterialOut, verifyHashOut); + + return !isBad; + } + + public byte[] getNtorCreateMagic() { + return "ntorNTORntorNTOR".getBytes(cs); + } + + private byte[] buildSecretInput(byte[] serverPublic_Y) { + final ByteBuffer bb = makeBuffer(SECRET_INPUT_LEN); + bb.put(scalarMult(serverPublic_Y)); + bb.put(scalarMult(peerNTorOnionKey)); + bb.put(peerIdentity.getRawBytes()); + bb.put(peerNTorOnionKey); + bb.put(publicKey_X); + bb.put(serverPublic_Y); + bb.put(PROTOID.getBytes()); + return bb.array(); + } + + private byte[] buildAuthInput(byte[] verify, byte[] serverPublic_Y) { + final ByteBuffer bb = makeBuffer(AUTH_INPUT_LEN); + bb.put(verify); + bb.put(peerIdentity.getRawBytes()); + bb.put(peerNTorOnionKey); + bb.put(serverPublic_Y); + bb.put(publicKey_X); + bb.put(PROTOID.getBytes(cs)); + bb.put(SERVER_STR.getBytes(cs)); + return bb.array(); + } + + private byte[] scalarMult(byte[] peerValue) { + final byte[] out = new byte[CURVE25519_OUTPUT_LEN]; + Curve25519.crypto_scalarmult(out, secretKey_x, peerValue); + isBad |= isAllZero(out); + return out; + } + + boolean isAllZero(byte[] bs) { + boolean result = true; + for(byte b: bs) { + result &= (b == 0); + } + return result; + } + + byte[] tweak(String suffix, byte[] input) { + return hmac256(input, getStringConstant(suffix)); + } + + byte[] hmac256(byte[] input, byte[] key) { + final SecretKeySpec keyspec = new SecretKeySpec(key, "HmacSHA256"); + try { + final Mac mac = Mac.getInstance("HmacSHA256"); + mac.init(keyspec); + return mac.doFinal(input); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException("Failed to create HmacSHA256 instance: "+ e); + } catch (InvalidKeyException e) { + throw new IllegalStateException("Failed to create HmacSHA256 instance: "+ e); + } + } + + byte[] getStringConstant(String suffix) { + if(suffix == null || suffix.isEmpty()) { + return PROTOID.getBytes(cs); + } else { + return (PROTOID + ":" + suffix).getBytes(cs); + } + } + +} diff --git a/orchid/src/com/subgraph/orchid/crypto/TorPrivateKey.java b/orchid/src/com/subgraph/orchid/crypto/TorPrivateKey.java new file mode 100644 index 00000000..f6c14771 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorPrivateKey.java @@ -0,0 +1,48 @@ +package com.subgraph.orchid.crypto; + +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.security.interfaces.RSAPrivateKey; +import java.security.interfaces.RSAPublicKey; + +import com.subgraph.orchid.TorException; + +public class TorPrivateKey { + + static public TorPrivateKey generateNewKeypair() { + KeyPairGenerator generator = createGenerator(); + generator.initialize(1024, new SecureRandom()); + KeyPair pair = generator.generateKeyPair(); + return new TorPrivateKey((RSAPrivateKey)pair.getPrivate(), (RSAPublicKey)pair.getPublic()); + } + + static KeyPairGenerator createGenerator() { + try { + return KeyPairGenerator.getInstance("RSA"); + } catch (NoSuchAlgorithmException e) { + throw new TorException(e); + } + } + + private final TorPublicKey publicKey; + private final RSAPrivateKey privateKey; + + TorPrivateKey(RSAPrivateKey privateKey, RSAPublicKey publicKey) { + this.privateKey = privateKey; + this.publicKey = new TorPublicKey(publicKey); + } + + public TorPublicKey getPublicKey() { + return publicKey; + } + + public RSAPublicKey getRSAPublicKey() { + return publicKey.getRSAPublicKey(); + } + + public RSAPrivateKey getRSAPrivateKey() { + return privateKey; + } +} diff 
--git a/orchid/src/com/subgraph/orchid/crypto/TorPublicKey.java b/orchid/src/com/subgraph/orchid/crypto/TorPublicKey.java new file mode 100644 index 00000000..d66bedb3 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorPublicKey.java @@ -0,0 +1,133 @@ +package com.subgraph.orchid.crypto; + +import java.security.GeneralSecurityException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.security.NoSuchProviderException; +import java.security.interfaces.RSAPublicKey; + +import javax.crypto.BadPaddingException; +import javax.crypto.Cipher; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; + +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.misc.Utils; + +/** + * This class wraps the RSA public keys used in the Tor protocol. + */ +public class TorPublicKey { + static public TorPublicKey createFromPEMBuffer(String buffer) { + return new TorPublicKey(buffer); + } + + private final String pemBuffer; + private RSAPublicKey key; + + private byte[] rawKeyBytes = null; + private HexDigest keyFingerprint = null; + + private TorPublicKey(String pemBuffer) { + this.pemBuffer = pemBuffer; + this.key = null; + } + + public TorPublicKey(RSAPublicKey key) { + this.pemBuffer = null; + this.key = key; + } + + private synchronized RSAPublicKey getKey() { + if(key != null) { + return key; + } else if(pemBuffer != null) { + final RSAKeyEncoder encoder = new RSAKeyEncoder(); + try { + key = encoder.parsePEMPublicKey(pemBuffer); + } catch (GeneralSecurityException e) { + throw new IllegalArgumentException("Failed to parse PEM encoded key: "+ e); + } + } + return key; + } + + public synchronized byte[] getRawBytes() { + if(rawKeyBytes == null) { + final RSAKeyEncoder encoder = new RSAKeyEncoder(); + rawKeyBytes = encoder.getPKCS1Encoded(getKey()); + } + return rawKeyBytes; + } + + public synchronized HexDigest getFingerprint() { + if(keyFingerprint == null) { + keyFingerprint = HexDigest.createDigestForData(getRawBytes()); + } + return keyFingerprint; + } + + public boolean verifySignature(TorSignature signature, HexDigest digest) { + return verifySignatureFromDigestBytes(signature, digest.getRawBytes()); + } + + public boolean verifySignature(TorSignature signature, TorMessageDigest digest) { + return verifySignatureFromDigestBytes(signature, digest.getDigestBytes()); + } + + public boolean verifySignatureFromDigestBytes(TorSignature signature, byte[] digestBytes) { + final Cipher cipher = createCipherInstance(); + try { + byte[] decrypted = cipher.doFinal(signature.getSignatureBytes()); + return Utils.constantTimeArrayEquals(decrypted, digestBytes); + } catch (IllegalBlockSizeException e) { + throw new TorException(e); + } catch (BadPaddingException e) { + throw new TorException(e); + } + } + + private Cipher createCipherInstance() { + try { + Cipher cipher = getCipherInstance(); + cipher.init(Cipher.DECRYPT_MODE, getKey()); + return cipher; + } catch (InvalidKeyException e) { + throw new TorException(e); + } + } + + private Cipher getCipherInstance() { + try { + try { + return Cipher.getInstance("RSA/ECB/PKCS1Padding", "SunJCE"); + } catch (NoSuchProviderException e) { + return Cipher.getInstance("RSA/ECB/PKCS1Padding"); + } + } catch (NoSuchAlgorithmException e) { + throw new TorException(e); + } catch (NoSuchPaddingException e) { + throw new TorException(e); + } + } + + public RSAPublicKey getRSAPublicKey() { + return getKey(); + } + + public 
String toString() { + return "Tor Public Key: " + getFingerprint(); + } + + public boolean equals(Object o) { + if(!(o instanceof TorPublicKey)) + return false; + final TorPublicKey other = (TorPublicKey) o; + return other.getFingerprint().equals(getFingerprint()); + } + + public int hashCode() { + return getFingerprint().hashCode(); + } +} diff --git a/orchid/src/com/subgraph/orchid/crypto/TorRFC5869KeyDerivation.java b/orchid/src/com/subgraph/orchid/crypto/TorRFC5869KeyDerivation.java new file mode 100644 index 00000000..93f7cd55 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorRFC5869KeyDerivation.java @@ -0,0 +1,79 @@ +package com.subgraph.orchid.crypto; + +import java.nio.ByteBuffer; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + +import com.subgraph.orchid.Tor; + +public class TorRFC5869KeyDerivation { + private final static String PROTOID = "ntor-curve25519-sha256-1"; + private final static String M_EXPAND = PROTOID + ":key_expand"; + private final static byte[] M_EXPAND_BYTES = M_EXPAND.getBytes(Tor.getDefaultCharset()); + + private final byte[] seed; + + public TorRFC5869KeyDerivation(byte[] seed) { + this.seed = new byte[seed.length]; + System.arraycopy(seed, 0, this.seed, 0, seed.length); + } + + public void deriveKeys(byte[] keyMaterialOut, byte[] verifyHashOut) { + final ByteBuffer keyData = deriveKeys(keyMaterialOut.length + verifyHashOut.length); + keyData.get(keyMaterialOut); + keyData.get(verifyHashOut); + } + + public ByteBuffer deriveKeys(int length) { + int round = 1; + final ByteBuffer bb = makeBuffer(length); + byte[] macOutput = null; + while(bb.hasRemaining()) { + macOutput = expandRound(round, macOutput); + if(macOutput.length > bb.remaining()) { + bb.put(macOutput, 0, bb.remaining()); + } else { + bb.put(macOutput); + } + round += 1; + } + bb.flip(); + return bb; + } + + private byte[] expandRound(int round, byte[] priorMac) { + final ByteBuffer bb; + if(round == 1) { + bb = makeBuffer(M_EXPAND_BYTES.length + 1); + } else { + bb = makeBuffer(M_EXPAND_BYTES.length + TorMessageDigest.TOR_DIGEST256_SIZE + 1); + bb.put(priorMac); + } + bb.put(M_EXPAND_BYTES); + bb.put((byte) round); + + final Mac mac = createMacInstance(); + return mac.doFinal(bb.array()); + } + + private ByteBuffer makeBuffer(int len) { + final byte[] bs = new byte[len]; + return ByteBuffer.wrap(bs); + } + + private Mac createMacInstance() { + final SecretKeySpec keyspec = new SecretKeySpec(seed, "HmacSHA256"); + try { + final Mac mac = Mac.getInstance("HmacSHA256"); + mac.init(keyspec); + return mac; + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException("Could not create HmacSHA256 instance: "+ e); + } catch (InvalidKeyException e) { + throw new IllegalStateException("Could not create HmacSHA256 instance: "+ e); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/crypto/TorRandom.java b/orchid/src/com/subgraph/orchid/crypto/TorRandom.java new file mode 100644 index 00000000..bbe9d07d --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorRandom.java @@ -0,0 +1,56 @@ +package com.subgraph.orchid.crypto; + +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; + +import com.subgraph.orchid.TorException; + +public class TorRandom { + + private final SecureRandom random; + + public TorRandom() { + random = createRandom(); + } + + private static SecureRandom createRandom() { + try { + return 
SecureRandom.getInstance("SHA1PRNG"); + } catch (NoSuchAlgorithmException e) { + throw new TorException(e); + } + } + + public byte[] getBytes(int n) { + final byte[] bs = new byte[n]; + random.nextBytes(bs); + return bs; + } + + public long nextLong(long n) { + long bits, val; + do { + bits = nextLong(); + val = bits % n; + } while(bits - val + (n - 1) < 0); + return val; + } + + public int nextInt(int n) { + return random.nextInt(n); + } + + public int nextInt() { + return random.nextInt() & Integer.MAX_VALUE; + } + + /** + * Return a uniformly distributed positive random value between 0 and Long.MAX_VALUE + * + * @return A positive random value between 0 and Long.MAX_VALUE. + */ + public long nextLong() { + return random.nextLong() & Long.MAX_VALUE; + } + +} diff --git a/orchid/src/com/subgraph/orchid/crypto/TorSignature.java b/orchid/src/com/subgraph/orchid/crypto/TorSignature.java new file mode 100644 index 00000000..dcdbc49a --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorSignature.java @@ -0,0 +1,75 @@ +package com.subgraph.orchid.crypto; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.StringReader; +import java.util.Arrays; + +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.encoders.Base64; +import com.subgraph.orchid.encoders.Hex; + +public class TorSignature { + private final static String SIGNATURE_BEGIN = "-----BEGIN SIGNATURE-----"; + private final static String ID_SIGNATURE_BEGIN = "-----BEGIN ID SIGNATURE-----"; + private final static String SIGNATURE_END = "-----END SIGNATURE-----"; + private final static String ID_SIGNATURE_END = "-----END ID SIGNATURE-----"; + + static public TorSignature createFromPEMBuffer(String buffer) { + BufferedReader reader = new BufferedReader(new StringReader(buffer)); + final String header = nextLine(reader); + if(!(SIGNATURE_BEGIN.equals(header) || ID_SIGNATURE_BEGIN.equals(header))) + throw new TorParsingException("Did not find expected signature BEGIN header"); + return new TorSignature(Base64.decode(parseBase64Data(reader)), DigestAlgorithm.DIGEST_SHA1); + } + static private String parseBase64Data(BufferedReader reader) { + final StringBuilder base64Data = new StringBuilder(); + while(true) { + final String line = nextLine(reader); + if(SIGNATURE_END.equals(line) || ID_SIGNATURE_END.equals(line)) + return base64Data.toString(); + base64Data.append(line); + } + } + static String nextLine(BufferedReader reader) { + try { + final String line = reader.readLine(); + if(line == null) + throw new TorParsingException("Did not find expected signature END header"); + return line; + } catch (IOException e) { + throw new TorException(e); + } + } + + public enum DigestAlgorithm { DIGEST_SHA1, DIGEST_SHA256 }; + + private final byte[] signatureBytes; + private final DigestAlgorithm digestAlgorithm; + + private TorSignature(byte[] signatureBytes, DigestAlgorithm digestAlgorithm) { + this.signatureBytes = signatureBytes; + this.digestAlgorithm = digestAlgorithm; + } + + + public byte[] getSignatureBytes() { + return Arrays.copyOf(signatureBytes, signatureBytes.length); + } + + public boolean verify(TorPublicKey publicKey, TorMessageDigest digest) { + return publicKey.verifySignature(this, digest); + } + + public DigestAlgorithm getDigestAlgorithm() { + return digestAlgorithm; + } + + public String toString() { + return "TorSignature: (" + signatureBytes.length + " bytes) " + new String(Hex.encode(signatureBytes)); + } + + + +} diff --git 
a/orchid/src/com/subgraph/orchid/crypto/TorStreamCipher.java b/orchid/src/com/subgraph/orchid/crypto/TorStreamCipher.java new file mode 100644 index 00000000..57093258 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorStreamCipher.java @@ -0,0 +1,127 @@ +package com.subgraph.orchid.crypto; + +import java.security.GeneralSecurityException; + +import javax.crypto.Cipher; +import javax.crypto.KeyGenerator; +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; + +import com.subgraph.orchid.TorException; + +public class TorStreamCipher { + public static final int KEY_LEN = 16; + + public static TorStreamCipher createWithRandomKey() { + final SecretKey randomKey = generateRandomKey(); + return new TorStreamCipher(randomKey.getEncoded()); + } + + public static TorStreamCipher createFromKeyBytes(byte[] keyBytes) { + return new TorStreamCipher(keyBytes); + } + + public static TorStreamCipher createFromKeyBytesWithIV(byte[] keyBytes, byte[] iv) { + return new TorStreamCipher(keyBytes, iv); + } + + private static final int BLOCK_SIZE = 16; + private final Cipher cipher; + private final byte[] counter; + private final byte[] counterOut; + /* Next byte of keystream in counterOut */ + private int keystreamPointer = -1; + private final SecretKeySpec key; + + + private TorStreamCipher(byte[] keyBytes) { + this(keyBytes, null); + } + + private TorStreamCipher(byte[] keyBytes, byte[] iv) { + key = keyBytesToSecretKey(keyBytes); + cipher = createCipher(key); + counter = new byte[BLOCK_SIZE]; + counterOut = new byte[BLOCK_SIZE]; + + if(iv != null) { + applyIV(iv); + } + } + + private void applyIV(byte[] iv) { + if(iv.length != BLOCK_SIZE) { + throw new IllegalArgumentException(); + } + System.arraycopy(iv, 0, counter, 0, BLOCK_SIZE); + } + + public void encrypt(byte[] data) { + encrypt(data, 0, data.length); + } + + public synchronized void encrypt(byte[] data, int offset, int length) { + for(int i = 0; i < length; i++) + data[i + offset] ^= nextKeystreamByte(); + } + + public byte[] getKeyBytes() { + return key.getEncoded(); + } + + private static SecretKeySpec keyBytesToSecretKey(byte[] keyBytes) { + return new SecretKeySpec(keyBytes, "AES"); + } + + private static Cipher createCipher(SecretKeySpec keySpec) { + try { + final Cipher cipher = Cipher.getInstance("AES/ECB/NoPadding"); + cipher.init(Cipher.ENCRYPT_MODE, keySpec); + return cipher; + } catch (GeneralSecurityException e) { + throw new TorException(e); + } + } + + private static SecretKey generateRandomKey() { + try { + KeyGenerator generator = KeyGenerator.getInstance("AES"); + generator.init(128); + return generator.generateKey(); + } catch (GeneralSecurityException e) { + throw new TorException(e); + } + } + + private byte nextKeystreamByte() { + if(keystreamPointer == -1 || (keystreamPointer >= BLOCK_SIZE)) + updateCounter(); + return counterOut[keystreamPointer++]; + } + private void updateCounter() { + encryptCounter(); + incrementCounter(); + keystreamPointer = 0; + } + + private void encryptCounter() { + try { + cipher.doFinal(counter, 0, BLOCK_SIZE, counterOut, 0); + } catch (GeneralSecurityException e) { + throw new TorException(e); + } + } + + private void incrementCounter() { + int carry = 1; + for(int i = counter.length - 1; i >= 0; i--) { + int x = (counter[i] & 0xff) + carry; + if(x > 0xff) + carry = 1; + else + carry = 0; + counter[i] = (byte)x; + } + } + +} diff --git a/orchid/src/com/subgraph/orchid/crypto/TorTapKeyAgreement.java b/orchid/src/com/subgraph/orchid/crypto/TorTapKeyAgreement.java new file 
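// Illustrative sketch (not part of the patch) of the counter-mode stream cipher above:
// both ends must share the same key and IV, and encryption is its own inverse because
// the keystream is simply XORed over the data in place.
byte[] key = TorStreamCipher.createWithRandomKey().getKeyBytes();   // 16-byte AES key
byte[] iv = new byte[16];                                           // assumed shared IV (BLOCK_SIZE bytes)
TorStreamCipher sender = TorStreamCipher.createFromKeyBytesWithIV(key, iv);
TorStreamCipher receiver = TorStreamCipher.createFromKeyBytesWithIV(key, iv);
byte[] payload = "relay cell body".getBytes();
sender.encrypt(payload);       // payload is now ciphertext, modified in place
receiver.encrypt(payload);     // applying the same keystream again restores the plaintext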
mode 100644 index 00000000..eae5eaef --- /dev/null +++ b/orchid/src/com/subgraph/orchid/crypto/TorTapKeyAgreement.java @@ -0,0 +1,195 @@ +package com.subgraph.orchid.crypto; + +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.security.GeneralSecurityException; +import java.security.KeyFactory; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.PublicKey; +import java.util.Arrays; + +import javax.crypto.KeyAgreement; +import javax.crypto.interfaces.DHPublicKey; +import javax.crypto.spec.DHParameterSpec; +import javax.crypto.spec.DHPublicKeySpec; + +import com.subgraph.orchid.TorException; +/** + * The TorKeyAgreement class implements the diffie-hellman key agreement + * protocol using the parameters specified in the main Tor specification (tor-spec.txt). + * + * An instance of this class can only be used to perform a single key agreement operation. + * + * After instantiating the class, a user calls {@link #getPublicValue()} or {@link #getPublicKeyBytes()} + * to retrieve the public value to transmit to the peer in the key agreement operation. After receiving + * a public value from the peer, this value should be converted into a BigInteger and + * {@link #isValidPublicValue(BigInteger)} should be called to verify that the peer has sent a safe + * and legal public value. If {@link #isValidPublicValue(BigInteger)} returns true, the peer public + * value is valid and {@link #getSharedSecret(BigInteger)} can be called to complete the key agreement + * protocol and return the shared secret value. + * + */ +public class TorTapKeyAgreement implements TorKeyAgreement { + public final static int DH_LEN = 128; + public final static int DH_SEC_LEN = 40; + /* + * tor-spec 0.3 + * + * For Diffie-Hellman, we use a generator (g) of 2. For the modulus (p), we + * use the 1024-bit safe prime from rfc2409 section 6.2 whose hex + * representation is: + */ + private static final BigInteger P1024 = new BigInteger( + "00FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + + "49286651ECE65381FFFFFFFFFFFFFFFF", 16); + private static final BigInteger G = new BigInteger("2"); + + /* + * tor-spec 0.3 + * + * As an optimization, implementations SHOULD choose DH private keys (x) of + * 320 bits. + */ + private static final int PRIVATE_KEY_SIZE = 320; + private static final DHParameterSpec DH_PARAMETER_SPEC = new DHParameterSpec(P1024, G, PRIVATE_KEY_SIZE); + + private final KeyAgreement dh; + private final KeyPair keyPair; + private final TorPublicKey onionKey; + + /** + * Create a new TorKeyAgreement instance which can be used to perform a single + * key agreement operation. A new set of ephemeral Diffie-Hellman parameters are generated + * when this class is instantiated. + */ + public TorTapKeyAgreement(TorPublicKey onionKey) { + this.keyPair = generateKeyPair(); + this.dh = createDH(); + this.onionKey = onionKey; + + } + + public TorTapKeyAgreement() { + this(null); + } + + /** + * Return the generated public value for this key agreement operation as a BigInteger. + * + * @return The diffie-hellman public value as a BigInteger. 
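// Illustrative sketch (not part of the patch) of the client-side flow described in the
// class comment above. "peerPublicBytes" is assumed to be the DH public value received
// from the other side of the handshake.
TorTapKeyAgreement kex = new TorTapKeyAgreement();
byte[] myPublicBytes = kex.getPublicKeyBytes();                 // DH_LEN bytes, sent to the peer
BigInteger peerPublic = new BigInteger(1, peerPublicBytes);
if (!TorTapKeyAgreement.isValidPublicValue(peerPublic)) {
    throw new TorException("Illegal DH public value");          // tor-spec 5.2: reject degenerate values
}
byte[] sharedSecret = kex.getSharedSecret(peerPublic);          // input to the key derivation step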
+ */ + public BigInteger getPublicValue() { + DHPublicKey pubKey = (DHPublicKey) keyPair.getPublic(); + return pubKey.getY(); + } + + /** + * Return the generated public value for this key agreement operation as an array with the value + * encoded in big-endian byte order. + * + * @return A byte array containing the encoded public value for this key agreement operation. + */ + public byte[] getPublicKeyBytes() { + final byte[] output = new byte[128]; + final byte[] yBytes = getPublicValue().toByteArray(); + if(yBytes[0] == 0 && yBytes.length == (DH_LEN + 1)) { + System.arraycopy(yBytes, 1, output, 0, DH_LEN); + } else if (yBytes.length <= DH_LEN) { + final int offset = DH_LEN - yBytes.length; + System.arraycopy(yBytes, 0, output, offset, yBytes.length); + } else { + throw new IllegalStateException("Public value is longer than DH_LEN but not because of sign bit"); + } + return output; + } + + + + /** + * Return true if the specified value is a legal public + * value rather than a dangerous degenerate or confined subgroup value. + * + * tor-spec 5.2 + * Before computing g^xy, both client and server MUST verify that + * the received g^x or g^y value is not degenerate; that is, it must + * be strictly greater than 1 and strictly less than p-1 where p is + * the DH modulus. Implementations MUST NOT complete a handshake + * with degenerate keys. + */ + public static boolean isValidPublicValue(BigInteger publicValue) { + if(publicValue.signum() < 1 || publicValue.equals(BigInteger.ONE)) + return false; + if(publicValue.compareTo(P1024.subtract(BigInteger.ONE)) >= 0) + return false; + return true; + } + + /** + * Complete the key agreement protocol with the peer public value + * otherPublic and return the calculated shared secret. + * + * @param otherPublic The peer public value. + * @return The shared secret value produced by the protocol. 
+ */ + public byte[] getSharedSecret(BigInteger otherPublic) { + try { + KeyFactory factory = KeyFactory.getInstance("DH"); + DHPublicKeySpec pub = new DHPublicKeySpec(otherPublic, P1024, G); + PublicKey key = factory.generatePublic(pub); + dh.doPhase(key, true); + return dh.generateSecret(); + } catch (GeneralSecurityException e) { + throw new TorException(e); + } + } + private final KeyAgreement createDH() { + try { + KeyAgreement dh = KeyAgreement.getInstance("DH"); + dh.init(keyPair.getPrivate()); + return dh; + } catch (GeneralSecurityException e) { + throw new TorException(e); + } + } + + private final KeyPair generateKeyPair() { + try { + KeyPairGenerator keyGen = KeyPairGenerator.getInstance("DH"); + keyGen.initialize(DH_PARAMETER_SPEC); + return keyGen.generateKeyPair(); + } catch (GeneralSecurityException e) { + throw new TorException(e); + } + } + + public byte[] createOnionSkin() { + final byte[] yBytes = getPublicKeyBytes(); + final HybridEncryption hybrid = new HybridEncryption(); + return hybrid.encrypt(yBytes, onionKey); + } + + public boolean deriveKeysFromHandshakeResponse(byte[] handshakeResponse, + byte[] keyMaterialOut, byte[] verifyHashOut) { + ByteBuffer bb = ByteBuffer.wrap(handshakeResponse); + byte[] dhPublic = new byte[DH_LEN]; + byte[] keyHash = new byte[TorMessageDigest.TOR_DIGEST_SIZE]; + bb.get(dhPublic); + bb.get(keyHash); + BigInteger peerPublic = new BigInteger(1, dhPublic); + return deriveKeysFromDHPublicAndHash(peerPublic, keyHash, keyMaterialOut, verifyHashOut); + } + + public boolean deriveKeysFromDHPublicAndHash(BigInteger peerPublic, byte[] keyHash, byte[] keyMaterialOut, byte[] verifyHashOut) { + if(!isValidPublicValue(peerPublic)) { + throw new TorException("Illegal DH public value"); + } + final byte[] sharedSecret = getSharedSecret(peerPublic); + final TorKeyDerivation kdf = new TorKeyDerivation(sharedSecret); + kdf.deriveKeys(keyMaterialOut, verifyHashOut); + return Arrays.equals(verifyHashOut, keyHash); + } +} diff --git a/orchid/src/com/subgraph/orchid/dashboard/Dashboard.java b/orchid/src/com/subgraph/orchid/dashboard/Dashboard.java new file mode 100644 index 00000000..966e920f --- /dev/null +++ b/orchid/src/com/subgraph/orchid/dashboard/Dashboard.java @@ -0,0 +1,186 @@ +package com.subgraph.orchid.dashboard; + +import java.io.IOException; +import java.io.PrintWriter; +import java.net.ServerSocket; +import java.net.Socket; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; +import java.util.logging.Logger; + +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.misc.GuardedBy; + +/** + * A debugging utility which displays continuously updated information about the internal state + * of various components to clients which connect to a network port listening on localhost. 
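// Illustrative sketch (not part of the patch) of wiring up the dashboard from application
// code; the port value and the "circuitManager"/"connectionCache" components are assumptions
// standing in for any DashboardRenderable implementations.
Dashboard dashboard = new Dashboard();
dashboard.setListeningPort(12345);
dashboard.addRenderables(circuitManager, connectionCache);
dashboard.startListening();
// connect with e.g. "telnet 127.0.0.1 12345" to watch the continuously refreshed state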
+ */ +public class Dashboard implements DashboardRenderable, DashboardRenderer { + private final static Logger logger = Logger.getLogger(Dashboard.class.getName()); + + private final static String DASHBOARD_PORT_PROPERTY = "com.subgraph.orchid.dashboard.port"; + + private final static int DEFAULT_LISTENING_PORT = 12345; + private final static int DEFAULT_FLAGS = DASHBOARD_CIRCUITS | DASHBOARD_STREAMS; + private final static IPv4Address LOCALHOST = IPv4Address.createFromString("127.0.0.1"); + + @GuardedBy("this") private int listeningPort; + @GuardedBy("this") private int flags = DEFAULT_FLAGS; + @GuardedBy("this") private ServerSocket listeningSocket; + @GuardedBy("this") private boolean isListening; + + private final List renderables; + private final Executor executor; + + public Dashboard() { + renderables = new CopyOnWriteArrayList(); + renderables.add(this); + executor = Executors.newCachedThreadPool(); + listeningPort = chooseListeningPort(); + } + + private static int chooseListeningPort() { + final String dbPort = System.getProperty(DASHBOARD_PORT_PROPERTY); + final int port = parsePortProperty(dbPort); + if(port > 0 && port <= 0xFFFF) { + return port; + } else if(dbPort != null) { + logger.warning(DASHBOARD_PORT_PROPERTY + " was not a valid port value: "+ dbPort); + } + return DEFAULT_LISTENING_PORT; + } + + private static int parsePortProperty(String dbPort) { + if(dbPort == null) { + return -1; + } + try { + return Integer.parseInt(dbPort); + } catch (NumberFormatException e) { + return -1; + } + } + + public void addRenderables(Object...objects) { + for(Object ob: objects) { + if(ob instanceof DashboardRenderable) { + renderables.add((DashboardRenderable) ob); + } + } + } + + public void addRenderable(DashboardRenderable renderable) { + renderables.add(renderable); + } + + public synchronized void enableFlag(int flag) { + flags |= flag; + } + + public synchronized void disableFlag(int flag) { + flags &= ~flag; + } + + + public synchronized boolean isEnabled(int f) { + return (flags & f) != 0; + } + + public synchronized void setListeningPort(int port) { + if(port != listeningPort) { + listeningPort = port; + if(isListening) { + stopListening(); + startListening(); + } + } + } + + public boolean isEnabledByProperty() { + return System.getProperty(DASHBOARD_PORT_PROPERTY) != null; + } + + public synchronized void startListening() { + if(isListening) { + return; + } + try { + listeningSocket = new ServerSocket(listeningPort, 50, LOCALHOST.toInetAddress()); + isListening = true; + logger.info("Dashboard listening on "+ LOCALHOST + ":"+ listeningPort); + executor.execute(createAcceptLoopRunnable(listeningSocket)); + } catch (IOException e) { + logger.warning("Failed to create listening Dashboard socket on port "+ listeningPort +": "+ e); + } + } + + public synchronized void stopListening() { + if(!isListening) { + return; + } + if(listeningSocket != null) { + closeQuietly(listeningSocket); + listeningSocket = null; + } + isListening = false; + } + + public synchronized boolean isListening() { + return isListening; + } + + private Runnable createAcceptLoopRunnable(final ServerSocket ss) { + return new Runnable() { + public void run() { + acceptConnections(ss); + } + }; + } + + private void acceptConnections(ServerSocket ss) { + while(true) { + try { + Socket s = ss.accept(); + executor.execute(new DashboardConnection(this, s)); + } catch (IOException e) { + if(!ss.isClosed()) { + logger.warning("IOException on dashboard server socket: "+ e); + } + stopListening(); + return; + } + } + 
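// Illustrative sketch (not part of the patch): the dashboard can also be enabled externally
// through the system property read by chooseListeningPort() above, which is convenient when
// the embedding application should not hard-code a port.
System.setProperty("com.subgraph.orchid.dashboard.port", "12350");
Dashboard dashboard = new Dashboard();        // picks the port up from the property
if (dashboard.isEnabledByProperty()) {
    dashboard.startListening();
}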
} + + void renderAll(PrintWriter writer) throws IOException { + final int fs; + synchronized (this) { + fs = flags; + } + + for(DashboardRenderable dr: renderables) { + dr.dashboardRender(this, writer, fs); + } + } + + private void closeQuietly(ServerSocket s) { + try { + s.close(); + } catch (IOException e) { } + } + + public void dashboardRender(DashboardRenderer renderer, PrintWriter writer, int flags) { + writer.println("[Dashboard]"); + writer.println(); + } + + public void renderComponent(PrintWriter writer, int flags, Object component) throws IOException { + if(!(component instanceof DashboardRenderable)) { + return; + } + final DashboardRenderable renderable = (DashboardRenderable) component; + renderable.dashboardRender(this, writer, flags); + } +} diff --git a/orchid/src/com/subgraph/orchid/dashboard/DashboardConnection.java b/orchid/src/com/subgraph/orchid/dashboard/DashboardConnection.java new file mode 100644 index 00000000..3d1baa15 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/dashboard/DashboardConnection.java @@ -0,0 +1,130 @@ +package com.subgraph.orchid.dashboard; + +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintWriter; +import java.io.Writer; +import java.net.Socket; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +public class DashboardConnection implements Runnable { + + private final static int REFRESH_INTERVAL = 1000; + + private final Dashboard dashboard; + private final Socket socket; + private final ScheduledExecutorService refreshExecutor; + + public DashboardConnection(Dashboard dashboard, Socket socket) { + this.dashboard = dashboard; + this.socket = socket; + this.refreshExecutor = new ScheduledThreadPoolExecutor(1); + } + + public void run() { + ScheduledFuture handle = null; + try { + final PrintWriter writer = new PrintWriter(socket.getOutputStream()); + handle = refreshExecutor.scheduleAtFixedRate(createRefreshRunnable(writer), 0, REFRESH_INTERVAL, TimeUnit.MILLISECONDS); + runInputLoop(socket.getInputStream()); + } catch (IOException e) { + closeQuietly(socket); + } finally { + if(handle != null) { + handle.cancel(true); + } + refreshExecutor.shutdown(); + } + } + + private void closeQuietly(Socket s) { + try { + s.close(); + } catch (IOException e) { } + } + + private void runInputLoop(InputStream input) throws IOException { + int c; + + while((c = input.read()) != -1) { + switch(c) { + case 'c': + toggleFlagWithVerbose(DashboardRenderable.DASHBOARD_CONNECTIONS, DashboardRenderable.DASHBOARD_CONNECTIONS_VERBOSE); + break; + case 'p': + toggleFlag(DashboardRenderable.DASHBOARD_PREDICTED_PORTS); + break; + default: + break; + } + } + } + + // Rotate between 3 states + // 0 (no flags), + // basicFlag, + // basicFlag|verboseFlag + private void toggleFlagWithVerbose(int basicFlag, int verboseFlag) { + if(dashboard.isEnabled(verboseFlag)) { + dashboard.disableFlag(basicFlag | verboseFlag); + } else if(dashboard.isEnabled(basicFlag)) { + dashboard.enableFlag(verboseFlag); + } else { + dashboard.enableFlag(basicFlag); + } + } + + private void toggleFlag(int flag) { + if(dashboard.isEnabled(flag)) { + dashboard.disableFlag(flag); + } else { + dashboard.enableFlag(flag); + } + } + + private void hideCursor(Writer writer) throws IOException { + emitCSI(writer); + writer.write("?25l"); + } + + private void emitCSI(Writer writer) throws IOException { + writer.append((char) 0x1B); 
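// 0x1B followed by '[' forms the ANSI CSI (Control Sequence Introducer); the sequences
// written elsewhere in this class ("?25l", "2J", "%d;%dH") hide the cursor, clear the
// screen, and move the cursor to a 1-based row/column position, respectively.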
+ writer.append('['); + } + + private void clear(PrintWriter writer) throws IOException { + emitCSI(writer); + writer.write("2J"); + } + + private void moveTo(PrintWriter writer, int x, int y) throws IOException { + emitCSI(writer); + writer.printf("%d;%dH", x+1, y+1); + } + + private void refresh(PrintWriter writer) { + try { + if(socket.isClosed()) { + return; + } + hideCursor(writer); + clear(writer); + moveTo(writer, 0, 0); + dashboard.renderAll(writer); + writer.flush(); + } catch(IOException e) { + closeQuietly(socket); + } + } + + private Runnable createRefreshRunnable(final PrintWriter writer) { + return new Runnable() { + public void run() { + refresh(writer); + } + }; + } +} diff --git a/orchid/src/com/subgraph/orchid/dashboard/DashboardRenderable.java b/orchid/src/com/subgraph/orchid/dashboard/DashboardRenderable.java new file mode 100644 index 00000000..30a58b99 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/dashboard/DashboardRenderable.java @@ -0,0 +1,15 @@ +package com.subgraph.orchid.dashboard; + +import java.io.IOException; +import java.io.PrintWriter; + +public interface DashboardRenderable { + + static int DASHBOARD_CONNECTIONS = 1 << 0; + static int DASHBOARD_CONNECTIONS_VERBOSE = 1 << 1; + static int DASHBOARD_PREDICTED_PORTS = 1 << 2; + static int DASHBOARD_CIRCUITS = 1 << 3; + static int DASHBOARD_STREAMS = 1 << 4; + + void dashboardRender(DashboardRenderer renderer, PrintWriter writer, int flags) throws IOException; +} diff --git a/orchid/src/com/subgraph/orchid/dashboard/DashboardRenderer.java b/orchid/src/com/subgraph/orchid/dashboard/DashboardRenderer.java new file mode 100644 index 00000000..b14f602a --- /dev/null +++ b/orchid/src/com/subgraph/orchid/dashboard/DashboardRenderer.java @@ -0,0 +1,8 @@ +package com.subgraph.orchid.dashboard; + +import java.io.IOException; +import java.io.PrintWriter; + +public interface DashboardRenderer { + void renderComponent(PrintWriter writer, int flags, Object component) throws IOException; +} diff --git a/orchid/src/com/subgraph/orchid/data/BandwidthHistory.java b/orchid/src/com/subgraph/orchid/data/BandwidthHistory.java new file mode 100644 index 00000000..9f8e4c80 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/BandwidthHistory.java @@ -0,0 +1,29 @@ +package com.subgraph.orchid.data; + +import java.util.ArrayList; +import java.util.List; + +public class BandwidthHistory { + + private final Timestamp reportingTime; + private final int reportingInterval; + private final List samples = new ArrayList(); + + public BandwidthHistory(Timestamp reportingTime, int reportingInterval) { + this.reportingTime = reportingTime; + this.reportingInterval = reportingInterval; + } + + public int getReportingInterval() { + return reportingInterval; + } + + public Timestamp getReportingTime() { + return reportingTime; + } + + public void addSample(int value) { + samples.add(value); + } + +} diff --git a/orchid/src/com/subgraph/orchid/data/Base32.java b/orchid/src/com/subgraph/orchid/data/Base32.java new file mode 100644 index 00000000..f0067eef --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/Base32.java @@ -0,0 +1,88 @@ +package com.subgraph.orchid.data; + +import com.subgraph.orchid.TorException; + +public class Base32 { + private final static String BASE32_CHARS = "abcdefghijklmnopqrstuvwxyz234567"; + + public static String base32Encode(byte[] source) { + return base32Encode(source, 0, source.length); + } + + public static String base32Encode(byte[] source, int offset, int length) { + final int nbits = length * 8; + 
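// Illustrative sketch (not part of the patch) of a component implementing the
// DashboardRenderable interface defined above so it can be registered via
// Dashboard.addRenderable(); the class name and counter field are assumptions,
// and the java.io.PrintWriter / IOException imports used elsewhere are assumed.
class OpenCircuitCount implements DashboardRenderable {
    private volatile int openCircuits;

    public void dashboardRender(DashboardRenderer renderer, PrintWriter writer, int flags) throws IOException {
        if ((flags & DASHBOARD_CIRCUITS) != 0) {
            writer.println("[Circuits] open=" + openCircuits);
        }
    }
}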
if(nbits % 5 != 0) + throw new TorException("Base32 input length must be a multiple of 5 bits"); + + final int outlen = nbits / 5; + final StringBuffer outbuffer = new StringBuffer(); + int bit = 0; + for(int i = 0; i < outlen; i++) { + int v = (source[bit / 8] & 0xFF) << 8; + if(bit + 5 < nbits) v += (source[bit / 8 + 1] & 0xFF); + int u = (v >> (11 - (bit % 8))) & 0x1F; + outbuffer.append(BASE32_CHARS.charAt(u)); + bit += 5; + } + return outbuffer.toString(); + } + + public static byte[] base32Decode(String source) { + int[] v = stringToIntVector(source); + + int nbits = source.length() * 5; + if(nbits % 8 != 0) + throw new TorException("Base32 decoded array must be a muliple of 8 bits"); + + int outlen = nbits / 8; + byte[] outbytes = new byte[outlen]; + + int bit = 0; + for(int i = 0; i < outlen; i++) { + int bb = bit / 5; + outbytes[i] = (byte) decodeByte(bit, v[bb], v[bb + 1], v[bb + 2]); + bit += 8; + } + return outbytes; + } + + private static int decodeByte(int bitOffset, int b0, int b1, int b2) { + switch(bitOffset % 40) { + case 0: + return ls(b0, 3) + rs(b1, 2); + case 8: + return ls(b0, 6) + ls(b1, 1) + rs (b2, 4); + case 16: + return ls(b0, 4) + rs(b1, 1); + case 24: + return ls(b0, 7) + ls(b1, 2) + rs(b2, 3); + case 32: + return ls(b0, 5) + (b1 & 0xFF); + } + throw new TorException("Illegal bit offset"); + } + + private static int ls(int n, int shift) { + return ((n << shift) & 0xFF); + } + + private static int rs(int n, int shift) { + return ((n >> shift) & 0xFF); + } + + private static int[] stringToIntVector(String s) { + final int[] ints = new int[s.length() + 1]; + for(int i = 0; i < s.length(); i++) { + int b = s.charAt(i) & 0xFF; + if(b > 0x60 && b < 0x7B) + ints[i] = b - 0x61; + else if(b > 0x31 && b < 0x38) + ints[i] = b - 0x18; + else if(b > 0x40 && b < 0x5B) + ints[i] = b - 0x41; + else + throw new TorException("Illegal character in base32 encoded string: "+ s.charAt(i)); + } + return ints; + } +} diff --git a/orchid/src/com/subgraph/orchid/data/HexDigest.java b/orchid/src/com/subgraph/orchid/data/HexDigest.java new file mode 100644 index 00000000..aedb632f --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/HexDigest.java @@ -0,0 +1,140 @@ +package com.subgraph.orchid.data; + +import java.util.Arrays; +import java.util.List; + +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.encoders.Base64; +import com.subgraph.orchid.encoders.Hex; + +/** + * This class represents both digests and fingerprints that appear in directory + * documents. The names fingerprint and digest are used interchangeably in + * the specification but generally a fingerprint is a message digest (ie: SHA1) + * over the DER ASN.1 encoding of a public key. A digest is usually + * a message digest over a set of fields in a directory document. 
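// Illustrative sketch (not part of the patch) of the Base32 helper above; both directions
// require the bit-length checks in the code, and a 10-byte input (80 bits) satisfies both
// the encode and decode restrictions.
byte[] permanentId = new byte[10];                       // e.g. a hidden-service permanent id
String onionLabel = Base32.base32Encode(permanentId);    // 16 lowercase base32 characters
byte[] roundTripped = Base32.base32Decode(onionLabel);   // equals permanentId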
+ * + * Digests always appear as a 40 character hex string: + * + * 0EA20CAA3CE696E561BC08B15E00106700E8F682 + * + * Fingerprints may either appear as a single hex string as above or sometimes in + * a more easily human-parsed spaced format: + * + * 1E0F 5874 2268 E82F C600 D81D 9064 07C5 7CC2 C3A7 + * + */ +public class HexDigest { + public static HexDigest createFromStringList(List strings) { + StringBuilder builder = new StringBuilder(); + for(String chunk: strings) + builder.append(chunk); + return createFromString(builder.toString()); + } + + public static HexDigest createFromBase32String(String b32) { + return new HexDigest(Base32.base32Decode(b32)); + } + + public static HexDigest createFromString(String fingerprint) { + final String[] parts = fingerprint.split(" "); + if(parts.length > 1) + return createFromStringList(Arrays.asList(parts)); + final byte[] digestData = Hex.decode(fingerprint); + return new HexDigest(digestData); + } + + public static HexDigest createFromDigestBytes(byte[] data) { + return new HexDigest(data); + } + + public static HexDigest createDigestForData(byte[] data) { + final TorMessageDigest digest = new TorMessageDigest(); + digest.update(data); + return new HexDigest(digest.getDigestBytes()); + } + + private final byte[] digestBytes; + private final boolean isDigest256; + + private HexDigest(byte[] data) { + if(data.length != TorMessageDigest.TOR_DIGEST_SIZE && data.length != TorMessageDigest.TOR_DIGEST256_SIZE) { + throw new TorException("Digest data is not the correct length "+ data.length +" != (" + TorMessageDigest.TOR_DIGEST_SIZE + " or "+ TorMessageDigest.TOR_DIGEST256_SIZE +")"); + } + digestBytes = new byte[data.length]; + isDigest256 = digestBytes.length == TorMessageDigest.TOR_DIGEST256_SIZE; + System.arraycopy(data, 0, digestBytes, 0, data.length); + } + + public boolean isDigest256() { + return isDigest256; + } + + public byte[] getRawBytes() { + return Arrays.copyOf(digestBytes, digestBytes.length); + } + + public String toString() { + return new String(Hex.encode(digestBytes)); + } + + /** + * Return a spaced fingerprint representation of this HexDigest. + * + * ex: + * + * 1E0F 5874 2268 E82F C600 D81D 9064 07C5 7CC2 C3A7 + * + * @return A string representation of this HexDigest in the spaced fingerprint format. 
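// Illustrative sketch (not part of the patch) using the sample fingerprint from the class
// comment above to show the available representations.
HexDigest fp = HexDigest.createFromString("0EA20CAA3CE696E561BC08B15E00106700E8F682");
String hex = fp.toString();           // 40 hex characters
String spaced = fp.toSpacedString();  // the same digest grouped into blocks of four
String b32 = fp.toBase32();           // base32 form of the same 20 bytes
byte[] raw = fp.getRawBytes();        // defensive copy of the 20 raw digest bytes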
+ */ + public String toSpacedString() { + final String original = toString(); + final StringBuilder builder = new StringBuilder(); + for(int i = 0; i < original.length(); i++) { + if(i > 0 && (i % 4) == 0) + builder.append(' '); + builder.append(original.charAt(i)); + } + return builder.toString(); + } + + public String toBase32() { + return Base32.base32Encode(digestBytes); + } + + public String toBase64(boolean stripTrailingEquals) { + final String b64 = new String(Base64.encode(digestBytes), Tor.getDefaultCharset()); + if(stripTrailingEquals) { + return stripTrailingEquals(b64); + } else { + return b64; + } + } + + private String stripTrailingEquals(String s) { + int idx = s.length(); + while(idx > 0 && s.charAt(idx - 1) == '=') { + idx -= 1; + } + return s.substring(0, idx); + } + + public boolean equals(Object o) { + if(!(o instanceof HexDigest)) + return false; + final HexDigest other = (HexDigest)o; + return Arrays.equals(other.digestBytes, this.digestBytes); + } + + public int hashCode() { + int hash = 0; + for(int i = 0; i < 4; i++) { + hash <<= 8; + hash |= (digestBytes[i] & 0xFF); + } + return hash; + } + +} diff --git a/orchid/src/com/subgraph/orchid/data/IPv4Address.java b/orchid/src/com/subgraph/orchid/data/IPv4Address.java new file mode 100644 index 00000000..77151457 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/IPv4Address.java @@ -0,0 +1,102 @@ +package com.subgraph.orchid.data; + +import java.net.InetAddress; +import java.net.UnknownHostException; + +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.TorParsingException; + +public class IPv4Address { + + public static IPv4Address createFromString(String addressString) { + return new IPv4Address(parseStringToAddressData(addressString)); + } + + private static int parseStringToAddressData(String ipString) { + final String[] octets = ipString.split("\\."); + final int[] shifts = {24, 16, 8, 0}; + int addressData = 0; + int i = 0; + for(String o: octets) + addressData |= (octetStringToInt(o) << shifts[i++]); + + return addressData; + } + + private static int octetStringToInt(String octet) { + try { + int result = Integer.parseInt(octet); + if(result < 0 || result > 255) + throw new TorParsingException("Octet out of range: " + octet); + return result; + } catch(NumberFormatException e) { + throw new TorParsingException("Failed to parse octet: " + octet); + } + } + + public static boolean isValidIPv4AddressString(String addressString) { + try { + createFromString(addressString); + return true; + } catch (Exception e) { + return false; + } + } + + private final int addressData; + + public IPv4Address(int addressData) { + this.addressData = addressData; + + } + public int getAddressData() { + return addressData; + } + + public byte[] getAddressDataBytes() { + final byte[] result = new byte[4]; + result[0] = (byte)((addressData >> 24) & 0xFF); + result[1] = (byte)((addressData >> 16) & 0xFF); + result[2] = (byte)((addressData >> 8) & 0xFF); + result[3] = (byte)(addressData & 0xFF); + return result; + } + + public InetAddress toInetAddress() { + try { + return InetAddress.getByAddress(getAddressDataBytes()); + } catch (UnknownHostException e) { + throw new TorException(e); + } + } + + public static String stringFormat(int addressData) { + return ((addressData >> 24) & 0xFF) +"."+ + ((addressData >> 16) & 0xFF) +"."+ + ((addressData >> 8) & 0xFF) +"."+ + (addressData & 0xFF); + } + + public String toString() { + return stringFormat(addressData); + } + + public boolean equals(Object ob) { + if(this == ob) 
+ return true; + if(!(ob instanceof IPv4Address)) + return false; + IPv4Address other = (IPv4Address)ob; + return (other.addressData == addressData); + } + + public int hashCode() { + int n = 0; + for(int i = 0; i < 4; i++) { + n <<= 4; + n ^= ((addressData >> (i * 8)) & 0xFF); + } + return n; + } + +} diff --git a/orchid/src/com/subgraph/orchid/data/RandomSet.java b/orchid/src/com/subgraph/orchid/data/RandomSet.java new file mode 100644 index 00000000..990d21c8 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/RandomSet.java @@ -0,0 +1,72 @@ +package com.subgraph.orchid.data; + +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.subgraph.orchid.TorException; + +public class RandomSet { + + private final Set set; + private final List list; + private final SecureRandom random; + + public RandomSet() { + set = new HashSet(); + list = new ArrayList(); + random = createRandom(); + } + + private static SecureRandom createRandom() { + try { + return SecureRandom.getInstance("SHA1PRNG"); + } catch (NoSuchAlgorithmException e) { + throw new TorException(e); + } + } + + public boolean add(E o) { + if(set.add(o)) { + list.add(o); + return true; + } else { + return false; + } + } + + public boolean contains(Object o) { + return set.contains(o); + } + + public boolean isEmpty() { + return set.isEmpty(); + } + + public void clear() { + set.clear(); + list.clear(); + } + + public boolean remove(Object o) { + if(set.remove(o)) { + list.remove(o); + return true; + } else { + return false; + } + } + + public int size() { + return set.size(); + } + + public E getRandomElement() { + int idx = random.nextInt(list.size()); + return list.get(idx); + } + +} diff --git a/orchid/src/com/subgraph/orchid/data/Timestamp.java b/orchid/src/com/subgraph/orchid/data/Timestamp.java new file mode 100644 index 00000000..c8272d24 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/Timestamp.java @@ -0,0 +1,50 @@ +package com.subgraph.orchid.data; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.TimeZone; + +import com.subgraph.orchid.TorParsingException; + +public class Timestamp { + private final Date date; + + public static Timestamp createFromDateAndTimeString(String dateAndTime) { + final SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + format.setTimeZone(TimeZone.getTimeZone("GMT")); + format.setLenient(false); + try { + Timestamp ts = new Timestamp(format.parse(dateAndTime)); + return ts; + } catch (ParseException e) { + throw new TorParsingException("Could not parse timestamp string: "+ dateAndTime); + } + } + + public Timestamp(Date date) { + this.date = date; + } + + public long getTime() { + return date.getTime(); + } + + public Date getDate() { + return new Date(date.getTime()); + } + + public boolean hasPassed() { + final Date now = new Date(); + return date.before(now); + } + + public boolean isBefore(Timestamp ts) { + return date.before(ts.getDate()); + } + + public String toString() { + return date.toString(); + } + +} diff --git a/orchid/src/com/subgraph/orchid/data/exitpolicy/ExitPolicy.java b/orchid/src/com/subgraph/orchid/data/exitpolicy/ExitPolicy.java new file mode 100644 index 00000000..2a0f0651 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/exitpolicy/ExitPolicy.java @@ -0,0 +1,54 @@ +package com.subgraph.orchid.data.exitpolicy; + +import 
java.util.ArrayList; +import java.util.List; + +import com.subgraph.orchid.data.IPv4Address; + +public class ExitPolicy { + private final List rules = new ArrayList(); + + public void addAcceptRule(String rule) { + rules.add(PolicyRule.createAcceptFromString(rule)); + } + + public void addRejectRule(String rule) { + rules.add(PolicyRule.createRejectFromString(rule)); + } + + public boolean acceptsTarget(ExitTarget target) { + if(target.isAddressTarget()) + return acceptsDestination(target.getAddress(), target.getPort()); + else + return acceptsPort(target.getPort()); + } + + public boolean acceptsDestination(IPv4Address address, int port) { + if(address == null) + return acceptsPort(port); + + for(PolicyRule r: rules) { + if(r.matchesDestination(address, port)) + return r.isAcceptRule(); + } + // Default accept (see dir-spec.txt section 2.1, 'accept'/'reject' keywords) + return true; + } + + public boolean acceptsPort(int port) { + for(PolicyRule r: rules) { + if(r.matchesPort(port)) + return r.isAcceptRule(); + } + return false; + } + + public String toString() { + final StringBuilder sb = new StringBuilder(); + for(PolicyRule r: rules) { + sb.append(r); + sb.append("\n"); + } + return sb.toString(); + } +} diff --git a/orchid/src/com/subgraph/orchid/data/exitpolicy/ExitPorts.java b/orchid/src/com/subgraph/orchid/data/exitpolicy/ExitPorts.java new file mode 100644 index 00000000..9c8926bd --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/exitpolicy/ExitPorts.java @@ -0,0 +1,54 @@ +package com.subgraph.orchid.data.exitpolicy; + +import java.util.ArrayList; +import java.util.List; + + +/** + * Used by router status entries in consensus documents + */ +public class ExitPorts { + public static ExitPorts createAcceptExitPorts(String ports) { + final ExitPorts exitPorts = new ExitPorts(true); + exitPorts.parsePortRanges(ports); + return exitPorts; + } + + public static ExitPorts createRejectExitPorts(String ports) { + final ExitPorts exitPorts = new ExitPorts(false); + exitPorts.parsePortRanges(ports); + return exitPorts; + } + + private final List ranges = new ArrayList(); + private final boolean areAcceptPorts; + + private ExitPorts(boolean acceptPorts) { + this.areAcceptPorts = acceptPorts; + } + + public boolean areAcceptPorts() { + return areAcceptPorts; + } + + public boolean acceptsPort(int port) { + if(areAcceptPorts) + return contains(port); + else + return !contains(port); + } + public boolean contains(int port) { + for(PortRange r: ranges) + if(r.rangeContains(port)) + return true; + return false; + } + + private void parsePortRanges(String portRanges) { + final String[] args = portRanges.split(","); + for(String arg: args) + ranges.add(PortRange.createFromString(arg)); + } + + +} diff --git a/orchid/src/com/subgraph/orchid/data/exitpolicy/ExitTarget.java b/orchid/src/com/subgraph/orchid/data/exitpolicy/ExitTarget.java new file mode 100644 index 00000000..de9a7077 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/exitpolicy/ExitTarget.java @@ -0,0 +1,10 @@ +package com.subgraph.orchid.data.exitpolicy; + +import com.subgraph.orchid.data.IPv4Address; + +public interface ExitTarget { + boolean isAddressTarget(); + IPv4Address getAddress(); + String getHostname(); + int getPort(); +} diff --git a/orchid/src/com/subgraph/orchid/data/exitpolicy/Network.java b/orchid/src/com/subgraph/orchid/data/exitpolicy/Network.java new file mode 100644 index 00000000..a13c669e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/exitpolicy/Network.java @@ -0,0 +1,47 @@ +package 
com.subgraph.orchid.data.exitpolicy; + +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.data.IPv4Address; + +public class Network { + public static final Network ALL_ADDRESSES = new Network(IPv4Address.createFromString("0.0.0.0"), 0, "*"); + public static Network createFromString(String networkString) { + final String[] parts = networkString.split("/"); + final IPv4Address network = IPv4Address.createFromString(parts[0]); + if(parts.length == 1) + return new Network(network, 32, networkString); + + if(parts.length != 2) + throw new TorParsingException("Invalid network CIDR notation: " + networkString); + + try { + final int maskBits = Integer.parseInt(parts[1]); + return new Network(network, maskBits, networkString); + } catch(NumberFormatException e) { + throw new TorParsingException("Invalid netblock mask bit value: " + parts[1]); + } + } + + private final IPv4Address network; + private final int maskValue; + private final String originalString; + + Network(IPv4Address network, int bits, String originalString) { + this.network = network; + this.maskValue = createMask(bits); + this.originalString = originalString; + } + + private static int createMask(int maskBits) { + return maskBits == 0 ? 0 : (1 << 31) >> (maskBits - 1); + } + + public boolean contains(IPv4Address address) { + return (address.getAddressData() & maskValue) == (network.getAddressData() & maskValue); + } + + public String toString() { + return originalString; + } + +} diff --git a/orchid/src/com/subgraph/orchid/data/exitpolicy/PolicyRule.java b/orchid/src/com/subgraph/orchid/data/exitpolicy/PolicyRule.java new file mode 100644 index 00000000..94768acb --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/exitpolicy/PolicyRule.java @@ -0,0 +1,69 @@ +package com.subgraph.orchid.data.exitpolicy; + +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.data.IPv4Address; + +public class PolicyRule { + private final static String WILDCARD = "*"; + + public static PolicyRule createAcceptFromString(String rule) { + return createRule(rule, true); + } + + public static PolicyRule createRejectFromString(String rule) { + return createRule(rule, false); + } + + private static PolicyRule createRule(String rule, boolean isAccept) { + final String[] args = rule.split(":"); + if(args.length != 2) + throw new TorParsingException("Could not parse exit policy rule: "+ rule); + + return new PolicyRule(parseNetwork(args[0]), parsePortRange(args[1]), isAccept); + } + + private static Network parseNetwork(String network) { + if(network.equals(WILDCARD)) + return Network.ALL_ADDRESSES; + else + return Network.createFromString(network); + } + + private static PortRange parsePortRange(String portRange) { + if(portRange.equals(WILDCARD)) + return PortRange.ALL_PORTS; + else + return PortRange.createFromString(portRange); + } + + private final boolean isAcceptRule; + private final Network network; + private final PortRange portRange; + + private PolicyRule(Network network, PortRange portRange, boolean isAccept) { + this.network = network; + this.portRange = portRange; + this.isAcceptRule = isAccept; + } + + public boolean matchesPort(int port) { + if(!network.equals(Network.ALL_ADDRESSES)) + return false; + return portRange.rangeContains(port); + } + + public boolean matchesDestination(IPv4Address address, int port) { + if(!network.contains(address)) + return false; + return portRange.rangeContains(port); + } + + public boolean isAcceptRule() { + return isAcceptRule; + } + + public String toString() 
{ + final String keyword = isAcceptRule ? "accept" : "reject"; + return keyword + " "+ network + ":"+ portRange; + } +} diff --git a/orchid/src/com/subgraph/orchid/data/exitpolicy/PortRange.java b/orchid/src/com/subgraph/orchid/data/exitpolicy/PortRange.java new file mode 100644 index 00000000..67347204 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/data/exitpolicy/PortRange.java @@ -0,0 +1,72 @@ +package com.subgraph.orchid.data.exitpolicy; + +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.TorParsingException; + +public class PortRange { + + public static PortRange createFromString(String ports) { + final String[] parts = ports.split("-"); + if(parts.length == 1) { + return new PortRange(stringToPort(parts[0])); + } else if(parts.length == 2) { + return new PortRange(stringToPort(parts[0]), stringToPort(parts[1])); + } else { + throw new TorParsingException("Could not parse port range from string: " + ports); + } + } + + private static int stringToPort(String port) { + try { + final int portValue = Integer.parseInt(port); + if(!isValidPort(portValue)) + throw new TorParsingException("Illegal port value: "+ port); + return portValue; + } catch(NumberFormatException e) { + throw new TorParsingException("Could not parse port value: "+ port); + } + } + private final static int MAX_PORT = 0xFFFF; + public final static PortRange ALL_PORTS = new PortRange(1,MAX_PORT); + private final int portStart; + private final int portEnd; + + PortRange(int portValue) { + this(portValue, portValue); + } + + PortRange(int start, int end) { + if(!isValidRange(start, end)) + throw new TorException("Invalid port range: "+ start +"-"+ end); + portStart = start; + portEnd = end; + } + + static private boolean isValidRange(int start, int end) { + if(!(isValidPort(start) && isValidPort(end))) + return false; + else if(start > end) + return false; + else + return true; + } + + static private boolean isValidPort(int port) { + return port >= 0 && port <= MAX_PORT; + } + + public boolean rangeContains(int port) { + return port >= portStart && port <= portEnd; + } + + public String toString() { + if(portStart == 1 && portEnd == MAX_PORT) { + return "*"; + } else if(portStart == portEnd) { + return Integer.toString(portStart); + } else { + return Integer.toString(portStart) + "-" + Integer.toString(portEnd); + } + } + +} diff --git a/orchid/src/com/subgraph/orchid/directory/DescriptorCache.java b/orchid/src/com/subgraph/orchid/directory/DescriptorCache.java new file mode 100644 index 00000000..e40acef9 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/DescriptorCache.java @@ -0,0 +1,204 @@ +package com.subgraph.orchid.directory; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import com.subgraph.orchid.Descriptor; +import com.subgraph.orchid.DirectoryStore; +import com.subgraph.orchid.DirectoryStore.CacheFile; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.directory.parsing.DocumentParser; +import com.subgraph.orchid.directory.parsing.DocumentParsingResult; +import com.subgraph.orchid.misc.GuardedBy; + +public abstract class DescriptorCache { + private final static Logger logger = Logger.getLogger(DescriptorCache.class.getName()); + + private final DescriptorCacheData data; + + private final DirectoryStore store; 
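// Illustrative sketch (not part of the patch) of how the exit-policy classes above fit
// together; the rules and address are made-up examples in the "accept"/"reject"
// address:ports syntax they parse.
ExitPolicy policy = new ExitPolicy();
policy.addRejectRule("*:25");
policy.addAcceptRule("*:80");
policy.addAcceptRule("*:443");
boolean web = policy.acceptsDestination(IPv4Address.createFromString("198.51.100.7"), 443);  // true
boolean smtp = policy.acceptsPort(25);                                                       // false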
+ private final ScheduledExecutorService rebuildExecutor = Executors.newScheduledThreadPool(1); + private final CacheFile cacheFile; + private final CacheFile journalFile; + + @GuardedBy("this") + private int droppedBytes; + + @GuardedBy("this") + private int journalLength; + + @GuardedBy("this") + private int cacheLength; + + @GuardedBy("this") + private boolean initiallyLoaded; + + DescriptorCache(DirectoryStore store, CacheFile cacheFile, CacheFile journalFile) { + this.data = new DescriptorCacheData(); + this.store = store; + this.cacheFile = cacheFile; + this.journalFile = journalFile; + startRebuildTask(); + } + + public synchronized void initialLoad() { + if(initiallyLoaded) { + return; + } + reloadCache(); + } + + public void shutdown() { + rebuildExecutor.shutdownNow(); + } + + public T getDescriptor(HexDigest digest) { + return data.findByDigest(digest); + } + + public synchronized void addDescriptors(List descriptors) { + final List journalDescriptors = new ArrayList(); + int duplicateCount = 0; + for(T d: descriptors) { + if(data.addDescriptor(d)) { + if(d.getCacheLocation() == Descriptor.CacheLocation.NOT_CACHED) { + journalLength += d.getBodyLength(); + journalDescriptors.add(d); + } + } else { + duplicateCount += 1; + } + } + + if(!journalDescriptors.isEmpty()) { + store.appendDocumentList(journalFile, journalDescriptors); + } + if(duplicateCount > 0) { + logger.info("Duplicate descriptors added to journal, count = "+ duplicateCount); + } + } + + public void addDescriptor(T d) { + final List descriptors = new ArrayList(); + descriptors.add(d); + addDescriptors(descriptors); + } + + private synchronized void clearMemoryCache() { + data.clear(); + journalLength = 0; + cacheLength = 0; + droppedBytes = 0; + } + + private synchronized void reloadCache() { + clearMemoryCache(); + final ByteBuffer[] buffers = loadCacheBuffers(); + loadCacheFileBuffer(buffers[0]); + loadJournalFileBuffer(buffers[1]); + if(!initiallyLoaded) { + initiallyLoaded = true; + } + } + + private ByteBuffer[] loadCacheBuffers() { + synchronized (store) { + final ByteBuffer[] buffers = new ByteBuffer[2]; + buffers[0] = store.loadCacheFile(cacheFile); + buffers[1] = store.loadCacheFile(journalFile); + return buffers; + } + } + + private void loadCacheFileBuffer(ByteBuffer buffer) { + cacheLength = buffer.limit(); + if(cacheLength == 0) { + return; + } + final DocumentParser parser = createDocumentParser(buffer); + final DocumentParsingResult result = parser.parse(); + if(result.isOkay()) { + for(T d: result.getParsedDocuments()) { + d.setCacheLocation(Descriptor.CacheLocation.CACHED_CACHEFILE); + data.addDescriptor(d); + } + } + + } + + private void loadJournalFileBuffer(ByteBuffer buffer) { + journalLength = buffer.limit(); + if(journalLength == 0) { + return; + } + final DocumentParser parser = createDocumentParser(buffer); + final DocumentParsingResult result = parser.parse(); + if(result.isOkay()) { + int duplicateCount = 0; + logger.fine("Loaded "+ result.getParsedDocuments().size() + " descriptors from journal"); + for(T d: result.getParsedDocuments()) { + d.setCacheLocation(Descriptor.CacheLocation.CACHED_JOURNAL); + if(!data.addDescriptor(d)) { + duplicateCount += 1; + } + } + if(duplicateCount > 0) { + logger.info("Found "+ duplicateCount + " duplicate descriptors in journal file"); + } + } else if(result.isInvalid()) { + logger.warning("Invalid descriptor data parsing from journal file : "+ result.getMessage()); + } else if(result.isError()) { + logger.warning("Error parsing descriptors from journal 
file : "+ result.getMessage()); + } + } + + abstract protected DocumentParser createDocumentParser(ByteBuffer buffer); + + private ScheduledFuture startRebuildTask() { + return rebuildExecutor.scheduleAtFixedRate(new Runnable() { + public void run() { + maybeRebuildCache(); + } + }, 5, 30, TimeUnit.MINUTES); + } + + private synchronized void maybeRebuildCache() { + if(!initiallyLoaded) { + return; + } + + droppedBytes += data.cleanExpired(); + + if(!shouldRebuildCache()) { + return; + } + rebuildCache(); + } + + private boolean shouldRebuildCache() { + if(journalLength < 16384) { + return false; + } + if(droppedBytes > (journalLength + cacheLength) / 3) { + return true; + } + if(journalLength > (cacheLength / 2)) { + return true; + } + return false; + } + + private void rebuildCache() { + synchronized(store) { + store.writeDocumentList(cacheFile, data.getAllDescriptors()); + store.removeCacheFile(journalFile); + } + reloadCache(); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/DescriptorCacheData.java b/orchid/src/com/subgraph/orchid/directory/DescriptorCacheData.java new file mode 100644 index 00000000..cfb65145 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/DescriptorCacheData.java @@ -0,0 +1,88 @@ +package com.subgraph.orchid.directory; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.subgraph.orchid.Descriptor; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.misc.GuardedBy; + + + +public class DescriptorCacheData { + + /** 7 days */ + private final static long EXPIRY_PERIOD = 7 * 24 * 60 * 60 * 1000; + + @GuardedBy("this") + private final Map descriptorMap; + + @GuardedBy("this") + private final List allDescriptors; + + public DescriptorCacheData() { + this.descriptorMap = new HashMap(); + this.allDescriptors = new ArrayList(); + } + + synchronized T findByDigest(HexDigest digest) { + return descriptorMap.get(digest); + } + + synchronized List getAllDescriptors() { + return new ArrayList(allDescriptors); + } + + synchronized boolean addDescriptor(T d) { + if(descriptorMap.containsKey(d.getDescriptorDigest())) { + return false; + } + descriptorMap.put(d.getDescriptorDigest(), d); + allDescriptors.add(d); + return true; + } + + synchronized void clear() { + descriptorMap.clear(); + allDescriptors.clear(); + } + + synchronized int cleanExpired() { + final Set expired = getExpiredSet(); + + if(expired.isEmpty()) { + return 0; + } + + clear(); + int dropped = 0; + for(T d: allDescriptors) { + if(expired.contains(d)) { + dropped += d.getBodyLength(); + } else { + addDescriptor(d); + } + } + + return dropped; + } + + private Set getExpiredSet() { + final long now = System.currentTimeMillis(); + final Set expired = new HashSet(); + for(T d: allDescriptors) { + if(isExpired(d, now)) { + expired.add(d); + } + } + return expired; + } + + private boolean isExpired(T d, long now) { + return d.getLastListed() != 0 && d.getLastListed() < (now - EXPIRY_PERIOD); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/DirectoryAuthorityStatus.java b/orchid/src/com/subgraph/orchid/directory/DirectoryAuthorityStatus.java new file mode 100644 index 00000000..cf9c08a3 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/DirectoryAuthorityStatus.java @@ -0,0 +1,102 @@ +package com.subgraph.orchid.directory; + +import java.util.HashSet; +import java.util.Set; + +import com.subgraph.orchid.RouterStatus; +import 
com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.Timestamp; +import com.subgraph.orchid.data.exitpolicy.ExitPorts; + +public class DirectoryAuthorityStatus implements RouterStatus { + + private String nickname; + private HexDigest identity; + private IPv4Address address; + private int routerPort; + private int directoryPort; + private Set flags = new HashSet(); + private HexDigest v3Ident; + + void setV1Authority() { } + void setHiddenServiceAuthority() { addFlag("HSDir"); } + void unsetHiddenServiceAuthority() { flags.remove("HSDir"); } + void setBridgeAuthority() { } + void unsetV2Authority() { flags.remove("V2Dir"); } + void setNickname(String name) { nickname = name; } + void setIdentity(HexDigest identity) { this.identity = identity; } + void setAddress(IPv4Address address) { this.address = address; } + void setRouterPort(int port) { this.routerPort = port; } + void setDirectoryPort(int port) { this.directoryPort = port; } + void addFlag(String flag) { this.flags.add(flag); } + void setV3Ident(HexDigest v3Ident) { this.v3Ident = v3Ident; } + + DirectoryAuthorityStatus() { + addFlag("Authority"); + addFlag("V2Dir"); + } + + public IPv4Address getAddress() { + return address; + } + + public HexDigest getDescriptorDigest() { + return null; + } + + public int getDirectoryPort() { + return directoryPort; + } + + public int getEstimatedBandwidth() { + return 0; + } + + public ExitPorts getExitPorts() { + return null; + } + + public HexDigest getIdentity() { + return identity; + } + + public boolean hasBandwidth() { + return false; + } + + public int getMeasuredBandwidth() { + return 0; + } + + public String getNickname() { + return nickname; + } + + public Timestamp getPublicationTime() { + return null; + } + + public int getRouterPort() { + return routerPort; + } + + public String getVersion() { + return null; + } + + public boolean hasFlag(String flag) { + return flags.contains(flag); + } + + public boolean isDirectory() { + return true; + } + + HexDigest getV3Ident() { + return v3Ident; + } + public HexDigest getMicrodescriptorDigest() { + return null; + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/DirectoryImpl.java b/orchid/src/com/subgraph/orchid/directory/DirectoryImpl.java new file mode 100644 index 00000000..9bcccbe7 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/DirectoryImpl.java @@ -0,0 +1,513 @@ +package com.subgraph.orchid.directory; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.logging.Logger; + +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.ConsensusDocument.ConsensusFlavor; +import com.subgraph.orchid.ConsensusDocument.RequiredCertificate; +import com.subgraph.orchid.Descriptor; +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.DirectoryServer; +import com.subgraph.orchid.DirectoryStore; +import com.subgraph.orchid.DirectoryStore.CacheFile; +import com.subgraph.orchid.GuardEntry; +import com.subgraph.orchid.KeyCertificate; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.RouterMicrodescriptor; +import com.subgraph.orchid.RouterStatus; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.TorConfig.AutoBoolValue; +import com.subgraph.orchid.TorException; 
+import com.subgraph.orchid.crypto.TorRandom; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.RandomSet; +import com.subgraph.orchid.directory.parsing.DocumentParser; +import com.subgraph.orchid.directory.parsing.DocumentParserFactory; +import com.subgraph.orchid.directory.parsing.DocumentParsingResult; +import com.subgraph.orchid.events.Event; +import com.subgraph.orchid.events.EventHandler; +import com.subgraph.orchid.events.EventManager; + +public class DirectoryImpl implements Directory { + private final static Logger logger = Logger.getLogger(DirectoryImpl.class.getName()); + + private final Object loadLock = new Object(); + private boolean isLoaded = false; + + private final DirectoryStore store; + private final TorConfig config; + private final StateFile stateFile; + private final DescriptorCache microdescriptorCache; + private final DescriptorCache basicDescriptorCache; + + private final Map routersByIdentity; + private final Map routersByNickname; + private final RandomSet directoryCaches; + private final Set requiredCertificates; + private boolean haveMinimumRouterInfo; + private boolean needRecalculateMinimumRouterInfo; + private final EventManager consensusChangedManager; + private final TorRandom random; + private final static DocumentParserFactory parserFactory = new DocumentParserFactoryImpl(); + + private ConsensusDocument currentConsensus; + private ConsensusDocument consensusWaitingForCertificates; + + public DirectoryImpl(TorConfig config, DirectoryStore customDirectoryStore) { + store = (customDirectoryStore == null) ? (new DirectoryStoreImpl(config)) : (customDirectoryStore); + this.config = config; + stateFile = new StateFile(store, this); + microdescriptorCache = createMicrodescriptorCache(store); + basicDescriptorCache = createBasicDescriptorCache(store); + routersByIdentity = new HashMap(); + routersByNickname = new HashMap(); + directoryCaches = new RandomSet(); + requiredCertificates = new HashSet(); + consensusChangedManager = new EventManager(); + random = new TorRandom(); + } + + private static DescriptorCache createMicrodescriptorCache(DirectoryStore store) { + return new DescriptorCache(store, CacheFile.MICRODESCRIPTOR_CACHE, CacheFile.MICRODESCRIPTOR_JOURNAL) { + @Override + protected DocumentParser createDocumentParser(ByteBuffer buffer) { + return parserFactory.createRouterMicrodescriptorParser(buffer); + } + }; + } + + private static DescriptorCache createBasicDescriptorCache(DirectoryStore store) { + return new DescriptorCache(store, CacheFile.DESCRIPTOR_CACHE, CacheFile.DESCRIPTOR_JOURNAL) { + @Override + protected DocumentParser createDocumentParser(ByteBuffer buffer) { + return parserFactory.createRouterDescriptorParser(buffer, false); + } + }; + } + + public synchronized boolean haveMinimumRouterInfo() { + if(needRecalculateMinimumRouterInfo) { + checkMinimumRouterInfo(); + } + return haveMinimumRouterInfo; + } + + private synchronized void checkMinimumRouterInfo() { + if(currentConsensus == null || !currentConsensus.isLive()) { + needRecalculateMinimumRouterInfo = true; + haveMinimumRouterInfo = false; + return; + } + + int routerCount = 0; + int descriptorCount = 0; + for(Router r: routersByIdentity.values()) { + routerCount++; + if(!r.isDescriptorDownloadable()) + descriptorCount++; + } + needRecalculateMinimumRouterInfo = false; + haveMinimumRouterInfo = (descriptorCount * 4 > routerCount); + } + + public void loadFromStore() { + logger.info("Loading cached network information from disk"); + + synchronized(loadLock) 
{ + if(isLoaded) { + return; + } + boolean useMicrodescriptors = config.getUseMicrodescriptors() != AutoBoolValue.FALSE; + last = System.currentTimeMillis(); + logger.info("Loading certificates"); + loadCertificates(store.loadCacheFile(CacheFile.CERTIFICATES)); + logElapsed(); + + logger.info("Loading consensus"); + loadConsensus(store.loadCacheFile(useMicrodescriptors ? CacheFile.CONSENSUS_MICRODESC : CacheFile.CONSENSUS)); + logElapsed(); + + if(!useMicrodescriptors) { + logger.info("Loading descriptors"); + basicDescriptorCache.initialLoad(); + } else { + logger.info("Loading microdescriptor cache"); + microdescriptorCache.initialLoad(); + } + needRecalculateMinimumRouterInfo = true; + logElapsed(); + + logger.info("loading state file"); + stateFile.parseBuffer(store.loadCacheFile(CacheFile.STATE)); + logElapsed(); + + isLoaded = true; + loadLock.notifyAll(); + } + } + + public void close() { + basicDescriptorCache.shutdown(); + microdescriptorCache.shutdown(); + } + + private long last = 0; + private void logElapsed() { + final long now = System.currentTimeMillis(); + final long elapsed = now - last; + last = now; + logger.fine("Loaded in "+ elapsed + " ms."); + } + + private void loadCertificates(ByteBuffer buffer) { + final DocumentParser parser = parserFactory.createKeyCertificateParser(buffer); + final DocumentParsingResult result = parser.parse(); + if(testResult(result, "certificates")) { + for(KeyCertificate cert: result.getParsedDocuments()) { + addCertificate(cert); + } + } + } + + private void loadConsensus(ByteBuffer buffer) { + final DocumentParser parser = parserFactory.createConsensusDocumentParser(buffer); + final DocumentParsingResult result = parser.parse(); + if(testResult(result, "consensus")) { + addConsensusDocument(result.getDocument(), true); + } + } + + private boolean testResult(DocumentParsingResult result, String type) { + if(result.isOkay()) { + return true; + } else if(result.isError()) { + logger.warning("Parsing error loading "+ type + " : "+ result.getMessage()); + } else if(result.isInvalid()) { + logger.warning("Problem loading "+ type + " : "+ result.getMessage()); + } else { + logger.warning("Unknown problem loading "+ type); + } + return false; + } + + public void waitUntilLoaded() { + synchronized (loadLock) { + while(!isLoaded) { + try { + loadLock.wait(); + } catch (InterruptedException e) { + logger.warning("Thread interrupted while waiting for directory to load from disk"); + } + } + } + } + + public Collection getDirectoryAuthorities() { + return TrustedAuthorities.getInstance().getAuthorityServers(); + } + + public DirectoryServer getRandomDirectoryAuthority() { + final List servers = TrustedAuthorities.getInstance().getAuthorityServers(); + final int idx = random.nextInt(servers.size()); + return servers.get(idx); + } + + public Set getRequiredCertificates() { + return new HashSet(requiredCertificates); + } + + public void addCertificate(KeyCertificate certificate) { + synchronized(TrustedAuthorities.getInstance()) { + final boolean wasRequired = removeRequiredCertificate(certificate); + final DirectoryServer as = TrustedAuthorities.getInstance().getAuthorityServerByIdentity(certificate.getAuthorityFingerprint()); + if(as == null) { + logger.warning("Certificate read for unknown directory authority with identity: "+ certificate.getAuthorityFingerprint()); + return; + } + as.addCertificate(certificate); + + if(consensusWaitingForCertificates != null && wasRequired) { + + switch(consensusWaitingForCertificates.verifySignatures()) { + case 
STATUS_FAILED: + consensusWaitingForCertificates = null; + return; + + case STATUS_VERIFIED: + addConsensusDocument(consensusWaitingForCertificates, false); + consensusWaitingForCertificates = null; + return; + + case STATUS_NEED_CERTS: + requiredCertificates.addAll(consensusWaitingForCertificates.getRequiredCertificates()); + return; + } + } + } + } + + private boolean removeRequiredCertificate(KeyCertificate certificate) { + final Iterator it = requiredCertificates.iterator(); + while(it.hasNext()) { + RequiredCertificate r = it.next(); + if(r.getSigningKey().equals(certificate.getAuthoritySigningKey().getFingerprint())) { + it.remove(); + return true; + } + } + return false; + } + + public void storeCertificates() { + synchronized(TrustedAuthorities.getInstance()) { + final List certs = new ArrayList(); + for(DirectoryServer ds: TrustedAuthorities.getInstance().getAuthorityServers()) { + certs.addAll(ds.getCertificates()); + } + store.writeDocumentList(CacheFile.CERTIFICATES, certs); + } + } + + public void addRouterDescriptors(List descriptors) { + basicDescriptorCache.addDescriptors(descriptors); + needRecalculateMinimumRouterInfo = true; + } + + public synchronized void addConsensusDocument(ConsensusDocument consensus, boolean fromCache) { + if(consensus.equals(currentConsensus)) + return; + + if(currentConsensus != null && consensus.getValidAfterTime().isBefore(currentConsensus.getValidAfterTime())) { + logger.warning("New consensus document is older than current consensus document"); + return; + } + + synchronized(TrustedAuthorities.getInstance()) { + switch(consensus.verifySignatures()) { + case STATUS_FAILED: + logger.warning("Unable to verify signatures on consensus document, discarding..."); + return; + + case STATUS_NEED_CERTS: + consensusWaitingForCertificates = consensus; + requiredCertificates.addAll(consensus.getRequiredCertificates()); + return; + + case STATUS_VERIFIED: + break; + } + requiredCertificates.addAll(consensus.getRequiredCertificates()); + + } + final Map oldRouterByIdentity = new HashMap(routersByIdentity); + + clearAll(); + + for(RouterStatus status: consensus.getRouterStatusEntries()) { + if(status.hasFlag("Running") && status.hasFlag("Valid")) { + final RouterImpl router = updateOrCreateRouter(status, oldRouterByIdentity); + addRouter(router); + classifyRouter(router); + } + final Descriptor d = getDescriptorForRouterStatus(status, consensus.getFlavor() == ConsensusFlavor.MICRODESC); + if(d != null) { + d.setLastListed(consensus.getValidAfterTime().getTime()); + } + } + + logger.fine("Loaded "+ routersByIdentity.size() +" routers from consensus document"); + currentConsensus = consensus; + + if(!fromCache) { + storeCurrentConsensus(); + } + consensusChangedManager.fireEvent(new Event() {}); + } + + private void storeCurrentConsensus() { + if(currentConsensus != null) { + if(currentConsensus.getFlavor() == ConsensusFlavor.MICRODESC) { + store.writeDocument(CacheFile.CONSENSUS_MICRODESC, currentConsensus); + } else { + store.writeDocument(CacheFile.CONSENSUS, currentConsensus); + } + } + } + + private Descriptor getDescriptorForRouterStatus(RouterStatus rs, boolean isMicrodescriptor) { + if(isMicrodescriptor) { + return microdescriptorCache.getDescriptor(rs.getMicrodescriptorDigest()); + } else { + return basicDescriptorCache.getDescriptor(rs.getDescriptorDigest()); + } + } + + private RouterImpl updateOrCreateRouter(RouterStatus status, Map knownRouters) { + final RouterImpl router = knownRouters.get(status.getIdentity()); + if(router == null) + return 
RouterImpl.createFromRouterStatus(this, status); + router.updateStatus(status); + return router; + } + + private void clearAll() { + routersByIdentity.clear(); + routersByNickname.clear(); + directoryCaches.clear(); + } + + private void classifyRouter(RouterImpl router) { + if(isValidDirectoryCache(router)) { + directoryCaches.add(router); + } else { + directoryCaches.remove(router); + } + } + + private boolean isValidDirectoryCache(RouterImpl router) { + if(router.getDirectoryPort() == 0) + return false; + if(router.hasFlag("BadDirectory")) + return false; + return router.hasFlag("V2Dir"); + } + + private void addRouter(RouterImpl router) { + routersByIdentity.put(router.getIdentityHash(), router); + addRouterByNickname(router); + } + + private void addRouterByNickname(RouterImpl router) { + final String name = router.getNickname(); + if(name == null || name.equals("Unnamed")) + return; + if(routersByNickname.containsKey(router.getNickname())) { + //logger.warn("Duplicate router nickname: "+ router.getNickname()); + return; + } + routersByNickname.put(name, router); + } + + public synchronized void addRouterMicrodescriptors(List microdescriptors) { + microdescriptorCache.addDescriptors(microdescriptors); + needRecalculateMinimumRouterInfo = true; + } + + synchronized public List getRoutersWithDownloadableDescriptors() { + waitUntilLoaded(); + final List routers = new ArrayList(); + for(RouterImpl router: routersByIdentity.values()) { + if(router.isDescriptorDownloadable()) + routers.add(router); + } + + for(int i = 0; i < routers.size(); i++) { + final Router a = routers.get(i); + final int swapIdx = random.nextInt(routers.size()); + final Router b = routers.get(swapIdx); + routers.set(i, b); + routers.set(swapIdx, a); + } + + return routers; + } + + public ConsensusDocument getCurrentConsensusDocument() { + return currentConsensus; + } + + public boolean hasPendingConsensus() { + synchronized (TrustedAuthorities.getInstance()) { + return consensusWaitingForCertificates != null; + } + } + + public void registerConsensusChangedHandler(EventHandler handler) { + consensusChangedManager.addListener(handler); + } + + public void unregisterConsensusChangedHandler(EventHandler handler) { + consensusChangedManager.removeListener(handler); + } + + public Router getRouterByName(String name) { + if(name.equals("Unnamed")) { + return null; + } + if(name.length() == 41 && name.charAt(0) == '$') { + try { + final HexDigest identity = HexDigest.createFromString(name.substring(1)); + return getRouterByIdentity(identity); + } catch (Exception e) { + return null; + } + } + waitUntilLoaded(); + return routersByNickname.get(name); + } + + public Router getRouterByIdentity(HexDigest identity) { + waitUntilLoaded(); + synchronized (routersByIdentity) { + return routersByIdentity.get(identity); + } + } + + public List getRouterListByNames(List names) { + waitUntilLoaded(); + final List routers = new ArrayList(); + for(String n: names) { + final Router r = getRouterByName(n); + if(r == null) + throw new TorException("Could not find router named: "+ n); + routers.add(r); + } + return routers; + } + + public List getAllRouters() { + waitUntilLoaded(); + synchronized(routersByIdentity) { + return new ArrayList(routersByIdentity.values()); + } + } + + public GuardEntry createGuardEntryFor(Router router) { + waitUntilLoaded(); + return stateFile.createGuardEntryFor(router); + } + + public List getGuardEntries() { + waitUntilLoaded(); + return stateFile.getGuardEntries(); + } + + public void 
removeGuardEntry(GuardEntry entry) { + waitUntilLoaded(); + stateFile.removeGuardEntry(entry); + } + + public void addGuardEntry(GuardEntry entry) { + waitUntilLoaded(); + stateFile.addGuardEntry(entry); + } + + public RouterMicrodescriptor getMicrodescriptorFromCache(HexDigest descriptorDigest) { + return microdescriptorCache.getDescriptor(descriptorDigest); + } + + + public RouterDescriptor getBasicDescriptorFromCache(HexDigest descriptorDigest) { + return basicDescriptorCache.getDescriptor(descriptorDigest); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/DirectoryServerImpl.java b/orchid/src/com/subgraph/orchid/directory/DirectoryServerImpl.java new file mode 100644 index 00000000..d8f0bceb --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/DirectoryServerImpl.java @@ -0,0 +1,147 @@ +package com.subgraph.orchid.directory; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import com.subgraph.orchid.DirectoryServer; +import com.subgraph.orchid.KeyCertificate; +import com.subgraph.orchid.RouterStatus; +import com.subgraph.orchid.data.HexDigest; + +public class DirectoryServerImpl extends RouterImpl implements DirectoryServer { + + private List certificates = new ArrayList(); + + private boolean isHiddenServiceAuthority = false; + private boolean isBridgeAuthority = false; + private boolean isExtraInfoCache = false; + private int port; + private HexDigest v3Ident; + + DirectoryServerImpl(RouterStatus status) { + super(null, status); + } + + void setHiddenServiceAuthority() { isHiddenServiceAuthority = true; } + void unsetHiddenServiceAuthority() { isHiddenServiceAuthority = false; } + void setBridgeAuthority() { isBridgeAuthority = true; } + void setExtraInfoCache() { isExtraInfoCache = true; } + void setPort(int port) { this.port = port; } + void setV3Ident(HexDigest fingerprint) { this.v3Ident = fingerprint; } + + public boolean isTrustedAuthority() { + return true; + } + + /** + * Return true if this DirectoryServer entry has + * complete and valid information. 
+ * @return + */ + public boolean isValid() { + return true; + } + + public boolean isV2Authority() { + return hasFlag("Authority") && hasFlag("V2Dir"); + } + + public boolean isV3Authority() { + return hasFlag("Authority") && v3Ident != null; + } + + public boolean isHiddenServiceAuthority() { + return isHiddenServiceAuthority; + } + + public boolean isBridgeAuthority() { + return isBridgeAuthority; + } + + public boolean isExtraInfoCache() { + return isExtraInfoCache; + } + + public HexDigest getV3Identity() { + return v3Ident; + } + + public KeyCertificate getCertificateByFingerprint(HexDigest fingerprint) { + for(KeyCertificate kc: getCertificates()) { + if(kc.getAuthoritySigningKey().getFingerprint().equals(fingerprint)) { + return kc; + } + } + return null; + } + + public List getCertificates() { + synchronized(certificates) { + purgeExpiredCertificates(); + purgeOldCertificates(); + return new ArrayList(certificates); + } + } + + private void purgeExpiredCertificates() { + Iterator it = certificates.iterator(); + while(it.hasNext()) { + KeyCertificate elem = it.next(); + if(elem.isExpired()) { + it.remove(); + } + } + } + + private void purgeOldCertificates() { + if(certificates.size() < 2) { + return; + } + final KeyCertificate newest = getNewestCertificate(); + final Iterator it = certificates.iterator(); + while(it.hasNext()) { + KeyCertificate elem = it.next(); + if(elem != newest && isMoreThan48HoursOlder(newest, elem)) { + it.remove(); + } + } + } + + private KeyCertificate getNewestCertificate() { + KeyCertificate newest = null; + for(KeyCertificate kc : certificates) { + if(newest == null || getPublishedMilliseconds(newest) > getPublishedMilliseconds(kc)) { + newest = kc; + } + } + return newest; + } + + private boolean isMoreThan48HoursOlder(KeyCertificate newer, KeyCertificate older) { + final long milliseconds = 48 * 60 * 60 * 1000; + return (getPublishedMilliseconds(newer) - getPublishedMilliseconds(older)) > milliseconds; + } + + private long getPublishedMilliseconds(KeyCertificate certificate) { + return certificate.getKeyPublishedTime().getDate().getTime(); + } + + public void addCertificate(KeyCertificate certificate) { + if(!certificate.getAuthorityFingerprint().equals(v3Ident)) { + throw new IllegalArgumentException("This certificate does not appear to belong to this directory authority"); + } + synchronized(certificates) { + certificates.add(certificate); + } + } + + public String toString() { + if(v3Ident != null) + return "(Directory: "+ getNickname() +" "+ getAddress() +":"+ port +" fingerprint="+ getIdentityHash() +" v3ident="+ + v3Ident +")"; + else + return "(Directory: "+ getNickname() +" "+ getAddress() +":"+ port +" fingerprint="+ getIdentityHash() +")"; + + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/DirectoryStoreFile.java b/orchid/src/com/subgraph/orchid/directory/DirectoryStoreFile.java new file mode 100644 index 00000000..0e702f46 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/DirectoryStoreFile.java @@ -0,0 +1,226 @@ +package com.subgraph.orchid.directory; + +import java.io.Closeable; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.channels.WritableByteChannel; +import java.util.List; +import java.util.logging.Logger; + +import com.subgraph.orchid.Document; +import com.subgraph.orchid.TorConfig; +import 
com.subgraph.orchid.crypto.TorRandom; + +public class DirectoryStoreFile { + private final static Logger logger = Logger.getLogger(DirectoryStoreFile.class.getName()); + private final static ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0); + private final static TorRandom random = new TorRandom(); + + private final TorConfig config; + private final String cacheFilename; + + private RandomAccessFile openFile; + + private boolean openFileFailed; + private boolean directoryCreationFailed; + + DirectoryStoreFile(TorConfig config, String cacheFilename) { + this.config = config; + this.cacheFilename = cacheFilename; + } + + public void writeData(ByteBuffer data) { + final File tempFile = createTempFile(); + final FileOutputStream fos = openFileOutputStream(tempFile); + if(fos == null) { + return; + } + try { + writeAllToChannel(fos.getChannel(), data); + quietClose(fos); + installTempFile(tempFile); + } catch (IOException e) { + logger.warning("I/O error writing to temporary cache file "+ tempFile + " : "+ e); + return; + } finally { + quietClose(fos); + tempFile.delete(); + } + } + + public void writeDocuments(List documents) { + final File tempFile = createTempFile(); + final FileOutputStream fos = openFileOutputStream(tempFile); + if(fos == null) { + return; + } + try { + writeDocumentsToChannel(fos.getChannel(), documents); + quietClose(fos); + installTempFile(tempFile); + } catch (IOException e) { + logger.warning("I/O error writing to temporary cache file "+ tempFile + " : "+ e); + return; + } finally { + quietClose(fos); + tempFile.delete(); + } + } + + private FileOutputStream openFileOutputStream(File file) { + try { + createDirectoryIfMissing(); + return new FileOutputStream(file); + } catch (FileNotFoundException e) { + logger.warning("Failed to open file "+ file + " : "+ e); + return null; + } + } + + public void appendDocuments(List documents) { + if(!ensureOpened()) { + return; + } + try { + final FileChannel channel = openFile.getChannel(); + channel.position(channel.size()); + writeDocumentsToChannel(channel, documents); + channel.force(true); + } catch (IOException e) { + logger.warning("I/O error writing to cache file "+ cacheFilename); + return; + } + } + + public ByteBuffer loadContents() { + if(!(fileExists() && ensureOpened())) { + return EMPTY_BUFFER; + } + + try { + return readAllFromChannel(openFile.getChannel()); + } catch (IOException e) { + logger.warning("I/O error reading cache file "+ cacheFilename + " : "+ e); + return EMPTY_BUFFER; + } + } + + private ByteBuffer readAllFromChannel(FileChannel channel) throws IOException { + channel.position(0); + final ByteBuffer buffer = createBufferForChannel(channel); + while(buffer.hasRemaining()) { + if(channel.read(buffer) == -1) { + logger.warning("Unexpected EOF reading from cache file"); + return EMPTY_BUFFER; + } + } + buffer.rewind(); + return buffer; + } + + private ByteBuffer createBufferForChannel(FileChannel channel) throws IOException { + final int sz = (int) (channel.size() & 0xFFFFFFFF); + return ByteBuffer.allocateDirect(sz); + } + + void close() { + if(openFile != null) { + quietClose(openFile); + openFile = null; + } + } + + private boolean fileExists() { + final File file = getFile(); + return file.exists(); + } + + private boolean ensureOpened() { + if(openFileFailed) { + return false; + } + if(openFile != null) { + return true; + } + openFile = openFile(); + return openFile != null; + } + + private RandomAccessFile openFile() { + try { + final File f = new File(config.getDataDirectory(), cacheFilename); + 
createDirectoryIfMissing(); + return new RandomAccessFile(f, "rw"); + } catch (FileNotFoundException e) { + openFileFailed = true; + logger.warning("Failed to open cache file "+ cacheFilename); + return null; + } + } + + private void installTempFile(File tempFile) { + close(); + final File target = getFile(); + if(target.exists() && !target.delete()) { + logger.warning("Failed to delete file "+ target); + } + if(!tempFile.renameTo(target)) { + logger.warning("Failed to rename temp file "+ tempFile +" to "+ target); + } + tempFile.delete(); + ensureOpened(); + } + + private File createTempFile() { + final long n = random.nextLong(); + final File f = new File(config.getDataDirectory(), cacheFilename + Long.toString(n)); + f.deleteOnExit(); + return f; + } + + private void writeDocumentsToChannel(FileChannel channel, List documents) throws IOException { + for(Document d: documents) { + writeAllToChannel(channel, d.getRawDocumentBytes()); + } + } + + private void writeAllToChannel(WritableByteChannel channel, ByteBuffer data) throws IOException { + data.rewind(); + while(data.hasRemaining()) { + channel.write(data); + } + } + + private void quietClose(Closeable closeable) { + try { + closeable.close(); + } catch (IOException e) {} + } + + private File getFile() { + return new File(config.getDataDirectory(), cacheFilename); + } + + public void remove() { + close(); + getFile().delete(); + } + + private void createDirectoryIfMissing() { + if(directoryCreationFailed) { + return; + } + final File dd = config.getDataDirectory(); + if(!dd.exists()) { + if(!dd.mkdirs()) { + directoryCreationFailed = true; + logger.warning("Failed to create data directory "+ dd); + } + } + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/DirectoryStoreImpl.java b/orchid/src/com/subgraph/orchid/directory/DirectoryStoreImpl.java new file mode 100644 index 00000000..f23b2935 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/DirectoryStoreImpl.java @@ -0,0 +1,58 @@ +package com.subgraph.orchid.directory; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import com.subgraph.orchid.DirectoryStore; +import com.subgraph.orchid.Document; +import com.subgraph.orchid.TorConfig; + +public class DirectoryStoreImpl implements DirectoryStore { + + private final TorConfig config; + private Map fileMap; + + DirectoryStoreImpl(TorConfig config) { + this.config = config; + this.fileMap = new HashMap(); + } + + public synchronized ByteBuffer loadCacheFile(CacheFile cacheFile) { + return getStoreFile(cacheFile).loadContents(); + } + + public synchronized void writeData(CacheFile cacheFile, ByteBuffer data) { + getStoreFile(cacheFile).writeData(data); + } + + public synchronized void writeDocument(CacheFile cacheFile, Document document) { + writeDocumentList(cacheFile, Arrays.asList(document)); + } + + public synchronized void writeDocumentList(CacheFile cacheFile, List documents) { + getStoreFile(cacheFile).writeDocuments(documents); + } + + public synchronized void appendDocumentList(CacheFile cacheFile, List documents) { + getStoreFile(cacheFile).appendDocuments(documents); + } + + public synchronized void removeCacheFile(CacheFile cacheFile) { + getStoreFile(cacheFile).remove(); + } + + public synchronized void removeAllCacheFiles() { + for(CacheFile cf: CacheFile.values()) { + getStoreFile(cf).remove(); + } + } + + private DirectoryStoreFile getStoreFile(CacheFile cacheFile) { + if(!fileMap.containsKey(cacheFile)) { + 
fileMap.put(cacheFile, new DirectoryStoreFile(config, cacheFile.getFilename())); + } + return fileMap.get(cacheFile); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/DocumentFieldParserImpl.java b/orchid/src/com/subgraph/orchid/directory/DocumentFieldParserImpl.java new file mode 100644 index 00000000..24296e31 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/DocumentFieldParserImpl.java @@ -0,0 +1,422 @@ +package com.subgraph.orchid.directory; + +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.List; +import java.util.TimeZone; +import java.util.logging.Logger; + +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.crypto.TorNTorKeyAgreement; +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.crypto.TorSignature; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.Timestamp; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.DocumentObject; +import com.subgraph.orchid.directory.parsing.DocumentParsingHandler; +import com.subgraph.orchid.directory.parsing.NameIntegerParameter; +import com.subgraph.orchid.encoders.Base64; + +public class DocumentFieldParserImpl implements DocumentFieldParser { + private final static Logger logger = Logger.getLogger(DocumentFieldParserImpl.class.getName()); + private final static String BEGIN_TAG = "-----BEGIN"; + private final static String END_TAG = "-----END"; + private final static String TAG_DELIMITER = "-----"; + private final static String DEFAULT_DELIMITER = " "; + private final ByteBuffer inputBuffer; + private final SimpleDateFormat dateFormat; + private String delimiter = DEFAULT_DELIMITER; + private String currentKeyword; + private List currentItems; + private int currentItemsPosition; + private boolean recognizeOpt; + /* If a line begins with this string do not include it in the current signature. 
*/ + private String signatureIgnoreToken; + private boolean isProcessingSignedEntity = false; + private TorMessageDigest signatureDigest; + private TorMessageDigest signatureDigest256; + private StringBuilder rawDocumentBuffer; + + private DocumentParsingHandler callbackHandler; + + public DocumentFieldParserImpl(ByteBuffer buffer) { + buffer.rewind(); + this.inputBuffer = buffer; + rawDocumentBuffer = new StringBuilder(); + dateFormat = createDateFormat(); + } + + private static SimpleDateFormat createDateFormat() { + final SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + format.setTimeZone(TimeZone.getTimeZone("GMT")); + format.setLenient(false); + return format; + } + + public String parseNickname() { + // XXX verify valid nickname + return getItem(); + } + public String parseString() { + return getItem(); + } + + public void setRecognizeOpt() { + recognizeOpt = true; + } + + public void setHandler(DocumentParsingHandler handler) { + callbackHandler = handler; + } + + public void setDelimiter(String delimiter) { + this.delimiter = delimiter; + } + + public int argumentsRemaining() { + return currentItems.size() - currentItemsPosition; + } + + private String getItem() { + if(currentItemsPosition >= currentItems.size()) + throw new TorParsingException("Overrun while reading arguments"); + return currentItems.get(currentItemsPosition++); + } + /* + * Return a string containing all remaining arguments concatenated together + */ + public String parseConcatenatedString() { + StringBuilder result = new StringBuilder(); + while(argumentsRemaining() > 0) { + if(result.length() > 0) + result.append(" "); + result.append(getItem()); + } + return result.toString(); + } + + public boolean parseBoolean() { + final int i = parseInteger(); + if(i == 1) + return true; + else if(i == 0) + return false; + else + throw new TorParsingException("Illegal boolean value: "+ i); + } + + public int parseInteger() { + return parseInteger(getItem()); + } + + public int parseInteger(String item) { + try { + return Integer.parseInt(item); + } catch(NumberFormatException e) { + throw new TorParsingException("Failed to parse expected integer value: " + item); + } + } + + public int[] parseIntegerList() { + final String item = getItem(); + final String[] ns = item.split(","); + final int[] result = new int[ns.length]; + for(int i = 0; i < result.length; i++) { + result[i] = parseInteger(ns[i]); + } + return result; + } + + public int parsePort() { + return parsePort(getItem()); + } + + public int parsePort(String item) { + final int port = parseInteger(item); + if(port < 0 || port > 65535) + throw new TorParsingException("Illegal port value: " + port); + return port; + } + + + public Timestamp parseTimestamp() { + String timeAndDate = getItem() + " " + getItem(); + try { + return new Timestamp(dateFormat.parse(timeAndDate)); + } catch (ParseException e) { + throw new TorParsingException("Could not parse timestamp value: "+ timeAndDate); + } + } + + public HexDigest parseHexDigest() { + return HexDigest.createFromString(parseString()); + } + + public HexDigest parseBase32Digest() { + return HexDigest.createFromBase32String(parseString()); + } + + public HexDigest parseFingerprint() { + return HexDigest.createFromString(parseConcatenatedString()); + } + + public void verifyExpectedArgumentCount(String keyword, int argumentCount) { + verifyExpectedArgumentCount(keyword, argumentCount, argumentCount); + } + + private void verifyExpectedArgumentCount(String keyword, int expectedMin, int expectedMax) { 
+ final int argumentCount = argumentsRemaining(); + if(expectedMin != -1 && argumentCount < expectedMin) + throw new TorParsingException("Not enough arguments for keyword '"+ keyword +"' expected "+ expectedMin +" and got "+ argumentCount); + + if(expectedMax != -1 && argumentCount > expectedMax) + // Is this the correct thing to do, or should just be a warning? + throw new TorParsingException("Too many arguments for keyword '"+ keyword +"' expected "+ expectedMax +" and got "+ argumentCount); + } + + public byte[] parseBase64Data() { + final StringBuilder string = new StringBuilder(getItem()); + switch(string.length() % 4) { + case 2: + string.append("=="); + break; + case 3: + string.append("="); + break; + default: + break; + } + try { + return Base64.decode(string.toString().getBytes("ISO-8859-1")); + } catch (UnsupportedEncodingException e) { + throw new TorException(e); + } + + } + + public IPv4Address parseAddress() { + return IPv4Address.createFromString(getItem()); + } + + public TorPublicKey parsePublicKey() { + final DocumentObject documentObject = parseObject(); + return TorPublicKey.createFromPEMBuffer(documentObject.getContent()); + } + + + public byte[] parseNtorPublicKey() { + final byte[] key = parseBase64Data(); + if(key.length != TorNTorKeyAgreement.CURVE25519_PUBKEY_LEN) { + throw new TorParsingException("NTor public key was not expected length after base64 decoding. Length is "+ key.length); + } + return key; + } + + public TorSignature parseSignature() { + final DocumentObject documentObject = parseObject(); + TorSignature s = TorSignature.createFromPEMBuffer(documentObject.getContent()); + return s; + } + + public NameIntegerParameter parseParameter() { + final String item = getItem(); + final int eq = item.indexOf('='); + if(eq == -1) { + throw new TorParsingException("Parameter not in expected form name=value"); + } + final String name = item.substring(0, eq); + validateParameterName(name); + final int value = parseInteger(item.substring(eq + 1)); + return new NameIntegerParameter(name, value); + } + + private void validateParameterName(String name) { + if(name.isEmpty()) { + throw new TorParsingException("Parameter name cannot be empty"); + } + for(char c: name.toCharArray()) { + if(!(Character.isLetterOrDigit(c) || c == '_')) { + throw new TorParsingException("Parameter name can only contain letters. Rejecting: "+ name); + } + } + } + + public DocumentObject parseTypedObject(String type) { + final DocumentObject object = parseObject(); + if(!type.equals(object.getKeyword())) + throw new TorParsingException("Unexpected object type. 
Expecting: "+ type +", but got: "+ object.getKeyword()); + return object; + } + + public DocumentObject parseObject() { + final String line = readLine(); + final String keyword = parseObjectHeader(line); + final DocumentObject object = new DocumentObject(keyword, line); + parseObjectBody(object, keyword); + return object; + } + + private String parseObjectHeader(String headerLine) { + if(!(headerLine.startsWith(BEGIN_TAG) && headerLine.endsWith(TAG_DELIMITER))) + throw new TorParsingException("Did not find expected object start tag."); + return headerLine.substring(BEGIN_TAG.length() + 1, + headerLine.length() - TAG_DELIMITER.length()); + } + + private void parseObjectBody(DocumentObject object, String keyword) { + final String endTag = END_TAG +" "+ keyword +TAG_DELIMITER; + while(true) { + final String line = readLine(); + if(line == null) { + throw new TorParsingException("EOF reached before end of '"+ keyword +"' object."); + } + if(line.equals(endTag)) { + object.addFooterLine(line); + return; + } + parseObjectContent(object, line); + } + } + + private void parseObjectContent(DocumentObject object, String content) { + // XXX verify legal base64 data + object.addContent(content); + } + + public String getCurrentKeyword() { + return currentKeyword; + } + + public void processDocument() { + if(callbackHandler == null) + throw new TorException("DocumentFieldParser#processDocument() called with null callbackHandler"); + + while(true) { + final String line = readLine(); + if(line == null) { + callbackHandler.endOfDocument(); + return; + } + if(processLine(line)) + callbackHandler.parseKeywordLine(); + } + } + + public void startSignedEntity() { + isProcessingSignedEntity = true; + signatureDigest = new TorMessageDigest(); + signatureDigest256 = new TorMessageDigest(true); + } + + public void endSignedEntity() { + isProcessingSignedEntity = false; + } + + public void setSignatureIgnoreToken(String token) { + signatureIgnoreToken = token; + } + + public TorMessageDigest getSignatureMessageDigest() { + return signatureDigest; + } + + public TorMessageDigest getSignatureMessageDigest256() { + return signatureDigest256; + } + + private void updateRawDocument(String line) { + rawDocumentBuffer.append(line); + rawDocumentBuffer.append('\n'); + } + + public String getRawDocument() { + return rawDocumentBuffer.toString(); + } + + public void resetRawDocument() { + rawDocumentBuffer = new StringBuilder(); + } + + public void resetRawDocument(String initialContent) { + rawDocumentBuffer = new StringBuilder(); + rawDocumentBuffer.append(initialContent); + } + + public boolean verifySignedEntity(TorPublicKey publicKey, TorSignature signature) { + isProcessingSignedEntity = false; + return publicKey.verifySignature(signature, signatureDigest); + } + + private String readLine() { + final String line = nextLineFromInputBuffer(); + if(line != null) { + updateCurrentSignature(line); + updateRawDocument(line); + } + return line; + } + + private String nextLineFromInputBuffer() { + if(!inputBuffer.hasRemaining()) { + return null; + } + final StringBuilder sb = new StringBuilder(); + while(inputBuffer.hasRemaining()) { + char c = (char) (inputBuffer.get() & 0xFF); + if(c == '\n') { + return sb.toString(); + } else if(c != '\r') { + sb.append(c); + } + } + return sb.toString(); + } + + private void updateCurrentSignature(String line) { + if(!isProcessingSignedEntity) + return; + if(signatureIgnoreToken != null && line.startsWith(signatureIgnoreToken)) + return; + signatureDigest.update(line + "\n"); + 
signatureDigest256.update(line + "\n"); + } + + private boolean processLine(String line) { + final List lineItems = Arrays.asList(line.split(delimiter)); + if(lineItems.size() == 0 || lineItems.get(0).length() == 0) { + // XXX warn + return false; + } + + currentKeyword = lineItems.get(0); + currentItems = lineItems; + currentItemsPosition = 1; + + if(recognizeOpt && currentKeyword.equals("opt") && lineItems.size() > 1) { + currentKeyword = lineItems.get(1); + currentItemsPosition = 2; + } + + return true; + } + + public void logDebug(String message) { + logger.fine(message); + } + + public void logError(String message) { + logger.warning(message); + } + + public void logWarn(String message) { + logger.info(message); + } + +} diff --git a/orchid/src/com/subgraph/orchid/directory/DocumentParserFactoryImpl.java b/orchid/src/com/subgraph/orchid/directory/DocumentParserFactoryImpl.java new file mode 100644 index 00000000..e837a85c --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/DocumentParserFactoryImpl.java @@ -0,0 +1,36 @@ +package com.subgraph.orchid.directory; + +import java.nio.ByteBuffer; + +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.KeyCertificate; +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.RouterMicrodescriptor; +import com.subgraph.orchid.directory.certificate.KeyCertificateParser; +import com.subgraph.orchid.directory.consensus.ConsensusDocumentParser; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.DocumentParser; +import com.subgraph.orchid.directory.parsing.DocumentParserFactory; +import com.subgraph.orchid.directory.router.RouterDescriptorParser; +import com.subgraph.orchid.directory.router.RouterMicrodescriptorParser; + +public class DocumentParserFactoryImpl implements DocumentParserFactory { + + public DocumentParser createKeyCertificateParser(ByteBuffer buffer) { + return new KeyCertificateParser(new DocumentFieldParserImpl(buffer)); + } + + public DocumentParser createRouterDescriptorParser(ByteBuffer buffer, boolean verifySignatures) { + return new RouterDescriptorParser(new DocumentFieldParserImpl(buffer), verifySignatures); + } + + public DocumentParser createRouterMicrodescriptorParser(ByteBuffer buffer) { + buffer.rewind(); + DocumentFieldParser dfp = new DocumentFieldParserImpl(buffer); + return new RouterMicrodescriptorParser(dfp); + } + + public DocumentParser createConsensusDocumentParser(ByteBuffer buffer) { + return new ConsensusDocumentParser(new DocumentFieldParserImpl(buffer)); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/GuardEntryImpl.java b/orchid/src/com/subgraph/orchid/directory/GuardEntryImpl.java new file mode 100644 index 00000000..6694cbde --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/GuardEntryImpl.java @@ -0,0 +1,274 @@ +package com.subgraph.orchid.directory; + +import java.util.Date; + +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.GuardEntry; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.data.HexDigest; + +public class GuardEntryImpl implements GuardEntry { + private final static String NL = System.getProperty("line.separator"); + + private final Directory directory; + private final StateFile stateFile; + private final String nickname; + private final String identity; + private final Object lock = new Object(); + private String version; + private Date createdTime; + + private boolean isAdded; + private Date unlistedSince; + private Date 
downSince; + private Date lastConnect; + + GuardEntryImpl(Directory directory, StateFile stateFile, String nickname, String identity) { + this.directory = directory; + this.stateFile = stateFile; + this.nickname = nickname; + this.identity = identity; + } + + void setAddedFlag() { + isAdded = true; + } + + void setVersion(String version) { + this.version = version; + } + + void setCreatedTime(Date date) { + this.createdTime = date; + } + + void setUnlistedSince(Date date) { + synchronized(lock) { + unlistedSince = date; + } + } + + void setDownSince(Date downSince, Date lastTried) { + synchronized (lock) { + this.downSince = downSince; + this.lastConnect = lastTried; + } + } + + public boolean isAdded() { + return isAdded; + } + + public void markAsDown() { + synchronized(lock) { + final Date now = new Date(); + if(downSince == null) { + downSince = now; + } else { + lastConnect = now; + } + } + if(isAdded) { + stateFile.writeFile(); + } + } + + public void clearDownSince() { + synchronized (lock) { + downSince = null; + lastConnect = null; + } + if(isAdded) { + stateFile.writeFile(); + } + } + + public void clearUnlistedSince() { + synchronized (lock) { + unlistedSince = null; + } + if(isAdded) { + stateFile.writeFile(); + } + } + + public String getNickname() { + return nickname; + } + + public String getIdentity() { + return identity; + } + + public String getVersion() { + return version; + } + + public Date getCreatedTime() { + synchronized (lock) { + return dup(createdTime); + } + } + + public Date getDownSince() { + synchronized (lock) { + return dup(downSince); + } + } + + public Date getLastConnectAttempt() { + synchronized (lock) { + return dup(lastConnect); + } + } + + public Date getUnlistedSince() { + synchronized (lock) { + return dup(unlistedSince); + } + } + + private Date dup(Date date) { + if(date == null) { + return null; + } else { + return new Date(date.getTime()); + } + } + + public String writeToString() { + final StringBuilder sb = new StringBuilder(); + synchronized (lock) { + appendEntryGuardLine(sb); + appendEntryGuardAddedBy(sb); + if(downSince != null) { + appendEntryGuardDownSince(sb); + } + if(unlistedSince != null) { + appendEntryGuardUnlistedSince(sb); + } + } + return sb.toString(); + } + + private void appendEntryGuardLine(StringBuilder sb) { + sb.append(StateFile.KEYWORD_ENTRY_GUARD); + sb.append(" "); + sb.append(nickname); + sb.append(" "); + sb.append(identity); + sb.append(NL); + } + + + private void appendEntryGuardAddedBy(StringBuilder sb) { + sb.append(StateFile.KEYWORD_ENTRY_GUARD_ADDED_BY); + sb.append(" "); + sb.append(identity); + sb.append(" "); + sb.append(version); + sb.append(" "); + sb.append(formatDate(createdTime)); + sb.append(NL); + } + + private void appendEntryGuardDownSince(StringBuilder sb) { + if(downSince == null) { + return; + } + sb.append(StateFile.KEYWORD_ENTRY_GUARD_DOWN_SINCE); + sb.append(" "); + sb.append(formatDate(downSince)); + if(lastConnect != null) { + sb.append(" "); + sb.append(formatDate(lastConnect)); + } + sb.append(NL); + } + + private void appendEntryGuardUnlistedSince(StringBuilder sb) { + if(unlistedSince == null) { + return; + } + sb.append(StateFile.KEYWORD_ENTRY_GUARD_UNLISTED_SINCE); + sb.append(" "); + sb.append(formatDate(unlistedSince)); + sb.append(NL); + } + + private String formatDate(Date date) { + return stateFile.formatDate(date); + } + + public Router getRouterForEntry() { + final HexDigest id = HexDigest.createFromString(identity); + return directory.getRouterByIdentity(id); + } + + public 
boolean testCurrentlyUsable() { + final Router router = getRouterForEntry(); + boolean isUsable = router != null && router.isValid() && router.isPossibleGuard() && router.isRunning(); + if(isUsable) { + markUsable(); + return true; + } else { + markUnusable(); + return false; + } + } + + private void markUsable() { + synchronized (lock) { + if(unlistedSince != null) { + unlistedSince = null; + if(isAdded) { + stateFile.writeFile(); + } + } + } + } + + private synchronized void markUnusable() { + synchronized (lock) { + if(unlistedSince == null) { + unlistedSince = new Date(); + if(isAdded) { + stateFile.writeFile(); + } + } + } + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + ((identity == null) ? 0 : identity.hashCode()); + result = prime * result + + ((nickname == null) ? 0 : nickname.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + GuardEntryImpl other = (GuardEntryImpl) obj; + if (identity == null) { + if (other.identity != null) + return false; + } else if (!identity.equals(other.identity)) + return false; + if (nickname == null) { + if (other.nickname != null) + return false; + } else if (!nickname.equals(other.nickname)) + return false; + return true; + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/RouterImpl.java b/orchid/src/com/subgraph/orchid/directory/RouterImpl.java new file mode 100644 index 00000000..38991c09 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/RouterImpl.java @@ -0,0 +1,260 @@ +package com.subgraph.orchid.directory; + +import java.util.Collections; +import java.util.Set; + +import com.subgraph.orchid.Descriptor; +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.RouterStatus; +import com.subgraph.orchid.TorException; +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.geoip.CountryCodeService; + +public class RouterImpl implements Router { + static RouterImpl createFromRouterStatus(Directory directory, RouterStatus status) { + return new RouterImpl(directory, status); + } + + private final Directory directory; + private final HexDigest identityHash; + protected RouterStatus status; + private Descriptor descriptor; + + private volatile String cachedCountryCode; + + protected RouterImpl(Directory directory, RouterStatus status) { + this.directory = directory; + this.identityHash = status.getIdentity(); + this.status = status; + refreshDescriptor(); + } + + void updateStatus(RouterStatus status) { + if(!identityHash.equals(status.getIdentity())) + throw new TorException("Identity hash does not match status update"); + this.status = status; + this.cachedCountryCode = null; + this.descriptor = null; + refreshDescriptor(); + } + + public boolean isDescriptorDownloadable() { + refreshDescriptor(); + if(descriptor != null) { + return false; + } + + final long now = System.currentTimeMillis(); + final long diff = now - status.getPublicationTime().getDate().getTime(); + return diff > (1000 * 60 * 10); + } + + public String getVersion() { + return status.getVersion(); + } + + public HexDigest getDescriptorDigest() { + return status.getDescriptorDigest(); + } + + public IPv4Address getAddress() { + return 
status.getAddress(); + } + + public Descriptor getCurrentDescriptor() { + refreshDescriptor(); + return descriptor; + } + + private synchronized void refreshDescriptor() { + if(descriptor != null || directory == null) { + return; + } + if(status.getMicrodescriptorDigest() != null) { + descriptor = directory.getMicrodescriptorFromCache(status.getMicrodescriptorDigest()); + } else if(status.getDescriptorDigest() != null){ + descriptor = directory.getBasicDescriptorFromCache(status.getDescriptorDigest()); + } + } + + public HexDigest getMicrodescriptorDigest() { + return status.getMicrodescriptorDigest(); + } + + public boolean hasFlag(String flag) { + return status.hasFlag(flag); + } + + public boolean isHibernating() { + final RouterDescriptor rd = downcastDescriptor(); + if(rd == null) { + return false; + } else { + return rd.isHibernating(); + } + } + + public boolean isRunning() { + return hasFlag("Running"); + } + + public boolean isValid() { + return hasFlag("Valid"); + } + + public boolean isBadExit() { + return hasFlag("BadExit"); + } + + public boolean isPossibleGuard() { + return hasFlag("Guard"); + } + + public boolean isExit() { + return hasFlag("Exit"); + } + + public boolean isFast() { + return hasFlag("Fast"); + } + + public boolean isStable() { + return hasFlag("Stable"); + } + + public boolean isHSDirectory() { + return hasFlag("HSDir"); + } + + public int getDirectoryPort() { + return status.getDirectoryPort(); + } + + public HexDigest getIdentityHash() { + return identityHash; + } + + public TorPublicKey getIdentityKey() { + final RouterDescriptor rd = downcastDescriptor(); + if(rd != null) { + return rd.getIdentityKey(); + } else { + return null; + } + } + + public String getNickname() { + return status.getNickname(); + } + + public int getOnionPort() { + return status.getRouterPort(); + } + + public TorPublicKey getOnionKey() { + refreshDescriptor(); + if(descriptor != null) { + return descriptor.getOnionKey(); + } else { + return null; + } + } + + public byte[] getNTorOnionKey() { + refreshDescriptor(); + if(descriptor != null) { + return descriptor.getNTorOnionKey(); + } else { + return null; + } + } + + public boolean hasBandwidth() { + return status.hasBandwidth(); + } + + public int getEstimatedBandwidth() { + return status.getEstimatedBandwidth(); + } + + public int getMeasuredBandwidth() { + return status.getMeasuredBandwidth(); + } + + public Set getFamilyMembers() { + refreshDescriptor(); + if(descriptor != null) { + return descriptor.getFamilyMembers(); + } else { + return Collections.emptySet(); + } + } + + public int getAverageBandwidth() { + final RouterDescriptor rd = downcastDescriptor(); + if(rd == null) { + return 0; + } else { + return rd.getAverageBandwidth(); + } + } + + public int getBurstBandwidth() { + final RouterDescriptor rd = downcastDescriptor(); + if(rd == null) { + return 0; + } else { + return rd.getBurstBandwidth(); + } + } + + public int getObservedBandwidth() { + final RouterDescriptor rd = downcastDescriptor(); + if(rd == null) { + return 0; + } else { + return rd.getObservedBandwidth(); + } + } + + public boolean exitPolicyAccepts(IPv4Address address, int port) { + refreshDescriptor(); + if(descriptor == null) { + return false; + } else if(address == null) { + return descriptor.exitPolicyAccepts(port); + } else { + return descriptor.exitPolicyAccepts(address, port); + } + } + + public boolean exitPolicyAccepts(int port) { + return exitPolicyAccepts(null, port); + } + + public String toString() { + return "Router["+ getNickname() +" 
("+getAddress() +":"+ getOnionPort() +")]"; + } + + public String getCountryCode() { + String cc = cachedCountryCode; + if(cc == null) { + cc = CountryCodeService.getInstance().getCountryCodeForAddress(getAddress()); + cachedCountryCode = cc; + } + return cc; + } + + private RouterDescriptor downcastDescriptor() { + refreshDescriptor(); + if(descriptor instanceof RouterDescriptor) { + return (RouterDescriptor) descriptor; + } else { + return null; + } + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/StateFile.java b/orchid/src/com/subgraph/orchid/directory/StateFile.java new file mode 100644 index 00000000..8020547e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/StateFile.java @@ -0,0 +1,296 @@ +package com.subgraph.orchid.directory; + +import java.nio.ByteBuffer; +import java.text.DateFormat; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.logging.Logger; + +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.DirectoryStore; +import com.subgraph.orchid.GuardEntry; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.DirectoryStore.CacheFile; +import com.subgraph.orchid.crypto.TorRandom; + +public class StateFile { + private final static Logger logger = Logger.getLogger(StateFile.class.getName()); + + private final static int DATE_LENGTH = 19; + + final static String KEYWORD_ENTRY_GUARD = "EntryGuard"; + final static String KEYWORD_ENTRY_GUARD_ADDED_BY = "EntryGuardAddedBy"; + final static String KEYWORD_ENTRY_GUARD_DOWN_SINCE = "EntryGuardDownSince"; + final static String KEYWORD_ENTRY_GUARD_UNLISTED_SINCE = "EntryGuardUnlistedSince"; + + private final List guardEntries = new ArrayList(); + private final TorRandom random = new TorRandom(); + private final DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + + private class Line { + final String line; + int offset; + + Line(String line) { + this.line = line; + offset = 0; + } + + private boolean hasChars() { + return offset < line.length(); + } + + private char getChar() { + return line.charAt(offset); + } + + private void incrementOffset(int n) { + offset += n; + if(offset > line.length()) { + offset = line.length(); + } + } + + private void skipWhitespace() { + while(hasChars() && Character.isWhitespace(getChar())) { + offset += 1; + } + } + + String nextToken() { + skipWhitespace(); + if(!hasChars()) { + return null; + } + + final StringBuilder token = new StringBuilder(); + while(hasChars() && !Character.isWhitespace(getChar())) { + token.append(getChar()); + offset += 1; + } + return token.toString(); + } + + Date parseDate() { + skipWhitespace(); + if(!hasChars()) { + return null; + } + try { + final Date date = dateFormat.parse(line.substring(offset)); + incrementOffset(DATE_LENGTH); + return date; + } catch (ParseException e) { + return null; + } + } + } + + String formatDate(Date date) { + return dateFormat.format(date); + } + + private final DirectoryStore directoryStore; + private final Directory directory; + + StateFile(DirectoryStore store, Directory directory) { + this.directoryStore = store; + this.directory = directory; + } + + public GuardEntry createGuardEntryFor(Router router) { + final GuardEntryImpl entry = new GuardEntryImpl(directory, this, router.getNickname(), router.getIdentityHash().toString()); + final String version = Tor.getImplementation() + "-" + Tor.getVersion(); + entry.setVersion(version); 
+ + /* + * "Choose expiry time smudged over the last month." + * + * See add_an_entry_guard() in entrynodes.c + */ + final long createTime = (new Date()).getTime() - (random.nextInt(3600 * 24 * 30) * 1000L); + entry.setCreatedTime(new Date(createTime)); + return entry; + } + + public List getGuardEntries() { + synchronized (guardEntries) { + return new ArrayList(guardEntries); + } + } + + public void removeGuardEntry(GuardEntry entry) { + synchronized (guardEntries) { + guardEntries.remove(entry); + writeFile(); + } + } + + public void addGuardEntry(GuardEntry entry) { + addGuardEntry(entry, true); + } + + private void addGuardEntry(GuardEntry entry, boolean writeFile) { + synchronized(guardEntries) { + if(guardEntries.contains(entry)) { + return; + } + final GuardEntryImpl impl = (GuardEntryImpl) entry; + guardEntries.add(impl); + synchronized (impl) { + impl.setAddedFlag(); + if(writeFile) { + writeFile(); + } + } + } + } + + void writeFile() { + directoryStore.writeData(CacheFile.STATE, getFileContents()); + } + + ByteBuffer getFileContents() { + final StringBuilder sb = new StringBuilder(); + synchronized (guardEntries) { + for(GuardEntryImpl entry: guardEntries) { + sb.append(entry.writeToString()); + } + } + return ByteBuffer.wrap(sb.toString().getBytes(Tor.getDefaultCharset())); + } + + void parseBuffer(ByteBuffer buffer) { + synchronized (guardEntries) { + guardEntries.clear(); + loadGuardEntries(buffer); + } + } + + private void loadGuardEntries(ByteBuffer buffer) { + GuardEntryImpl currentEntry = null; + while(true) { + Line line = readLine(buffer); + if(line == null) { + addEntryIfValid(currentEntry); + return; + } + currentEntry = processLine(line, currentEntry); + } + } + + private GuardEntryImpl processLine(Line line, GuardEntryImpl current) { + final String keyword = line.nextToken(); + if(keyword == null) { + return current; + } else if(keyword.equals(KEYWORD_ENTRY_GUARD)) { + addEntryIfValid(current); + GuardEntryImpl newEntry = processEntryGuardLine(line); + if(newEntry == null) { + return current; + } else { + return newEntry; + } + } else if(keyword.equals(KEYWORD_ENTRY_GUARD_ADDED_BY)) { + processEntryGuardAddedBy(line, current); + return current; + } else if(keyword.equals(KEYWORD_ENTRY_GUARD_DOWN_SINCE)) { + processEntryGuardDownSince(line, current); + return current; + } else if(keyword.equals(KEYWORD_ENTRY_GUARD_UNLISTED_SINCE)) { + processEntryGuardUnlistedSince(line, current); + return current; + } else { + return current; + } + } + + private GuardEntryImpl processEntryGuardLine(Line line) { + final String name = line.nextToken(); + final String identity = line.nextToken(); + if(name == null || name.isEmpty() || identity == null || identity.isEmpty()) { + logger.warning("Failed to parse EntryGuard line: "+ line.line); + return null; + } + return new GuardEntryImpl(directory, this, name, identity); + } + + private void processEntryGuardAddedBy(Line line, GuardEntryImpl current) { + if(current == null) { + logger.warning("EntryGuardAddedBy line seen before EntryGuard in state file"); + return; + } + final String identity = line.nextToken(); + final String version = line.nextToken(); + final Date created = line.parseDate(); + if(identity == null || identity.isEmpty() || version == null || version.isEmpty() || created == null) { + logger.warning("Missing EntryGuardAddedBy field in state file"); + return; + } + current.setVersion(version); + current.setCreatedTime(created); + } + + private void processEntryGuardDownSince(Line line, GuardEntryImpl current) { + 
if(current == null) { + logger.warning("EntryGuardDownSince line seen before EntryGuard in state file"); + return; + } + + final Date downSince = line.parseDate(); + final Date lastTried = line.parseDate(); + if(downSince == null) { + logger.warning("Failed to parse date field in EntryGuardDownSince line in state file"); + return; + } + current.setDownSince(downSince, lastTried); + } + + private void processEntryGuardUnlistedSince(Line line, GuardEntryImpl current) { + if(current == null) { + logger.warning("EntryGuardUnlistedSince line seen before EntryGuard in state file"); + return; + } + final Date unlistedSince = line.parseDate(); + if(unlistedSince == null) { + logger.warning("Failed to parse date field in EntryGuardUnlistedSince line in state file"); + return; + } + current.setUnlistedSince(unlistedSince); + } + + private void addEntryIfValid(GuardEntryImpl entry) { + if(isValidEntry(entry)) { + addGuardEntry(entry, false); + } + } + + private boolean isValidEntry(GuardEntryImpl entry) { + return entry != null && + entry.getNickname() != null && + entry.getIdentity() != null && + entry.getVersion() != null && + entry.getCreatedTime() != null; + } + + private Line readLine(ByteBuffer buffer) { + if(!buffer.hasRemaining()) { + return null; + } + + final StringBuilder sb = new StringBuilder(); + while(buffer.hasRemaining()) { + char c = (char) (buffer.get() & 0xFF); + if(c == '\n') { + return new Line(sb.toString()); + } else if(c != '\r') { + sb.append(c); + } + } + return new Line(sb.toString()); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/TrustedAuthorities.java b/orchid/src/com/subgraph/orchid/directory/TrustedAuthorities.java new file mode 100644 index 00000000..3a3dd3cb --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/TrustedAuthorities.java @@ -0,0 +1,138 @@ +package com.subgraph.orchid.directory; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +import com.subgraph.orchid.DirectoryServer; +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.DocumentParsingHandler; + +/* + * This class contains the hardcoded 'bootstrap' directory authority + * server information. 
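+ * Each entry is one line: the keyword "authority", a nickname, option flags
+ * (orport=, v3ident=, no-v2, ...), an address:dirport pair, and the authority
+ * identity fingerprint.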
+ */ +public class TrustedAuthorities { + + private final static String[] dirServers = { + "authority moria1 orport=9101 no-v2 v3ident=D586D18309DED4CD6D57C18FDB97EFA96D330566 128.31.0.39:9131 9695 DFC3 5FFE B861 329B 9F1A B04C 4639 7020 CE31", + "authority tor26 v1 orport=443 v3ident=14C131DFC5C6F93646BE72FA1401C02A8DF2E8B4 86.59.21.38:80 847B 1F85 0344 D787 6491 A548 92F9 0493 4E4E B85D", + "authority dizum orport=443 v3ident=E8A9C45EDE6D711294FADF8E7951F4DE6CA56B58 194.109.206.212:80 7EA6 EAD6 FD83 083C 538F 4403 8BBF A077 587D D755", + "authority Tonga orport=443 bridge no-v2 82.94.251.203:80 4A0C CD2D DC79 9508 3D73 F5D6 6710 0C8A 5831 F16D", + "authority turtles orport=9090 no-v2 v3ident=27B6B5996C426270A5C95488AA5BCEB6BCC86956 76.73.17.194:9030 F397 038A DC51 3361 35E7 B80B D99C A384 4360 292B", + "authority dannenberg orport=443 no-v2 v3ident=585769C78764D58426B8B52B6651A5A71137189A 193.23.244.244:80 7BE6 83E6 5D48 1413 21C5 ED92 F075 C553 64AC 7123", + "authority urras orport=80 no-v2 v3ident=80550987E1D626E3EBA5E5E75A458DE0626D088C 208.83.223.34:443 0AD3 FA88 4D18 F89E EA2D 89C0 1937 9E0E 7FD9 4417", + "authority maatuska orport=80 no-v2 v3ident=49015F787433103580E3B66A1707A00E60F2D15B 171.25.193.9:443 BD6A 8292 55CB 08E6 6FBE 7D37 4836 3586 E46B 3810", + "authority Faravahar orport=443 no-v2 v3ident=EFCBE720AB3A82B99F9E953CD5BF50F7EEFC7B97 154.35.32.5:80 CF6D 0AAF B385 BE71 B8E1 11FC 5CFF 4B47 9237 33BC", + "authority gabelmoo orport=443 no-v2 v3ident=ED03BB616EB2F60BEC80151114BB25CEF515B226 212.112.245.170:80 F204 4413 DAC2 E02E 3D6B CF47 35A1 9BCA 1DE9 7281", + }; + + private final List directoryServers = new ArrayList(); + private final int v3ServerCount; + + private final static TrustedAuthorities _instance = new TrustedAuthorities(); + + public static TrustedAuthorities getInstance() { + return _instance; + } + + private TrustedAuthorities() { + initialize(); + v3ServerCount = countV3Servers(); + } + + private int countV3Servers() { + int n = 0; + for(DirectoryServer ds: directoryServers) { + if(ds.getV3Identity() != null) { + n += 1; + } + } + return n; + } + + void initialize() { + final StringBuilder builder = new StringBuilder(); + for(String entry: dirServers) { + builder.append(entry); + builder.append('\n'); + } + final ByteBuffer buffer = ByteBuffer.wrap(builder.toString().getBytes(Tor.getDefaultCharset())); + final DocumentFieldParser parser = new DocumentFieldParserImpl(buffer); + + parser.setHandler(new DocumentParsingHandler() { + public void endOfDocument() {} + public void parseKeywordLine() { processKeywordLine(parser);} + }); + parser.processDocument(); + } + + private void processKeywordLine(DocumentFieldParser fieldParser) { + final DirectoryAuthorityStatus status = new DirectoryAuthorityStatus(); + status.setNickname(fieldParser.parseNickname()); + while(fieldParser.argumentsRemaining() > 0) + processArgument(fieldParser, status); + } + + private void processArgument(DocumentFieldParser fieldParser, DirectoryAuthorityStatus status) { + final String item = fieldParser.parseString(); + if(Character.isDigit(item.charAt(0))) { + parseAddressPort(fieldParser, item, status); + status.setIdentity(fieldParser.parseFingerprint()); + DirectoryServerImpl server = new DirectoryServerImpl(status); + if(status.getV3Ident() != null) { + server.setV3Ident(status.getV3Ident()); + } + fieldParser.logDebug("Adding trusted authority: " + server); + directoryServers.add(server); + return; + } else { + parseFlag(fieldParser, item, status); + } + } + + private void 
parseAddressPort(DocumentFieldParser parser, String item, DirectoryAuthorityStatus status) { + final String[] args = item.split(":"); + status.setAddress(IPv4Address.createFromString(args[0])); + status.setDirectoryPort(parser.parsePort(args[1])); + } + + private void parseFlag(DocumentFieldParser parser, String flag, DirectoryAuthorityStatus status) { + if(flag.equals("v1")) { + status.setV1Authority(); + status.setHiddenServiceAuthority(); + } else if(flag.equals("hs")) { + status.setHiddenServiceAuthority(); + } else if(flag.equals("no-hs")) { + status.unsetHiddenServiceAuthority(); + } else if(flag.equals("bridge")) { + status.setBridgeAuthority(); + } else if(flag.equals("no-v2")) { + status.unsetV2Authority(); + } else if(flag.startsWith("orport=")) { + status.setRouterPort( parser.parsePort(flag.substring(7))); + } else if(flag.startsWith("v3ident=")) { + status.setV3Ident(HexDigest.createFromString(flag.substring(8))); + } + } + + public int getV3AuthorityServerCount() { + return v3ServerCount; + } + + public List getAuthorityServers() { + return directoryServers; + } + + public DirectoryServer getAuthorityServerByIdentity(HexDigest identity) { + for(DirectoryServer ds: directoryServers) { + if(identity.equals(ds.getV3Identity())) { + return ds; + } + } + return null; + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/certificate/KeyCertificateImpl.java b/orchid/src/com/subgraph/orchid/directory/certificate/KeyCertificateImpl.java new file mode 100644 index 00000000..cf3071bd --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/certificate/KeyCertificateImpl.java @@ -0,0 +1,93 @@ +package com.subgraph.orchid.directory.certificate; + +import java.nio.ByteBuffer; + +import com.subgraph.orchid.KeyCertificate; +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.Timestamp; + +public class KeyCertificateImpl implements KeyCertificate { + + private IPv4Address directoryAddress; + private int directoryPort; + private HexDigest fingerprint; + private TorPublicKey identityKey; + private Timestamp keyPublished; + private Timestamp keyExpires; + private TorPublicKey signingKey; + private String rawDocumentData; + + private boolean hasValidSignature = false; + + void setDirectoryPort(int port) { this.directoryPort = port; } + void setDirectoryAddress(IPv4Address address) { this.directoryAddress = address; } + void setAuthorityFingerprint(HexDigest fingerprint) { this.fingerprint = fingerprint;} + void setAuthorityIdentityKey(TorPublicKey key) { this.identityKey = key; } + void setAuthoritySigningKey(TorPublicKey key) { this.signingKey = key; } + void setKeyPublishedTime(Timestamp time) { this.keyPublished = time; } + void setKeyExpiryTime(Timestamp time) { this.keyExpires = time; } + void setValidSignature() { hasValidSignature = true;} + void setRawDocumentData(String rawData) { rawDocumentData = rawData; } + + public boolean isValidDocument() { + return hasValidSignature && (fingerprint != null) && (identityKey != null) && + (keyPublished != null) && (keyExpires != null) && (signingKey != null); + } + + public IPv4Address getDirectoryAddress() { + return directoryAddress; + } + + public int getDirectoryPort() { + return directoryPort; + } + + public HexDigest getAuthorityFingerprint() { + return fingerprint; + } + + public TorPublicKey getAuthorityIdentityKey() { + return identityKey; + } + + public TorPublicKey 
getAuthoritySigningKey() { + return signingKey; + } + + public Timestamp getKeyPublishedTime() { + return keyPublished; + } + + public Timestamp getKeyExpiryTime() { + return keyExpires; + } + + public boolean isExpired() { + if(keyExpires != null) { + return keyExpires.hasPassed(); + } else { + return false; + } + } + + public String getRawDocumentData() { + return rawDocumentData; + } + + public ByteBuffer getRawDocumentBytes() { + if(getRawDocumentData() == null) { + return ByteBuffer.allocate(0); + } else { + return ByteBuffer.wrap(getRawDocumentData().getBytes(Tor.getDefaultCharset())); + } + } + + public String toString() { + return "(Certificate: address="+ directoryAddress +":"+ directoryPort + +" fingerprint="+ fingerprint +" published="+ keyPublished +" expires="+ keyExpires +")"+ + "\nident="+ identityKey +" sign="+ signingKey; + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/certificate/KeyCertificateKeyword.java b/orchid/src/com/subgraph/orchid/directory/certificate/KeyCertificateKeyword.java new file mode 100644 index 00000000..89737668 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/certificate/KeyCertificateKeyword.java @@ -0,0 +1,42 @@ +package com.subgraph.orchid.directory.certificate; + +public enum KeyCertificateKeyword { + /* + * See dir-spec.txt + * Section 3.1 Key certificates + */ + DIR_KEY_CERTIFICATE_VERSION("dir-key-certificate-version", 1), + DIR_ADDRESS("dir-address", 1), + FINGERPRINT("fingerprint", 1), + DIR_IDENTITY_KEY("dir-identity-key", 0), + DIR_KEY_PUBLISHED("dir-key-published", 2), + DIR_KEY_EXPIRES("dir-key-expires", 2), + DIR_SIGNING_KEY("dir-signing-key", 0), + DIR_KEY_CROSSCERT("dir-key-crosscert", 0), + DIR_KEY_CERTIFICATION("dir-key-certification", 0), + UNKNOWN_KEYWORD("KEYWORD NOT FOUND", 0); + + private final String keyword; + private final int argumentCount; + + KeyCertificateKeyword(String keyword, int argumentCount) { + this.keyword = keyword; + this.argumentCount = argumentCount; + } + + String getKeyword() { + return keyword; + } + + int getArgumentCount() { + return argumentCount; + } + + static KeyCertificateKeyword findKeyword(String keyword) { + for(KeyCertificateKeyword k: values()) + if(k.getKeyword().equals(keyword)) + return k; + return UNKNOWN_KEYWORD; + } + +} diff --git a/orchid/src/com/subgraph/orchid/directory/certificate/KeyCertificateParser.java b/orchid/src/com/subgraph/orchid/directory/certificate/KeyCertificateParser.java new file mode 100644 index 00000000..7b69f381 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/certificate/KeyCertificateParser.java @@ -0,0 +1,151 @@ +package com.subgraph.orchid.directory.certificate; + +import com.subgraph.orchid.KeyCertificate; +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.crypto.TorSignature; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.directory.parsing.BasicDocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.DocumentParser; +import com.subgraph.orchid.directory.parsing.DocumentParsingHandler; +import com.subgraph.orchid.directory.parsing.DocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentParsingResultHandler; + +public class KeyCertificateParser implements DocumentParser { + private final static int CURRENT_CERTIFICATE_VERSION = 3; + private final DocumentFieldParser fieldParser; + private KeyCertificateImpl currentCertificate; 
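+ // Receives a callback for each certificate as it is successfully parsed, rejected as invalid, or fails to parse.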
+ private DocumentParsingResultHandler resultHandler; + + public KeyCertificateParser(DocumentFieldParser fieldParser) { + this.fieldParser = fieldParser; + this.fieldParser.setHandler(createParsingHandler()); + } + + private DocumentParsingHandler createParsingHandler() { + return new DocumentParsingHandler() { + public void parseKeywordLine() { + processKeywordLine(); + } + + public void endOfDocument() { + } + }; + } + + private void processKeywordLine() { + final KeyCertificateKeyword keyword = KeyCertificateKeyword.findKeyword(fieldParser.getCurrentKeyword()); + /* + * dirspec.txt (1.2) + * When interpreting a Document, software MUST ignore any KeywordLine that + * starts with a keyword it doesn't recognize; + */ + if(!keyword.equals(KeyCertificateKeyword.UNKNOWN_KEYWORD)) + processKeyword(keyword); + } + + private void startNewCertificate() { + fieldParser.resetRawDocument(); + fieldParser.startSignedEntity(); + currentCertificate = new KeyCertificateImpl(); + } + + public boolean parse(DocumentParsingResultHandler resultHandler) { + this.resultHandler = resultHandler; + startNewCertificate(); + try { + fieldParser.processDocument(); + return true; + } catch(TorParsingException e) { + resultHandler.parsingError(e.getMessage()); + return false; + } + } + + public DocumentParsingResult parse() { + final BasicDocumentParsingResult result = new BasicDocumentParsingResult(); + parse(result); + return result; + } + + private void processKeyword(KeyCertificateKeyword keyword) { + switch(keyword) { + case DIR_KEY_CERTIFICATE_VERSION: + processCertificateVersion(); + break; + case DIR_ADDRESS: + processDirectoryAddress(); + break; + case FINGERPRINT: + currentCertificate.setAuthorityFingerprint(fieldParser.parseHexDigest()); + break; + case DIR_IDENTITY_KEY: + currentCertificate.setAuthorityIdentityKey(fieldParser.parsePublicKey()); + break; + case DIR_SIGNING_KEY: + currentCertificate.setAuthoritySigningKey(fieldParser.parsePublicKey()); + break; + case DIR_KEY_PUBLISHED: + currentCertificate.setKeyPublishedTime(fieldParser.parseTimestamp()); + break; + case DIR_KEY_EXPIRES: + currentCertificate.setKeyExpiryTime(fieldParser.parseTimestamp()); + break; + case DIR_KEY_CROSSCERT: + verifyCrossSignature(fieldParser.parseSignature()); + break; + case DIR_KEY_CERTIFICATION: + processCertificateSignature(); + break; + case UNKNOWN_KEYWORD: + break; + } + } + + private void processCertificateVersion() { + final int version = fieldParser.parseInteger(); + if(version != CURRENT_CERTIFICATE_VERSION) + throw new TorParsingException("Unexpected certificate version: " + version); + } + + private void processDirectoryAddress() { + final String addrport = fieldParser.parseString(); + final String[] args = addrport.split(":"); + if(args.length != 2) + throw new TorParsingException("Address/Port string incorrectly formed: " + addrport); + currentCertificate.setDirectoryAddress(IPv4Address.createFromString(args[0])); + currentCertificate.setDirectoryPort(fieldParser.parsePort(args[1])); + } + + private void verifyCrossSignature(TorSignature crossSignature) { + TorPublicKey identityKey = currentCertificate.getAuthorityIdentityKey(); + TorPublicKey signingKey = currentCertificate.getAuthoritySigningKey(); + if(!signingKey.verifySignature(crossSignature, identityKey.getFingerprint())) + throw new TorParsingException("Cross signature on certificate failed."); + } + + private boolean verifyCurrentCertificate(TorSignature signature) { + if(!fieldParser.verifySignedEntity(currentCertificate.getAuthorityIdentityKey(), 
signature)) { + resultHandler.documentInvalid(currentCertificate, "Signature failed"); + fieldParser.logWarn("Signature failed for certificate with fingerprint: "+ currentCertificate.getAuthorityFingerprint()); + return false; + } + currentCertificate.setValidSignature(); + final boolean isValid = currentCertificate.isValidDocument(); + if(!isValid) { + resultHandler.documentInvalid(currentCertificate, "Certificate data is invalid"); + fieldParser.logWarn("Certificate data is invalid for certificate with fingerprint: "+ currentCertificate.getAuthorityFingerprint()); + } + return isValid; + } + + private void processCertificateSignature() { + fieldParser.endSignedEntity(); + if(verifyCurrentCertificate(fieldParser.parseSignature())) { + currentCertificate.setRawDocumentData(fieldParser.getRawDocument()); + resultHandler.documentParsed(currentCertificate); + } + startNewCertificate(); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/AuthoritySectionParser.java b/orchid/src/com/subgraph/orchid/directory/consensus/AuthoritySectionParser.java new file mode 100644 index 00000000..e215bf52 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/AuthoritySectionParser.java @@ -0,0 +1,64 @@ +package com.subgraph.orchid.directory.consensus; + +import com.subgraph.orchid.directory.consensus.ConsensusDocumentParser.DocumentSection; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; + +public class AuthoritySectionParser extends ConsensusDocumentSectionParser { + + private VoteAuthorityEntryImpl currentEntry = null; + + AuthoritySectionParser(DocumentFieldParser parser , ConsensusDocumentImpl document) { + super(parser, document); + startEntry(); + } + + @Override + void parseLine(DocumentKeyword keyword) { + switch(keyword) { + case DIR_SOURCE: + parseDirSource(); + break; + case CONTACT: + currentEntry.setContact(fieldParser.parseConcatenatedString()); + break; + case VOTE_DIGEST: + currentEntry.setVoteDigest(fieldParser.parseHexDigest()); + addCurrentEntry(); + break; + default: + break; + } + } + + private void startEntry() { + currentEntry = new VoteAuthorityEntryImpl(); + } + + private void addCurrentEntry() { + document.addVoteAuthorityEntry(currentEntry); + startEntry(); + } + + private void parseDirSource() { + currentEntry.setNickname(fieldParser.parseNickname()); + currentEntry.setIdentity(fieldParser.parseHexDigest()); + currentEntry.setHostname(fieldParser.parseString()); + currentEntry.setAddress(fieldParser.parseAddress()); + currentEntry.setDirectoryPort(fieldParser.parsePort()); + currentEntry.setRouterPort(fieldParser.parsePort()); + } + + @Override + String getNextStateKeyword() { + return "r"; + } + + @Override + DocumentSection getSection() { + return DocumentSection.AUTHORITY; + } + + DocumentSection nextSection() { + return DocumentSection.ROUTER_STATUS; + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/ConsensusDocumentImpl.java b/orchid/src/com/subgraph/orchid/directory/consensus/ConsensusDocumentImpl.java new file mode 100644 index 00000000..7421f8bc --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/ConsensusDocumentImpl.java @@ -0,0 +1,344 @@ +package com.subgraph.orchid.directory.consensus; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.logging.Logger; + +import 
com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.DirectoryServer; +import com.subgraph.orchid.KeyCertificate; +import com.subgraph.orchid.RouterStatus; +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.VoteAuthorityEntry; +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.crypto.TorSignature.DigestAlgorithm; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.Timestamp; +import com.subgraph.orchid.directory.TrustedAuthorities; + +public class ConsensusDocumentImpl implements ConsensusDocument { + + enum SignatureVerifyStatus { STATUS_UNVERIFIED, STATUS_NEED_CERTS, STATUS_VERIFIED }; + + private final static Logger logger = Logger.getLogger(ConsensusDocumentImpl.class.getName()); + + private final static String BW_WEIGHT_SCALE_PARAM = "bwweightscale"; + private final static int BW_WEIGHT_SCALE_DEFAULT = 10000; + private final static int BW_WEIGHT_SCALE_MIN = 1; + private final static int BW_WEIGHT_SCALE_MAX = Integer.MAX_VALUE; + + private final static String CIRCWINDOW_PARAM = "circwindow"; + private final static int CIRCWINDOW_DEFAULT = 1000; + private final static int CIRCWINDOW_MIN = 100; + private final static int CIRCWINDOW_MAX = 1000; + + private final static String USE_NTOR_HANDSHAKE_PARAM = "UseNTorHandshake"; + + private Set requiredCertificates = new HashSet(); + + + private int consensusMethod; + private ConsensusFlavor flavor; + private Timestamp validAfter; + private Timestamp freshUntil; + private Timestamp validUntil; + private int distDelaySeconds; + private int voteDelaySeconds; + private Set clientVersions; + private Set serverVersions; + private Set knownFlags; + private HexDigest signingHash; + private HexDigest signingHash256; + private Map voteAuthorityEntries; + private List routerStatusEntries; + private Map bandwidthWeights; + private Map parameters; + private int signatureCount; + private boolean isFirstCallToVerifySignatures = true; + private String rawDocumentData; + + void setConsensusFlavor(ConsensusFlavor flavor) { this.flavor = flavor; } + void setConsensusMethod(int method) { consensusMethod = method; } + void setValidAfter(Timestamp ts) { validAfter = ts; } + void setFreshUntil(Timestamp ts) { freshUntil = ts; } + void setValidUntil(Timestamp ts) { validUntil = ts; } + void setDistDelaySeconds(int seconds) { distDelaySeconds = seconds; } + void setVoteDelaySeconds(int seconds) { voteDelaySeconds = seconds; } + void addClientVersion(String version) { clientVersions.add(version); } + void addServerVersion(String version) { serverVersions.add(version); } + void addParameter(String name, int value) { parameters.put(name, value); } + void addBandwidthWeight(String name, int value) { bandwidthWeights.put(name, value); } + + void addSignature(DirectorySignature signature) { + final VoteAuthorityEntry voteAuthority = voteAuthorityEntries.get(signature.getIdentityDigest()); + if(voteAuthority == null) { + logger.warning("Consensus contains signature for source not declared in authority section: "+ signature.getIdentityDigest()); + return; + } + final List signatures = voteAuthority.getSignatures(); + final DigestAlgorithm newSignatureAlgorithm = signature.getSignature().getDigestAlgorithm(); + for(DirectorySignature sig: signatures) { + DigestAlgorithm algo = sig.getSignature().getDigestAlgorithm(); + if(algo.equals(newSignatureAlgorithm)) { + logger.warning("Consensus contains two or more signatures for same source with same algorithm"); + return; + } + } + signatureCount += 1; + 
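+ // Attach the signature to the authority entry it claims to come from.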
signatures.add(signature); + } + + void setSigningHash(HexDigest hash) { signingHash = hash; } + void setSigningHash256(HexDigest hash) { signingHash256 = hash; } + void setRawDocumentData(String rawData) { rawDocumentData = rawData; } + + ConsensusDocumentImpl() { + clientVersions = new HashSet(); + serverVersions = new HashSet(); + knownFlags = new HashSet(); + voteAuthorityEntries = new HashMap(); + routerStatusEntries = new ArrayList(); + bandwidthWeights = new HashMap(); + parameters = new HashMap(); + } + + void addKnownFlag(String flag) { + knownFlags.add(flag); + } + + void addVoteAuthorityEntry(VoteAuthorityEntry entry) { + voteAuthorityEntries.put(entry.getIdentity(), entry); + } + + void addRouterStatusEntry(RouterStatusImpl entry) { + routerStatusEntries.add(entry); + } + + public ConsensusFlavor getFlavor() { + return flavor; + } + + public Timestamp getValidAfterTime() { + return validAfter; + } + + public Timestamp getFreshUntilTime() { + return freshUntil; + } + + public Timestamp getValidUntilTime() { + return validUntil; + } + + public int getConsensusMethod() { + return consensusMethod; + } + + public int getVoteSeconds() { + return voteDelaySeconds; + } + + public int getDistSeconds() { + return distDelaySeconds; + } + + public Set getClientVersions() { + return clientVersions; + } + + public Set getServerVersions() { + return serverVersions; + } + + public boolean isLive() { + if(validUntil == null) { + return false; + } else { + return !validUntil.hasPassed(); + } + } + + public List getRouterStatusEntries() { + return Collections.unmodifiableList(routerStatusEntries); + } + + public String getRawDocumentData() { + return rawDocumentData; + } + + public ByteBuffer getRawDocumentBytes() { + if(getRawDocumentData() == null) { + return ByteBuffer.allocate(0); + } else { + return ByteBuffer.wrap(getRawDocumentData().getBytes(Tor.getDefaultCharset())); + } + } + + public boolean isValidDocument() { + return (validAfter != null) && (freshUntil != null) && (validUntil != null) && + (voteDelaySeconds > 0) && (distDelaySeconds > 0) && (signingHash != null) && + (signatureCount > 0); + } + + public HexDigest getSigningHash() { + return signingHash; + } + + public HexDigest getSigningHash256() { + return signingHash256; + } + + public synchronized SignatureStatus verifySignatures() { + boolean firstCall = isFirstCallToVerifySignatures; + isFirstCallToVerifySignatures = false; + requiredCertificates.clear(); + int verifiedCount = 0; + int certsNeededCount = 0; + final int v3Count = TrustedAuthorities.getInstance().getV3AuthorityServerCount(); + final int required = (v3Count / 2) + 1; + + for(VoteAuthorityEntry entry: voteAuthorityEntries.values()) { + switch(verifySingleAuthority(entry)) { + case STATUS_FAILED: + break; + case STATUS_NEED_CERTS: + certsNeededCount += 1; + break; + case STATUS_VERIFIED: + verifiedCount += 1; + break; + } + } + + if(verifiedCount >= required) { + return SignatureStatus.STATUS_VERIFIED; + } else if(verifiedCount + certsNeededCount >= required) { + if(firstCall) { + logger.info("Certificates need to be retrieved to verify consensus"); + } + return SignatureStatus.STATUS_NEED_CERTS; + } else { + return SignatureStatus.STATUS_FAILED; + } + } + + private SignatureStatus verifySingleAuthority(VoteAuthorityEntry authority) { + + boolean certsNeeded = false; + boolean validSignature = false; + + for(DirectorySignature s: authority.getSignatures()) { + DirectoryServer trusted = 
TrustedAuthorities.getInstance().getAuthorityServerByIdentity(s.getIdentityDigest()); + if(trusted == null) { + logger.warning("Consensus signed by unrecognized directory authority: "+ s.getIdentityDigest()); + return SignatureStatus.STATUS_FAILED; + } else { + switch(verifySignatureForTrustedAuthority(trusted, s)) { + case STATUS_NEED_CERTS: + certsNeeded = true; + break; + case STATUS_VERIFIED: + validSignature = true; + break; + default: + break; + } + } + } + + if(validSignature) { + return SignatureStatus.STATUS_VERIFIED; + } else if(certsNeeded) { + return SignatureStatus.STATUS_NEED_CERTS; + } else { + return SignatureStatus.STATUS_FAILED; + } + } + + private SignatureStatus verifySignatureForTrustedAuthority(DirectoryServer trustedAuthority, DirectorySignature signature) { + final KeyCertificate certificate = trustedAuthority.getCertificateByFingerprint(signature.getSigningKeyDigest()); + if(certificate == null) { + logger.fine("Missing certificate for signing key: "+ signature.getSigningKeyDigest()); + addRequiredCertificateForSignature(signature); + return SignatureStatus.STATUS_NEED_CERTS; + } + if(certificate.isExpired()) { + return SignatureStatus.STATUS_FAILED; + } + + final TorPublicKey signingKey = certificate.getAuthoritySigningKey(); + final HexDigest d = (signature.useSha256()) ? signingHash256 : signingHash; + if(!signingKey.verifySignature(signature.getSignature(), d)) { + logger.warning("Signature failed on consensus for signing key: "+ signature.getSigningKeyDigest()); + return SignatureStatus.STATUS_FAILED; + } + return SignatureStatus.STATUS_VERIFIED; + } + + public Set getRequiredCertificates() { + return requiredCertificates; + } + + private void addRequiredCertificateForSignature(DirectorySignature signature) { + requiredCertificates.add(new RequiredCertificateImpl(signature.getIdentityDigest(), signature.getSigningKeyDigest())); + } + + public boolean equals(Object o) { + if(!(o instanceof ConsensusDocumentImpl)) + return false; + final ConsensusDocumentImpl other = (ConsensusDocumentImpl) o; + return other.getSigningHash().equals(signingHash); + } + + public int hashCode() { + return (signingHash == null) ? 
0 : signingHash.hashCode(); + } + + private int getParameterValue(String name, int defaultValue, int minValue, int maxValue) { + if(!parameters.containsKey(name)) { + return defaultValue; + } + final int value = parameters.get(name); + if(value < minValue) { + return minValue; + } else if(value > maxValue) { + return maxValue; + } else { + return value; + } + } + + private boolean getBooleanParameterValue(String name, boolean defaultValue) { + if(!parameters.containsKey(name)) { + return defaultValue; + } + final int value = parameters.get(name); + return value != 0; + } + + public int getCircWindowParameter() { + return getParameterValue(CIRCWINDOW_PARAM, CIRCWINDOW_DEFAULT, CIRCWINDOW_MIN, CIRCWINDOW_MAX); + } + + public int getWeightScaleParameter() { + return getParameterValue(BW_WEIGHT_SCALE_PARAM, BW_WEIGHT_SCALE_DEFAULT, BW_WEIGHT_SCALE_MIN, BW_WEIGHT_SCALE_MAX); + } + + public int getBandwidthWeight(String tag) { + if(bandwidthWeights.containsKey(tag)) { + return bandwidthWeights.get(tag); + } else { + return -1; + } + } + + public boolean getUseNTorHandshake() { + return getBooleanParameterValue(USE_NTOR_HANDSHAKE_PARAM, false); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/ConsensusDocumentParser.java b/orchid/src/com/subgraph/orchid/directory/consensus/ConsensusDocumentParser.java new file mode 100644 index 00000000..71c2a4e2 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/ConsensusDocumentParser.java @@ -0,0 +1,106 @@ +package com.subgraph.orchid.directory.consensus; + +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.directory.parsing.BasicDocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.DocumentParser; +import com.subgraph.orchid.directory.parsing.DocumentParsingHandler; +import com.subgraph.orchid.directory.parsing.DocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentParsingResultHandler; + +public class ConsensusDocumentParser implements DocumentParser { + public enum DocumentSection { NO_SECTION, PREAMBLE, AUTHORITY, ROUTER_STATUS, FOOTER }; + + // dir-spec.txt 3.2 + // Unlike other formats described above, a SP in these documents must be a + // single space character (hex 20). 
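+ // This exact single-space string is installed as the field delimiter in initializeParser().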
+ private final static String ITEM_DELIMITER = " "; + + private final PreambleSectionParser preambleParser; + private final AuthoritySectionParser authorityParser; + private final RouterStatusSectionParser routerStatusParser; + private final FooterSectionParser footerParser; + private final DocumentFieldParser fieldParser; + private DocumentSection currentSection = DocumentSection.PREAMBLE; + private final ConsensusDocumentImpl document; + + private DocumentParsingResultHandler resultHandler; + + public ConsensusDocumentParser(DocumentFieldParser fieldParser) { + this.fieldParser = fieldParser; + initializeParser(); + + document = new ConsensusDocumentImpl(); + preambleParser = new PreambleSectionParser(fieldParser, document); + authorityParser = new AuthoritySectionParser(fieldParser, document); + routerStatusParser = new RouterStatusSectionParser(fieldParser, document); + footerParser = new FooterSectionParser(fieldParser, document); + } + + private void initializeParser() { + fieldParser.resetRawDocument(); + fieldParser.setHandler(createParsingHandler()); + fieldParser.setDelimiter(ITEM_DELIMITER); + fieldParser.setSignatureIgnoreToken("directory-signature"); + fieldParser.startSignedEntity(); + } + + public boolean parse(DocumentParsingResultHandler resultHandler) { + this.resultHandler = resultHandler; + try { + fieldParser.processDocument(); + return true; + } catch(TorParsingException e) { + resultHandler.parsingError(e.getMessage()); + return false; + } + } + + public DocumentParsingResult parse() { + final BasicDocumentParsingResult result = new BasicDocumentParsingResult(); + parse(result); + return result; + } + + private DocumentParsingHandler createParsingHandler() { + return new DocumentParsingHandler() { + + public void endOfDocument() { + document.setRawDocumentData(fieldParser.getRawDocument()); + resultHandler.documentParsed(document); + fieldParser.logDebug("Finished parsing status document."); + } + public void parseKeywordLine() { + processKeywordLine(); + } + + }; + } + private void processKeywordLine() { + DocumentSection newSection = null; + while(currentSection != DocumentSection.NO_SECTION) { + switch(currentSection) { + case PREAMBLE: + newSection = preambleParser.parseKeywordLine(); + break; + case AUTHORITY: + newSection = authorityParser.parseKeywordLine(); + break; + case ROUTER_STATUS: + newSection = routerStatusParser.parseKeywordLine(); + break; + case FOOTER: + newSection = footerParser.parseKeywordLine(); + break; + default: + break; + } + if(newSection == currentSection) + return; + + currentSection = newSection; + } + } + +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/ConsensusDocumentSectionParser.java b/orchid/src/com/subgraph/orchid/directory/consensus/ConsensusDocumentSectionParser.java new file mode 100644 index 00000000..0190d320 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/ConsensusDocumentSectionParser.java @@ -0,0 +1,38 @@ +package com.subgraph.orchid.directory.consensus; + +import com.subgraph.orchid.directory.consensus.ConsensusDocumentParser.DocumentSection; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; + +public abstract class ConsensusDocumentSectionParser { + + protected final ConsensusDocumentImpl document; + protected final DocumentFieldParser fieldParser; + + + ConsensusDocumentSectionParser(DocumentFieldParser parser, ConsensusDocumentImpl document) { + this.fieldParser = parser; + this.document = document; + } + + DocumentSection parseKeywordLine() { + String 
keywordString = fieldParser.getCurrentKeyword(); + if(getNextStateKeyword() != null && getNextStateKeyword().equals(keywordString)) + return nextSection(); + + final DocumentKeyword keyword = DocumentKeyword.findKeyword(keywordString, getSection()); + /* + * dirspec.txt (1.2) + * When interpreting a Document, software MUST ignore any KeywordLine that + * starts with a keyword it doesn't recognize; + */ + if(!keyword.equals(DocumentKeyword.UNKNOWN_KEYWORD)) + parseLine(keyword); + + return getSection(); + } + + abstract void parseLine(DocumentKeyword keyword); + abstract String getNextStateKeyword(); + abstract DocumentSection getSection(); + abstract DocumentSection nextSection(); +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/DirectorySignature.java b/orchid/src/com/subgraph/orchid/directory/consensus/DirectorySignature.java new file mode 100644 index 00000000..305f828d --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/DirectorySignature.java @@ -0,0 +1,35 @@ +package com.subgraph.orchid.directory.consensus; + +import com.subgraph.orchid.crypto.TorSignature; +import com.subgraph.orchid.data.HexDigest; + +public class DirectorySignature { + + private final HexDigest identityDigest; + private final HexDigest signingKeyDigest; + private final TorSignature signature; + private final boolean useSha256; + + DirectorySignature(HexDigest identityDigest, HexDigest signingKeyDigest, TorSignature signature, boolean useSha256) { + this.identityDigest = identityDigest; + this.signingKeyDigest = signingKeyDigest; + this.signature = signature; + this.useSha256 = useSha256; + } + + public HexDigest getIdentityDigest() { + return identityDigest; + } + + public HexDigest getSigningKeyDigest() { + return signingKeyDigest; + } + + public TorSignature getSignature() { + return signature; + } + + public boolean useSha256() { + return useSha256; + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/DocumentKeyword.java b/orchid/src/com/subgraph/orchid/directory/consensus/DocumentKeyword.java new file mode 100644 index 00000000..78913ac5 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/DocumentKeyword.java @@ -0,0 +1,103 @@ +package com.subgraph.orchid.directory.consensus; + +import com.subgraph.orchid.directory.consensus.ConsensusDocumentParser.DocumentSection; + +enum DocumentKeyword { + /* + * See dirspec.txt section 3.2 + */ + NETWORK_STATUS_VERSION("network-status-version", DocumentSection.PREAMBLE, 1), + VOTE_STATUS("vote-status", DocumentSection.PREAMBLE, 1), + CONSENSUS_METHODS("consensus-methods", DocumentSection.PREAMBLE, 1, true), + CONSENSUS_METHOD("consensus-method", DocumentSection.PREAMBLE, 1, false, true), + PUBLISHED("published", DocumentSection.PREAMBLE, 2, true), + VALID_AFTER("valid-after", DocumentSection.PREAMBLE,2), + FRESH_UNTIL("fresh-until", DocumentSection.PREAMBLE,2), + VALID_UNTIL("valid-until", DocumentSection.PREAMBLE,2), + VOTING_DELAY("voting-delay", DocumentSection.PREAMBLE,2), + CLIENT_VERSIONS("client-versions", DocumentSection.PREAMBLE,1), + SERVER_VERSIONS("server-versions", DocumentSection.PREAMBLE,1), + KNOWN_FLAGS("known-flags", DocumentSection.PREAMBLE), + PARAMS("params", DocumentSection.PREAMBLE), + + DIR_SOURCE("dir-source", DocumentSection.AUTHORITY, 6), + CONTACT("contact", DocumentSection.AUTHORITY), + VOTE_DIGEST("vote-digest", DocumentSection.AUTHORITY, 1, false, true), + + R("r", DocumentSection.ROUTER_STATUS, 8), + S("s", DocumentSection.ROUTER_STATUS), + V("v", 
DocumentSection.ROUTER_STATUS), + W("w", DocumentSection.ROUTER_STATUS, 1), + P("p", DocumentSection.ROUTER_STATUS, 2), + M("m", DocumentSection.ROUTER_STATUS, 1), + + DIRECTORY_FOOTER("directory-footer", DocumentSection.FOOTER), + BANDWIDTH_WEIGHTS("bandwidth-weights", DocumentSection.FOOTER, 19), + DIRECTORY_SIGNATURE("directory-signature", DocumentSection.FOOTER, 2), + + UNKNOWN_KEYWORD("KEYWORD NOT FOUND"); + + + public final static int VARIABLE_ARGUMENT_COUNT = -1; + + private final String keyword; + private final DocumentSection section; + private final int argumentCount; + private final boolean voteOnly; + private final boolean consensusOnly; + + + DocumentKeyword(String keyword) { + this(keyword, DocumentSection.NO_SECTION); + } + + DocumentKeyword(String keyword, DocumentSection section) { + this(keyword, section, VARIABLE_ARGUMENT_COUNT); + } + DocumentKeyword(String keyword, DocumentSection section, int argumentCount) { + this(keyword, section, argumentCount, false); + } + + DocumentKeyword(String keyword, DocumentSection section, int argumentCount, boolean voteOnly) { + this(keyword, section, argumentCount, voteOnly, false); + } + + + DocumentKeyword(String keyword, DocumentSection section, int argumentCount, boolean voteOnly, boolean consensusOnly) { + this.keyword = keyword; + this.section = section; + this.argumentCount = argumentCount; + this.voteOnly = voteOnly; + this.consensusOnly = consensusOnly; + } + + static DocumentKeyword findKeyword(String keyword, DocumentSection section) { + for(DocumentKeyword k : values()) { + if(k.getKeyword().equals(keyword) && k.getSection().equals(section)) + return k; + } + return UNKNOWN_KEYWORD; + } + + public String getKeyword() { + return keyword; + } + + public DocumentSection getSection() { + return section; + } + + public int getArgumentCount() { + return argumentCount; + } + + public boolean isConsensusOnly() { + return consensusOnly; + } + + public boolean isVoteOnly() { + return voteOnly; + } + + +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/FooterSectionParser.java b/orchid/src/com/subgraph/orchid/directory/consensus/FooterSectionParser.java new file mode 100644 index 00000000..fcceb0f5 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/FooterSectionParser.java @@ -0,0 +1,85 @@ +package com.subgraph.orchid.directory.consensus; + +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.crypto.TorSignature; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.directory.consensus.ConsensusDocumentParser.DocumentSection; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.NameIntegerParameter; + +public class FooterSectionParser extends ConsensusDocumentSectionParser { + + private boolean seenFirstSignature = false; + + FooterSectionParser(DocumentFieldParser parser, ConsensusDocumentImpl document) { + super(parser, document); + } + + @Override + String getNextStateKeyword() { + return null; + } + + @Override + DocumentSection getSection() { + return DocumentSection.FOOTER; + } + + DocumentSection nextSection() { + return DocumentSection.NO_SECTION; + } + + @Override + void parseLine(DocumentKeyword keyword) { + switch(keyword) { + case BANDWIDTH_WEIGHTS: + processBandwidthWeights(); + break; + + case DIRECTORY_SIGNATURE: + processSignature(); + break; + + default: + break; + } + } + + private void doFirstSignature() { + seenFirstSignature = true; + fieldParser.endSignedEntity(); + final 
TorMessageDigest messageDigest = fieldParser.getSignatureMessageDigest(); + messageDigest.update("directory-signature "); + document.setSigningHash(messageDigest.getHexDigest()); + + TorMessageDigest messageDigest256 = fieldParser.getSignatureMessageDigest256(); + messageDigest256.update("directory-signature "); + document.setSigningHash256(messageDigest256.getHexDigest()); + } + + private void processSignature() { + if(!seenFirstSignature) { + doFirstSignature(); + } + final String s = fieldParser.parseString(); + final HexDigest identity; + boolean useSha256 = false; + if(s.length() < TorMessageDigest.TOR_DIGEST_SIZE) { + useSha256 = ("sha256".equals(s)); + identity = fieldParser.parseHexDigest(); + } else { + identity = HexDigest.createFromString(s); + } + HexDigest signingKey = fieldParser.parseHexDigest(); + TorSignature signature = fieldParser.parseSignature(); + document.addSignature(new DirectorySignature(identity, signingKey, signature, useSha256)); + } + + private void processBandwidthWeights() { + final int remaining = fieldParser.argumentsRemaining(); + for(int i = 0; i < remaining; i++) { + NameIntegerParameter p = fieldParser.parseParameter(); + document.addBandwidthWeight(p.getName(), p.getValue()); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/PreambleSectionParser.java b/orchid/src/com/subgraph/orchid/directory/consensus/PreambleSectionParser.java new file mode 100644 index 00000000..f67d0f0a --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/PreambleSectionParser.java @@ -0,0 +1,131 @@ +package com.subgraph.orchid.directory.consensus; + +import java.util.Arrays; +import java.util.List; + +import com.subgraph.orchid.ConsensusDocument.ConsensusFlavor; +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.directory.consensus.ConsensusDocumentParser.DocumentSection; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.NameIntegerParameter; + +public class PreambleSectionParser extends ConsensusDocumentSectionParser { + private final static int CURRENT_DOCUMENT_VERSION = 3; + private boolean isFirstLine = true; + + PreambleSectionParser(DocumentFieldParser parser, ConsensusDocumentImpl document) { + super(parser, document); + } + + String getNextStateKeyword() { + return "dir-source"; + } + + DocumentSection getSection() { + return DocumentSection.PREAMBLE; + } + + DocumentSection nextSection() { + return DocumentSection.AUTHORITY; + } + + @Override + void parseLine(DocumentKeyword keyword) { + if(isFirstLine) { + parseFirstLine(keyword); + } else { + processKeyword(keyword); + } + } + + private void processKeyword(DocumentKeyword keyword) { + switch(keyword) { + case NETWORK_STATUS_VERSION: + throw new TorParsingException("Network status version may only appear on the first line of status document"); + case VOTE_STATUS: + final String voteStatus = fieldParser.parseString(); + if(!voteStatus.equals("consensus")) + throw new TorParsingException("Unexpected vote-status type: "+ voteStatus); + break; + case CONSENSUS_METHOD: + document.setConsensusMethod(fieldParser.parseInteger()); + break; + + case VALID_AFTER: + document.setValidAfter(fieldParser.parseTimestamp()); + break; + + case FRESH_UNTIL: + document.setFreshUntil(fieldParser.parseTimestamp()); + break; + + case VALID_UNTIL: + document.setValidUntil(fieldParser.parseTimestamp()); + break; + + case VOTING_DELAY: + document.setVoteDelaySeconds(fieldParser.parseInteger()); + 
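+ // voting-delay carries two integer arguments: VoteSeconds, then DistSeconds.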
document.setDistDelaySeconds(fieldParser.parseInteger()); + break; + + case CLIENT_VERSIONS: + for(String version: parseVersions(fieldParser.parseString())) + document.addClientVersion(version); + break; + case SERVER_VERSIONS: + for(String version: parseVersions(fieldParser.parseString())) + document.addServerVersion(version); + break; + case KNOWN_FLAGS: + while(fieldParser.argumentsRemaining() > 0) + document.addKnownFlag(fieldParser.parseString()); + break; + + case PARAMS: + parseParams(); + break; + + default: + break; + } + + } + + private void parseFirstLine(DocumentKeyword keyword) { + if(keyword != DocumentKeyword.NETWORK_STATUS_VERSION) + throw new TorParsingException("network-status-version not found at beginning of consensus document as expected."); + + final int documentVersion = fieldParser.parseInteger(); + + if(documentVersion != CURRENT_DOCUMENT_VERSION) + throw new TorParsingException("Unexpected consensus document version number: " + documentVersion); + + if(fieldParser.argumentsRemaining() > 0) { + parseConsensusFlavor(); + } + isFirstLine = false; + } + + private void parseConsensusFlavor() { + final String flavor = fieldParser.parseString(); + if("ns".equals(flavor)) { + document.setConsensusFlavor(ConsensusFlavor.NS); + } else if("microdesc".equals(flavor)) { + document.setConsensusFlavor(ConsensusFlavor.MICRODESC); + } else { + fieldParser.logWarn("Unknown consensus flavor: "+ flavor); + } + } + + private List parseVersions(String versions) { + return Arrays.asList(versions.split(",")); + } + + private void parseParams() { + final int remaining = fieldParser.argumentsRemaining(); + for(int i = 0; i < remaining; i++) { + NameIntegerParameter p = fieldParser.parseParameter(); + document.addParameter(p.getName(), p.getValue()); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/RequiredCertificateImpl.java b/orchid/src/com/subgraph/orchid/directory/consensus/RequiredCertificateImpl.java new file mode 100644 index 00000000..f706ecae --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/RequiredCertificateImpl.java @@ -0,0 +1,69 @@ +package com.subgraph.orchid.directory.consensus; + +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.data.HexDigest; + +public class RequiredCertificateImpl implements ConsensusDocument.RequiredCertificate { + + private final HexDigest identity; + private final HexDigest signingKey; + + private int downloadFailureCount; + + public RequiredCertificateImpl(HexDigest identity, HexDigest signingKey) { + this.identity = identity; + this.signingKey = signingKey; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + ((identity == null) ? 0 : identity.hashCode()); + result = prime * result + + ((signingKey == null) ? 
0 : signingKey.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + RequiredCertificateImpl other = (RequiredCertificateImpl) obj; + if (identity == null) { + if (other.identity != null) + return false; + } else if (!identity.equals(other.identity)) + return false; + if (signingKey == null) { + if (other.signingKey != null) + return false; + } else if (!signingKey.equals(other.signingKey)) + return false; + return true; + } + + public void incrementDownloadFailureCount() { + downloadFailureCount += 1; + } + + public int getDownloadFailureCount() { + return downloadFailureCount; + } + + public HexDigest getAuthorityIdentity() { + return identity; + } + + public HexDigest getSigningKey() { + return signingKey; + } + + + +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/RouterStatusImpl.java b/orchid/src/com/subgraph/orchid/directory/consensus/RouterStatusImpl.java new file mode 100644 index 00000000..4fe9a896 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/RouterStatusImpl.java @@ -0,0 +1,106 @@ +package com.subgraph.orchid.directory.consensus; + +import java.util.HashSet; +import java.util.Set; + +import com.subgraph.orchid.RouterStatus; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.Timestamp; +import com.subgraph.orchid.data.exitpolicy.ExitPorts; + +public class RouterStatusImpl implements RouterStatus { + + private String nickname; + private HexDigest identity; + private HexDigest digest; + private HexDigest microdescriptorDigest; + private Timestamp publicationTime; + private IPv4Address address; + private int routerPort; + private int directoryPort; + private Set flags = new HashSet(); + private String version; + private int bandwidthEstimate; + private int bandwidthMeasured; + private boolean hasBandwidth; + private ExitPorts exitPorts; + + void setNickname(String nickname) { this.nickname = nickname; } + void setIdentity(HexDigest identity) { this.identity = identity; } + void setDigest(HexDigest digest) { this.digest = digest; } + void setMicrodescriptorDigest(HexDigest digest) { this.microdescriptorDigest = digest; } + void setPublicationTime(Timestamp timestamp) { this.publicationTime = timestamp; } + void setAddress(IPv4Address address) { this.address = address; } + void setRouterPort(int port) { this.routerPort = port; } + void setDirectoryPort(int port) { this.directoryPort = port; } + void addFlag(String flag) { this.flags.add(flag); } + void setVersion(String version) { this.version = version; } + void setEstimatedBandwidth(int bandwidth) { this.bandwidthEstimate = bandwidth; hasBandwidth = true; } + void setMeasuredBandwidth(int bandwidth) { this.bandwidthMeasured = bandwidth; } + void setAcceptedPorts(String portList) { this.exitPorts = ExitPorts.createAcceptExitPorts(portList); } + void setRejectedPorts(String portList) { this.exitPorts = ExitPorts.createRejectExitPorts(portList); } + + public String toString() { + return "Router: ("+ nickname +" "+ identity +" "+ digest +" "+ address +" "+ routerPort +" " + directoryPort + +" "+ version +" "+ exitPorts +")"; + } + public String getNickname() { + return nickname; + } + + public HexDigest getIdentity() { + return identity; + } + + public HexDigest getDescriptorDigest() { + return digest; + } + + public HexDigest getMicrodescriptorDigest() { + return 
microdescriptorDigest; + } + + public Timestamp getPublicationTime() { + return publicationTime; + } + + public IPv4Address getAddress() { + return address; + } + + public int getRouterPort() { + return routerPort; + } + + public boolean isDirectory() { + return directoryPort != 0; + } + public int getDirectoryPort() { + return directoryPort; + } + + public boolean hasFlag(String flag) { + return flags.contains(flag); + } + + public String getVersion() { + return version; + } + + public boolean hasBandwidth() { + return hasBandwidth; + } + + public int getEstimatedBandwidth() { + return bandwidthEstimate; + } + + public int getMeasuredBandwidth() { + return bandwidthMeasured; + } + + public ExitPorts getExitPorts() { + return exitPorts; + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/RouterStatusSectionParser.java b/orchid/src/com/subgraph/orchid/directory/consensus/RouterStatusSectionParser.java new file mode 100644 index 00000000..79f4078c --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/RouterStatusSectionParser.java @@ -0,0 +1,141 @@ +package com.subgraph.orchid.directory.consensus; + +import com.subgraph.orchid.ConsensusDocument.ConsensusFlavor; +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.directory.consensus.ConsensusDocumentParser.DocumentSection; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; + +public class RouterStatusSectionParser extends ConsensusDocumentSectionParser { + + private RouterStatusImpl currentEntry = null; + + RouterStatusSectionParser(DocumentFieldParser parser, ConsensusDocumentImpl document) { + super(parser, document); + } + + @Override + void parseLine(DocumentKeyword keyword) { + if(!keyword.equals(DocumentKeyword.R)) + assertCurrentEntry(); + switch(keyword) { + case R: + parseFirstLine(); + break; + case S: + parseFlags(); + break; + case V: + parseVersion(); + break; + case W: + parseBandwidth(); + break; + case P: + parsePortList(); + break; + case M: + parseMicrodescriptorHash(); + break; + default: + break; + } + } + + private void assertCurrentEntry() { + if(currentEntry == null) + throw new TorParsingException("Router status entry must begin with an 'r' line"); + } + + private void addCurrentEntry() { + assertCurrentEntry(); + document.addRouterStatusEntry(currentEntry); + currentEntry = null; + } + + private void parseFirstLine() { + if(currentEntry != null) + throw new TorParsingException("Unterminated router status entry."); + currentEntry = new RouterStatusImpl(); + currentEntry.setNickname(fieldParser.parseNickname()); + currentEntry.setIdentity(parseBase64Digest()); + if(document.getFlavor() != ConsensusFlavor.MICRODESC) { + currentEntry.setDigest(parseBase64Digest()); + } + currentEntry.setPublicationTime(fieldParser.parseTimestamp()); + currentEntry.setAddress(fieldParser.parseAddress()); + currentEntry.setRouterPort(fieldParser.parsePort()); + currentEntry.setDirectoryPort(fieldParser.parsePort()); + } + + private HexDigest parseBase64Digest() { + return HexDigest.createFromDigestBytes(fieldParser.parseBase64Data()); + } + + private void parseFlags() { + while(fieldParser.argumentsRemaining() > 0) + currentEntry.addFlag(fieldParser.parseString()); + } + + private void parseVersion() { + currentEntry.setVersion(fieldParser.parseConcatenatedString()); + } + + private void parseBandwidth() { + while(fieldParser.argumentsRemaining() > 0) { + final String[] 
parts = fieldParser.parseString().split("="); + if(parts.length == 2) + parseBandwidthItem(parts[0], fieldParser.parseInteger(parts[1])); + } + if(document.getFlavor() == ConsensusFlavor.MICRODESC) { + addCurrentEntry(); + } + } + + private void parseBandwidthItem(String key, int value) { + if(key.equals("Bandwidth")) + currentEntry.setEstimatedBandwidth(value); + else if(key.equals("Measured")) + currentEntry.setMeasuredBandwidth(value); + } + + private void parsePortList() { + if(document.getFlavor() == ConsensusFlavor.MICRODESC) { + throw new TorParsingException("'p' line does not appear in consensus flavor 'microdesc'"); + } + final String arg = fieldParser.parseString(); + if(arg.equals("accept")) { + currentEntry.setAcceptedPorts(fieldParser.parseString()); + } else if(arg.equals("reject")) { + currentEntry.setRejectedPorts(fieldParser.parseString()); + } + addCurrentEntry(); + } + + private void parseMicrodescriptorHash() { + if(document.getFlavor() != ConsensusFlavor.MICRODESC) { + throw new TorParsingException("'m' line is invalid unless consensus flavor is microdesc"); + } + final byte[] hashBytes = fieldParser.parseBase64Data(); + if(hashBytes.length != TorMessageDigest.TOR_DIGEST256_SIZE) { + throw new TorParsingException("'m' line has incorrect digest size "+ hashBytes.length +" != "+ TorMessageDigest.TOR_DIGEST256_SIZE); + } + currentEntry.setMicrodescriptorDigest(HexDigest.createFromDigestBytes(hashBytes)); + } + + @Override + String getNextStateKeyword() { + return "directory-footer"; + } + + @Override + DocumentSection getSection() { + return DocumentSection.ROUTER_STATUS; + } + + DocumentSection nextSection() { + return DocumentSection.FOOTER; + } + +} diff --git a/orchid/src/com/subgraph/orchid/directory/consensus/VoteAuthorityEntryImpl.java b/orchid/src/com/subgraph/orchid/directory/consensus/VoteAuthorityEntryImpl.java new file mode 100644 index 00000000..f9be7a49 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/consensus/VoteAuthorityEntryImpl.java @@ -0,0 +1,67 @@ +package com.subgraph.orchid.directory.consensus; + +import java.util.ArrayList; +import java.util.List; + +import com.subgraph.orchid.VoteAuthorityEntry; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; + +public class VoteAuthorityEntryImpl implements VoteAuthorityEntry { + private String nickname; + private HexDigest identity; + private String hostname; + private IPv4Address address; + private int dirport = -1; + private int orport = -1; + + private String contact; + private HexDigest voteDigest; + + private final List signatures = new ArrayList(); + + void setNickname(String nickname) { this.nickname = nickname; } + void setIdentity(HexDigest identity) { this.identity = identity; } + void setHostname(String hostname) { this.hostname = hostname; } + void setAddress(IPv4Address address) { this.address = address; } + void setDirectoryPort(int port) { this.dirport = port; } + void setRouterPort(int port) { this.orport = port; } + void setContact(String contact) { this.contact = contact; } + void setVoteDigest(HexDigest digest) { this.voteDigest = digest; } + + public String getNickname() { + return nickname; + } + + public HexDigest getIdentity() { + return identity; + } + + public String getHostname() { + return hostname; + } + + public IPv4Address getAddress() { + return address; + } + + public int getDirectoryPort() { + return dirport; + } + + public int getRouterPort() { + return orport; + } + + public String getContact() { + return contact; + } + + 
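+ // Digest of this authority's vote, taken from the vote-digest line of its dir-source entry.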
public HexDigest getVoteDigest() { + return voteDigest; + } + + public List getSignatures() { + return signatures; + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/BridgeDescriptorFetcher.java b/orchid/src/com/subgraph/orchid/directory/downloader/BridgeDescriptorFetcher.java new file mode 100644 index 00000000..4514bbd6 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/BridgeDescriptorFetcher.java @@ -0,0 +1,19 @@ +package com.subgraph.orchid.directory.downloader; + +import java.nio.ByteBuffer; + +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.directory.parsing.DocumentParser; + +public class BridgeDescriptorFetcher extends DocumentFetcher{ + + @Override + String getRequestPath() { + return "/tor/server/authority"; + } + + @Override + DocumentParser createParser(ByteBuffer response) { + return PARSER_FACTORY.createRouterDescriptorParser(response, true); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/CertificateFetcher.java b/orchid/src/com/subgraph/orchid/directory/downloader/CertificateFetcher.java new file mode 100644 index 00000000..799d8e9a --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/CertificateFetcher.java @@ -0,0 +1,40 @@ +package com.subgraph.orchid.directory.downloader; + +import java.nio.ByteBuffer; +import java.util.Set; + +import com.subgraph.orchid.KeyCertificate; +import com.subgraph.orchid.ConsensusDocument.RequiredCertificate; +import com.subgraph.orchid.directory.parsing.DocumentParser; + +public class CertificateFetcher extends DocumentFetcher{ + + private final Set requiredCertificates; + + public CertificateFetcher(Set requiredCertificates) { + this.requiredCertificates = requiredCertificates; + } + + @Override + String getRequestPath() { + return "/tor/keys/fp-sk/"+ getRequiredCertificatesRequestString(); + } + + private String getRequiredCertificatesRequestString() { + final StringBuilder sb = new StringBuilder(); + for(RequiredCertificate rc: requiredCertificates) { + if(sb.length() > 0) { + sb.append("+"); + } + sb.append(rc.getAuthorityIdentity().toString()); + sb.append("-"); + sb.append(rc.getSigningKey().toString()); + } + return sb.toString(); + } + + @Override + DocumentParser createParser(ByteBuffer response) { + return PARSER_FACTORY.createKeyCertificateParser(response); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/ConsensusFetcher.java b/orchid/src/com/subgraph/orchid/directory/downloader/ConsensusFetcher.java new file mode 100644 index 00000000..9e92e09f --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/ConsensusFetcher.java @@ -0,0 +1,29 @@ +package com.subgraph.orchid.directory.downloader; + +import java.nio.ByteBuffer; + +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.directory.parsing.DocumentParser; + +public class ConsensusFetcher extends DocumentFetcher{ + + private final static String CONSENSUS_BASE_PATH = "/tor/status-vote/current/"; + + private final boolean useMicrodescriptors; + + + public ConsensusFetcher(boolean useMicrodescriptors) { + this.useMicrodescriptors = useMicrodescriptors; + } + + @Override + String getRequestPath() { + return CONSENSUS_BASE_PATH + ((useMicrodescriptors) ? 
+ ("consensus-microdesc") : ("consensus")); + } + + @Override + DocumentParser createParser(ByteBuffer response) { + return PARSER_FACTORY.createConsensusDocumentParser(response); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/DescriptorProcessor.java b/orchid/src/com/subgraph/orchid/directory/downloader/DescriptorProcessor.java new file mode 100644 index 00000000..175d1b4b --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/DescriptorProcessor.java @@ -0,0 +1,112 @@ +package com.subgraph.orchid.directory.downloader; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; + +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.TorConfig.AutoBoolValue; +import com.subgraph.orchid.data.HexDigest; + +public class DescriptorProcessor { + private final static int MAX_DL_PER_REQUEST = 96; + private final static int MAX_DL_TO_DELAY = 16; + private final static int MIN_DL_REQUESTS = 3; + private final static int MAX_CLIENT_INTERVAL_WITHOUT_REQUEST = 10 * 60 * 1000; + + private final TorConfig config; + private final Directory directory; + + private Date lastDescriptorDownload; + + + DescriptorProcessor(TorConfig config, Directory directory) { + this.config = config; + this.directory = directory; + } + + private boolean canDownloadDescriptors(int downloadableCount) { + if(downloadableCount >= MAX_DL_TO_DELAY) + return true; + if(downloadableCount == 0) + return false; + if(lastDescriptorDownload == null) + return true; + final Date now = new Date(); + final long diff = now.getTime() - lastDescriptorDownload.getTime(); + return diff > MAX_CLIENT_INTERVAL_WITHOUT_REQUEST; + } + + /* + * dir-spec.txt section 5.3 + */ + private List< List > partitionDescriptors(List descriptors) { + final int size = descriptors.size(); + final List< List > partitions = new ArrayList< List >(); + if(size <= 10) { + partitions.add(createPartitionList(descriptors, 0, size)); + return partitions; + } else if(size <= (MIN_DL_REQUESTS * MAX_DL_PER_REQUEST)) { + final int chunk = size / MIN_DL_REQUESTS; + int over = size % MIN_DL_REQUESTS; + int off = 0; + for(int i = 0; i < MIN_DL_REQUESTS; i++) { + int sz = chunk; + if(over != 0) { + sz++; + over--; + } + partitions.add(createPartitionList(descriptors, off, sz)); + off += sz; + } + return partitions; + + } else { + int off = 0; + while(off < descriptors.size()) { + partitions.add(createPartitionList(descriptors, off, MAX_DL_PER_REQUEST)); + off += MAX_DL_PER_REQUEST; + } + return partitions; + } + } + + private List createPartitionList(List descriptors, int offset, int size) { + final List newList = new ArrayList(); + for(int i = offset; i < (offset + size) && i < descriptors.size(); i++) { + final HexDigest digest = getDescriptorDigestForRouter(descriptors.get(i)); + newList.add(digest); + } + return newList; + } + + private HexDigest getDescriptorDigestForRouter(Router r) { + if(useMicrodescriptors()) { + return r.getMicrodescriptorDigest(); + } else { + return r.getDescriptorDigest(); + } + } + + private boolean useMicrodescriptors() { + return config.getUseMicrodescriptors() != AutoBoolValue.FALSE; + } + + List< List > getDescriptorDigestsToDownload() { + final ConsensusDocument consensus = directory.getCurrentConsensusDocument(); + if(consensus == null || !consensus.isLive()) { + return Collections.emptyList(); + } + final List 
downloadables = directory.getRoutersWithDownloadableDescriptors(); + if(!canDownloadDescriptors(downloadables.size())) { + return Collections.emptyList(); + } + + lastDescriptorDownload = new Date(); + return partitionDescriptors(downloadables); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryDocumentRequestor.java b/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryDocumentRequestor.java new file mode 100644 index 00000000..347c38c5 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryDocumentRequestor.java @@ -0,0 +1,125 @@ +package com.subgraph.orchid.directory.downloader; + +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeoutException; + +import com.subgraph.orchid.CircuitManager; +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.ConsensusDocument.RequiredCertificate; +import com.subgraph.orchid.DirectoryCircuit; +import com.subgraph.orchid.KeyCertificate; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.RouterMicrodescriptor; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.StreamConnectFailedException; +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.circuits.TorInitializationTracker; +import com.subgraph.orchid.data.HexDigest; + +/** + * Synchronously downloads directory documents. + */ +public class DirectoryDocumentRequestor { + private final static int OPEN_DIRECTORY_STREAM_TIMEOUT = 10 * 1000; + + private final DirectoryCircuit circuit; + private final TorInitializationTracker initializationTracker; + + + public DirectoryDocumentRequestor(DirectoryCircuit circuit) { + this(circuit, null); + } + + public DirectoryDocumentRequestor(DirectoryCircuit circuit, TorInitializationTracker initializationTracker) { + this.circuit = circuit; + this.initializationTracker = initializationTracker; + } + + public RouterDescriptor downloadBridgeDescriptor(Router bridge) throws DirectoryRequestFailedException { + return fetchSingleDocument(new BridgeDescriptorFetcher()); + } + + public ConsensusDocument downloadCurrentConsensus(boolean useMicrodescriptors) throws DirectoryRequestFailedException { + return fetchSingleDocument(new ConsensusFetcher(useMicrodescriptors), CircuitManager.DIRECTORY_PURPOSE_CONSENSUS); + } + + public List downloadKeyCertificates(Set required) throws DirectoryRequestFailedException { + return fetchDocuments(new CertificateFetcher(required), CircuitManager.DIRECTORY_PURPOSE_CERTIFICATES); + } + + public List downloadRouterDescriptors(Set fingerprints) throws DirectoryRequestFailedException { + return fetchDocuments(new RouterDescriptorFetcher(fingerprints), CircuitManager.DIRECTORY_PURPOSE_DESCRIPTORS); + } + + public List downloadRouterMicrodescriptors(Set fingerprints) throws DirectoryRequestFailedException { + return fetchDocuments(new MicrodescriptorFetcher(fingerprints), CircuitManager.DIRECTORY_PURPOSE_DESCRIPTORS); + } + + private T fetchSingleDocument(DocumentFetcher fetcher) throws DirectoryRequestFailedException { + return fetchSingleDocument(fetcher, 0); + } + + private T fetchSingleDocument(DocumentFetcher fetcher, int purpose) throws DirectoryRequestFailedException { + final List result = fetchDocuments(fetcher, purpose); + if(result.size() == 1) { + return result.get(0); + } + return null; + } + + private List fetchDocuments(DocumentFetcher fetcher, int purpose) throws DirectoryRequestFailedException { + try { + final 
HttpConnection http = createHttpConnection(purpose); + try { + return fetcher.requestDocuments(http); + } finally { + http.close(); + } + } catch (TimeoutException e) { + throw new DirectoryRequestFailedException("Directory request timed out"); + } catch (StreamConnectFailedException e) { + throw new DirectoryRequestFailedException("Failed to open directory stream", e); + } catch (IOException e) { + throw new DirectoryRequestFailedException("I/O exception processing directory request", e); + } catch (InterruptedException e) { + throw new DirectoryRequestFailedException("Directory request interrupted"); + } + } + + private HttpConnection createHttpConnection(int purpose) throws InterruptedException, TimeoutException, StreamConnectFailedException { + return new HttpConnection(openDirectoryStream(purpose)); + } + + private Stream openDirectoryStream(int purpose) throws InterruptedException, TimeoutException, StreamConnectFailedException { + final int requestEventCode = purposeToEventCode(purpose, false); + final int loadingEventCode = purposeToEventCode(purpose, true); + + notifyInitialization(requestEventCode); + + final Stream stream = circuit.openDirectoryStream(OPEN_DIRECTORY_STREAM_TIMEOUT, true); + notifyInitialization(loadingEventCode); + return stream; + } + + private int purposeToEventCode(int purpose, boolean getLoadingEvent) { + switch(purpose) { + case CircuitManager.DIRECTORY_PURPOSE_CONSENSUS: + return getLoadingEvent ? Tor.BOOTSTRAP_STATUS_LOADING_STATUS : Tor.BOOTSTRAP_STATUS_REQUESTING_STATUS; + case CircuitManager.DIRECTORY_PURPOSE_CERTIFICATES: + return getLoadingEvent ? Tor.BOOTSTRAP_STATUS_LOADING_KEYS : Tor.BOOTSTRAP_STATUS_REQUESTING_KEYS; + case CircuitManager.DIRECTORY_PURPOSE_DESCRIPTORS: + return getLoadingEvent ? Tor.BOOTSTRAP_STATUS_LOADING_DESCRIPTORS : Tor.BOOTSTRAP_STATUS_REQUESTING_DESCRIPTORS; + default: + return 0; + } + } + + private void notifyInitialization(int code) { + if(code > 0 && initializationTracker != null) { + initializationTracker.notifyEvent(code); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryDownloadTask.java b/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryDownloadTask.java new file mode 100644 index 00000000..761e6264 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryDownloadTask.java @@ -0,0 +1,228 @@ +package com.subgraph.orchid.directory.downloader; + +import java.util.Collection; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Logger; + +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.DirectoryDownloader; +import com.subgraph.orchid.KeyCertificate; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.TorConfig.AutoBoolValue; +import com.subgraph.orchid.crypto.TorRandom; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.Timestamp; + +public class DirectoryDownloadTask implements Runnable { + private final static Logger logger = Logger.getLogger(DirectoryDownloadTask.class.getName()); + + private final TorConfig config; + private final Directory directory; + + private final DirectoryDownloader downloader; + + private final TorRandom random; + private final DescriptorProcessor descriptorProcessor; + + private final ExecutorService executor = 
Executors.newCachedThreadPool(); + + private volatile boolean isDownloadingCertificates; + private volatile boolean isDownloadingConsensus; + private final AtomicInteger outstandingDescriptorTasks; + + private ConsensusDocument currentConsensus; + private Date consensusDownloadTime; + + private volatile boolean isStopped; + + DirectoryDownloadTask(TorConfig config, Directory directory, DirectoryDownloader downloader) { + this.config = config; + this.directory = directory; + this.downloader = downloader; + this.random = new TorRandom(); + this.outstandingDescriptorTasks = new AtomicInteger(); + this.descriptorProcessor = new DescriptorProcessor(config, directory); + } + + public synchronized void stop() { + if(isStopped) { + return; + } + executor.shutdownNow(); + isStopped = true; + } + + public void run() { + directory.loadFromStore(); + directory.waitUntilLoaded(); + setCurrentConsensus(directory.getCurrentConsensusDocument()); + while (!isStopped) { + checkCertificates(); + checkConsensus(); + checkDescriptors(); + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return; + } + } + } + + private void checkCertificates() { + if (isDownloadingCertificates + || directory.getRequiredCertificates().isEmpty()) { + return; + } + + isDownloadingCertificates = true; + executor.execute(new DownloadCertificatesTask()); + } + + void setCurrentConsensus(ConsensusDocument consensus) { + if (consensus != null) { + currentConsensus = consensus; + consensusDownloadTime = chooseDownloadTimeForConsensus(consensus); + } else { + currentConsensus = null; + consensusDownloadTime = null; + } + } + + /* + * dir-spec 5.1: Downloading network-status documents + * + * To avoid swarming the caches whenever a consensus expires, the clients + * download new consensuses at a randomly chosen time after the caches are + * expected to have a fresh consensus, but before their consensus will + * expire. (This time is chosen uniformly at random from the interval + * between the time 3/4 into the first interval after the consensus is no + * longer fresh, and 7/8 of the time remaining after that before the + * consensus is invalid.) + * + * [For example, if a cache has a consensus that became valid at 1:00, and + * is fresh until 2:00, and expires at 4:00, that cache will fetch a new + * consensus at a random time between 2:45 and 3:50, since 3/4 of the + * one-hour interval is 45 minutes, and 7/8 of the remaining 75 minutes is + * 65 minutes.] 
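 *
 * [Editorial note, not part of the patch: mapped onto the variables used in
 * chooseDownloadTimeForConsensus() below, that example gives
 *     i1 = fu - va = 60 min,
 *     start = fu + (i1 * 3) / 4 = 2:45,
 *     i2 = ((vu - start) * 7) / 8 ~= 65.6 min,
 * and the download time is drawn uniformly from [start, start + i2),
 * i.e. between 2:45 and roughly 3:50.]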
+ */ + private Date chooseDownloadTimeForConsensus(ConsensusDocument consensus) { + final long va = getMilliseconds(consensus.getValidAfterTime()); + final long fu = getMilliseconds(consensus.getFreshUntilTime()); + final long vu = getMilliseconds(consensus.getValidUntilTime()); + final long i1 = fu - va; + final long start = fu + ((i1 * 3) / 4); + final long i2 = ((vu - start) * 7) / 8; + final long r = random.nextLong(i2); + final long download = start + r; + return new Date(download); + } + + private boolean needConsensusDownload() { + if(directory.hasPendingConsensus()) { + return false; + } + if (currentConsensus == null || !currentConsensus.isLive()) { + if(currentConsensus == null) { + logger.info("Downloading consensus because we have no consensus document"); + } else { + logger.info("Downloading consensus because the document we have is not live"); + } + return true; + } + return consensusDownloadTime.before(new Date()); + } + + private long getMilliseconds(Timestamp ts) { + return ts.getDate().getTime(); + } + + private void checkConsensus() { + if (isDownloadingConsensus || !needConsensusDownload()) { + return; + } + + isDownloadingConsensus = true; + executor.execute(new DownloadConsensusTask()); + } + + private void checkDescriptors() { + if (outstandingDescriptorTasks.get() > 0) { + return; + } + List> ds = descriptorProcessor + .getDescriptorDigestsToDownload(); + if (ds.isEmpty()) { + return; + } + for (List dlist : ds) { + outstandingDescriptorTasks.incrementAndGet(); + executor.execute(new DownloadRouterDescriptorsTask(dlist, useMicrodescriptors())); + } + } + + private boolean useMicrodescriptors() { + return config.getUseMicrodescriptors() != AutoBoolValue.FALSE; + } + + private class DownloadConsensusTask implements Runnable { + public void run() { + try { + final ConsensusDocument consensus = downloader.downloadCurrentConsensus(useMicrodescriptors()); + setCurrentConsensus(consensus); + directory.addConsensusDocument(consensus, false); + + } catch (DirectoryRequestFailedException e) { + logger.warning("Failed to download current consensus document: "+ e.getMessage()); + } finally { + isDownloadingConsensus = false; + } + } + } + + private class DownloadRouterDescriptorsTask implements Runnable { + private final Set fingerprints; + private final boolean useMicrodescriptors; + + public DownloadRouterDescriptorsTask(Collection fingerprints, boolean useMicrodescriptors) { + this.fingerprints = new HashSet(fingerprints); + this.useMicrodescriptors = useMicrodescriptors; + } + + public void run() { + try { + if(useMicrodescriptors) { + directory.addRouterMicrodescriptors(downloader.downloadRouterMicrodescriptors(fingerprints)); + } else { + directory.addRouterDescriptors(downloader.downloadRouterDescriptors(fingerprints)); + } + } catch (DirectoryRequestFailedException e) { + logger.warning("Failed to download router descriptors: "+ e.getMessage()); + } finally { + outstandingDescriptorTasks.decrementAndGet(); + } + } + } + + private class DownloadCertificatesTask implements Runnable { + public void run() { + try { + for(KeyCertificate c: downloader.downloadKeyCertificates(directory.getRequiredCertificates())) { + directory.addCertificate(c); + } + directory.storeCertificates(); + } catch (DirectoryRequestFailedException e) { + logger.warning("Failed to download key certificates: "+ e.getMessage()); + } finally { + isDownloadingCertificates = false; + } + } + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryDownloaderImpl.java 
b/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryDownloaderImpl.java new file mode 100644 index 00000000..389ba25e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryDownloaderImpl.java @@ -0,0 +1,144 @@ +package com.subgraph.orchid.directory.downloader; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.logging.Logger; + +import com.subgraph.orchid.CircuitManager; +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.ConsensusDocument.RequiredCertificate; +import com.subgraph.orchid.Descriptor; +import com.subgraph.orchid.Directory; +import com.subgraph.orchid.DirectoryCircuit; +import com.subgraph.orchid.DirectoryDownloader; +import com.subgraph.orchid.KeyCertificate; +import com.subgraph.orchid.OpenFailedException; +import com.subgraph.orchid.Router; +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.RouterMicrodescriptor; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.circuits.TorInitializationTracker; +import com.subgraph.orchid.data.HexDigest; + +public class DirectoryDownloaderImpl implements DirectoryDownloader { + private final static Logger logger = Logger.getLogger(DirectoryDownloaderImpl.class.getName()); + + private final TorConfig config; + private final TorInitializationTracker initializationTracker; + private CircuitManager circuitManager; + private boolean isStarted; + private boolean isStopped; + private DirectoryDownloadTask downloadTask; + private Thread downloadTaskThread; + + + public DirectoryDownloaderImpl(TorConfig config, TorInitializationTracker initializationTracker) { + this.config = config; + this.initializationTracker = initializationTracker; + } + + public void setCircuitManager(CircuitManager circuitManager) { + this.circuitManager = circuitManager; + } + + public synchronized void start(Directory directory) { + if(isStarted) { + logger.warning("Directory downloader already running"); + return; + } + if(circuitManager == null) { + throw new IllegalStateException("Must set CircuitManager instance with setCircuitManager() before starting."); + } + + downloadTask = new DirectoryDownloadTask(config, directory, this); + downloadTaskThread = new Thread(downloadTask); + downloadTaskThread.start(); + isStarted = true; + } + + public synchronized void stop() { + if(!isStarted || isStopped) { + return; + } + downloadTask.stop(); + downloadTaskThread.interrupt(); + } + + public RouterDescriptor downloadBridgeDescriptor(Router bridge) throws DirectoryRequestFailedException { + final DirectoryDocumentRequestor requestor = new DirectoryDocumentRequestor(openBridgeCircuit(bridge)); + return requestor.downloadBridgeDescriptor(bridge); + } + + + public ConsensusDocument downloadCurrentConsensus(boolean useMicrodescriptors) throws DirectoryRequestFailedException { + return downloadCurrentConsensus(useMicrodescriptors, openCircuit()); + } + + public ConsensusDocument downloadCurrentConsensus(boolean useMicrodescriptors, DirectoryCircuit circuit) throws DirectoryRequestFailedException { + final DirectoryDocumentRequestor requestor = new DirectoryDocumentRequestor(circuit, initializationTracker); + return requestor.downloadCurrentConsensus(useMicrodescriptors); + } + + public List downloadKeyCertificates(Set required) throws DirectoryRequestFailedException { + return downloadKeyCertificates(required, openCircuit()); + } + + public List downloadKeyCertificates(Set required, DirectoryCircuit circuit) 
throws DirectoryRequestFailedException { + final DirectoryDocumentRequestor requestor = new DirectoryDocumentRequestor(circuit, initializationTracker); + return requestor.downloadKeyCertificates(required); + } + + public List downloadRouterDescriptors(Set fingerprints) throws DirectoryRequestFailedException { + return downloadRouterDescriptors(fingerprints, openCircuit()); + } + + public List downloadRouterDescriptors(Set fingerprints, DirectoryCircuit circuit) throws DirectoryRequestFailedException { + final DirectoryDocumentRequestor requestor = new DirectoryDocumentRequestor(circuit, initializationTracker); + final List ds = requestor.downloadRouterDescriptors(fingerprints); + return removeUnrequestedDescriptors(fingerprints, ds); + } + + public List downloadRouterMicrodescriptors(Set fingerprints) throws DirectoryRequestFailedException { + return downloadRouterMicrodescriptors(fingerprints, openCircuit()); + } + + public List downloadRouterMicrodescriptors(Set fingerprints, DirectoryCircuit circuit) throws DirectoryRequestFailedException { + final DirectoryDocumentRequestor requestor = new DirectoryDocumentRequestor(circuit, initializationTracker); + final List ds = requestor.downloadRouterMicrodescriptors(fingerprints); + return removeUnrequestedDescriptors(fingerprints, ds); + } + + private List removeUnrequestedDescriptors(Set requested, List received) { + final List result = new ArrayList(); + int unrequestedCount = 0; + for(T d: received) { + if(requested.contains(d.getDescriptorDigest())) { + result.add(d); + } else { + unrequestedCount += 1; + } + } + if(unrequestedCount > 0) { + logger.warning("Discarding "+ unrequestedCount + " received descriptor(s) with fingerprints that did not match requested descriptors"); + } + return result; + } + + private DirectoryCircuit openCircuit() throws DirectoryRequestFailedException { + try { + return circuitManager.openDirectoryCircuit(); + } catch (OpenFailedException e) { + throw new DirectoryRequestFailedException("Failed to open directory circuit", e); + } + } + + private DirectoryCircuit openBridgeCircuit(Router bridge) throws DirectoryRequestFailedException { + try { + return circuitManager.openDirectoryCircuitTo(Arrays.asList(bridge)); + } catch (OpenFailedException e) { + throw new DirectoryRequestFailedException("Failed to open directory circuit to bridge "+ bridge, e); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryRequestFailedException.java b/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryRequestFailedException.java new file mode 100644 index 00000000..c41df470 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/DirectoryRequestFailedException.java @@ -0,0 +1,15 @@ +package com.subgraph.orchid.directory.downloader; + +public class DirectoryRequestFailedException extends Exception { + + private static final long serialVersionUID = 1L; + + public DirectoryRequestFailedException(String message) { + super(message); + } + + public DirectoryRequestFailedException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/DocumentFetcher.java b/orchid/src/com/subgraph/orchid/directory/downloader/DocumentFetcher.java new file mode 100644 index 00000000..5cf73baa --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/DocumentFetcher.java @@ -0,0 +1,52 @@ +package com.subgraph.orchid.directory.downloader; + +import java.io.IOException; +import java.nio.ByteBuffer; +import 
java.util.Collections; +import java.util.List; + +import com.subgraph.orchid.directory.DocumentParserFactoryImpl; +import com.subgraph.orchid.directory.parsing.BasicDocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentParser; +import com.subgraph.orchid.directory.parsing.DocumentParserFactory; + +public abstract class DocumentFetcher { + protected final static DocumentParserFactory PARSER_FACTORY = new DocumentParserFactoryImpl(); + + + abstract String getRequestPath(); + abstract DocumentParser createParser(ByteBuffer response); + + public List requestDocuments(HttpConnection httpConnection) throws IOException, DirectoryRequestFailedException { + final ByteBuffer body = makeRequest(httpConnection); + if(body.hasRemaining()) { + return processResponse(body); + }else { + return Collections.emptyList(); + } + } + + private ByteBuffer makeRequest(HttpConnection httpConnection) throws IOException, DirectoryRequestFailedException { + + httpConnection.sendGetRequest(getRequestPath()); + httpConnection.readResponse(); + if(httpConnection.getStatusCode() == 200) { + return httpConnection.getMessageBody(); + } + + throw new DirectoryRequestFailedException("Request "+ getRequestPath() +" to directory "+ + httpConnection.getHost() +" returned error code: "+ + httpConnection.getStatusCode() + " "+ httpConnection.getStatusMessage()); + + } + + private List processResponse(ByteBuffer response) throws DirectoryRequestFailedException { + final DocumentParser parser = createParser(response); + final BasicDocumentParsingResult result = new BasicDocumentParsingResult(); + final boolean success = parser.parse(result); + if(success) { + return result.getParsedDocuments(); + } + throw new DirectoryRequestFailedException("Failed to parse response from directory: "+ result.getMessage()); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/HttpConnection.java b/orchid/src/com/subgraph/orchid/directory/downloader/HttpConnection.java new file mode 100644 index 00000000..44d778d3 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/HttpConnection.java @@ -0,0 +1,247 @@ +package com.subgraph.orchid.directory.downloader; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.zip.DataFormatException; +import java.util.zip.Inflater; + +import com.subgraph.orchid.Router; +import com.subgraph.orchid.Stream; + +public class HttpConnection { + private final static Charset CHARSET = Charset.forName("ISO-8859-1"); + + private final static String HTTP_RESPONSE_REGEX = "HTTP/1\\.(\\d) (\\d+) (.*)"; + private final static String CONTENT_LENGTH_HEADER = "Content-Length"; + private final static String CONTENT_ENCODING_HEADER = "Content-Encoding"; + private final static String COMPRESSION_SUFFIX = ".z"; + private final String hostname; + private final Stream stream; + private final InputStream input; + private final OutputStream output; + private final Map headers; + private final boolean useCompression; + private int responseCode; + private boolean bodyCompressed; + private String responseMessage; + private ByteBuffer messageBody; + + public HttpConnection(Stream stream) { + this(stream, true); + } + + public HttpConnection(Stream stream, boolean useCompression) { + this.hostname = 
getHostnameFromStream(stream);
+        this.stream = stream;
+        this.headers = new HashMap<String, String>();
+        this.input = stream.getInputStream();
+        this.output = stream.getOutputStream();
+        this.useCompression = useCompression;
+    }
+
+    private static String getHostnameFromStream(Stream stream) {
+        final StringBuilder sb = new StringBuilder();
+        final Router r = stream.getCircuit().getFinalCircuitNode().getRouter();
+        if(r == null) {
+            return null;
+        }
+        sb.append(r.getAddress().toString());
+        if(r.getOnionPort() != 80) {
+            sb.append(":");
+            sb.append(r.getOnionPort());
+        }
+        return sb.toString();
+    }
+
+    public void sendGetRequest(String request) throws IOException {
+        final StringBuilder sb = new StringBuilder();
+        sb.append("GET ");
+        sb.append(request);
+        if(useCompression && !request.endsWith(COMPRESSION_SUFFIX)) {
+            sb.append(COMPRESSION_SUFFIX);
+        }
+        sb.append(" HTTP/1.0\r\n");
+        if(hostname != null) {
+            sb.append("Host: "+ hostname +"\r\n");
+        }
+        sb.append("\r\n");
+
+        final String requestLine = sb.toString();
+        output.write(requestLine.getBytes(CHARSET));
+        output.flush();
+    }
+
+    public String getHost() {
+        // Return the hostname when one is known; fall back to a placeholder otherwise.
+        if(hostname != null) {
+            return hostname;
+        } else {
+            return "(none)";
+        }
+    }
+
+    public void readResponse() throws IOException, DirectoryRequestFailedException {
+        readStatusLine();
+        readHeaders();
+        readBody();
+    }
+
+    public int getStatusCode() {
+        return responseCode;
+    }
+
+    public String getStatusMessage() {
+        return responseMessage;
+    }
+
+    public ByteBuffer getMessageBody() {
+        return messageBody;
+    }
+
+    public void close() {
+        if(stream == null) {
+            return;
+        }
+        stream.close();
+    }
+
+    private void readStatusLine() throws IOException, DirectoryRequestFailedException {
+        final String line = nextResponseLine();
+        final Pattern p = Pattern.compile(HTTP_RESPONSE_REGEX);
+        final Matcher m = p.matcher(line);
+        if(!m.find() || m.groupCount() != 3)
+            throw new DirectoryRequestFailedException("Error parsing HTTP response line: "+ line);
+
+        try {
+            int n1 = Integer.parseInt(m.group(1));
+            int n2 = Integer.parseInt(m.group(2));
+            if( (n1 != 0 && n1 != 1) ||
+                (n2 < 100 || n2 >= 600))
+                throw new DirectoryRequestFailedException("Failed to parse header: "+ line);
+            responseCode = n2;
+            responseMessage = m.group(3);
+        } catch(NumberFormatException e) {
+            throw new DirectoryRequestFailedException("Failed to parse header: "+ line);
+        }
+    }
+
+    private void readHeaders() throws IOException, DirectoryRequestFailedException {
+        headers.clear();
+        while(true) {
+            final String line = nextResponseLine();
+            if(line.length() == 0)
+                return;
+            final String[] args = line.split(": ", 2);
+            if(args.length != 2)
+                throw new DirectoryRequestFailedException("Failed to parse HTTP header: "+ line);
+            headers.put(args[0], args[1]);
+        }
+    }
+
+    private String nextResponseLine() throws IOException, DirectoryRequestFailedException {
+        final String line = readInputLine();
+        if(line == null) {
+            throw new DirectoryRequestFailedException("Unexpected EOF reading HTTP response");
+        }
+        return line;
+    }
+
+    private void readBody() throws IOException, DirectoryRequestFailedException {
+        processContentEncodingHeader();
+
+        if(headers.containsKey(CONTENT_LENGTH_HEADER)) {
+            readBodyFromContentLength();
+        } else {
+            readBodyUntilEOF();
+        }
+    }
+
+    private void processContentEncodingHeader() throws DirectoryRequestFailedException {
+        final String encoding = headers.get(CONTENT_ENCODING_HEADER);
+        if(encoding == null || encoding.equals("identity"))
+            bodyCompressed = false;
+        else if(encoding.equals("deflate") ||
encoding.equals("x-deflate")) + bodyCompressed = true; + else + throw new DirectoryRequestFailedException("Unrecognized content encoding: "+ encoding); + } + + private void readBodyFromContentLength() throws IOException { + int bodyLength = Integer.parseInt(headers.get(CONTENT_LENGTH_HEADER)); + byte[] bodyBuffer = new byte[bodyLength]; + readAll(bodyBuffer); + messageBody = bytesToBody(bodyBuffer); + } + + private void readBodyUntilEOF() throws IOException { + final byte[] bodyBuffer = readToEOF(); + messageBody = bytesToBody(bodyBuffer); + } + + private ByteBuffer bytesToBody(byte[] bs) throws IOException { + if(bodyCompressed) { + return ByteBuffer.wrap(decompressBuffer(bs)); + } else { + return ByteBuffer.wrap(bs); + } + } + + private byte[] decompressBuffer(byte[] buffer) throws IOException { + final ByteArrayOutputStream output = new ByteArrayOutputStream(); + final Inflater decompressor = new Inflater(); + final byte[] decompressBuffer = new byte[4096]; + decompressor.setInput(buffer); + int n; + try { + while((n = decompressor.inflate(decompressBuffer)) != 0) { + output.write(decompressBuffer, 0, n); + } + return output.toByteArray(); + } catch (DataFormatException e) { + throw new IOException("Error decompressing http body: "+ e); + } + } + + private byte[] readToEOF() throws IOException { + final ByteArrayOutputStream output = new ByteArrayOutputStream(); + final byte[] buffer = new byte[2048]; + int n; + while( (n = input.read(buffer, 0, buffer.length)) != -1) { + output.write(buffer, 0, n); + } + return output.toByteArray(); + } + + private void readAll(byte[] buffer) throws IOException { + int offset = 0; + int remaining = buffer.length; + while(remaining > 0) { + int n = input.read(buffer, offset, remaining); + if(n == -1) { + throw new IOException("Unexpected early EOF reading HTTP body"); + } + offset += n; + remaining -= n; + } + } + + private String readInputLine() throws IOException { + final StringBuilder sb = new StringBuilder(); + int c; + while((c = input.read()) != -1) { + if(c == '\n') { + return sb.toString(); + } else if(c != '\r') { + sb.append((char) c); + } + } + return (sb.length() == 0) ? 
(null) : (sb.toString()); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/MicrodescriptorFetcher.java b/orchid/src/com/subgraph/orchid/directory/downloader/MicrodescriptorFetcher.java new file mode 100644 index 00000000..ab407cf9 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/MicrodescriptorFetcher.java @@ -0,0 +1,44 @@ +package com.subgraph.orchid.directory.downloader; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import com.subgraph.orchid.RouterMicrodescriptor; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.directory.parsing.DocumentParser; + +public class MicrodescriptorFetcher extends DocumentFetcher{ + + private final List fingerprints; + + public MicrodescriptorFetcher(Collection fingerprints) { + this.fingerprints = new ArrayList(fingerprints); + } + + @Override + String getRequestPath() { + return "/tor/micro/d/"+ fingerprintsToRequestString(); + } + + private String fingerprintsToRequestString() { + final StringBuilder sb = new StringBuilder(); + for(HexDigest fp: fingerprints) { + appendFingerprint(sb, fp); + } + return sb.toString(); + } + + private void appendFingerprint(StringBuilder sb, HexDigest fp) { + if(sb.length() > 0) { + sb.append("-"); + } + sb.append(fp.toBase64(true)); + } + + @Override + DocumentParser createParser(ByteBuffer response) { + return PARSER_FACTORY.createRouterMicrodescriptorParser(response); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/downloader/RouterDescriptorFetcher.java b/orchid/src/com/subgraph/orchid/directory/downloader/RouterDescriptorFetcher.java new file mode 100644 index 00000000..4b03e88c --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/downloader/RouterDescriptorFetcher.java @@ -0,0 +1,43 @@ +package com.subgraph.orchid.directory.downloader; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.directory.parsing.DocumentParser; + +public class RouterDescriptorFetcher extends DocumentFetcher{ + + private final List fingerprints; + + public RouterDescriptorFetcher(Collection fingerprints) { + this.fingerprints = new ArrayList(fingerprints); + } + + @Override + String getRequestPath() { + return "/tor/server/d/"+ fingerprintsToRequestString(); + } + + private String fingerprintsToRequestString() { + final StringBuilder sb = new StringBuilder(); + for(HexDigest fp: fingerprints) { + appendFingerprint(sb, fp); + } + return sb.toString(); + } + private void appendFingerprint(StringBuilder sb, HexDigest fp) { + if(sb.length() > 0) { + sb.append("+"); + } + sb.append(fp.toString()); + } + + @Override + DocumentParser createParser(ByteBuffer response) { + return PARSER_FACTORY.createRouterDescriptorParser(response, true); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/parsing/BasicDocumentParsingResult.java b/orchid/src/com/subgraph/orchid/directory/parsing/BasicDocumentParsingResult.java new file mode 100644 index 00000000..e8a997dc --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/parsing/BasicDocumentParsingResult.java @@ -0,0 +1,70 @@ +package com.subgraph.orchid.directory.parsing; + +import java.util.ArrayList; +import java.util.List; + +public class BasicDocumentParsingResult implements DocumentParsingResultHandler, DocumentParsingResult { + + private final List 
documents; + private T invalidDocument; + private boolean isOkay; + private boolean isInvalid; + private boolean isError; + private String message; + + public BasicDocumentParsingResult() { + documents = new ArrayList(); + isOkay = true; + isInvalid = false; + isError = false; + message = ""; + } + + public T getDocument() { + if(documents.size() != 1) { + throw new IllegalStateException(); + } + return documents.get(0); + } + + public List getParsedDocuments() { + return new ArrayList(documents); + } + + public boolean isOkay() { + return isOkay; + } + + public boolean isInvalid() { + return isInvalid; + } + + public T getInvalidDocument() { + return invalidDocument; + } + + public boolean isError() { + return isError; + } + + public String getMessage() { + return message; + } + + public void documentParsed(T document) { + documents.add(document); + } + + public void documentInvalid(T document, String message) { + isOkay = false; + isInvalid = true; + invalidDocument = document; + this.message = message; + } + + public void parsingError(String message) { + isOkay = false; + isError = true; + this.message = message; + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/parsing/DocumentFieldParser.java b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentFieldParser.java new file mode 100644 index 00000000..a67fd94d --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentFieldParser.java @@ -0,0 +1,341 @@ +package com.subgraph.orchid.directory.parsing; + +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.crypto.TorSignature; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.Timestamp; + +/** + * This helper class is used by document parsing classes to extract individual + * fields from a directory document. The DocumentFieldParser also manages the + * InputStream which is the source of the document to parse. Parsing classes + * are implemented by creating an instance of the DocumentParsingHandler interface. + * + */ +public interface DocumentFieldParser { + + /** + * Run the document parser. The {@link #setHandler(DocumentParsingHandler)} method must be + * called before calling this method to set a DocumentParsingHandler for processing + * this document. + * + * @throws TorParsingException If a parsing error occurs while processing the document. + */ + void processDocument(); + + /** + * Returns the number of unprocessed argument items on the current keyword line. + * + * @return The number of remaining arguments. + */ + int argumentsRemaining(); + + /** + * Extract the next argument item and return it as a String + * + * @return The next argument as a String + * @throws TorParsingException If no arguments are remaining on the current keyword line. + */ + String parseString(); + + /** + * Take all remaining arguments on the current keyword line and return them as a single space + * delimited String. If no arguments are remaining, then an empty String is returned. + * + * @return The remaining arguments on the current keyword line concatenated together. + */ + String parseConcatenatedString(); + + /** + * Extract the next argument and interpret it as an integer boolean value. The legal values + * are '1' for true or '0' for false. + * @return Return the next argument interpreted as a boolean value. 
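 *
 * <p>Editorial sketch (hypothetical usage, not part of the patch) of the
 * calling pattern described in the class comment above: a parsing class
 * installs a {@link DocumentParsingHandler} and then runs the parser,
 * pulling typed fields off each keyword line as it is reported.
 * <pre>{@code
 * fieldParser.setHandler(new DocumentParsingHandler() {
 *     public void parseKeywordLine() {
 *         final String keyword = fieldParser.getCurrentKeyword();
 *         while(fieldParser.argumentsRemaining() > 0) {
 *             final String argument = fieldParser.parseString();
 *             // dispatch on keyword and argument here
 *         }
 *     }
 *     public void endOfDocument() {
 *     }
 * });
 * fieldParser.processDocument();
 * }</pre>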
+ * @throws TorParsingException If no arguments are remaining or if the current argument cannot be + * parsed as a boolean integer value. + */ + boolean parseBoolean(); + + /** + * Extract the next argument item and return it as a String if it conforms to + * a legally formed router nickname (dir-spec.txt section 2.3). + * + * A router nickname must be between 1 and 19 alphanumeric characters ([A-Za-z0-9]) to + * be considered valid. + * + * @return The next argument as a String if it is a validly formatted nickname. + * @throws TorParsingException If no arguments are remaining or if the current argument is not + * a valid router nickname. + */ + String parseNickname(); + + /** + * Extract the next argument and interpret it as an integer. + * + * @return The next argument interpreted as an integer. + * @throws TorParsingException If no arguments are remaining or if the current argument cannot + * be parsed as an integer value. + */ + int parseInteger(); + + /** + * Parse the item argument as an integer. + * + * @param item A string to parse as an integer. + * @return The integer value of the item argument. + * @throws TorParsingException If the item argument cannot be parsed as an + * integer value. + */ + int parseInteger(String item); + + /** + * Extract the next argument and interpret it as a comma separated list of integers. + * + * @return An array of integers. + * @throws TorParsingException If no arguments are remaining or if the current argument cannot + * be parsed as a list of integers. + */ + int[] parseIntegerList(); + + /** + * Extract the next argument and interpret it as a network port value. A valid port + * value is an integer between 0 and 65535 inclusive. + * + * @return The next argument interpreted as an integer port value. + * @throws TorParsingException If no arguments are remaining or if the current argument cannot + * be parsed as a legal port value. + */ + int parsePort(); + + /** + * Parse the item arguement as a network port value. A valid port value + * is an integer between 0 and 65535 inclusive. + * + * @param item A string to parse as an integer port value. + * @return The port integer value of the item argument + * @throws TorParsingException If the item argument cannot be parsed as a + * legal port value. + */ + int parsePort(String item); + + /** + * Extract the next argument and interpret it as Base64 encoded binary data. + * + * @return The bytes decoded from the Base64 encoded argument. + * @throws TorParsingException If no arguments are remaining or if the current argument cannot + * be parsed as Base64 encoded data. + */ + byte[] parseBase64Data(); + + /** + * Extract the next two arguments and parse as a timestamp field. + * + * The format of a timestamp is: YYYY-MM-DD HH:MM:SS + * + * @return The parsed Timestamp value. + * @throws TorParsingException If there are not sufficient arguments remaining or if the current + * arguments could not be parsed as a timestamp field. + */ + Timestamp parseTimestamp(); + + /** + * Extract the next argument and interpret it as a hex encoded digest string. + * + * @return The parsed HexDigest value. + * @throws TorParsingException If no arguments are remaining or if the current argument cannot + * be parsed as a hex encoded digest string. + */ + HexDigest parseHexDigest(); + + + /** + * Extract the next argument and interpret it as a base 32 encoded digest string. + * + * @return The parsed HexDigest value. 
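 *         (Editorial example, not part of the patch: a 16 character base 32
 *         string such as {@code "abcdefghij234567"} decodes to a 10 byte,
 *         80 bit digest, the form used in hidden service addresses.)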
+ * @throws TorParsingException If no arguments are remaining or if the current argument cannot + * be parsed as a base 32 encoded digest string. + */ + HexDigest parseBase32Digest(); + + /** + * Extract all remaining arguments and interpret the concatenated string as a + * hex encoded fingerprint string. + * + * @return The parsed HexDigest value extracted from the concatenated string. + * @throws TorParsingException If the concatenation of the remaining arguments could not be parsed + * as a hex encoded fingerprint string. + */ + HexDigest parseFingerprint(); + + /** + * Extract the next argument and interpret it as an IPv4 network address in dotted quad notation. + * + * @return The parsed IPv4Address value. + * @throws TorParsingException If no arguments are remaining or if the current argument cannot + * be parsed as an IPv4 network address. + */ + IPv4Address parseAddress(); + + /** + * Extract a document object following the current keyword line and interpret it as a PEM + * encoded public key. + * + * @return The extracted TorPublicKey value. + * @throws TorParsingException If no document object is found following the current keyword line, + * or if the document object cannot be parsed as a PEM encoded public key. + */ + TorPublicKey parsePublicKey(); + + byte[] parseNtorPublicKey(); + /** + * Extract a document object following the current keyword line and interpret it as a + * Base64 encoded PKCS1 signature object. + * + * @return The extracted TorSignature value. + * @throws TorParsingException If no document object is found following the current keyword line, + * or if the document object cannot be parsed as a signature. + */ + TorSignature parseSignature(); + + /** + * Extract a document object following the current keyword line and don't attempt to interpret + * it further. + * + * @return The extracted DocumentObject. + * @throws TorParsingException If no document object is found following the current keyword line. + */ + DocumentObject parseObject(); + + /** + * + * @return + */ + + NameIntegerParameter parseParameter(); + /** + * Return the keyword of the current keyword line. The keyword is the first token on the line + * unless the first token is 'opt' and 'opt' recognition is enabled. In this case, the keyword + * is the token immediately following the 'opt' token. + * + * @return The keyword token of the current keyword line. + */ + String getCurrentKeyword(); + + /** + * Return all lines from the current document as a single String. + * + * @return The raw data from the current document. + */ + String getRawDocument(); + + /** + * Empty the internal buffer which is capturing the raw data from + * the document which is being parsed. + */ + void resetRawDocument(); + + /** + * Empty the internal buffer which is capturing raw data from document being parsed and set buffer contents to initalContent. + * + * @param initialContent Initial raw document content. + */ + void resetRawDocument(String initialContent); + + /** + * Reset the document signing state. Any lines read after calling this method will be included + * in the current signature hash. + */ + void startSignedEntity(); + + /** + * Set the current keyword line as the last line included in the current signature hash. + */ + void endSignedEntity(); + + /** + * Tells the parser to not include lines that begin with token in the current + * signature calculation. + * + * @param token The parser will not include lines that begin with token in the + * current signature. 
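 *
 * <p>Editorial sketch (hypothetical usage, not part of the patch): a parser
 * that needs to check a document signature typically brackets the signed
 * region with {@link #startSignedEntity()} and {@link #endSignedEntity()}
 * and verifies the accumulated hash once the signature object has been read;
 * here {@code signingKey} is assumed to be an already-parsed TorPublicKey.
 * <pre>{@code
 * fieldParser.startSignedEntity();
 * // ... process the keyword lines covered by the signature ...
 * fieldParser.endSignedEntity();
 * TorSignature signature = fieldParser.parseSignature();
 * if(!fieldParser.verifySignedEntity(signingKey, signature)) {
 *     fieldParser.logWarn("Document signature did not verify");
 * }
 * }</pre>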
+ */ + void setSignatureIgnoreToken(String token); + + /** + * Return the internal message digest which is being used to calculate the + * signature over the current document. + * + * @return The TorMessageDigest instance or null if + * a signature is not being actively calculated. + */ + TorMessageDigest getSignatureMessageDigest(); + TorMessageDigest getSignatureMessageDigest256(); + + /** + * Verify that current signature hash matches the specified signature signed + * with the public key publicKey + * + * @param publicKey The public key used to verify the signature. + * @param signature The signature to verify against the current signature hash. + * @return trueIf the signature argument matches the hash currently + * calculated document hash. + */ + boolean verifySignedEntity(TorPublicKey publicKey, TorSignature signature); + + /** + * Test that the current keyword line has the correct number of arguments. + * + * @param keyword The name of the current keyword. (used for errors) + * @param argumentCount The expected number of arguments. + * @throws TorParsingException If the number of remaining arguments does not match + * argumentCount. + */ + void verifyExpectedArgumentCount(String keyword, int argumentCount); + + /** + * Set a flag so that 'opt' tokens will be recognized at the start of keyword lines. If + * this flag is set, a token string 'opt' at the start of a keyword line will be ignored + * and the token following the 'opt' string will be interpreted as the keyword. + */ + void setRecognizeOpt(); + + /** + * The default delimiter between keyword line tokens is any whitespace. This method may + * be called to specify a different delimiter policy. + * + * @param delimeter A regular expression which matches the desired delimiter. + */ + void setDelimiter(String delimeter); + + /** + * Set the callback handler which is used to process the document. This method must be called + * before calling {@link #processDocument()}. + * + * @param handler The callback handler. + */ + void setHandler(DocumentParsingHandler handler); + + /** + * Log the specified message at the debug logging level. + * + * @param message The message to log. + */ + void logDebug(String message); + + /** + * Log the specified message at the warn logging level. + * + * @param message The message to log. + */ + void logWarn(String message); + + /** + * Log the specified message at the error logging level. + * + * @param message The message to log. 
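 *
 * <p>Editorial note (not part of the patch): with 'opt' recognition enabled
 * via {@link #setRecognizeOpt()}, a legacy descriptor line such as
 * {@code opt hidden-service-dir} is processed as if its keyword were
 * {@code hidden-service-dir}.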
+ */ + void logError(String message); + +} diff --git a/orchid/src/com/subgraph/orchid/directory/parsing/DocumentObject.java b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentObject.java new file mode 100644 index 00000000..84d3c7e2 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentObject.java @@ -0,0 +1,43 @@ +package com.subgraph.orchid.directory.parsing; + +public class DocumentObject { + + final private String keyword; + final private String headerLine; + private String footerLine; + private String bodyContent; + final private StringBuilder stringContent; + + public DocumentObject(String keyword, String headerLine) { + this.keyword = keyword; + this.headerLine = headerLine; + this.stringContent = new StringBuilder(); + } + + public String getKeyword() { + return keyword; + } + + public void addContent(String content) { + stringContent.append(content); + stringContent.append("\n"); + } + + public void addFooterLine(String footer) { + footerLine = footer; + bodyContent = stringContent.toString(); + } + + public String getContent() { + return getContent(true); + } + + public String getContent(boolean includeHeaders) { + if(includeHeaders) { + return headerLine + "\n" + bodyContent + footerLine + "\n"; + } else { + return bodyContent; + } + } + +} diff --git a/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParser.java b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParser.java new file mode 100644 index 00000000..6cedaf53 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParser.java @@ -0,0 +1,7 @@ +package com.subgraph.orchid.directory.parsing; + + +public interface DocumentParser { + boolean parse(DocumentParsingResultHandler resultHandler); + DocumentParsingResult parse(); +} diff --git a/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParserFactory.java b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParserFactory.java new file mode 100644 index 00000000..a931bf06 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParserFactory.java @@ -0,0 +1,18 @@ +package com.subgraph.orchid.directory.parsing; + +import java.nio.ByteBuffer; + +import com.subgraph.orchid.ConsensusDocument; +import com.subgraph.orchid.KeyCertificate; +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.RouterMicrodescriptor; + +public interface DocumentParserFactory { + DocumentParser createRouterDescriptorParser(ByteBuffer buffer, boolean verifySignatures); + + DocumentParser createRouterMicrodescriptorParser(ByteBuffer buffer); + + DocumentParser createKeyCertificateParser(ByteBuffer buffer); + + DocumentParser createConsensusDocumentParser(ByteBuffer buffer); +} diff --git a/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParsingHandler.java b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParsingHandler.java new file mode 100644 index 00000000..c1c81ed1 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParsingHandler.java @@ -0,0 +1,6 @@ +package com.subgraph.orchid.directory.parsing; + +public interface DocumentParsingHandler { + void parseKeywordLine(); + void endOfDocument(); +} diff --git a/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParsingResult.java b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParsingResult.java new file mode 100644 index 00000000..f7b53d0f --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParsingResult.java @@ -0,0 +1,13 @@ +package 
com.subgraph.orchid.directory.parsing; + +import java.util.List; + +public interface DocumentParsingResult { + T getDocument(); + List getParsedDocuments(); + boolean isOkay(); + boolean isInvalid(); + T getInvalidDocument(); + boolean isError(); + String getMessage(); +} diff --git a/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParsingResultHandler.java b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParsingResultHandler.java new file mode 100644 index 00000000..b9034bc2 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/parsing/DocumentParsingResultHandler.java @@ -0,0 +1,8 @@ +package com.subgraph.orchid.directory.parsing; + + +public interface DocumentParsingResultHandler { + void documentParsed(T document); + void documentInvalid(T document, String message); + void parsingError(String message); +} diff --git a/orchid/src/com/subgraph/orchid/directory/parsing/NameIntegerParameter.java b/orchid/src/com/subgraph/orchid/directory/parsing/NameIntegerParameter.java new file mode 100644 index 00000000..55756534 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/parsing/NameIntegerParameter.java @@ -0,0 +1,24 @@ +package com.subgraph.orchid.directory.parsing; + +public class NameIntegerParameter { + + private final String name; + private final int value; + + public NameIntegerParameter(String name, int value) { + this.name = name; + this.value = value; + } + + public String getName() { + return name; + } + + public int getValue() { + return value; + } + + public String toString() { + return name +"="+ value; + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/router/MicrodescriptorCacheLocation.java b/orchid/src/com/subgraph/orchid/directory/router/MicrodescriptorCacheLocation.java new file mode 100644 index 00000000..b1a6fd49 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/router/MicrodescriptorCacheLocation.java @@ -0,0 +1,25 @@ +package com.subgraph.orchid.directory.router; + +public class MicrodescriptorCacheLocation { + + private final int offset; + private final int length; + + public MicrodescriptorCacheLocation(int offset, int length) { + this.offset = offset; + this.length = length; + } + + public int getOffset() { + return offset; + } + + public int getLength() { + return length; + } + + public String toString() { + return "MD Cache offset: "+ offset + " length: "+ length; + } + +} diff --git a/orchid/src/com/subgraph/orchid/directory/router/RouterDescriptorImpl.java b/orchid/src/com/subgraph/orchid/directory/router/RouterDescriptorImpl.java new file mode 100644 index 00000000..95869e97 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/router/RouterDescriptorImpl.java @@ -0,0 +1,317 @@ +package com.subgraph.orchid.directory.router; + +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.data.BandwidthHistory; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.Timestamp; +import com.subgraph.orchid.data.exitpolicy.ExitPolicy; + +public class RouterDescriptorImpl implements RouterDescriptor { + private String nickname; + private IPv4Address address; + private int routerPort; + private int directoryPort; + + private int averageBandwidth = -1; + private int burstBandwidth = -1; + private int observedBandwidth = -1; + + private 
String platform; + + private Timestamp published; + + private HexDigest fingerprint; + + private boolean hibernating; + + private int uptime; + + private TorPublicKey onionKey; + private byte[] ntorOnionKey; + private TorPublicKey identityKey; + private ExitPolicy exitPolicy = new ExitPolicy(); + + private String contact; + private Set familyMembers = Collections.emptySet(); + private Set linkProtocols = Collections.emptySet(); + private Set circuitProtocols = Collections.emptySet(); + + private BandwidthHistory readHistory; + private BandwidthHistory writeHistory; + + private boolean eventDNS = false; + private boolean cachesExtraInfo = false; + private boolean hiddenServiceDir = false; + private HexDigest extraInfoDigest = null; + private boolean allowSingleHopExits = false; + private boolean hasValidSignature = false; + + private HexDigest descriptorDigest; + private String rawDocumentData; + + private long lastListed; + private CacheLocation cacheLocation = CacheLocation.NOT_CACHED; + + public void setNickname(String nickname) { this.nickname = nickname; } + public void setAddress(IPv4Address address) { this.address = address; } + public void setRouterPort(int port) { this.routerPort = port; } + void setDirectoryPort(int port) { this.directoryPort = port; } + void setPlatform(String platform) { this.platform = platform; } + void setPublished(Timestamp published) { this.published = published; } + void setFingerprint(HexDigest fingerprint) { this.fingerprint = fingerprint; } + void setHibernating(boolean flag) { this.hibernating = flag; } + void setUptime(int uptime) { this.uptime = uptime; } + public void setOnionKey(TorPublicKey key) { this.onionKey = key; } + void setNtorOnionKey(byte[] key) { this.ntorOnionKey = key; } + void setIdentityKey(TorPublicKey key) { this.identityKey = key; } + void setContact(String contact) { this.contact = contact; } + void setEventDNS() { eventDNS = true; } + void setHiddenServiceDir() { hiddenServiceDir = true; } + void setExtraInfoDigest(HexDigest digest) { this.extraInfoDigest = digest; } + void setCachesExtraInfo() { cachesExtraInfo = true; } + void setAllowSingleHopExits() { allowSingleHopExits = true; } + void setReadHistory(BandwidthHistory history) { this.readHistory= history; } + void setWriteHistory(BandwidthHistory history) { this.writeHistory = history; } + void setValidSignature() { hasValidSignature = true; } + void setDescriptorHash(HexDigest digest) { descriptorDigest = digest; } + void setRawDocumentData(String rawData) { rawDocumentData = rawData; } + + void addAcceptRule(String rule) { + exitPolicy.addAcceptRule(rule); + } + + void addRejectRule(String rule) { + exitPolicy.addRejectRule(rule); + } + + void setBandwidthValues(int average, int burst, int observed) { + this.averageBandwidth = average; + this.burstBandwidth = burst; + this.observedBandwidth = observed; + } + + void addFamilyMember(String familyMember) { + if(familyMembers.isEmpty()) { + familyMembers = new HashSet(); + } + familyMembers.add(familyMember); + } + + void addCircuitProtocolVersion(int version) { + if(circuitProtocols.isEmpty()) + circuitProtocols = new HashSet(); + circuitProtocols.add(version); + } + + void addLinkProtocolVersion(int version) { + if(linkProtocols.isEmpty()) + linkProtocols = new HashSet(); + linkProtocols.add(version); + } + + public boolean isValidDocument() { + // verify required fields exist, see dirspec.txt section 2.1 + return hasValidSignature && (nickname != null) && (address != null) && + (averageBandwidth != -1) && (routerPort != 0 
|| directoryPort != 0) && + (published != null) && (onionKey != null) && (identityKey != null) && + (descriptorDigest != null); + } + + public String getNickname() { + return nickname; + } + + public IPv4Address getAddress() { + return address; + } + + public int getRouterPort() { + return routerPort; + } + + public int getDirectoryPort() { + return directoryPort; + } + + public int getAverageBandwidth() { + return averageBandwidth; + } + + public int getBurstBandwidth() { + return burstBandwidth; + } + + public int getObservedBandwidth() { + return observedBandwidth; + } + + public String getPlatform() { + return platform; + } + + public HexDigest getFingerprint() { + return fingerprint; + } + + public int getUptime() { + return uptime; + } + + public TorPublicKey getOnionKey() { + return onionKey; + } + + public byte[] getNTorOnionKey() { + return ntorOnionKey; + } + + public TorPublicKey getIdentityKey() { + return identityKey; + } + + public String getContact() { + return contact; + } + + public boolean isHibernating() { + return hibernating; + } + + public boolean cachesExtraInfo() { + return cachesExtraInfo; + } + + public boolean allowsSingleHopExits() { + return allowSingleHopExits; + } + + public Timestamp getPublishedTime() { + return published; + } + + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("Router Descriptor: (name: "); + builder.append(nickname); + builder.append(" orport="); + builder.append(routerPort); + builder.append(" dirport="); + builder.append(directoryPort); + builder.append(" address="); + builder.append(address); + builder.append(" platform="); + builder.append(platform); + builder.append(" published="); + builder.append(published.getDate()); + builder.append(")"); + return builder.toString(); + } + + public void print() { + System.out.println("nickname: "+ nickname +" IP: "+ address +" port: "+ routerPort); + System.out.println("directory port: "+ directoryPort +" platform: "+ platform); + System.out.println("Bandwidth(avg/burst/observed): "+ averageBandwidth +"/"+ burstBandwidth +"/"+ observedBandwidth); + System.out.println("Publication time: "+ published +" Uptime: "+ uptime); + if(fingerprint != null) + System.out.println("Fingerprint: "+ fingerprint); + if(contact != null) + System.out.println("Contact: "+ contact); + } + public boolean exitPolicyAccepts(IPv4Address address, int port) { + return exitPolicy.acceptsDestination(address, port); + } + + public boolean exitPolicyAccepts(int port) { + return exitPolicy.acceptsPort(port); + } + + public HexDigest getExtraInfoDigest() { + return extraInfoDigest; + } + + public boolean isHiddenServiceDirectory() { + return hiddenServiceDir; + } + + public Set getFamilyMembers() { + return familyMembers; + } + + public boolean supportsEventDNS() { + return eventDNS; + } + + public BandwidthHistory getReadHistory() { + return readHistory; + } + + public BandwidthHistory getWriteHistory() { + return writeHistory; + } + + public boolean isNewerThan(RouterDescriptor other) { + return other.getPublishedTime().isBefore(published); + } + + public HexDigest getDescriptorDigest() { + return descriptorDigest; + } + + public String getRawDocumentData() { + return rawDocumentData; + } + + public ByteBuffer getRawDocumentBytes() { + if(getRawDocumentData() == null) { + return ByteBuffer.allocate(0); + } else { + return ByteBuffer.wrap(getRawDocumentData().getBytes(Tor.getDefaultCharset())); + } + } + + public boolean equals(Object o) { + if(!(o instanceof RouterDescriptorImpl)) + 
return false; + final RouterDescriptorImpl other = (RouterDescriptorImpl) o; + if(other.getDescriptorDigest() == null || descriptorDigest == null) + return false; + + return other.getDescriptorDigest().equals(descriptorDigest); + } + + public int hashCode() { + if(descriptorDigest == null) + return 0; + return descriptorDigest.hashCode(); + } + + public ExitPolicy getExitPolicy() { + return exitPolicy; + } + + public void setLastListed(long timestamp) { + this.lastListed = timestamp; + } + + public long getLastListed() { + return lastListed; + } + public void setCacheLocation(CacheLocation location) { + this.cacheLocation = location; + } + public CacheLocation getCacheLocation() { + return cacheLocation; + } + + public int getBodyLength() { + return rawDocumentData.length(); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/router/RouterDescriptorKeyword.java b/orchid/src/com/subgraph/orchid/directory/router/RouterDescriptorKeyword.java new file mode 100644 index 00000000..50322dff --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/router/RouterDescriptorKeyword.java @@ -0,0 +1,63 @@ +package com.subgraph.orchid.directory.router; + +public enum RouterDescriptorKeyword { + /* + * See dir-spec.txt + * Section 2.1. Router descriptor format + */ + ROUTER("router", 5), + BANDWIDTH("bandwidth", 3), + PLATFORM("platform"), + PUBLISHED("published", 2), + FINGERPRINT("fingerprint", 10), + HIBERNATING("hibernating", 1), + UPTIME("uptime", 1), + ONION_KEY("onion-key", 0), + NTOR_ONION_KEY("ntor-onion-key", 1), + SIGNING_KEY("signing-key", 0), + ACCEPT("accept", 1), + REJECT("reject", 1), + ROUTER_SIGNATURE("router-signature", 0), + CONTACT("contact"), + FAMILY("family"), + READ_HISTORY("read-history"), + WRITE_HISTORY("write-history"), + EVENTDNS("eventdns", 1), + CACHES_EXTRA_INFO("caches-extra-info", 0), + EXTRA_INFO_DIGEST("extra-info-digest", 1), + HIDDEN_SERVICE_DIR("hidden-service-dir"), + PROTOCOLS("protocols"), + ALLOW_SINGLE_HOP_EXITS("allow-single-hop-exits", 0), + UNKNOWN_KEYWORD("KEYWORD NOT FOUND"); + + public final static int VARIABLE_ARGUMENT_COUNT = -1; + + private final String keyword; + private final int argumentCount; + + RouterDescriptorKeyword(String keyword) { + this(keyword, VARIABLE_ARGUMENT_COUNT); + } + + RouterDescriptorKeyword(String keyword, int argumentCount) { + this.keyword = keyword; + this.argumentCount = argumentCount; + } + + String getKeyword() { + return keyword; + } + + int getArgumentCount() { + return argumentCount; + } + + static RouterDescriptorKeyword findKeyword(String keyword) { + for(RouterDescriptorKeyword k: values()) + if(k.getKeyword().equals(keyword)) + return k; + + return UNKNOWN_KEYWORD; + } + +} diff --git a/orchid/src/com/subgraph/orchid/directory/router/RouterDescriptorParser.java b/orchid/src/com/subgraph/orchid/directory/router/RouterDescriptorParser.java new file mode 100644 index 00000000..e23b68a5 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/router/RouterDescriptorParser.java @@ -0,0 +1,223 @@ +package com.subgraph.orchid.directory.router; + +import com.subgraph.orchid.RouterDescriptor; +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.crypto.TorSignature; +import com.subgraph.orchid.data.BandwidthHistory; +import com.subgraph.orchid.data.Timestamp; +import com.subgraph.orchid.directory.parsing.BasicDocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.DocumentParser; +import 
com.subgraph.orchid.directory.parsing.DocumentParsingHandler; +import com.subgraph.orchid.directory.parsing.DocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentParsingResultHandler; + +public class RouterDescriptorParser implements DocumentParser { + private final DocumentFieldParser fieldParser; + private final boolean verifySignatures; + + private RouterDescriptorImpl currentDescriptor; + private DocumentParsingResultHandler resultHandler; + + public RouterDescriptorParser(DocumentFieldParser fieldParser, boolean verifySignatures) { + this.fieldParser = fieldParser; + this.fieldParser.setHandler(createParsingHandler()); + this.fieldParser.setRecognizeOpt(); + this.verifySignatures = verifySignatures; + } + + private DocumentParsingHandler createParsingHandler() { + return new DocumentParsingHandler() { + public void endOfDocument() { + } + public void parseKeywordLine() { + processKeywordLine(); + } + }; + } + + private void processKeywordLine() { + final RouterDescriptorKeyword keyword = RouterDescriptorKeyword.findKeyword(fieldParser.getCurrentKeyword()); + /* + * dirspec.txt (1.2) + * When interpreting a Document, software MUST ignore any KeywordLine that + * starts with a keyword it doesn't recognize; + */ + if(!keyword.equals(RouterDescriptorKeyword.UNKNOWN_KEYWORD)) + processKeyword(keyword); + } + + private void startNewDescriptor() { + fieldParser.resetRawDocument(); + fieldParser.startSignedEntity(); + currentDescriptor = new RouterDescriptorImpl(); + } + + public boolean parse(DocumentParsingResultHandler resultHandler) { + this.resultHandler = resultHandler; + startNewDescriptor(); + try { + fieldParser.processDocument(); + return true; + } catch(TorParsingException e) { + resultHandler.parsingError(e.getMessage()); + return false; + } + } + + public DocumentParsingResult parse() { + final BasicDocumentParsingResult result = new BasicDocumentParsingResult(); + parse(result); + return result; + } + + private void processKeyword(RouterDescriptorKeyword keyword) { + fieldParser.verifyExpectedArgumentCount(keyword.getKeyword(), keyword.getArgumentCount()); + + switch(keyword) { + case ROUTER: + processRouter(); + return; + case BANDWIDTH: + processBandwidth(); + break; + case PLATFORM: + currentDescriptor.setPlatform(fieldParser.parseConcatenatedString()); + break; + case PUBLISHED: + currentDescriptor.setPublished(fieldParser.parseTimestamp()); + break; + case FINGERPRINT: + currentDescriptor.setFingerprint(fieldParser.parseFingerprint()); + break; + case HIBERNATING: + currentDescriptor.setHibernating(fieldParser.parseBoolean()); + break; + case UPTIME: + currentDescriptor.setUptime(fieldParser.parseInteger()); + break; + case ONION_KEY: + currentDescriptor.setOnionKey(fieldParser.parsePublicKey()); + break; + case NTOR_ONION_KEY: + currentDescriptor.setNtorOnionKey(fieldParser.parseNtorPublicKey()); + break; + case SIGNING_KEY: + currentDescriptor.setIdentityKey(fieldParser.parsePublicKey()); + break; + case ROUTER_SIGNATURE: + processSignature(); + break; + case ACCEPT: + currentDescriptor.addAcceptRule(fieldParser.parseString()); + break; + case REJECT: + currentDescriptor.addRejectRule(fieldParser.parseString()); + break; + case CONTACT: + currentDescriptor.setContact(fieldParser.parseConcatenatedString()); + break; + case FAMILY: + while(fieldParser.argumentsRemaining() > 0) + currentDescriptor.addFamilyMember(fieldParser.parseString()); + break; + case EVENTDNS: + if(fieldParser.parseBoolean()) + currentDescriptor.setEventDNS(); + break; + case 
PROTOCOLS: + processProtocols(); + break; + case CACHES_EXTRA_INFO: + currentDescriptor.setCachesExtraInfo(); + break; + case HIDDEN_SERVICE_DIR: + currentDescriptor.setHiddenServiceDir(); + break; + case ALLOW_SINGLE_HOP_EXITS: + currentDescriptor.setAllowSingleHopExits(); + break; + case EXTRA_INFO_DIGEST: + currentDescriptor.setExtraInfoDigest(fieldParser.parseHexDigest()); + break; + case READ_HISTORY: + currentDescriptor.setReadHistory(parseHistory()); + break; + case WRITE_HISTORY: + currentDescriptor.setWriteHistory(parseHistory()); + break; + default: + break; + } + } + + private BandwidthHistory parseHistory() { + final Timestamp ts = fieldParser.parseTimestamp(); + final String nsec = fieldParser.parseString(); + fieldParser.parseString(); + final int interval = fieldParser.parseInteger(nsec.substring(1)); + final BandwidthHistory history = new BandwidthHistory(ts, interval); + if(fieldParser.argumentsRemaining() == 0) + return history; + final String[] samples = fieldParser.parseString().split(","); + for(String s: samples) + history.addSample(fieldParser.parseInteger(s)); + return history; + } + + private void processRouter() { + currentDescriptor.setNickname(fieldParser.parseNickname()); + currentDescriptor.setAddress(fieldParser.parseAddress()); + currentDescriptor.setRouterPort(fieldParser.parsePort()); + /* 2.1 SOCKSPort is deprecated and should always be 0 */ + fieldParser.parsePort(); + currentDescriptor.setDirectoryPort(fieldParser.parsePort()); + } + + private boolean verifyCurrentDescriptor(TorSignature signature) { + if(verifySignatures && !fieldParser.verifySignedEntity(currentDescriptor.getIdentityKey(), signature)) { + resultHandler.documentInvalid(currentDescriptor, "Signature failed."); + fieldParser.logWarn("Signature failed for router: " + currentDescriptor.getNickname()); + return false; + } + currentDescriptor.setValidSignature(); + if(!currentDescriptor.isValidDocument()) { + resultHandler.documentInvalid(currentDescriptor, "Router data invalid"); + fieldParser.logWarn("Router data invalid for router: " + currentDescriptor.getNickname()); + } + return currentDescriptor.isValidDocument(); + } + + private void processBandwidth() { + final int average = fieldParser.parseInteger(); + final int burst = fieldParser.parseInteger(); + final int observed = fieldParser.parseInteger(); + currentDescriptor.setBandwidthValues(average, burst, observed); + } + + private void processProtocols() { + String kw = fieldParser.parseString(); + if(!kw.equals("Link")) + throw new TorParsingException("Expected 'Link' token in protocol line got: " + kw); + while(true) { + kw = fieldParser.parseString(); + if(kw.equals("Circuit")) + break; + currentDescriptor.addLinkProtocolVersion(fieldParser.parseInteger(kw)); + } + while(fieldParser.argumentsRemaining() > 0) + currentDescriptor.addCircuitProtocolVersion(fieldParser.parseInteger()); + + } + + private void processSignature() { + fieldParser.endSignedEntity(); + currentDescriptor.setDescriptorHash(fieldParser.getSignatureMessageDigest().getHexDigest()); + final TorSignature signature = fieldParser.parseSignature(); + currentDescriptor.setRawDocumentData(fieldParser.getRawDocument()); + + if(verifyCurrentDescriptor(signature)) + resultHandler.documentParsed(currentDescriptor); + startNewDescriptor(); + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/router/RouterMicrodescriptorImpl.java b/orchid/src/com/subgraph/orchid/directory/router/RouterMicrodescriptorImpl.java new file mode 100644 index 00000000..af75e2f1 --- 
/dev/null +++ b/orchid/src/com/subgraph/orchid/directory/router/RouterMicrodescriptorImpl.java @@ -0,0 +1,157 @@ +package com.subgraph.orchid.directory.router; + +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import com.subgraph.orchid.RouterMicrodescriptor; +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.crypto.TorPublicKey; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.data.exitpolicy.ExitPorts; + +public class RouterMicrodescriptorImpl implements RouterMicrodescriptor { + + private IPv4Address address; + private int routerPort; + private TorPublicKey onionKey; + private byte[] ntorOnionKey; + private Set familyMembers = Collections.emptySet(); + private ExitPorts acceptPorts; + private ExitPorts rejectPorts; + private String rawDocumentData; + private HexDigest descriptorDigest; + private long lastListed; + private CacheLocation cacheLocation = CacheLocation.NOT_CACHED; + + public void setAddress(IPv4Address address) { + this.address = address; + } + + public void setRouterPort(int port) { + this.routerPort = port; + } + + public void setOnionKey(TorPublicKey onionKey) { + this.onionKey = onionKey; + } + + public void setNtorOnionKey(byte[] ntorOnionKey) { + this.ntorOnionKey = ntorOnionKey; + } + + public void addFamilyMember(String familyMember) { + if(familyMembers.isEmpty()) { + familyMembers = new HashSet(); + } + familyMembers.add(familyMember); + } + + public void addAcceptPorts(String portlist) { + acceptPorts = ExitPorts.createAcceptExitPorts(portlist); + } + + public void addRejectPorts(String portlist) { + rejectPorts = ExitPorts.createRejectExitPorts(portlist); + } + + public void setRawDocumentData(String rawData) { + this.rawDocumentData = rawData; + } + + public void setDescriptorDigest(HexDigest descriptorDigest) { + this.descriptorDigest = descriptorDigest; + } + + public void setLastListed(long ts) { + this.lastListed = ts; + } + + public boolean isValidDocument() { + return (descriptorDigest != null) && (onionKey != null); + } + + public String getRawDocumentData() { + return rawDocumentData; + } + + public TorPublicKey getOnionKey() { + return onionKey; + } + + public byte[] getNTorOnionKey() { + return ntorOnionKey; + } + + public IPv4Address getAddress() { + return address; + } + + public int getRouterPort() { + return routerPort; + } + + public Set getFamilyMembers() { + return familyMembers; + } + + public boolean exitPolicyAccepts(IPv4Address address, int port) { + return exitPolicyAccepts(port); + } + + public boolean exitPolicyAccepts(int port) { + if(acceptPorts == null) { + return false; + } + if(rejectPorts != null && !rejectPorts.acceptsPort(port)) { + return false; + } + return acceptPorts.acceptsPort(port); + } + + public HexDigest getDescriptorDigest() { + return descriptorDigest; + } + + public boolean equals(Object o) { + if(!(o instanceof RouterMicrodescriptorImpl)) + return false; + final RouterMicrodescriptorImpl other = (RouterMicrodescriptorImpl) o; + if(other.getDescriptorDigest() == null || descriptorDigest == null) + return false; + + return other.getDescriptorDigest().equals(descriptorDigest); + } + + public int hashCode() { + if(descriptorDigest == null) + return 0; + return descriptorDigest.hashCode(); + } + + public long getLastListed() { + return lastListed; + } + + public void setCacheLocation(CacheLocation location) { + this.cacheLocation = location; + } + + public CacheLocation 
getCacheLocation() { + return cacheLocation; + } + + public int getBodyLength() { + return rawDocumentData.length(); + } + + public ByteBuffer getRawDocumentBytes() { + if(getRawDocumentData() == null) { + return ByteBuffer.allocate(0); + } else { + return ByteBuffer.wrap(getRawDocumentData().getBytes(Tor.getDefaultCharset())); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/router/RouterMicrodescriptorKeyword.java b/orchid/src/com/subgraph/orchid/directory/router/RouterMicrodescriptorKeyword.java new file mode 100644 index 00000000..7f4cf6d5 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/router/RouterMicrodescriptorKeyword.java @@ -0,0 +1,41 @@ +package com.subgraph.orchid.directory.router; + +public enum RouterMicrodescriptorKeyword { + ONION_KEY("onion-key", 0), + NTOR_ONION_KEY("ntor-onion-key", 1), + A("a", 1), + FAMILY("family"), + P("p", 2), + UNKNOWN_KEYWORD("KEYWORD NOT FOUNE"); + + public final static int VARIABLE_ARGUMENT_COUNT = -1; + + private final String keyword; + private final int argumentCount; + + RouterMicrodescriptorKeyword(String keyword) { + this(keyword, VARIABLE_ARGUMENT_COUNT); + } + + RouterMicrodescriptorKeyword(String keyword, int argumentCount) { + this.keyword = keyword; + this.argumentCount = argumentCount; + } + + String getKeyword() { + return keyword; + } + + int getArgumentCount() { + return argumentCount; + } + + static RouterMicrodescriptorKeyword findKeyword(String keyword) { + for(RouterMicrodescriptorKeyword k: values()) { + if(k.getKeyword().equals(keyword)) { + return k; + } + } + return UNKNOWN_KEYWORD; + } +} diff --git a/orchid/src/com/subgraph/orchid/directory/router/RouterMicrodescriptorParser.java b/orchid/src/com/subgraph/orchid/directory/router/RouterMicrodescriptorParser.java new file mode 100644 index 00000000..418ccf3d --- /dev/null +++ b/orchid/src/com/subgraph/orchid/directory/router/RouterMicrodescriptorParser.java @@ -0,0 +1,130 @@ +package com.subgraph.orchid.directory.router; + +import com.subgraph.orchid.RouterMicrodescriptor; +import com.subgraph.orchid.TorParsingException; +import com.subgraph.orchid.crypto.TorMessageDigest; +import com.subgraph.orchid.directory.parsing.BasicDocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.DocumentParser; +import com.subgraph.orchid.directory.parsing.DocumentParsingHandler; +import com.subgraph.orchid.directory.parsing.DocumentParsingResult; +import com.subgraph.orchid.directory.parsing.DocumentParsingResultHandler; + +public class RouterMicrodescriptorParser implements DocumentParser{ + + + private final DocumentFieldParser fieldParser; + + private RouterMicrodescriptorImpl currentDescriptor; + private DocumentParsingResultHandler resultHandler; + + public RouterMicrodescriptorParser(DocumentFieldParser fieldParser) { + this.fieldParser = fieldParser; + this.fieldParser.setHandler(createParsingHandler()); + } + + private DocumentParsingHandler createParsingHandler() { + return new DocumentParsingHandler() { + public void parseKeywordLine() { + processKeywordLine(); + } + public void endOfDocument() { + if(currentDescriptor != null) { + finalizeDescriptor(currentDescriptor); + } + } + }; + } + + public boolean parse(DocumentParsingResultHandler resultHandler) { + this.resultHandler = resultHandler; + try { + fieldParser.processDocument(); + return true; + } catch(TorParsingException e) { + resultHandler.parsingError(e.getMessage()); + return false; + } + } + + public 
DocumentParsingResult parse() { + final BasicDocumentParsingResult result = new BasicDocumentParsingResult(); + parse(result); + return result; + } + + private void processKeywordLine() { + final RouterMicrodescriptorKeyword keyword = RouterMicrodescriptorKeyword.findKeyword(fieldParser.getCurrentKeyword()); + if(!keyword.equals(RouterMicrodescriptorKeyword.UNKNOWN_KEYWORD)) { + processKeyword(keyword); + } + if(currentDescriptor != null) { + currentDescriptor.setRawDocumentData(fieldParser.getRawDocument()); + } + + } + + + private void processKeyword(RouterMicrodescriptorKeyword keyword) { + fieldParser.verifyExpectedArgumentCount(keyword.getKeyword(), keyword.getArgumentCount()); + switch(keyword) { + case ONION_KEY: + processOnionKeyLine(); + break; + + case NTOR_ONION_KEY: + if(currentDescriptor != null) { + currentDescriptor.setNtorOnionKey(fieldParser.parseNtorPublicKey()); + } + break; + + case FAMILY: + while(fieldParser.argumentsRemaining() > 0 && currentDescriptor != null) { + currentDescriptor.addFamilyMember(fieldParser.parseString()); + } + break; + + case P: + processP(); + break; + + case A: + default: + break; + } + } + + private void processOnionKeyLine() { + if(currentDescriptor != null) { + finalizeDescriptor(currentDescriptor); + } + currentDescriptor = new RouterMicrodescriptorImpl(); + fieldParser.resetRawDocument(RouterMicrodescriptorKeyword.ONION_KEY.getKeyword() + "\n"); + currentDescriptor.setOnionKey(fieldParser.parsePublicKey()); + } + + private void finalizeDescriptor(RouterMicrodescriptorImpl descriptor) { + final TorMessageDigest digest = new TorMessageDigest(true); + digest.update(descriptor.getRawDocumentData()); + descriptor.setDescriptorDigest(digest.getHexDigest()); + if(!descriptor.isValidDocument()) { + resultHandler.documentInvalid(descriptor, "Microdescriptor data invalid"); + } else { + resultHandler.documentParsed(descriptor); + } + } + + private void processP() { + if(currentDescriptor == null) { + return; + } + final String ruleType = fieldParser.parseString(); + if("accept".equals(ruleType)) { + currentDescriptor.addAcceptPorts(fieldParser.parseString()); + } else if("reject".equals(ruleType)) { + currentDescriptor.addRejectPorts(fieldParser.parseString()); + } else { + fieldParser.logWarn("Unexpected P field in microdescriptor: "+ ruleType); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/encoders/Base64.java b/orchid/src/com/subgraph/orchid/encoders/Base64.java new file mode 100644 index 00000000..e1aaa924 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/encoders/Base64.java @@ -0,0 +1,121 @@ +package com.subgraph.orchid.encoders; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; + +public class Base64 +{ + private static final Encoder encoder = new Base64Encoder(); + + /** + * encode the input data producing a base 64 encoded byte array. + * + * @return a byte array containing the base 64 encoded data. + */ + public static byte[] encode( + byte[] data) + { + int len = (data.length + 2) / 3 * 4; + ByteArrayOutputStream bOut = new ByteArrayOutputStream(len); + + try + { + encoder.encode(data, 0, data.length, bOut); + } + catch (Exception e) + { + throw new EncoderException("exception encoding base64 string: " + e.getMessage(), e); + } + + return bOut.toByteArray(); + } + + /** + * Encode the byte data to base 64 writing it to the given output stream. + * + * @return the number of bytes produced. 
+ */ + public static int encode( + byte[] data, + OutputStream out) + throws IOException + { + return encoder.encode(data, 0, data.length, out); + } + + /** + * Encode the byte data to base 64 writing it to the given output stream. + * + * @return the number of bytes produced. + */ + public static int encode( + byte[] data, + int off, + int length, + OutputStream out) + throws IOException + { + return encoder.encode(data, off, length, out); + } + + /** + * decode the base 64 encoded input data. It is assumed the input data is valid. + * + * @return a byte array representing the decoded data. + */ + public static byte[] decode( + byte[] data) + { + int len = data.length / 4 * 3; + ByteArrayOutputStream bOut = new ByteArrayOutputStream(len); + + try + { + encoder.decode(data, 0, data.length, bOut); + } + catch (Exception e) + { + throw new DecoderException("unable to decode base64 data: " + e.getMessage(), e); + } + + return bOut.toByteArray(); + } + + /** + * decode the base 64 encoded String data - whitespace will be ignored. + * + * @return a byte array representing the decoded data. + */ + public static byte[] decode( + String data) + { + int len = data.length() / 4 * 3; + ByteArrayOutputStream bOut = new ByteArrayOutputStream(len); + + try + { + encoder.decode(data, bOut); + } + catch (Exception e) + { + throw new DecoderException("unable to decode base64 string: " + e.getMessage(), e); + } + + return bOut.toByteArray(); + } + + /** + * decode the base 64 encoded String data writing it to the given output stream, + * whitespace characters will be ignored. + * + * @return the number of bytes produced. + */ + public static int decode( + String data, + OutputStream out) + throws IOException + { + return encoder.decode(data, out); + } +} diff --git a/orchid/src/com/subgraph/orchid/encoders/Base64Encoder.java b/orchid/src/com/subgraph/orchid/encoders/Base64Encoder.java new file mode 100644 index 00000000..d27eb6a5 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/encoders/Base64Encoder.java @@ -0,0 +1,328 @@ +package com.subgraph.orchid.encoders; + +import java.io.IOException; +import java.io.OutputStream; + +public class Base64Encoder + implements Encoder +{ + protected final byte[] encodingTable = + { + (byte)'A', (byte)'B', (byte)'C', (byte)'D', (byte)'E', (byte)'F', (byte)'G', + (byte)'H', (byte)'I', (byte)'J', (byte)'K', (byte)'L', (byte)'M', (byte)'N', + (byte)'O', (byte)'P', (byte)'Q', (byte)'R', (byte)'S', (byte)'T', (byte)'U', + (byte)'V', (byte)'W', (byte)'X', (byte)'Y', (byte)'Z', + (byte)'a', (byte)'b', (byte)'c', (byte)'d', (byte)'e', (byte)'f', (byte)'g', + (byte)'h', (byte)'i', (byte)'j', (byte)'k', (byte)'l', (byte)'m', (byte)'n', + (byte)'o', (byte)'p', (byte)'q', (byte)'r', (byte)'s', (byte)'t', (byte)'u', + (byte)'v', + (byte)'w', (byte)'x', (byte)'y', (byte)'z', + (byte)'0', (byte)'1', (byte)'2', (byte)'3', (byte)'4', (byte)'5', (byte)'6', + (byte)'7', (byte)'8', (byte)'9', + (byte)'+', (byte)'/' + }; + + protected byte padding = (byte)'='; + + /* + * set up the decoding table. + */ + protected final byte[] decodingTable = new byte[128]; + + protected void initialiseDecodingTable() + { + for (int i = 0; i < decodingTable.length; i++) + { + decodingTable[i] = (byte)0xff; + } + + for (int i = 0; i < encodingTable.length; i++) + { + decodingTable[encodingTable[i]] = (byte)i; + } + } + + public Base64Encoder() + { + initialiseDecodingTable(); + } + + /** + * encode the input data producing a base 64 output stream. + * + * @return the number of bytes produced. 
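Illustrative usage of the Base64 helper above (a sketch, not part of the patch; the sample bytes are arbitrary). The encoder that follows packs each group of three input bytes into four 6-bit indices into the table, so an n-byte input produces (n + 2) / 3 * 4 output characters, padded with '=':

import com.subgraph.orchid.encoders.Base64;

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class Base64RoundTripExample {
    public static void main(String[] args) {
        byte[] raw = {0x01, 0x02, 0x03, 0x04};      // 4 input bytes
        byte[] encoded = Base64.encode(raw);        // (4 + 2) / 3 * 4 = 8 bytes: "AQIDBA=="
        byte[] decoded = Base64.decode(encoded);    // back to the original 4 bytes
        System.out.println(new String(encoded, StandardCharsets.US_ASCII)
                + " -> " + Arrays.toString(decoded));
    }
}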
+ */ + public int encode( + byte[] data, + int off, + int length, + OutputStream out) + throws IOException + { + int modulus = length % 3; + int dataLength = (length - modulus); + int a1, a2, a3; + + for (int i = off; i < off + dataLength; i += 3) + { + a1 = data[i] & 0xff; + a2 = data[i + 1] & 0xff; + a3 = data[i + 2] & 0xff; + + out.write(encodingTable[(a1 >>> 2) & 0x3f]); + out.write(encodingTable[((a1 << 4) | (a2 >>> 4)) & 0x3f]); + out.write(encodingTable[((a2 << 2) | (a3 >>> 6)) & 0x3f]); + out.write(encodingTable[a3 & 0x3f]); + } + + /* + * process the tail end. + */ + int b1, b2, b3; + int d1, d2; + + switch (modulus) + { + case 0: /* nothing left to do */ + break; + case 1: + d1 = data[off + dataLength] & 0xff; + b1 = (d1 >>> 2) & 0x3f; + b2 = (d1 << 4) & 0x3f; + + out.write(encodingTable[b1]); + out.write(encodingTable[b2]); + out.write(padding); + out.write(padding); + break; + case 2: + d1 = data[off + dataLength] & 0xff; + d2 = data[off + dataLength + 1] & 0xff; + + b1 = (d1 >>> 2) & 0x3f; + b2 = ((d1 << 4) | (d2 >>> 4)) & 0x3f; + b3 = (d2 << 2) & 0x3f; + + out.write(encodingTable[b1]); + out.write(encodingTable[b2]); + out.write(encodingTable[b3]); + out.write(padding); + break; + } + + return (dataLength / 3) * 4 + ((modulus == 0) ? 0 : 4); + } + + private boolean ignore( + char c) + { + return (c == '\n' || c =='\r' || c == '\t' || c == ' '); + } + + /** + * decode the base 64 encoded byte data writing it to the given output stream, + * whitespace characters will be ignored. + * + * @return the number of bytes produced. + */ + public int decode( + byte[] data, + int off, + int length, + OutputStream out) + throws IOException + { + byte b1, b2, b3, b4; + int outLen = 0; + + int end = off + length; + + while (end > off) + { + if (!ignore((char)data[end - 1])) + { + break; + } + + end--; + } + + int i = off; + int finish = end - 4; + + i = nextI(data, i, finish); + + while (i < finish) + { + b1 = decodingTable[data[i++]]; + + i = nextI(data, i, finish); + + b2 = decodingTable[data[i++]]; + + i = nextI(data, i, finish); + + b3 = decodingTable[data[i++]]; + + i = nextI(data, i, finish); + + b4 = decodingTable[data[i++]]; + + if ((b1 | b2 | b3 | b4) < 0) + { + throw new IOException("invalid characters encountered in base64 data"); + } + + out.write((b1 << 2) | (b2 >> 4)); + out.write((b2 << 4) | (b3 >> 2)); + out.write((b3 << 6) | b4); + + outLen += 3; + + i = nextI(data, i, finish); + } + + outLen += decodeLastBlock(out, (char)data[end - 4], (char)data[end - 3], (char)data[end - 2], (char)data[end - 1]); + + return outLen; + } + + private int nextI(byte[] data, int i, int finish) + { + while ((i < finish) && ignore((char)data[i])) + { + i++; + } + return i; + } + + /** + * decode the base 64 encoded String data writing it to the given output stream, + * whitespace characters will be ignored. + * + * @return the number of bytes produced. 
+ */ + public int decode( + String data, + OutputStream out) + throws IOException + { + byte b1, b2, b3, b4; + int length = 0; + + int end = data.length(); + + while (end > 0) + { + if (!ignore(data.charAt(end - 1))) + { + break; + } + + end--; + } + + int i = 0; + int finish = end - 4; + + i = nextI(data, i, finish); + + while (i < finish) + { + b1 = decodingTable[data.charAt(i++)]; + + i = nextI(data, i, finish); + + b2 = decodingTable[data.charAt(i++)]; + + i = nextI(data, i, finish); + + b3 = decodingTable[data.charAt(i++)]; + + i = nextI(data, i, finish); + + b4 = decodingTable[data.charAt(i++)]; + + if ((b1 | b2 | b3 | b4) < 0) + { + throw new IOException("invalid characters encountered in base64 data"); + } + + out.write((b1 << 2) | (b2 >> 4)); + out.write((b2 << 4) | (b3 >> 2)); + out.write((b3 << 6) | b4); + + length += 3; + + i = nextI(data, i, finish); + } + + length += decodeLastBlock(out, data.charAt(end - 4), data.charAt(end - 3), data.charAt(end - 2), data.charAt(end - 1)); + + return length; + } + + private int decodeLastBlock(OutputStream out, char c1, char c2, char c3, char c4) + throws IOException + { + byte b1, b2, b3, b4; + + if (c3 == padding) + { + b1 = decodingTable[c1]; + b2 = decodingTable[c2]; + + if ((b1 | b2) < 0) + { + throw new IOException("invalid characters encountered at end of base64 data"); + } + + out.write((b1 << 2) | (b2 >> 4)); + + return 1; + } + else if (c4 == padding) + { + b1 = decodingTable[c1]; + b2 = decodingTable[c2]; + b3 = decodingTable[c3]; + + if ((b1 | b2 | b3) < 0) + { + throw new IOException("invalid characters encountered at end of base64 data"); + } + + out.write((b1 << 2) | (b2 >> 4)); + out.write((b2 << 4) | (b3 >> 2)); + + return 2; + } + else + { + b1 = decodingTable[c1]; + b2 = decodingTable[c2]; + b3 = decodingTable[c3]; + b4 = decodingTable[c4]; + + if ((b1 | b2 | b3 | b4) < 0) + { + throw new IOException("invalid characters encountered at end of base64 data"); + } + + out.write((b1 << 2) | (b2 >> 4)); + out.write((b2 << 4) | (b3 >> 2)); + out.write((b3 << 6) | b4); + + return 3; + } + } + + private int nextI(String data, int i, int finish) + { + while ((i < finish) && ignore(data.charAt(i))) + { + i++; + } + return i; + } +} diff --git a/orchid/src/com/subgraph/orchid/encoders/DecoderException.java b/orchid/src/com/subgraph/orchid/encoders/DecoderException.java new file mode 100644 index 00000000..3d627937 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/encoders/DecoderException.java @@ -0,0 +1,20 @@ +package com.subgraph.orchid.encoders; + +public class DecoderException + extends IllegalStateException +{ + private static final long serialVersionUID = 4997418733670548381L; + private Throwable cause; + + DecoderException(String msg, Throwable cause) + { + super(msg); + + this.cause = cause; + } + + public Throwable getCause() + { + return cause; + } +} diff --git a/orchid/src/com/subgraph/orchid/encoders/Encoder.java b/orchid/src/com/subgraph/orchid/encoders/Encoder.java new file mode 100644 index 00000000..f1b931cc --- /dev/null +++ b/orchid/src/com/subgraph/orchid/encoders/Encoder.java @@ -0,0 +1,17 @@ +package com.subgraph.orchid.encoders; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Encode and decode byte arrays (typically from binary to 7-bit ASCII + * encodings). 
+ */ +public interface Encoder +{ + int encode(byte[] data, int off, int length, OutputStream out) throws IOException; + + int decode(byte[] data, int off, int length, OutputStream out) throws IOException; + + int decode(String data, OutputStream out) throws IOException; +} diff --git a/orchid/src/com/subgraph/orchid/encoders/EncoderException.java b/orchid/src/com/subgraph/orchid/encoders/EncoderException.java new file mode 100644 index 00000000..5051540e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/encoders/EncoderException.java @@ -0,0 +1,20 @@ +package com.subgraph.orchid.encoders; + +public class EncoderException + extends IllegalStateException +{ + private static final long serialVersionUID = 6589388628939318400L; + private Throwable cause; + + EncoderException(String msg, Throwable cause) + { + super(msg); + + this.cause = cause; + } + + public Throwable getCause() + { + return cause; + } +} diff --git a/orchid/src/com/subgraph/orchid/encoders/Hex.java b/orchid/src/com/subgraph/orchid/encoders/Hex.java new file mode 100644 index 00000000..225c22ad --- /dev/null +++ b/orchid/src/com/subgraph/orchid/encoders/Hex.java @@ -0,0 +1,131 @@ +package com.subgraph.orchid.encoders; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; + +public class Hex +{ + private static final Encoder encoder = new HexEncoder(); + + /** + * encode the input data producing a Hex encoded byte array. + * + * @return a byte array containing the Hex encoded data. + */ + public static byte[] encode( + byte[] data) + { + return encode(data, 0, data.length); + } + + /** + * encode the input data producing a Hex encoded byte array. + * + * @return a byte array containing the Hex encoded data. + */ + public static byte[] encode( + byte[] data, + int off, + int length) + { + ByteArrayOutputStream bOut = new ByteArrayOutputStream(); + + try + { + encoder.encode(data, off, length, bOut); + } + catch (Exception e) + { + throw new EncoderException("exception encoding Hex string: " + e.getMessage(), e); + } + + return bOut.toByteArray(); + } + + /** + * Hex encode the byte data writing it to the given output stream. + * + * @return the number of bytes produced. + */ + public static int encode( + byte[] data, + OutputStream out) + throws IOException + { + return encoder.encode(data, 0, data.length, out); + } + + /** + * Hex encode the byte data writing it to the given output stream. + * + * @return the number of bytes produced. + */ + public static int encode( + byte[] data, + int off, + int length, + OutputStream out) + throws IOException + { + return encoder.encode(data, off, length, out); + } + + /** + * decode the Hex encoded input data. It is assumed the input data is valid. + * + * @return a byte array representing the decoded data. + */ + public static byte[] decode( + byte[] data) + { + ByteArrayOutputStream bOut = new ByteArrayOutputStream(); + + try + { + encoder.decode(data, 0, data.length, bOut); + } + catch (Exception e) + { + throw new DecoderException("exception decoding Hex data: " + e.getMessage(), e); + } + + return bOut.toByteArray(); + } + + /** + * decode the Hex encoded String data - whitespace will be ignored. + * + * @return a byte array representing the decoded data. 
+ */ + public static byte[] decode( + String data) + { + ByteArrayOutputStream bOut = new ByteArrayOutputStream(); + + try + { + encoder.decode(data, bOut); + } + catch (Exception e) + { + throw new DecoderException("exception decoding Hex string: " + e.getMessage(), e); + } + + return bOut.toByteArray(); + } + + /** + * decode the Hex encoded String data writing it to the given output stream, + * whitespace characters will be ignored. + * + * @return the number of bytes produced. + */ + public static int decode( + String data, + OutputStream out) + throws IOException + { + return encoder.decode(data, out); + } +} diff --git a/orchid/src/com/subgraph/orchid/encoders/HexEncoder.java b/orchid/src/com/subgraph/orchid/encoders/HexEncoder.java new file mode 100644 index 00000000..c859b645 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/encoders/HexEncoder.java @@ -0,0 +1,187 @@ +package com.subgraph.orchid.encoders; + +import java.io.IOException; +import java.io.OutputStream; + +public class HexEncoder + implements Encoder +{ + protected final byte[] encodingTable = + { + (byte)'0', (byte)'1', (byte)'2', (byte)'3', (byte)'4', (byte)'5', (byte)'6', (byte)'7', + (byte)'8', (byte)'9', (byte)'a', (byte)'b', (byte)'c', (byte)'d', (byte)'e', (byte)'f' + }; + + /* + * set up the decoding table. + */ + protected final byte[] decodingTable = new byte[128]; + + protected void initialiseDecodingTable() + { + for (int i = 0; i < decodingTable.length; i++) + { + decodingTable[i] = (byte)0xff; + } + + for (int i = 0; i < encodingTable.length; i++) + { + decodingTable[encodingTable[i]] = (byte)i; + } + + decodingTable['A'] = decodingTable['a']; + decodingTable['B'] = decodingTable['b']; + decodingTable['C'] = decodingTable['c']; + decodingTable['D'] = decodingTable['d']; + decodingTable['E'] = decodingTable['e']; + decodingTable['F'] = decodingTable['f']; + } + + public HexEncoder() + { + initialiseDecodingTable(); + } + + /** + * encode the input data producing a Hex output stream. + * + * @return the number of bytes produced. + */ + public int encode( + byte[] data, + int off, + int length, + OutputStream out) + throws IOException + { + for (int i = off; i < (off + length); i++) + { + int v = data[i] & 0xff; + + out.write(encodingTable[(v >>> 4)]); + out.write(encodingTable[v & 0xf]); + } + + return length * 2; + } + + private static boolean ignore( + char c) + { + return c == '\n' || c =='\r' || c == '\t' || c == ' '; + } + + /** + * decode the Hex encoded byte data writing it to the given output stream, + * whitespace characters will be ignored. + * + * @return the number of bytes produced. + */ + public int decode( + byte[] data, + int off, + int length, + OutputStream out) + throws IOException + { + byte b1, b2; + int outLen = 0; + + int end = off + length; + + while (end > off) + { + if (!ignore((char)data[end - 1])) + { + break; + } + + end--; + } + + int i = off; + while (i < end) + { + while (i < end && ignore((char)data[i])) + { + i++; + } + + b1 = decodingTable[data[i++]]; + + while (i < end && ignore((char)data[i])) + { + i++; + } + + b2 = decodingTable[data[i++]]; + + if ((b1 | b2) < 0) + { + throw new IOException("invalid characters encountered in Hex data"); + } + + out.write((b1 << 4) | b2); + + outLen++; + } + + return outLen; + } + + /** + * decode the Hex encoded String data writing it to the given output stream, + * whitespace characters will be ignored. + * + * @return the number of bytes produced. 
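Illustrative usage of the Hex codec above (a sketch, not part of the patch): every input byte becomes two ASCII characters, output uses lower-case digits, and the decoder tolerates upper-case digits and embedded whitespace:

import com.subgraph.orchid.encoders.Hex;

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class HexRoundTripExample {
    public static void main(String[] args) {
        byte[] raw = {(byte) 0xDE, (byte) 0xAD, (byte) 0xBE, (byte) 0xEF};
        byte[] encoded = Hex.encode(raw);            // 8 ASCII bytes: "deadbeef"
        byte[] back = Hex.decode("DE AD BE EF");     // upper case and spaces are accepted
        System.out.println(new String(encoded, StandardCharsets.US_ASCII)
                + " round-trips: " + Arrays.equals(raw, back));
    }
}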
+ */ + public int decode( + String data, + OutputStream out) + throws IOException + { + byte b1, b2; + int length = 0; + + int end = data.length(); + + while (end > 0) + { + if (!ignore(data.charAt(end - 1))) + { + break; + } + + end--; + } + + int i = 0; + while (i < end) + { + while (i < end && ignore(data.charAt(i))) + { + i++; + } + + b1 = decodingTable[data.charAt(i++)]; + + while (i < end && ignore(data.charAt(i))) + { + i++; + } + + b2 = decodingTable[data.charAt(i++)]; + + if ((b1 | b2) < 0) + { + throw new IOException("invalid characters encountered in Hex string"); + } + + out.write((b1 << 4) | b2); + + length++; + } + + return length; + } +} diff --git a/orchid/src/com/subgraph/orchid/events/Event.java b/orchid/src/com/subgraph/orchid/events/Event.java new file mode 100644 index 00000000..f9921f65 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/events/Event.java @@ -0,0 +1,3 @@ +package com.subgraph.orchid.events; + +public interface Event {} diff --git a/orchid/src/com/subgraph/orchid/events/EventHandler.java b/orchid/src/com/subgraph/orchid/events/EventHandler.java new file mode 100644 index 00000000..9002e2b6 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/events/EventHandler.java @@ -0,0 +1,5 @@ +package com.subgraph.orchid.events; + +public interface EventHandler { + void handleEvent(Event event); +} diff --git a/orchid/src/com/subgraph/orchid/events/EventManager.java b/orchid/src/com/subgraph/orchid/events/EventManager.java new file mode 100644 index 00000000..235dc7d8 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/events/EventManager.java @@ -0,0 +1,34 @@ +package com.subgraph.orchid.events; + +import java.util.ArrayList; +import java.util.List; + +public class EventManager { + private final List handlers = new ArrayList(); + + public void addListener(final EventHandler listener) { + synchronized(this) { + handlers.add(listener); + } + } + + public void removeListener(final EventHandler listener) { + synchronized(this) { + handlers.remove(listener); + } + } + + public void fireEvent(final Event event) { + EventHandler[] handlersCopy; + + synchronized(this) { + handlersCopy = new EventHandler[handlers.size()]; + handlers.toArray(handlersCopy); + } + for(EventHandler handler : handlersCopy) { + handler.handleEvent(event); + } + + } + +} diff --git a/orchid/src/com/subgraph/orchid/geoip/CountryCodeService.java b/orchid/src/com/subgraph/orchid/geoip/CountryCodeService.java new file mode 100644 index 00000000..b2c49f60 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/geoip/CountryCodeService.java @@ -0,0 +1,172 @@ +package com.subgraph.orchid.geoip; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.logging.Logger; + +import com.subgraph.orchid.data.IPv4Address; + +public class CountryCodeService { + private final static Logger logger = Logger.getLogger(CountryCodeService.class.getName()); + private final static String DATABASE_FILENAME = "GeoIP.dat"; + private final static int COUNTRY_BEGIN = 16776960; + private final static int STANDARD_RECORD_LENGTH = 3; + private final static int MAX_RECORD_LENGTH = 4; + private final static CountryCodeService DEFAULT_INSTANCE = new CountryCodeService(); + + public static CountryCodeService getInstance() { + return DEFAULT_INSTANCE; + } + + private static final String[] COUNTRY_CODES = { "--", "AP", "EU", "AD", "AE", + "AF", "AG", "AI", 
"AL", "AM", "CW", "AO", "AQ", "AR", "AS", "AT", + "AU", "AW", "AZ", "BA", "BB", "BD", "BE", "BF", "BG", "BH", "BI", + "BJ", "BM", "BN", "BO", "BR", "BS", "BT", "BV", "BW", "BY", "BZ", + "CA", "CC", "CD", "CF", "CG", "CH", "CI", "CK", "CL", "CM", "CN", + "CO", "CR", "CU", "CV", "CX", "CY", "CZ", "DE", "DJ", "DK", "DM", + "DO", "DZ", "EC", "EE", "EG", "EH", "ER", "ES", "ET", "FI", "FJ", + "FK", "FM", "FO", "FR", "SX", "GA", "GB", "GD", "GE", "GF", "GH", + "GI", "GL", "GM", "GN", "GP", "GQ", "GR", "GS", "GT", "GU", "GW", + "GY", "HK", "HM", "HN", "HR", "HT", "HU", "ID", "IE", "IL", "IN", + "IO", "IQ", "IR", "IS", "IT", "JM", "JO", "JP", "KE", "KG", "KH", + "KI", "KM", "KN", "KP", "KR", "KW", "KY", "KZ", "LA", "LB", "LC", + "LI", "LK", "LR", "LS", "LT", "LU", "LV", "LY", "MA", "MC", "MD", + "MG", "MH", "MK", "ML", "MM", "MN", "MO", "MP", "MQ", "MR", "MS", + "MT", "MU", "MV", "MW", "MX", "MY", "MZ", "NA", "NC", "NE", "NF", + "NG", "NI", "NL", "NO", "NP", "NR", "NU", "NZ", "OM", "PA", "PE", + "PF", "PG", "PH", "PK", "PL", "PM", "PN", "PR", "PS", "PT", "PW", + "PY", "QA", "RE", "RO", "RU", "RW", "SA", "SB", "SC", "SD", "SE", + "SG", "SH", "SI", "SJ", "SK", "SL", "SM", "SN", "SO", "SR", "ST", + "SV", "SY", "SZ", "TC", "TD", "TF", "TG", "TH", "TJ", "TK", "TM", + "TN", "TO", "TL", "TR", "TT", "TV", "TW", "TZ", "UA", "UG", "UM", + "US", "UY", "UZ", "VA", "VC", "VE", "VG", "VI", "VN", "VU", "WF", + "WS", "YE", "YT", "RS", "ZA", "ZM", "ME", "ZW", "A1", "A2", "O1", + "AX", "GG", "IM", "JE", "BL", "MF", "BQ", "SS", "O1" }; + + private final byte[] database; + + public CountryCodeService() { + this.database = loadDatabase(); + } + + private static byte[] loadDatabase() { + final InputStream input = openDatabaseStream(); + if(input == null) { + logger.warning("Failed to open '"+ DATABASE_FILENAME + "' database file for country code lookups"); + return null; + } + try { + return loadEntireStream(input); + } catch (IOException e) { + logger.warning("IO error reading database file for country code lookups"); + return null; + } finally { + try { + input.close(); + } catch (IOException e) { } + } + } + + private static InputStream openDatabaseStream() { + final InputStream input = tryResourceOpen(); + if(input != null) { + return input; + } else { + return tryFilesystemOpen(); + } + } + + private static InputStream tryFilesystemOpen() { + final File dataDir = new File(System.getProperty("user.dir"), "data"); + final File dbFile = new File(dataDir, DATABASE_FILENAME); + if(!dbFile.canRead()) { + return null; + } + try { + return new FileInputStream(dbFile); + } catch (FileNotFoundException e) { + return null; + } + } + + private static InputStream tryResourceOpen() { + return CountryCodeService.class.getResourceAsStream("/data/"+ DATABASE_FILENAME); + } + + private static byte[] loadEntireStream(InputStream input) throws IOException { + final ByteArrayOutputStream output = new ByteArrayOutputStream(4096); + copy(input, output); + return output.toByteArray(); + } + + private static int copy(InputStream input, OutputStream output) throws IOException { + final byte[] buffer = new byte[4096]; + int count = 0; + int n = 0; + while((n = input.read(buffer)) != -1) { + output.write(buffer, 0, n); + count += n; + } + return count; + } + + public String getCountryCodeForAddress(IPv4Address address) { + return COUNTRY_CODES[seekCountry(address)]; + } + + private int seekCountry(IPv4Address address) { + if(database == null) { + return 0; + } + + final byte[] record = new byte[2 * MAX_RECORD_LENGTH]; + final int[] x = new 
int[2]; + final long ip = address.getAddressData() & 0xFFFFFFFFL; + + int offset = 0; + for(int depth = 31; depth >= 0; depth--) { + loadRecord(offset, record); + + x[0] = unpackRecordValue(record, 0); + x[1] = unpackRecordValue(record, 1); + + int xx = ((ip & (1 << depth)) > 0) ? (x[1]) : (x[0]); + + if(xx >= COUNTRY_BEGIN) { + final int idx = xx - COUNTRY_BEGIN; + if(idx < 0 || idx > COUNTRY_CODES.length) { + logger.warning("Invalid index calculated looking up country code record for ("+ address +") idx = "+ idx); + return 0; + } else { + return idx; + } + } else { + offset = xx; + } + + } + logger.warning("No record found looking up country code record for ("+ address + ")"); + return 0; + } + + private void loadRecord(int offset, byte[] recordBuffer) { + final int dbOffset = 2 * STANDARD_RECORD_LENGTH * offset; + System.arraycopy(database, dbOffset, recordBuffer, 0, recordBuffer.length); + } + + private int unpackRecordValue(byte[] record, int idx) { + final int valueOffset = idx * STANDARD_RECORD_LENGTH; + int value = 0; + for(int i = 0; i < STANDARD_RECORD_LENGTH; i++) { + int octet = record[valueOffset + i] & 0xFF; + value += (octet << (i * 8)); + } + return value; + } + +} diff --git a/orchid/src/com/subgraph/orchid/misc/GuardedBy.java b/orchid/src/com/subgraph/orchid/misc/GuardedBy.java new file mode 100644 index 00000000..cd7f913b --- /dev/null +++ b/orchid/src/com/subgraph/orchid/misc/GuardedBy.java @@ -0,0 +1,45 @@ +package com.subgraph.orchid.misc; + + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * The field or method to which this annotation is applied can only be accessed + * when holding a particular lock, which may be a built-in (synchronization) lock, + * or may be an explicit java.util.concurrent.Lock. + * + * The argument determines which lock guards the annotated field or method: + *
+ * <ul>
+ * <li> this : The intrinsic lock of the object in whose class the field is defined. </li>
+ * <li> class-name.this : For inner classes, it may be necessary to disambiguate 'this';
+ *      the class-name.this designation allows you to specify which 'this' reference is intended </li>
+ * <li> itself : For reference fields only; the object to which the field refers. </li>
+ * <li> field-name : The lock object is referenced by the (instance or static) field
+ *      specified by field-name. </li>
+ * <li> class-name.field-name : The lock object is referenced by the static field specified
+ *      by class-name.field-name. </li>
+ * <li> method-name() : The lock object is returned by calling the named nil-ary method. </li>
+ * <li> class-name.class : The Class object for the specified class should be used as the lock object. </li>
+ * </ul>
  • + */ +@Target({ElementType.FIELD, ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +public @interface GuardedBy { + String value(); +} diff --git a/orchid/src/com/subgraph/orchid/misc/Immutable.java b/orchid/src/com/subgraph/orchid/misc/Immutable.java new file mode 100644 index 00000000..18e8a6b3 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/misc/Immutable.java @@ -0,0 +1,41 @@ +package com.subgraph.orchid.misc; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/* + * Copyright (c) 2005 Brian Goetz and Tim Peierls + * Released under the Creative Commons Attribution License + * (http://creativecommons.org/licenses/by/2.5) + * Official home: http://www.jcip.net + * + * Any republication or derived work distributed in source code form + * must include this copyright and license notice. + */ + + +/** + * The class to which this annotation is applied is immutable. This means that + * its state cannot be seen to change by callers, which implies that + *
+ * <ul>
+ * <li> all public fields are final, </li>
+ * <li> all public final reference fields refer to other immutable objects, and </li>
+ * <li> constructors and methods do not publish references to any internal state
+ *      which is potentially mutable by the implementation. </li>
+ * </ul>
    + * Immutable objects may still have internal mutable state for purposes of performance + * optimization; some state variables may be lazily computed, so long as they are computed + * from immutable state and that callers cannot tell the difference. + *

    + * Immutable objects are inherently thread-safe; they may be passed between threads or + * published without synchronization. + */ +@Documented +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface Immutable { + +} diff --git a/orchid/src/com/subgraph/orchid/misc/NotThreadSafe.java b/orchid/src/com/subgraph/orchid/misc/NotThreadSafe.java new file mode 100644 index 00000000..7ca2360f --- /dev/null +++ b/orchid/src/com/subgraph/orchid/misc/NotThreadSafe.java @@ -0,0 +1,34 @@ +package com.subgraph.orchid.misc; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/* + * Copyright (c) 2005 Brian Goetz and Tim Peierls + * Released under the Creative Commons Attribution License + * (http://creativecommons.org/licenses/by/2.5) + * Official home: http://www.jcip.net + * + * Any republication or derived work distributed in source code form + * must include this copyright and license notice. + */ + + +/** + * The class to which this annotation is applied is not thread-safe. + * This annotation primarily exists for clarifying the non-thread-safety of a class + * that might otherwise be assumed to be thread-safe, despite the fact that it is a bad + * idea to assume a class is thread-safe without good reason. + * @see ThreadSafe + */ +@Documented +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface NotThreadSafe { +} + + + diff --git a/orchid/src/com/subgraph/orchid/misc/ThreadSafe.java b/orchid/src/com/subgraph/orchid/misc/ThreadSafe.java new file mode 100644 index 00000000..6fe32781 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/misc/ThreadSafe.java @@ -0,0 +1,33 @@ +package com.subgraph.orchid.misc; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/* + * Copyright (c) 2005 Brian Goetz and Tim Peierls + * Released under the Creative Commons Attribution License + * (http://creativecommons.org/licenses/by/2.5) + * Official home: http://www.jcip.net + * + * Any republication or derived work distributed in source code form + * must include this copyright and license notice. + */ + + + +/** + * The class to which this annotation is applied is thread-safe. This means that + * no sequences of accesses (reads and writes to public fields, calls to public methods) + * may put the object into an invalid state, regardless of the interleaving of those actions + * by the runtime, and without requiring any additional synchronization or coordination on the + * part of the caller. 
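Illustrative usage of the annotations above (hypothetical class, not part of the patch): @GuardedBy names the lock that protects a field, and @ThreadSafe marks the enclosing class. These annotations are documentation-only markers retained at runtime:

import com.subgraph.orchid.misc.GuardedBy;
import com.subgraph.orchid.misc.ThreadSafe;

// Hypothetical example: a counter whose state is guarded by the intrinsic lock ("this").
@ThreadSafe
public class ExampleCounter {
    @GuardedBy("this")
    private int count;

    public synchronized void increment() {
        count++;
    }

    public synchronized int get() {
        return count;
    }
}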
+ */ +@Documented +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface ThreadSafe { +} + diff --git a/orchid/src/com/subgraph/orchid/misc/Utils.java b/orchid/src/com/subgraph/orchid/misc/Utils.java new file mode 100644 index 00000000..4e12925d --- /dev/null +++ b/orchid/src/com/subgraph/orchid/misc/Utils.java @@ -0,0 +1,14 @@ +package com.subgraph.orchid.misc; + +public class Utils { + public static boolean constantTimeArrayEquals(byte[] a1, byte[] a2) { + if(a1.length != a2.length) { + return false; + } + int result = 0; + for(int i = 0; i < a1.length; i++) { + result += (a1[i] & 0xFF) ^ (a2[i] & 0xFF); + } + return result == 0; + } +} diff --git a/orchid/src/com/subgraph/orchid/sockets/AndroidSSLSocketFactory.java b/orchid/src/com/subgraph/orchid/sockets/AndroidSSLSocketFactory.java new file mode 100644 index 00000000..877a5f6c --- /dev/null +++ b/orchid/src/com/subgraph/orchid/sockets/AndroidSSLSocketFactory.java @@ -0,0 +1,66 @@ +package com.subgraph.orchid.sockets; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.Socket; +import java.net.UnknownHostException; +import java.security.NoSuchAlgorithmException; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocketFactory; + +import com.subgraph.orchid.sockets.sslengine.SSLEngineSSLSocket; + +public class AndroidSSLSocketFactory extends SSLSocketFactory { + + private final SSLContext sslContext; + + public AndroidSSLSocketFactory() throws NoSuchAlgorithmException { + this(SSLContext.getDefault()); + } + + public AndroidSSLSocketFactory(SSLContext sslContext) { + this.sslContext = sslContext; + } + + @Override + public String[] getDefaultCipherSuites() { + return sslContext.getDefaultSSLParameters().getCipherSuites(); + } + + @Override + public String[] getSupportedCipherSuites() { + return sslContext.getSupportedSSLParameters().getCipherSuites(); + + } + + @Override + public Socket createSocket(Socket s, String host, int port, + boolean autoClose) throws IOException { + + return new SSLEngineSSLSocket(s, sslContext); + } + + @Override + public Socket createSocket(String host, int port) throws IOException, + UnknownHostException { + throw new UnsupportedOperationException(); + } + + @Override + public Socket createSocket(String host, int port, InetAddress localHost, + int localPort) throws IOException, UnknownHostException { + throw new UnsupportedOperationException(); + } + + @Override + public Socket createSocket(InetAddress host, int port) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public Socket createSocket(InetAddress address, int port, + InetAddress localAddress, int localPort) throws IOException { + throw new UnsupportedOperationException(); + } +} diff --git a/orchid/src/com/subgraph/orchid/sockets/AndroidSocket.java b/orchid/src/com/subgraph/orchid/sockets/AndroidSocket.java new file mode 100644 index 00000000..b911df93 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/sockets/AndroidSocket.java @@ -0,0 +1,69 @@ +package com.subgraph.orchid.sockets; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.net.Socket; +import java.net.SocketAddress; +import java.net.SocketException; +import java.util.logging.Logger; + +public class AndroidSocket extends Socket { + private final static Logger logger = Logger.getLogger(AndroidSocket.class.getName()); + + private final Field isConnectedField; + private final OrchidSocketImpl impl; + private final Object lock = new Object(); + private boolean 
isSocketConnected; + + AndroidSocket(OrchidSocketImpl impl) throws SocketException { + super(impl); + this.impl = impl; + this.isConnectedField = getField("isConnected"); + } + + public void connect(SocketAddress endpoint) throws IOException { + connect(endpoint, 0); + } + + public void connect(SocketAddress endpoint, int timeout) throws IOException { + synchronized(lock) { + if(isSocketConnected) { + throw new SocketException("Already connected"); + } + try { + impl.connect(endpoint, timeout); + setIsConnected(); + } catch(IOException e) { + impl.close(); + throw e; + } + } + } + + protected void setIsConnected() { + isSocketConnected = true; + try { + if(isConnectedField != null) { + isConnectedField.setBoolean(this, true); + } + } catch (IllegalArgumentException e) { + logger.warning("Illegal argument trying to reflect value into isConnected field of Socket : "+ e.getMessage()); + } catch (IllegalAccessException e) { + logger.warning("Illegal access trying to reflect value into isConnected field of Socket : "+ e.getMessage()); + } + } + + private Field getField(String name) { + try { + final Field f = Socket.class.getDeclaredField(name); + f.setAccessible(true); + return f; + } catch (NoSuchFieldException e) { + logger.warning("Could not locate field '"+ name +"' in Socket class, disabling Android reflection"); + return null; + } catch (SecurityException e) { + logger.warning("Reflection access to field '"+ name +"' in Socket class not permitted."+ e.getMessage()); + return null; + } + } +} diff --git a/orchid/src/com/subgraph/orchid/sockets/OrchidSocketFactory.java b/orchid/src/com/subgraph/orchid/sockets/OrchidSocketFactory.java new file mode 100644 index 00000000..77b8a532 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/sockets/OrchidSocketFactory.java @@ -0,0 +1,74 @@ +package com.subgraph.orchid.sockets; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketAddress; +import java.net.SocketException; +import java.net.UnknownHostException; + +import javax.net.SocketFactory; + +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.TorClient; + +public class OrchidSocketFactory extends SocketFactory { + private final TorClient torClient; + private final boolean exceptionOnLocalBind; + + public OrchidSocketFactory(TorClient torClient) { + this(torClient, true); + } + + public OrchidSocketFactory(TorClient torClient, boolean exceptionOnLocalBind) { + this.torClient = torClient; + this.exceptionOnLocalBind = exceptionOnLocalBind; + } + + @Override + public Socket createSocket(String host, int port) throws IOException, + UnknownHostException { + return createOrchidSocket(host, port); + } + + @Override + public Socket createSocket(String host, int port, InetAddress localHost, + int localPort) throws IOException, UnknownHostException { + if(exceptionOnLocalBind) { + throw new UnsupportedOperationException("Cannot bind to local address"); + } + return createSocket(host, port); + } + + @Override + public Socket createSocket(InetAddress address, int port) throws IOException { + return createOrchidSocket(address.getHostAddress(), port); + } + + @Override + public Socket createSocket(InetAddress address, int port, + InetAddress localAddress, int localPort) throws IOException { + if(exceptionOnLocalBind) { + throw new UnsupportedOperationException("Cannot bind to local address"); + } + return createSocket(address, port); + } + + private Socket createOrchidSocket(String host, int port) throws 
IOException { + final Socket s = createSocketInstance(); + final SocketAddress endpoint = InetSocketAddress.createUnresolved(host, port); + s.connect(endpoint); + return s; + } + + private Socket createSocketInstance() throws SocketException { + final OrchidSocketImpl impl = new OrchidSocketImpl(torClient); + if(Tor.isAndroidRuntime()) { + return new AndroidSocket(impl); + } else { + // call protected constructor + return new Socket(impl) {}; + } + } +} diff --git a/orchid/src/com/subgraph/orchid/sockets/OrchidSocketImpl.java b/orchid/src/com/subgraph/orchid/sockets/OrchidSocketImpl.java new file mode 100644 index 00000000..9c6efc15 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/sockets/OrchidSocketImpl.java @@ -0,0 +1,169 @@ +package com.subgraph.orchid.sockets; + +import java.io.FileDescriptor; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.ConnectException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.net.SocketException; +import java.net.SocketImpl; +import java.net.SocketOptions; +import java.net.SocketTimeoutException; +import java.util.concurrent.TimeoutException; + +import com.subgraph.orchid.OpenFailedException; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.TorClient; + +public class OrchidSocketImpl extends SocketImpl { + + private final TorClient torClient; + private final Object streamLock = new Object(); + + private Stream stream; + + OrchidSocketImpl(TorClient torClient) { + this.torClient = torClient; + this.fd = new FileDescriptor(); + } + + public void setOption(int optID, Object value) throws SocketException { + throw new UnsupportedOperationException(); + } + + public Object getOption(int optID) throws SocketException { + if(optID == SocketOptions.SO_LINGER) { + return 0; + } else if(optID == SocketOptions.TCP_NODELAY) { + return Boolean.TRUE; + } else if(optID == SocketOptions.SO_TIMEOUT) { + return 0; + } else { + return 0; + } + } + + @Override + protected void create(boolean stream) throws IOException { + + } + + @Override + protected void connect(String host, int port) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + protected void connect(InetAddress address, int port) throws IOException { + throw new UnsupportedOperationException(); + + } + + @Override + protected void connect(SocketAddress address, int timeout) + throws IOException { + if(!(address instanceof InetSocketAddress)) { + throw new IllegalArgumentException("Unsupported address type"); + } + final InetSocketAddress inetAddress = (InetSocketAddress) address; + + doConnect(addressToName(inetAddress), inetAddress.getPort()); + } + + private String addressToName(InetSocketAddress address) { + if(address.getAddress() != null) { + return address.getAddress().getHostAddress(); + } else { + return address.getHostName(); + } + } + + private void doConnect(String host, int port) throws IOException { + synchronized(streamLock) { + if(stream != null) { + throw new SocketException("Already connected"); + } + try { + stream = torClient.openExitStreamTo(host, port); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new SocketException("connect() interrupted"); + } catch (TimeoutException e) { + throw new SocketTimeoutException(); + } catch (OpenFailedException e) { + throw new ConnectException(e.getMessage()); + } + } + } + + @Override + protected void bind(InetAddress host, int port) throws IOException { 
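+ // Server-side socket operations are not supported: bind(), listen() and accept() below all
+ // reject with UnsupportedOperationException, since Orchid only opens outbound exit streams.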
+ throw new UnsupportedOperationException(); + } + + @Override + protected void listen(int backlog) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + protected void accept(SocketImpl s) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + protected InputStream getInputStream() throws IOException { + synchronized (streamLock) { + if(stream == null) { + throw new IOException("Not connected"); + } + return stream.getInputStream(); + } + } + + @Override + protected OutputStream getOutputStream() throws IOException { + synchronized (streamLock) { + if(stream == null) { + throw new IOException("Not connected"); + } + return stream.getOutputStream(); + } + } + + @Override + protected int available() throws IOException { + synchronized(streamLock) { + if(stream == null) { + throw new IOException("Not connected"); + } + return stream.getInputStream().available(); + } + } + + @Override + protected void close() throws IOException { + synchronized (streamLock) { + if(stream != null) { + stream.close(); + stream = null; + } + } + } + + @Override + protected void sendUrgentData(int data) throws IOException { + throw new UnsupportedOperationException(); + } + + protected void shutdownInput() throws IOException { + //throw new IOException("Method not implemented!"); + } + + protected void shutdownOutput() throws IOException { + //throw new IOException("Method not implemented!"); + } +} diff --git a/orchid/src/com/subgraph/orchid/sockets/OrchidSocketImplFactory.java b/orchid/src/com/subgraph/orchid/sockets/OrchidSocketImplFactory.java new file mode 100644 index 00000000..7c96fcbb --- /dev/null +++ b/orchid/src/com/subgraph/orchid/sockets/OrchidSocketImplFactory.java @@ -0,0 +1,18 @@ +package com.subgraph.orchid.sockets; + +import java.net.SocketImpl; +import java.net.SocketImplFactory; + +import com.subgraph.orchid.TorClient; + +public class OrchidSocketImplFactory implements SocketImplFactory { + private final TorClient torClient; + + public OrchidSocketImplFactory(TorClient torClient) { + this.torClient = torClient; + } + + public SocketImpl createSocketImpl() { + return new OrchidSocketImpl(torClient); + } +} diff --git a/orchid/src/com/subgraph/orchid/sockets/sslengine/HandshakeCallbackHandler.java b/orchid/src/com/subgraph/orchid/sockets/sslengine/HandshakeCallbackHandler.java new file mode 100644 index 00000000..b26c20c4 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/sockets/sslengine/HandshakeCallbackHandler.java @@ -0,0 +1,5 @@ +package com.subgraph.orchid.sockets.sslengine; + +public interface HandshakeCallbackHandler { + void handshakeCompleted(); +} diff --git a/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineInputStream.java b/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineInputStream.java new file mode 100644 index 00000000..75ade871 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineInputStream.java @@ -0,0 +1,57 @@ +package com.subgraph.orchid.sockets.sslengine; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; + +public class SSLEngineInputStream extends InputStream { + + private final SSLEngineManager manager; + private final ByteBuffer recvBuffer; + private boolean isEOF; + + SSLEngineInputStream(SSLEngineManager manager) { + this.manager = manager; + this.recvBuffer = manager.getRecvBuffer(); + } + + @Override + public int read() throws IOException { + if(!fillRecvBufferIfEmpty()) { + return -1; + } + final int b = 
recvBuffer.get() & 0xFF; + recvBuffer.compact(); + return b; + } + + @Override + public int read(byte b[], int off, int len) throws IOException { + if(!fillRecvBufferIfEmpty()) { + return -1; + } + final int copyLen = Math.min(recvBuffer.remaining(), len); + recvBuffer.get(b, off, copyLen); + recvBuffer.compact(); + return copyLen; + } + + @Override + public void close() throws IOException { + manager.close(); + } + + private boolean fillRecvBufferIfEmpty() throws IOException { + if(isEOF) { + return false; + } + if(recvBuffer.position() == 0) { + if(manager.read() < 0) { + isEOF = true; + return false; + } + } + recvBuffer.flip(); + return recvBuffer.hasRemaining(); + } +} diff --git a/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineManager.java b/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineManager.java new file mode 100644 index 00000000..5cc6928e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineManager.java @@ -0,0 +1,343 @@ +package com.subgraph.orchid.sockets.sslengine; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.SocketException; +import java.nio.BufferOverflowException; +import java.nio.BufferUnderflowException; +import java.nio.ByteBuffer; +import java.util.logging.Level; +import java.util.logging.Logger; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLEngineResult; +import javax.net.ssl.SSLEngineResult.HandshakeStatus; +import javax.net.ssl.SSLEngineResult.Status; +import javax.net.ssl.SSLException; +import javax.net.ssl.SSLSession; + +public class SSLEngineManager { + private final static Logger logger = Logger.getLogger(SSLEngineManager.class.getName()); + + private final SSLEngine engine; + private final InputStream input; + private final OutputStream output; + + private final ByteBuffer peerApplicationBuffer; + private final ByteBuffer peerNetworkBuffer; + private final ByteBuffer myApplicationBuffer; + private final ByteBuffer myNetworkBuffer; + + private final HandshakeCallbackHandler handshakeCallback; + + private boolean handshakeStarted = false; + + + SSLEngineManager(SSLEngine engine, HandshakeCallbackHandler handshakeCallback, InputStream input, OutputStream output) { + this.engine = engine; + this.handshakeCallback = handshakeCallback; + this.input = input; + this.output = output; + final SSLSession session = engine.getSession(); + this.peerApplicationBuffer = createApplicationBuffer(session); + this.peerNetworkBuffer = createPacketBuffer(session); + this.myApplicationBuffer = createApplicationBuffer(session); + this.myNetworkBuffer = createPacketBuffer(session); + } + + private static ByteBuffer createApplicationBuffer(SSLSession session) { + return createBuffer(session.getApplicationBufferSize()); + } + + private static ByteBuffer createPacketBuffer(SSLSession session) { + return createBuffer(session.getPacketBufferSize()); + } + + private static ByteBuffer createBuffer(int sz) { + final byte[] array = new byte[sz]; + return ByteBuffer.wrap(array); + } + + void startHandshake() throws IOException { + logger.fine("startHandshake()"); + handshakeStarted = true; + engine.beginHandshake(); + runHandshake(); + } + + ByteBuffer getSendBuffer() { + return myApplicationBuffer; + } + + ByteBuffer getRecvBuffer() { + return peerApplicationBuffer; + } + + + int write() throws IOException { + logger.fine("write()"); + if(!handshakeStarted) { + startHandshake(); + } + final int p = myApplicationBuffer.position(); + if(p == 0) { + return 0; + } + 
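+ // Wrap whatever plaintext the application has buffered into TLS records, push them to the
+ // underlying transport, and report how many plaintext bytes were consumed.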
myNetworkBuffer.clear(); + myApplicationBuffer.flip(); + final SSLEngineResult result = engine.wrap(myApplicationBuffer, myNetworkBuffer); + myApplicationBuffer.compact(); + if(logger.isLoggable(Level.FINE)) { + logResult(result); + } + + switch(result.getStatus()) { + case BUFFER_OVERFLOW: + throw new BufferOverflowException(); + case BUFFER_UNDERFLOW: + throw new BufferUnderflowException(); + case CLOSED: + throw new SSLException("SSLEngine is closed"); + + case OK: + break; + default: + break; + } + + flush(); + if(runHandshake()) { + write(); + } + + return p - myApplicationBuffer.position(); + + } + + // either return -1 or peerApplicationBuffer has data to read + int read() throws IOException { + logger.fine("read()"); + if(!handshakeStarted) { + startHandshake(); + } + + if(engine.isInboundDone()) { + return -1; + } + + final int n = networkReadBuffer(peerNetworkBuffer); + if(n == -1) { + return -1; + } + final int p = peerApplicationBuffer.position(); + + peerNetworkBuffer.flip(); + final SSLEngineResult result = engine.unwrap(peerNetworkBuffer, peerApplicationBuffer); + peerNetworkBuffer.compact(); + if(logger.isLoggable(Level.FINE)) { + logResult(result); + } + + switch(result.getStatus()) { + case BUFFER_OVERFLOW: + throw new BufferOverflowException(); + + case BUFFER_UNDERFLOW: + return 0; // <-- illegal return according to invariant + + case CLOSED: + input.close(); + break; + case OK: + break; + default: + break; + } + + runHandshake(); + + if(n == -1) { // <-- can't happen + engine.closeInbound(); + } + if(engine.isInboundDone()) { + return -1; + } + return peerApplicationBuffer.position() - p; + } + + void close() throws IOException { + try { + flush(); + if(!engine.isOutboundDone()) { + engine.closeOutbound(); + runHandshake(); + } else if(!engine.isInboundDone()) { + engine.closeInbound(); + runHandshake(); + } + } finally { + output.close(); + } + } + + void flush() throws IOException { + myNetworkBuffer.flip(); + networkWriteBuffer(myNetworkBuffer); + myNetworkBuffer.compact(); + } + + + private boolean runHandshake() throws IOException { + boolean handshakeRan = false; + while(true) { + if(!processHandshake()) { + return handshakeRan; + } else { + handshakeRan = true; + } + } + } + + private boolean processHandshake() throws IOException { + final HandshakeStatus hs = engine.getHandshakeStatus(); + logger.fine("processHandshake() hs = "+ hs); + switch(hs) { + case NEED_TASK: + synchronousRunDelegatedTasks(); + return processHandshake(); + + case NEED_UNWRAP: + return handshakeUnwrap(); + + case NEED_WRAP: + return handshakeWrap(); + + default: + return false; + } + } + + private void synchronousRunDelegatedTasks() { + logger.fine("runDelegatedTasks()"); + while(true) { + Runnable r = engine.getDelegatedTask(); + if(r == null) { + return; + } + logger.fine("Running a task: "+ r); + r.run(); + } + } + + private boolean handshakeUnwrap() throws IOException { + logger.fine("handshakeUnwrap()"); + + if(!engine.isInboundDone() && peerNetworkBuffer.position() == 0) { + if(networkReadBuffer(peerNetworkBuffer) < 0) { + return false; + } + } + peerNetworkBuffer.flip(); + final SSLEngineResult result = engine.unwrap(peerNetworkBuffer, peerApplicationBuffer); + peerNetworkBuffer.compact(); + + if(logger.isLoggable(Level.FINE)) { + logResult(result); + } + + if(result.getHandshakeStatus() == HandshakeStatus.FINISHED) { + handshakeFinished(); + } + switch(result.getStatus()) { + + case CLOSED: + if(engine.isOutboundDone()) { + output.close(); + } + return false; + case OK: + return 
true; + case BUFFER_UNDERFLOW: + if(networkReadBuffer(peerNetworkBuffer) < 0) { + return false; + } + return true; + default: + return false; + } + } + + private boolean handshakeWrap() throws IOException { + logger.fine("handshakeWrap()"); + myApplicationBuffer.flip(); + final SSLEngineResult result = engine.wrap(myApplicationBuffer, myNetworkBuffer); + myApplicationBuffer.compact(); + if(logger.isLoggable(Level.FINE)) { + logResult(result); + } + + if(result.getHandshakeStatus() == HandshakeStatus.FINISHED) { + handshakeFinished(); + } + + if(result.getStatus() == Status.CLOSED) { + try { + flush(); + } catch (SocketException e) { + e.printStackTrace(); + } + } else { + flush(); + } + + switch(result.getStatus()) { + case CLOSED: + if(engine.isOutboundDone()) { + output.close(); + } + return false; + + case OK: + return true; + + default: + return false; + + } + } + + private void logResult(SSLEngineResult result) { + logger.fine("Result status="+result.getStatus() + " hss="+ result.getHandshakeStatus() + " consumed = "+ result.bytesConsumed() + " produced = "+ result.bytesProduced()); + } + + private void handshakeFinished() { + if(handshakeCallback != null) { + handshakeCallback.handshakeCompleted(); + } + } + + private void networkWriteBuffer(ByteBuffer buffer) throws IOException { + final byte[] bs = buffer.array(); + final int off = buffer.position(); + final int len = buffer.limit() - off; + logger.fine("networkWriteBuffer(b, "+ off + ", "+ len +")"); + output.write(bs, off, len); + output.flush(); + buffer.position(buffer.limit()); + } + + private int networkReadBuffer(ByteBuffer buffer) throws IOException { + final byte[] bs = buffer.array(); + final int off = buffer.position(); + final int len = buffer.limit() - off; + + final int n = input.read(bs, off, len); + if(n != -1) { + buffer.position(off + n); + } + logger.fine("networkReadBuffer(b, "+ off +", "+ len +") = "+ n); + return n; + } + +} diff --git a/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineOutputStream.java b/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineOutputStream.java new file mode 100644 index 00000000..62cc11aa --- /dev/null +++ b/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineOutputStream.java @@ -0,0 +1,46 @@ +package com.subgraph.orchid.sockets.sslengine; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; + +public class SSLEngineOutputStream extends OutputStream { + + private final SSLEngineManager manager; + private final ByteBuffer outputBuffer; + + public SSLEngineOutputStream(SSLEngineManager manager) { + this.manager = manager; + this.outputBuffer = manager.getSendBuffer(); + } + + @Override + public void write(int b) throws IOException { + outputBuffer.put((byte) b); + manager.write(); + } + + @Override + public void write(byte b[], int off, int len) throws IOException { + int written = 0; + + while(written < len) { + int n = doWrite(b, off + written, len - written); + + written += n; + } + } + + @Override + public void close() throws IOException { + manager.close(); + } + + private int doWrite(byte[] b, int off, int len) throws IOException { + int putLength = Math.min(len, outputBuffer.remaining()); + outputBuffer.put(b, off, putLength); + manager.write(); + return putLength; + } + +} diff --git a/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineSSLSocket.java b/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineSSLSocket.java new file mode 100644 index 00000000..0f87e05d --- /dev/null +++ 
b/orchid/src/com/subgraph/orchid/sockets/sslengine/SSLEngineSSLSocket.java @@ -0,0 +1,315 @@ +package com.subgraph.orchid.sockets.sslengine; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.Socket; +import java.net.SocketAddress; +import java.net.SocketException; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; + +import javax.net.ssl.HandshakeCompletedEvent; +import javax.net.ssl.HandshakeCompletedListener; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocket; + +public class SSLEngineSSLSocket extends SSLSocket implements HandshakeCallbackHandler { + + private final SSLEngine engine; + private final SSLEngineManager manager; + + private Socket socket; + private InputStream inputStream; + private OutputStream outputStream; + private final List listenerList; + public SSLEngineSSLSocket(Socket socket, SSLContext context) throws IOException { + this.engine = createSSLEngine(context); + this.socket = socket; + this.manager = new SSLEngineManager(engine, this, socket.getInputStream(), socket.getOutputStream()); + this.listenerList = new CopyOnWriteArrayList(); + } + + private static SSLEngine createSSLEngine(SSLContext context) { + final SSLEngine engine = context.createSSLEngine(); + engine.setUseClientMode(true); + return engine; + } + + @Override + public String[] getSupportedCipherSuites() { + return engine.getSupportedCipherSuites(); + } + + @Override + public String[] getEnabledCipherSuites() { + return engine.getEnabledCipherSuites(); + } + + @Override + public void setEnabledCipherSuites(String[] suites) { + engine.setEnabledCipherSuites(suites); + } + + @Override + public String[] getSupportedProtocols() { + return engine.getSupportedProtocols(); + } + + @Override + public String[] getEnabledProtocols() { + return engine.getEnabledProtocols(); + } + + @Override + public void setEnabledProtocols(String[] protocols) { + engine.setEnabledProtocols(protocols); + } + + @Override + public SSLSession getSession() { + return engine.getSession(); + } + + @Override + public void addHandshakeCompletedListener( + HandshakeCompletedListener listener) { + listenerList.add(listener); + } + + @Override + public void removeHandshakeCompletedListener( + HandshakeCompletedListener listener) { + listenerList.remove(listener); + } + + @Override + public void startHandshake() throws IOException { + manager.startHandshake(); + } + + @Override + public void setUseClientMode(boolean mode) { + engine.setUseClientMode(mode); + } + + @Override + public boolean getUseClientMode() { + return engine.getUseClientMode(); + } + + @Override + public void setNeedClientAuth(boolean need) { + engine.setNeedClientAuth(need); + } + + @Override + public boolean getNeedClientAuth() { + return engine.getNeedClientAuth(); + } + + @Override + public void setWantClientAuth(boolean want) { + engine.setWantClientAuth(want); + } + + @Override + public boolean getWantClientAuth() { + return engine.getWantClientAuth(); + } + + @Override + public void connect(SocketAddress endpoint) throws IOException { + throw new IOException("Socket is already connected"); + } + + @Override + public void connect(SocketAddress endpoint, int timeout) throws IOException { + throw new IOException("Socket is already connected"); + } + + @Override + public void bind(SocketAddress bindpoint) throws IOException { + throw new IOException("Socket is already connected"); 
+ } + + @Override + public InetAddress getInetAddress() { + return socket.getInetAddress(); + } + + @Override + public InetAddress getLocalAddress() { + return socket.getLocalAddress(); + } + + @Override + public int getPort() { + return socket.getPort(); + } + + @Override + public int getLocalPort() { + return socket.getLocalPort(); + } + + @Override + public SocketAddress getRemoteSocketAddress() { + return socket.getRemoteSocketAddress(); + } + + @Override + public SocketAddress getLocalSocketAddress() { + return socket.getLocalSocketAddress(); + } + + @Override + public void setTcpNoDelay(boolean on) throws SocketException { + socket.setTcpNoDelay(on); + } + + @Override + public boolean getTcpNoDelay() throws SocketException { + return socket.getTcpNoDelay(); + } + + @Override + public void setSoLinger(boolean on, int linger) throws SocketException { + socket.setSoLinger(on, linger); + } + + @Override + public int getSoLinger() throws SocketException { + return socket.getSoLinger(); + } + + @Override + public void setOOBInline(boolean on) throws SocketException { + socket.setOOBInline(on); + } + + @Override + public boolean getOOBInline() throws SocketException { + return socket.getOOBInline(); + } + + @Override + public synchronized void setSoTimeout(int timeout) throws SocketException { + socket.setSoTimeout(timeout); + } + + @Override + public synchronized int getSoTimeout() throws SocketException { + return socket.getSoTimeout(); + } + + @Override + public synchronized void setSendBufferSize(int size) throws SocketException { + socket.setSendBufferSize(size); + } + + @Override + public synchronized int getSendBufferSize() throws SocketException { + return socket.getSendBufferSize(); + } + + @Override + public synchronized void setReceiveBufferSize(int size) + throws SocketException { + socket.setReceiveBufferSize(size); + } + + @Override + public synchronized int getReceiveBufferSize() throws SocketException { + return socket.getReceiveBufferSize(); + } + + @Override + public void setKeepAlive(boolean on) throws SocketException { + socket.setKeepAlive(on); + } + + @Override + public boolean getKeepAlive() throws SocketException { + return socket.getKeepAlive(); + } + + @Override + public void setTrafficClass(int tc) throws SocketException { + socket.setTrafficClass(tc); + } + + @Override + public int getTrafficClass() throws SocketException { + return socket.getTrafficClass(); + } + + @Override + public void setReuseAddress(boolean on) throws SocketException { + socket.setReuseAddress(on); + } + + @Override + public boolean getReuseAddress() throws SocketException { + return socket.getReuseAddress(); + } + + @Override + public void shutdownInput() throws IOException { + throw new UnsupportedOperationException("shutdownInput() not supported on SSL Sockets"); + } + + @Override + public void shutdownOutput() throws IOException { + throw new UnsupportedOperationException("shutdownOutput() not supported on SSL Sockets"); + } + + @Override + public boolean isInputShutdown() { + return socket.isInputShutdown(); + } + + @Override + public boolean isOutputShutdown() { + return socket.isOutputShutdown(); + } + + @Override + public void setEnableSessionCreation(boolean flag) { + engine.setEnableSessionCreation(flag); + } + + @Override + public boolean getEnableSessionCreation() { + return engine.getEnableSessionCreation(); + } + + @Override + public synchronized InputStream getInputStream() throws IOException { + if(inputStream == null) { + inputStream = new SSLEngineInputStream(manager); + } 
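+ // A single lazily-created stream is reused; it serves decrypted data out of the manager's receive buffer.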
+ return inputStream; + } + + @Override + public OutputStream getOutputStream() throws IOException { + if(outputStream == null) { + outputStream = new SSLEngineOutputStream(manager); + } + return outputStream; + } + + public void handshakeCompleted() { + if(listenerList.isEmpty()) { + return; + } + final HandshakeCompletedEvent event = new HandshakeCompletedEvent(this, engine.getSession()); + for(HandshakeCompletedListener listener: listenerList) { + listener.handshakeCompleted(event); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/socks/Socks4Request.java b/orchid/src/com/subgraph/orchid/socks/Socks4Request.java new file mode 100644 index 00000000..ef79b6e0 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/socks/Socks4Request.java @@ -0,0 +1,70 @@ +package com.subgraph.orchid.socks; + +import java.net.Socket; + +import com.subgraph.orchid.TorConfig; + +public class Socks4Request extends SocksRequest { + final static int SOCKS_COMMAND_CONNECT = 1; + final static int SOCKS_COMMAND_RESOLV = 0xF0; + private final static int SOCKS_STATUS_SUCCESS = 0x5a; + private final static int SOCKS_STATUS_FAILURE = 0x5b; + private int command; + + Socks4Request(TorConfig config, Socket socket) { + super(config, socket); + } + + public boolean isConnectRequest() { + return command == SOCKS_COMMAND_CONNECT; + } + + public int getCommandCode() { + return command; + } + + public void sendConnectionRefused() throws SocksRequestException { + sendError(false); + } + + public void sendError(boolean isUnsupportedCommand) throws SocksRequestException { + sendResponse(SOCKS_STATUS_FAILURE); + } + + public void sendSuccess() throws SocksRequestException { + sendResponse(SOCKS_STATUS_SUCCESS); + } + + public void readRequest() throws SocksRequestException { + command = readByte(); + setPortData(readPortData()); + byte[] ipv4Data = readIPv4AddressData(); + readNullTerminatedString(); // Username + if(isVersion4aHostname(ipv4Data)) + setHostname(readNullTerminatedString()); + else + setIPv4AddressData(ipv4Data); + } + + private boolean isVersion4aHostname(byte[] data) { + /* + * For version 4A, if the client cannot resolve the destination host's + * domain name to find its IP address, it should set the first three bytes + * of DSTIP to NULL and the last byte to a non-zero value. (This corresponds + * to IP address 0.0.0.x, with x nonzero. 
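+ * For example, a 4a client asking the proxy to resolve "example.com" sends DSTIP 0.0.0.1 and
+ * appends the hostname, NUL-terminated, after the username field.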
+ */ + if(data.length != 4) + return false; + for(int i = 0; i < 3; i++) + if(data[i] != 0) + return false; + return data[3] != 0; + } + + private void sendResponse(int code) throws SocksRequestException { + final byte[] responseBuffer = new byte[8]; + responseBuffer[0] = 0; + responseBuffer[1] = (byte) code; + socketWrite(responseBuffer); + } +} diff --git a/orchid/src/com/subgraph/orchid/socks/Socks5Request.java b/orchid/src/com/subgraph/orchid/socks/Socks5Request.java new file mode 100644 index 00000000..f7f0a225 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/socks/Socks5Request.java @@ -0,0 +1,145 @@ +package com.subgraph.orchid.socks; + +import java.net.Socket; + +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.TorException; + +public class Socks5Request extends SocksRequest { + final static int SOCKS5_VERSION = 5; + final static int SOCKS5_AUTH_NONE = 0; + final static int SOCKS5_COMMAND_CONNECT = 1; + final static int SOCKS5_COMMAND_RESOLV = 0xF0; + final static int SOCKS5_COMMAND_RESOLV_PTR = 0xF1; + final static int SOCKS5_ADDRESS_IPV4 = 1; + final static int SOCKS5_ADDRESS_HOSTNAME = 3; + final static int SOCKS5_ADDRESS_IPV6 = 4; + final static int SOCKS5_STATUS_SUCCESS = 0; + final static int SOCKS5_STATUS_FAILURE = 1; + final static int SOCKS5_STATUS_CONNECTION_REFUSED = 5; + final static int SOCKS5_STATUS_COMMAND_NOT_SUPPORTED = 7; + + private int command; + private int addressType; + private byte[] addressBytes = new byte[0]; + private byte[] portBytes = new byte[0]; + + Socks5Request(TorConfig config, Socket socket) { + super(config, socket); + } + + public boolean isConnectRequest() { + return command == SOCKS5_COMMAND_CONNECT; + } + + public int getCommandCode() { + return command; + } + + private String addressBytesToHostname() { + if(addressType != SOCKS5_ADDRESS_HOSTNAME) + throw new TorException("SOCKS 4 request is not a hostname request"); + final StringBuilder sb = new StringBuilder(); + for(int i = 1; i < addressBytes.length; i++) { + char c = (char) (addressBytes[i] & 0xFF); + sb.append(c); + } + return sb.toString(); + } + + public void readRequest() throws SocksRequestException { + if(!processAuthentication()) { + throw new SocksRequestException("Failed to negotiate authentication"); + } + if(readByte() != SOCKS5_VERSION) + throw new SocksRequestException(); + + command = readByte(); + readByte(); // Reserved + addressType = readByte(); + addressBytes = readAddressBytes(); + portBytes = readPortData(); + if(addressType == SOCKS5_ADDRESS_IPV4) + setIPv4AddressData(addressBytes); + else if(addressType == SOCKS5_ADDRESS_HOSTNAME) + setHostname(addressBytesToHostname()); + else + throw new SocksRequestException(); + setPortData(portBytes); + } + + public void sendConnectionRefused() throws SocksRequestException { + sendResponse(SOCKS5_STATUS_CONNECTION_REFUSED); + } + + public void sendError(boolean isUnsupportedCommand) throws SocksRequestException { + if(isUnsupportedCommand) { + sendResponse(SOCKS5_STATUS_COMMAND_NOT_SUPPORTED); + } else { + sendResponse(SOCKS5_STATUS_FAILURE); + } + } + + public void sendSuccess() throws SocksRequestException { + sendResponse(SOCKS5_STATUS_SUCCESS); + } + + private void sendResponse(int status) throws SocksRequestException { + final int responseLength = 4 + addressBytes.length + portBytes.length; + final byte[] response = new byte[responseLength]; + response[0] = SOCKS5_VERSION; + response[1] = (byte) status; + response[2] = 0; + response[3] = (byte) addressType; + System.arraycopy(addressBytes, 0, response, 
4, addressBytes.length); + System.arraycopy(portBytes, 0, response, 4 + addressBytes.length, portBytes.length); + socketWrite(response); + } + + private boolean processAuthentication() throws SocksRequestException { + final int nmethods = readByte(); + boolean foundAuthNone = false; + for(int i = 0; i < nmethods; i++) { + final int meth = readByte(); + if(meth == SOCKS5_AUTH_NONE) + foundAuthNone = true; + } + + if(foundAuthNone) { + sendAuthenticationResponse(SOCKS5_AUTH_NONE); + return true; + } else { + sendAuthenticationResponse(0xFF); + return false; + } + } + + + private void sendAuthenticationResponse(int method) throws SocksRequestException { + final byte[] response = new byte[2]; + response[0] = SOCKS5_VERSION; + response[1] = (byte) method; + socketWrite(response); + } + + private byte[] readAddressBytes() throws SocksRequestException { + switch(addressType) { + case SOCKS5_ADDRESS_IPV4: + return readIPv4AddressData(); + case SOCKS5_ADDRESS_IPV6: + return readIPv6AddressData(); + case SOCKS5_ADDRESS_HOSTNAME: + return readHostnameData(); + default: + throw new SocksRequestException(); + } + } + + private byte[] readHostnameData() throws SocksRequestException { + final int length = readByte(); + final byte[] addrData = new byte[length + 1]; + addrData[0] = (byte) length; + readAll(addrData, 1, length); + return addrData; + } +} diff --git a/orchid/src/com/subgraph/orchid/socks/SocksClientTask.java b/orchid/src/com/subgraph/orchid/socks/SocksClientTask.java new file mode 100644 index 00000000..b7cf781d --- /dev/null +++ b/orchid/src/com/subgraph/orchid/socks/SocksClientTask.java @@ -0,0 +1,122 @@ +package com.subgraph.orchid.socks; + +import java.io.IOException; +import java.net.Socket; +import java.util.concurrent.TimeoutException; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.subgraph.orchid.CircuitManager; +import com.subgraph.orchid.OpenFailedException; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.TorException; + +public class SocksClientTask implements Runnable { + private final static Logger logger = Logger.getLogger(SocksClientTask.class.getName()); + + private final TorConfig config; + private final Socket socket; + private final CircuitManager circuitManager; + + SocksClientTask(TorConfig config, Socket socket, CircuitManager circuitManager) { + this.config = config; + this.socket = socket; + this.circuitManager = circuitManager; + } + + public void run() { + final int version = readByte(); + dispatchRequest(version); + closeSocket(); + } + + private int readByte() { + try { + return socket.getInputStream().read(); + } catch (IOException e) { + logger.warning("IO error reading version byte: "+ e.getMessage()); + return -1; + } + } + + private void dispatchRequest(int versionByte) { + switch(versionByte) { + case 'H': + case 'G': + case 'P': + sendHttpPage(); + break; + case 4: + processRequest(new Socks4Request(config, socket)); + break; + case 5: + processRequest(new Socks5Request(config, socket)); + break; + default: + // fall through, do nothing + break; + } + } + + private void processRequest(SocksRequest request) { + try { + request.readRequest(); + if(!request.isConnectRequest()) { + logger.warning("Non connect command ("+ request.getCommandCode() + ")"); + request.sendError(true); + return; + } + + try { + final Stream stream = openConnectStream(request); + logger.fine("SOCKS CONNECT to "+ request.getTarget()+ " completed"); + request.sendSuccess(); + 
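+ // Relay data in both directions between the client socket and the Tor stream until one side closes.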
runOpenConnection(stream); + } catch (InterruptedException e) { + logger.info("SOCKS CONNECT to "+ request.getTarget() + " was thread interrupted"); + Thread.currentThread().interrupt(); + request.sendError(false); + } catch (TimeoutException e) { + logger.info("SOCKS CONNECT to "+ request.getTarget() + " timed out"); + request.sendError(false); + } catch (OpenFailedException e) { + logger.info("SOCKS CONNECT to "+ request.getTarget() + " failed: "+ e.getMessage()); + request.sendConnectionRefused(); + } + } catch (SocksRequestException e) { + logger.log(Level.WARNING, "Failure reading SOCKS request: "+ e.getMessage()); + try { + request.sendError(false); + socket.close(); + } catch (Exception ignore) { } + } + } + + + private void runOpenConnection(Stream stream) { + SocksStreamConnection.runConnection(socket, stream); + } + + private Stream openConnectStream(SocksRequest request) throws InterruptedException, TimeoutException, OpenFailedException { + if(request.hasHostname()) { + logger.fine("SOCKS CONNECT request to "+ request.getHostname() +":"+ request.getPort()); + return circuitManager.openExitStreamTo(request.getHostname(), request.getPort()); + } else { + logger.fine("SOCKS CONNECT request to "+ request.getAddress() +":"+ request.getPort()); + return circuitManager.openExitStreamTo(request.getAddress(), request.getPort()); + } + } + + private void sendHttpPage() { + throw new TorException("Returning HTTP page not implemented"); + } + + private void closeSocket() { + try { + socket.close(); + } catch (IOException e) { + logger.warning("Error closing SOCKS socket: "+ e.getMessage()); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/socks/SocksPortListenerImpl.java b/orchid/src/com/subgraph/orchid/socks/SocksPortListenerImpl.java new file mode 100644 index 00000000..76abfb3e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/socks/SocksPortListenerImpl.java @@ -0,0 +1,116 @@ +package com.subgraph.orchid.socks; + +import java.io.IOException; +import java.net.ServerSocket; +import java.net.Socket; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.logging.Logger; + +import com.subgraph.orchid.CircuitManager; +import com.subgraph.orchid.SocksPortListener; +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.TorException; + +public class SocksPortListenerImpl implements SocksPortListener { + private final static Logger logger = Logger.getLogger(SocksPortListenerImpl.class.getName()); + private final Set listeningPorts = new HashSet(); + private final Map acceptThreads = new HashMap(); + private final TorConfig config; + private final CircuitManager circuitManager; + private final ExecutorService executor; + private boolean isStopped; + + public SocksPortListenerImpl(TorConfig config, CircuitManager circuitManager) { + this.config = config; + this.circuitManager = circuitManager; + executor = Executors.newCachedThreadPool(); + } + + public void addListeningPort(int port) { + if(port <= 0 || port > 65535) { + throw new TorException("Illegal listening port: "+ port); + } + + synchronized(listeningPorts) { + if(isStopped) { + throw new IllegalStateException("Cannot add listening port because Socks proxy has been stopped"); + } + if(listeningPorts.contains(port)) + return; + listeningPorts.add(port); + try { + startListening(port); + logger.fine("Listening for SOCKS connections on port "+ port); + } catch (IOException e) { 
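+ // Roll back the registration so a later call may retry this port.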
+ listeningPorts.remove(port); + throw new TorException("Failed to listen on port "+ port +" : "+ e.getMessage()); + } + } + + } + + public void stop() { + synchronized (listeningPorts) { + for(AcceptTask t: acceptThreads.values()) { + t.stop(); + } + executor.shutdownNow(); + isStopped = true; + } + } + + private void startListening(int port) throws IOException { + final AcceptTask task = new AcceptTask(port); + acceptThreads.put(port, task); + executor.execute(task); + } + + private Runnable newClientSocket(final Socket s) { + return new SocksClientTask(config, s, circuitManager); + } + + private class AcceptTask implements Runnable { + private final ServerSocket socket; + private final int port; + private volatile boolean stopped; + + AcceptTask(int port) throws IOException { + this.socket = new ServerSocket(port); + this.port = port; + } + + void stop() { + stopped = true; + try { + socket.close(); + } catch (IOException e) { } + } + + public void run() { + try { + runAcceptLoop(); + } catch (IOException e) { + if(!stopped) { + logger.warning("System error accepting SOCKS socket connections: "+ e.getMessage()); + } + } finally { + synchronized (listeningPorts) { + listeningPorts.remove(port); + acceptThreads.remove(port); + } + } + } + + private void runAcceptLoop() throws IOException { + while(!Thread.interrupted() && !stopped) { + final Socket s = socket.accept(); + executor.execute(newClientSocket(s)); + } + } + } +} diff --git a/orchid/src/com/subgraph/orchid/socks/SocksRequest.java b/orchid/src/com/subgraph/orchid/socks/SocksRequest.java new file mode 100644 index 00000000..a28497d8 --- /dev/null +++ b/orchid/src/com/subgraph/orchid/socks/SocksRequest.java @@ -0,0 +1,179 @@ +package com.subgraph.orchid.socks; + +import java.io.IOException; +import java.net.Socket; +import java.util.logging.Logger; + +import com.subgraph.orchid.TorConfig; +import com.subgraph.orchid.data.IPv4Address; + +public abstract class SocksRequest { + private final static Logger logger = Logger.getLogger(SocksRequest.class.getName()); + + private final TorConfig config; + private final Socket socket; + + private byte[] addressData; + private IPv4Address address; + private String hostname; + private int port; + + private long lastWarningTimestamp = 0; + + protected SocksRequest(TorConfig config, Socket socket) { + this.config = config; + this.socket = socket; + } + + abstract public void readRequest() throws SocksRequestException; + abstract public int getCommandCode(); + abstract public boolean isConnectRequest(); + abstract void sendError(boolean isUnsupportedCommand) throws SocksRequestException; + abstract void sendSuccess() throws SocksRequestException; + abstract void sendConnectionRefused() throws SocksRequestException; + + public int getPort() { + return port; + } + + public IPv4Address getAddress() { + return address; + } + + public boolean hasHostname() { + return hostname != null; + } + + public String getHostname() { + return hostname; + } + + public String getTarget() { + if(config.getSafeLogging()) { + return "[scrubbed]:"+ port; + } + if(hostname != null) { + return hostname + ":" + port; + } else { + return address + ":" + port; + } + } + + protected void setPortData(byte[] data) throws SocksRequestException { + if(data.length != 2) + throw new SocksRequestException(); + port = ((data[0] & 0xFF) << 8) | (data[1] & 0xFF); + } + + protected void setIPv4AddressData(byte[] data) throws SocksRequestException { + logUnsafeSOCKS(); + + if(data.length != 4) + throw new SocksRequestException(); + 
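+ // Keep the raw bytes and fold the four octets into an int, most significant octet first.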
addressData = data; + + int addressValue = 0; + for(byte b: addressData) { + addressValue <<= 8; + addressValue |= (b & 0xFF); + } + address = new IPv4Address(addressValue); + } + + private boolean testRateLimit() { + final long now = System.currentTimeMillis(); + final long diff = now - lastWarningTimestamp; + lastWarningTimestamp = now; + return diff > 5000; + } + + private void logUnsafeSOCKS() throws SocksRequestException { + if((config.getWarnUnsafeSocks() || config.getSafeSocks()) && testRateLimit()) { + logger.warning("Your application is giving Orchid only "+ + "an IP address. Applications that do DNS "+ + "resolves themselves may leak information. "+ + "Consider using Socks4a (e.g. via privoxy or socat) "+ + "instead. For more information please see "+ + "https://wiki.torproject.org/TheOnionRouter/TorFAQ#SOCKSAndDNS"); + } + if(config.getSafeSocks()) { + throw new SocksRequestException("Rejecting unsafe SOCKS request"); + } + } + + protected void setHostname(String name) { + hostname = name; + } + + protected byte[] readPortData() throws SocksRequestException { + final byte[] data = new byte[2]; + readAll(data, 0, 2); + return data; + } + + protected byte[] readIPv4AddressData() throws SocksRequestException { + final byte[] data = new byte[4]; + readAll(data); + return data; + } + + protected byte[] readIPv6AddressData() throws SocksRequestException { + final byte[] data = new byte[16]; + readAll(data); + return data; + } + + protected String readNullTerminatedString() throws SocksRequestException { + try { + final StringBuilder sb = new StringBuilder(); + while(true) { + final int c = socket.getInputStream().read(); + if(c == -1) + throw new SocksRequestException(); + if(c == 0) + return sb.toString(); + char ch = (char) c; + sb.append(ch); + } + } catch (IOException e) { + throw new SocksRequestException(e); + } + } + + protected int readByte() throws SocksRequestException { + try { + final int n = socket.getInputStream().read(); + if(n == -1) + throw new SocksRequestException(); + return n; + } catch (IOException e) { + throw new SocksRequestException(e); + } + } + + protected void readAll(byte[] buffer) throws SocksRequestException { + readAll(buffer, 0, buffer.length); + } + + protected void readAll(byte[] buffer, int offset, int length) throws SocksRequestException { + try { + while(length > 0) { + int n = socket.getInputStream().read(buffer, offset, length); + if(n == -1) + throw new SocksRequestException(); + offset += n; + length -= n; + } + } catch (IOException e) { + throw new SocksRequestException(e); + } + } + + protected void socketWrite(byte[] buffer) throws SocksRequestException { + try { + socket.getOutputStream().write(buffer); + } catch(IOException e) { + throw new SocksRequestException(e); + } + } +} diff --git a/orchid/src/com/subgraph/orchid/socks/SocksRequestException.java b/orchid/src/com/subgraph/orchid/socks/SocksRequestException.java new file mode 100644 index 00000000..789f6b1c --- /dev/null +++ b/orchid/src/com/subgraph/orchid/socks/SocksRequestException.java @@ -0,0 +1,15 @@ +package com.subgraph.orchid.socks; + +public class SocksRequestException extends Exception { + + private static final long serialVersionUID = 844055056337565049L; + + SocksRequestException() {} + SocksRequestException(String msg) { + super(msg); + } + + SocksRequestException(Throwable ex) { + super(ex); + } +} diff --git a/orchid/src/com/subgraph/orchid/socks/SocksStreamConnection.java b/orchid/src/com/subgraph/orchid/socks/SocksStreamConnection.java new file mode 100644 index 
00000000..70c9ea1e --- /dev/null +++ b/orchid/src/com/subgraph/orchid/socks/SocksStreamConnection.java @@ -0,0 +1,138 @@ +package com.subgraph.orchid.socks; + +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.Socket; +import java.util.logging.Logger; + +import com.subgraph.orchid.Stream; + +public class SocksStreamConnection { + private final static Logger logger = Logger.getLogger(SocksStreamConnection.class.getName()); + + public static void runConnection(Socket socket, Stream stream) { + SocksStreamConnection ssc = new SocksStreamConnection(socket, stream); + ssc.run(); + } + private final static int TRANSFER_BUFFER_SIZE = 4096; + private final Stream stream; + private final InputStream torInputStream; + private final OutputStream torOutputStream; + private final Socket socket; + private final Thread incomingThread; + private final Thread outgoingThread; + private final Object lock = new Object(); + private volatile boolean outgoingClosed; + private volatile boolean incomingClosed; + + private SocksStreamConnection(Socket socket, Stream stream) { + this.socket = socket; + this.stream = stream; + torInputStream = stream.getInputStream(); + torOutputStream = stream.getOutputStream(); + + incomingThread = createIncomingThread(); + outgoingThread = createOutgoingThread(); + } + + private void run() { + incomingThread.start(); + outgoingThread.start(); + synchronized(lock) { + while(!(outgoingClosed && incomingClosed)) { + try { + lock.wait(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return; + } + } + + try { + socket.close(); + } catch (IOException e) { + logger.warning("IOException on SOCKS socket close(): "+ e.getMessage()); + } + closeStream(torInputStream); + closeStream(torOutputStream); + } + } + + private Thread createIncomingThread() { + return new Thread(new Runnable() { public void run() { + try { + incomingTransferLoop(); + } catch (IOException e) { + logger.fine("System error on incoming stream IO "+ stream +" : "+ e.getMessage()); + } finally { + synchronized(lock) { + incomingClosed = true; + lock.notifyAll(); + } + } + }}); + } + + private Thread createOutgoingThread() { + return new Thread(new Runnable() { public void run() { + try { + outgoingTransferLoop(); + } catch (IOException e) { + logger.fine("System error on outgoing stream IO "+ stream +" : "+ e.getMessage()); + } finally { + synchronized(lock) { + outgoingClosed = true; + lock.notifyAll(); + } + } + }}); + } + + private void incomingTransferLoop() throws IOException { + final byte[] incomingBuffer = new byte[TRANSFER_BUFFER_SIZE]; + while(true) { + final int n = torInputStream.read(incomingBuffer); + if(n == -1) { + logger.fine("EOF on TOR input stream "+ stream); + socket.shutdownOutput(); + return; + } else if(n > 0) { + logger.fine("Transferring "+ n +" bytes from "+ stream +" to SOCKS socket"); + if(!socket.isOutputShutdown()) { + socket.getOutputStream().write(incomingBuffer, 0, n); + socket.getOutputStream().flush(); + } else { + closeStream(torInputStream); + return; + } + } + } + } + + private void outgoingTransferLoop() throws IOException { + final byte[] outgoingBuffer = new byte[TRANSFER_BUFFER_SIZE]; + while(true) { + stream.waitForSendWindow(); + final int n = socket.getInputStream().read(outgoingBuffer); + if(n == -1) { + torOutputStream.close(); + logger.fine("EOF on SOCKS socket connected to "+ stream); + return; + } else if(n > 0) { + logger.fine("Transferring "+ n +" bytes from 
SOCKS socket to "+ stream); + torOutputStream.write(outgoingBuffer, 0, n); + torOutputStream.flush(); + } + } + } + + private void closeStream(Closeable c) { + try { + c.close(); + } catch (IOException e) { + logger.warning("Close failed on "+ c + " : "+ e.getMessage()); + } + } +} diff --git a/orchid/test/com/subgraph/orchid/TorConfigTest.java b/orchid/test/com/subgraph/orchid/TorConfigTest.java new file mode 100644 index 00000000..e108fd9c --- /dev/null +++ b/orchid/test/com/subgraph/orchid/TorConfigTest.java @@ -0,0 +1,116 @@ +package com.subgraph.orchid; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.junit.Before; +import org.junit.Test; + +import com.subgraph.orchid.circuits.hs.HSDescriptorCookie; +import com.subgraph.orchid.config.TorConfigBridgeLine; +import com.subgraph.orchid.data.HexDigest; +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.encoders.Hex; + +public class TorConfigTest { + + private TorConfig config; + + @Before + public void setup() { + config = Tor.createConfig(); + } + + + @Test + public void testCircuitBuildTimeout() { + final long timeout = config.getCircuitBuildTimeout(); + assertEquals(TimeUnit.MILLISECONDS.convert(60, TimeUnit.SECONDS), timeout); + config.setCircuitBuildTimeout(2, TimeUnit.MINUTES); + assertTrue(config.getCircuitBuildTimeout() > timeout); + } + + @Test + public void testDataDirectory() { + final File dd = config.getDataDirectory(); + assertTrue(dd.getPath().charAt(0) != '~'); + final String testPath = "/foo/dir"; + config.setDataDirectory(new File(testPath)); + assertEquals(new File(testPath), config.getDataDirectory()); + } + + @Test + public void testMaxCircuitsPending() { + assertEquals(32, config.getMaxClientCircuitsPending()); + config.setMaxClientCircuitsPending(23); + assertEquals(23, config.getMaxClientCircuitsPending()); + } + + @Test + public void testEnforceDistinctSubnets() { + assertEquals(true, config.getEnforceDistinctSubnets()); + config.setEnforceDistinctSubnets(false); + assertEquals(false, config.getEnforceDistinctSubnets()); + } + + @Test + public void testCircuitStreamTimeout() { + assertEquals(0, config.getCircuitStreamTimeout()); + config.setCircuitStreamTimeout(30, TimeUnit.SECONDS); + assertEquals(30 * 1000, config.getCircuitStreamTimeout()); + } + + @Test + public void testHidServAuth() { + final String address = "3t43tfluce4qcxbo"; + final String onion = address + ".onion"; + + final String hex = "022b99d1d272285c80f7214bd6c07c27"; + final String descriptor = "AiuZ0dJyKFyA9yFL1sB8Jw"; + + assertNull(config.getHidServAuth(onion)); + + config.addHidServAuth(onion, descriptor); + + HSDescriptorCookie cookie = config.getHidServAuth(onion); + assertNotNull(cookie); + assertEquals(hex, new String(Hex.encode(cookie.getValue()))); + assertSame(cookie, config.getHidServAuth(address)); + } + + @Test + public void testAutoBool() { + assertEquals(TorConfig.AutoBoolValue.AUTO, config.getUseNTorHandshake()); + config.setUseNTorHandshake(TorConfig.AutoBoolValue.TRUE); + assertEquals(TorConfig.AutoBoolValue.TRUE, config.getUseNTorHandshake()); + config.setUseNTorHandshake(TorConfig.AutoBoolValue.AUTO); + assertEquals(TorConfig.AutoBoolValue.AUTO, config.getUseNTorHandshake()); + } + + @Test + public void testBridges() { + final 
IPv4Address a1 = IPv4Address.createFromString("1.2.3.4"); + final IPv4Address a2 = IPv4Address.createFromString("4.4.4.4"); + final HexDigest fp = HexDigest.createFromString("0EA20CAA3CE696E561BC08B15E00106700E8F682"); + config.addBridge(a1, 88); + config.addBridge(a2, 101, fp); + List<TorConfigBridgeLine> bs = config.getBridges(); + assertEquals(2, bs.size()); + final TorConfigBridgeLine b1 = bs.get(0); + final TorConfigBridgeLine b2 = bs.get(1); + + assertEquals(a1, b1.getAddress()); + assertEquals(a2, b2.getAddress()); + assertEquals(88, b1.getPort()); + assertEquals(101, b2.getPort()); + assertNull(b1.getFingerprint()); + assertSame(b2.getFingerprint(), fp); + } +} diff --git a/orchid/test/com/subgraph/orchid/circuits/TorInputStreamTest.java b/orchid/test/com/subgraph/orchid/circuits/TorInputStreamTest.java new file mode 100644 index 00000000..ff30e457 --- /dev/null +++ b/orchid/test/com/subgraph/orchid/circuits/TorInputStreamTest.java @@ -0,0 +1,184 @@ +package com.subgraph.orchid.circuits; + +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.expectLastCall; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.*; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.subgraph.orchid.RelayCell; +import com.subgraph.orchid.Stream; +import com.subgraph.orchid.circuits.TorInputStream; + +public class TorInputStreamTest { + + private TorInputStream inputStream; + private Stream mockStream; + + @Before + public void before() { + mockStream = createMock("mockStream", Stream.class); + mockStream.close(); + replay(mockStream); + inputStream = new TorInputStream(mockStream); + } + + @After + public void after() { + inputStream.close(); + verify(mockStream); + } + + private static RelayCell createDataCell(byte[] data) { + final RelayCell cell = createMock("dataCell", RelayCell.class); + expect(cell.cellBytesRemaining()).andReturn(data.length); + expectLastCall().times(2); + expect(cell.getRelayCommand()).andReturn(RelayCell.RELAY_DATA); + expect(cell.getPayloadBuffer()).andReturn(ByteBuffer.wrap(data)); + replay(cell); + return cell; + } + + private static RelayCell createEndCell() { + final RelayCell cell = createMock("endCell", RelayCell.class); + expect(cell.getRelayCommand()).andReturn(RelayCell.RELAY_END); + replay(cell); + return cell; + } + + private void sendData(int... 
data) { + byte[] bytes = new byte[data.length]; + for(int i = 0; i < data.length; i++) { + bytes[i] = (byte) data[i]; + } + inputStream.addInputCell(createDataCell(bytes)); + } + + private void sendEnd() { + inputStream.addEndCell(createEndCell()); + } + + @Test + public void testAvailable() throws IOException { + assertEquals(0, inputStream.available()); + sendData(1,2,3); + assertEquals(3, inputStream.available()); + assertEquals(1, inputStream.read()); + assertEquals(2, inputStream.available()); + sendData(4,5); + assertEquals(4, inputStream.available()); + } + + @Test(timeout=100) + public void testRead() throws IOException { + sendData(1,2,3); + sendData(4); + sendData(5); + assertEquals(1, inputStream.read()); + assertEquals(2, inputStream.read()); + sendEnd(); + assertEquals(3, inputStream.read()); + assertEquals(4, inputStream.read()); + assertEquals(5, inputStream.read()); + assertEquals(-1, inputStream.read()); + } + + + private void setupTestClose() throws IOException { + sendData(1,2,3,4,5,6); + sendEnd(); + + assertEquals(1, inputStream.read()); + assertEquals(2, inputStream.read()); + + inputStream.close(); + } + + @Test(expected=IOException.class, timeout=100) + public void testClose1() throws IOException { + setupTestClose(); + /* throws IOException("Input stream closed") */ + inputStream.read(); + } + + @Test(expected=IOException.class, timeout=100) + public void testClose2() throws IOException { + setupTestClose(); + /* throws IOException("Input stream closed") */ + inputStream.read(new byte[2]); + } + + @Test(timeout=100) + public void testReadBuffer() throws IOException { + final byte[] buffer = new byte[3]; + + sendData(1,2,3); + sendData(4,5,6); + + + /* read two bytes at offset 1 */ + assertEquals(2, inputStream.read(buffer, 1, 2)); + assertArrayEquals(new byte[] {0, 1, 2}, buffer); + + /* read entire buffer (3 bytes) */ + assertEquals(3, inputStream.read(buffer)); + assertArrayEquals(new byte[] {3, 4, 5 }, buffer); + + /* reset buffer to {0,0,0}, read entire buffer */ + Arrays.fill(buffer, (byte)0); + assertEquals(1, inputStream.read(buffer)); + assertArrayEquals(new byte[] { 6, 0, 0 }, buffer); + + sendEnd(); + /* read entire buffer at EOF */ + assertEquals(-1, inputStream.read(buffer)); + } + + private boolean doesNullBufferThrowException() throws IOException { + try { + inputStream.read(null); + return false; + } catch(NullPointerException e) { + return true; + } + } + + private boolean throwsOOBException(byte[] b, int off, int len) throws IOException { + try { + inputStream.read(b, off, len); + return false; + } catch (IndexOutOfBoundsException e) { + return true; + } + } + + private void testOOB(String message, int bufferLength, int off, int len) throws IOException { + final byte[] buffer = new byte[bufferLength]; + assertTrue(message, throwsOOBException(buffer, off, len)); + } + + @Test(timeout=100) + public void testBadReadArguments() throws IOException { + final byte[] buffer = new byte[16]; + sendData(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); + sendEnd(); + + assertTrue("Null buffer must throw NPE", doesNullBufferThrowException()); + assertFalse("(offset + len) == b.length must not throw OOB", throwsOOBException(buffer, 8, 8)); + + testOOB("Negative offset must throw OOB", 16, -2, 4); + testOOB("Negative length must throw OOB", 16, 0, -10); + testOOB("off >= b.length must throw OOB", 16, 16, 10); + testOOB("(off + len) > b.length must throw OOB", 16, 8, 9); + testOOB("(off + len) < 0 must throw OOB", 16, Integer.MAX_VALUE, 10); + } +} diff --git 
a/orchid/test/com/subgraph/orchid/circuits/hs/HSDescriptorParserTest.java b/orchid/test/com/subgraph/orchid/circuits/hs/HSDescriptorParserTest.java new file mode 100644 index 00000000..1ec51d14 --- /dev/null +++ b/orchid/test/com/subgraph/orchid/circuits/hs/HSDescriptorParserTest.java @@ -0,0 +1,166 @@ +package com.subgraph.orchid.circuits.hs; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.nio.ByteBuffer; +import java.util.List; + +import org.junit.Test; + +import com.subgraph.orchid.Tor; +import com.subgraph.orchid.directory.DocumentFieldParserImpl; +import com.subgraph.orchid.directory.parsing.DocumentFieldParser; +import com.subgraph.orchid.directory.parsing.DocumentParsingResult; + +public class HSDescriptorParserTest { + private final static String TEST_DESCRIPTOR = + "rendezvous-service-descriptor apue4vh2fduecfztrrwczoo7cprlki4s\n"+ + "version 2\n"+ + "permanent-key\n"+ + "-----BEGIN RSA PUBLIC KEY-----\n"+ + "MIGJAoGBAMNTmy7L/isS+XTkCf1B1aik0ApE9sxcNpLwNR2JOZyy5puEGPuVY1FW\n"+ + "nw+CnMmTWXchTTRfboFmIv4F3i8ZTLHdWJ7wqRGyc0aabvkDZBSRWVHby3oDf/uQ\n"+ + "abtrJxXzYjy/dP29v5bLkb7a2zaAeP1ojX8ZwpxgJ9BCI+2fvBArAgMBAAE=\n"+ + "-----END RSA PUBLIC KEY-----\n"+ + "secret-id-part xaib3au35yqklp5txmncxbi2uic6jqor\n"+ + "publication-time 2013-07-07 23:20:40\n"+ + "protocol-versions 2,3\n"+ + "introduction-points\n"+ + "-----BEGIN MESSAGE-----\n"+ + "aW50cm9kdWN0aW9uLXBvaW50IGpla2tubHY0dWh2cGNoajVpcnZtd3I0Ym5rb2Ry\n"+ + "N3ZkCmlwLWFkZHJlc3MgMTczLjI1NS4yNDkuMjIyCm9uaW9uLXBvcnQgNDQzCm9u\n"+ + "aW9uLWtleQotLS0tLUJFR0lOIFJTQSBQVUJMSUMgS0VZLS0tLS0KTUlHSkFvR0JB\n"+ + "TG9OeXdIeW1QRGo2c2NvdUsvbGJZR01MRllGRGxDOVJyN2Jjc0MxQW12MWp0MjBH\n"+ + "WlBOdGFHMgorbjdDdHhMK3JWM3g5eFRQSDZBWUlDQmxycnA3TngzRlJQMWorQ3JI\n"+ + "WWk3WkNrTWhDUmg3NXNadmhIV01GT3liClM1QUUyWlhCMTA4cUVucGJnSFdrWmFX\n"+ + "SXdZZXdGZUZxdU5JV3ZjYVgxTU1lc3BONTJ2c0JBZ01CQUFFPQotLS0tLUVORCBS\n"+ + "U0EgUFVCTElDIEtFWS0tLS0tCnNlcnZpY2Uta2V5Ci0tLS0tQkVHSU4gUlNBIFBV\n"+ + "QkxJQyBLRVktLS0tLQpNSUdKQW9HQkFMZDJqYVk0a3oydVBlS05MRnBVMW80MUFV\n"+ + "UmpiQW42bWdzWGtFNm15TTFhcDczS09FUGFQaUFwCmpib1pZSFdCV29QVVZFUFhu\n"+ + "ZE9XcU92ZmFEVGJsbndGU1F1NU54VWVPVkNELzdOYnd6Y0l0c2ZkQ1RBMzVzcHIK\n"+ + "ZGFUK3ZwWmFRWGgxbWt0NlFMN1dKeHZLaXI2bDFuMitwdXFwZ201ZUJhSXRCOEt4\n"+ + "cnVLaEFnTUJBQUU9Ci0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0KaW50cm9k\n"+ + "dWN0aW9uLXBvaW50IDcyb3R1Mnl4ZXJoNGZocHAybjNpMnBwNmo3Z2ZrNWZzCmlw\n"+ + "LWFkZHJlc3MgMTk4LjI3LjY0LjI5Cm9uaW9uLXBvcnQgNDQzCm9uaW9uLWtleQot\n"+ + "LS0tLUJFR0lOIFJTQSBQVUJMSUMgS0VZLS0tLS0KTUlHSkFvR0JBT0pLc2UzQmdv\n"+ + "TzhKdytFMURHUXhVbTV6UGQwcjFscHl3U25IamFKb2ZIbitDaUdSTHRnS2JNNQpN\n"+ + "R01UUnRhNVZKWTRUNjFpUFdmN05Ma0FiVnZuSllMcXVHZjdScnh4MCtnNm5jdTVG\n"+ + "blJRMTQwOVkwVXRpNDFmCmVMeGI2YWJlMkorQTRLN2ZGdkMzVjVBSnhtZDIrV2xt\n"+ + "dFhQbGV4aHlYN21SWGhBVzZ1UXpBZ01CQUFFPQotLS0tLUVORCBSU0EgUFVCTElD\n"+ + "IEtFWS0tLS0tCnNlcnZpY2Uta2V5Ci0tLS0tQkVHSU4gUlNBIFBVQkxJQyBLRVkt\n"+ + "LS0tLQpNSUdKQW9HQkFOa0I3eTVaN2FhVUs3R3ZTUWdKVHl6aU43anhlNXlvcEpU\n"+ + "LzBIRURCSGN3cVBqMkdZMytTZ2VJCjRpUWFCRG1SL1V0Y3FuU1JLaGNyMFBSRFBy\n"+ + "T2wxa3lSRmhLWTdqNWttSGRiMko3aEZ2eER1emRTNE43RWdCVVQKbHEvaFdZa2po\n"+ + "QzVDck9uVTNIY1h5Q3RlU1p0bk5qRlkxVHJnSUx4Z1NwcXA0SU5ZZ1NpcEFnTUJB\n"+ + "QUU9Ci0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0KaW50cm9kdWN0aW9uLXBv\n"+ + "aW50IHNzNTR1amRlcWJ6a3RzbnBibzJwZXV5eDJpem1wbmZhCmlwLWFkZHJlc3Mg\n"+ + "MzcuMTU3LjE5NS40OApvbmlvbi1wb3J0IDkwMDEKb25pb24ta2V5Ci0tLS0tQkVH\n"+ + "SU4gUlNBIFBVQkxJQyBLRVktLS0tLQpNSUdKQW9HQkFKaUZhRE05Nm1acWQ0QTRj\n"+ + 
"L3lkallpQjRXbGx5b0J5NGt2WXhYZUNnRHA4VGpNVmFzcUFRQUYzCjE5UnJUL01v\n"+ + "TzE4SHVnWjMrUkticFptK2xLeHZlRkpIdGpmTUZQL0NEbDVFOUZ3VFcrdEVUdXMy\n"+ + "RmVYcHJrVGMKbDc3YjIzSkpYd0FtQ3lYMFgyQWNUVVBoVkg4YXdGZU43T0xkRnk0\n"+ + "ZzF3UjZhVzA1SFpIRkFnTUJBQUU9Ci0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0t\n"+ + "LS0Kc2VydmljZS1rZXkKLS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1J\n"+ + "R0pBb0dCQUtSTVVIUTdWaWxRZ1l6c3ZJVEJuZko1QXVSOUNZQWc5eGFUQzNmVWZ1\n"+ + "T21udkZlNHZzZTFJeWsKUkNRSzZHOTVPbGxOd3B5akU4WXRCSlIxUlZLUFBqcHNI\n"+ + "YUxJTmszVmROM3Z5NWxlL0VVQXY2c0dEUnRZODNDLwo3MFhGN1h6YjFGUVBNcm5a\n"+ + "d1o2N3A4Wkc2KzNub1JSUFZLbFJzQU9wa2YwMWJOb3ZpR09iQWdNQkFBRT0KLS0t\n"+ + "LS1FTkQgUlNBIFBVQkxJQyBLRVktLS0tLQoK\n"+ + "-----END MESSAGE-----\n"+ + "signature\n"+ + "-----BEGIN SIGNATURE-----\n"+ + "p1yxzPiIWpS0m+MTQW9LdJmgiOgaUTbwTz9GyoInPi5lC/WvX8/AnccsLoOUWjKs\n"+ + "3q8xV/8Gtz3qyigsWSggFuXyc3mRGM28tpdCNNkFovKAQgiZ0KjLky9BaQPEFOpr\n"+ + "v4Yo65ZbYvujPyc9xpqbtPNRf7LBe6GaqHvzP4kWqr8=\n"+ + "-----END SIGNATURE-----\n"; + + private final static String[] TEST_IPS = { + "introduction-point mjxsa2bywdvbft6kltuqfwwyru4ggo7o\n" + + "ip-address 86.59.119.82\n" + + "onion-port 443\n" + + "onion-key\n" + + "-----BEGIN RSA PUBLIC KEY-----\n" + + "MIGJAoGBAKIXLeVl4ut60oNnXeZtxJk7DMKFmklF/zeD+TqB1oW/QALt5wMVmO8u\n" + + "RBK7BfSxXG6IWQ0O5vBVSM25qss7+Nv/brS61VcB7IZKDaEd4n3f6Tlu4G8vxjNm\n" + + "zX0S1iYLqMOY1vcvuBIN2T43khkO5uyKjgF7EkAXLaH6hJgMSW9bAgMBAAE=\n" + + "-----END RSA PUBLIC KEY-----\n" + + "service-key\n" + + "-----BEGIN RSA PUBLIC KEY-----\n" + + "MIGJAoGBAL3FbGOkQ8cjlB70Fy1gv4178MwdNZrBPXwySubW206S0WILGePcXrZX\n" + + "4yVCNb4V4i4l9XisSAzyYS2D3CSAtYkinnSlafV3tCvt+QCKeGgtALT42oLt5UOn\n" + + "v494xZHVYKCiAwBScjqi7f+/BeclDPqBnm9af8p+cIkeCNrLt0WRAgMBAAE=\n" + + "-----END RSA PUBLIC KEY-----\n" + + "introduction-point 3ju2px3yec7ylznlwr2fyflabz5nq5kq\n" + + "ip-address 209.236.66.136\n" + + "onion-port 443\n" + + "onion-key\n" + + "-----BEGIN RSA PUBLIC KEY-----\n" + + "MIGJAoGBAKQLOS9Z5oKUE3EkYgXf5M086S/iJ6YzPB8wPsPRNCNgnGDFYXCLHtw8\n" + + "9mfm3jEG7/I5ab3+9hShMfls3uk0kIuOvD7b2VxNpsf5+z7RhZIpkCdby7etR3VL\n" + + "RlQO41EIujAfoVFKMk0WmmtpMp7FzPZc8pg3jAfvkwN/wkCeONcBAgMBAAE=\n" + + "-----END RSA PUBLIC KEY-----\n" + + "service-key\n" + + "-----BEGIN RSA PUBLIC KEY-----\n" + + "MIGJAoGBALGabFwhhBa5P8br8SScwAK7qJIJlirf95pKASeY4phORZaZFo9qOy7B\n" + + "qcIHQNGt3XIbW3MGMvOgIBklus97Bti8KDSTapWvmL4G3uF/XUoP8aPxUO56F+Gv\n" + + "RqDQEuf/sk6MbMLPLipG7xWLnn5wYzwsCxutcv2RJdA4mCDcQJYlAgMBAAE=\n" + + "-----END RSA PUBLIC KEY-----\n", + + "introduction-point f6b7o3f7hh7eudpc4cjocmew6kmnacsy\n" + + "ip-address 37.157.192.150\n" + + "onion-port 443\n" + + "onion-key\n" + + "-----BEGIN RSA PUBLIC KEY-----\n" + + "MIGJAoGBALoaGfBx/MWM4yVrYO4jxKiVfyTVtvgXlk523ifA2beO6yfeDVKR+4u0\n" + + "S/ABa9/kdQFXw4s9Ahz6vI0imdMPyUgYTXp+mP7pa45xp2uLi8kPgZLYzsJZc1Lm\n" + + "pyS5CA4Fzq7jblR3R7rGJyRBm1h8Pa8p9xE3RI6oRJnjAoCW+3LBAgMBAAE=\n" + + "-----END RSA PUBLIC KEY-----\n" + + "service-key\n" + + "-----BEGIN RSA PUBLIC KEY-----\n" + + "MIGJAoGBAK4GlIJ95emUzWG3zfWGemJbR7UZU+Ufysrgn8VZh2oH01jvTXj14qwD\n" + + "8PxI5R8CDlgfzCMMsUwp4tDZHd1IQZSyxRtonprq+j1ACDYm1hvYzwB1kjwlbp5g\n" + + "OYl2PtveH5zu2pkvCjknZxW8TCKry5jL8RqY23zLwe+AZWU9BZJdAgMBAAE=\n" + + "-----END RSA PUBLIC KEY-----\n"}; + + + @Test + public void testDescriptorParser() { + final HSDescriptorParser parser = createDescriptorParserFromString(TEST_DESCRIPTOR); + DocumentParsingResult result = parser.parse(); + assertTrue(result.isOkay()); + HSDescriptor descriptor = result.getDocument(); + List ips = 
descriptor.getIntroductionPoints(); + assertEquals(3, ips.size()); + for(IntroductionPoint ip: ips) { + assertTrue(ip.isValidDocument()); + } + } + + @Test + public void testIntroductionPointParser() { + final IntroductionPointParser parser = createIntroductionPointParserFromString(TEST_IPS[0]); + DocumentParsingResult<IntroductionPoint> result = parser.parse(); + assertTrue(result.isOkay()); + final List<IntroductionPoint> ips = result.getParsedDocuments(); + assertEquals(2, ips.size()); + for(IntroductionPoint ip: result.getParsedDocuments()) { + assertTrue(ip.isValidDocument()); + } + + } + + + private HSDescriptorParser createDescriptorParserFromString(String s) { + return new HSDescriptorParser(null, createFieldParser(s)); + } + + private IntroductionPointParser createIntroductionPointParserFromString(String s) { + return new IntroductionPointParser(createFieldParser(s)); + } + + private DocumentFieldParser createFieldParser(String s) { + ByteBuffer buffer = ByteBuffer.wrap(s.getBytes(Tor.getDefaultCharset())); + return new DocumentFieldParserImpl(buffer); + } +} diff --git a/orchid/test/com/subgraph/orchid/circuits/path/ConfigNodeFilterTest.java b/orchid/test/com/subgraph/orchid/circuits/path/ConfigNodeFilterTest.java new file mode 100644 index 00000000..5ccc589a --- /dev/null +++ b/orchid/test/com/subgraph/orchid/circuits/path/ConfigNodeFilterTest.java @@ -0,0 +1,73 @@ +package com.subgraph.orchid.circuits.path; + +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; +import java.util.List; + +import org.junit.Test; + +import com.subgraph.orchid.Router; +import com.subgraph.orchid.circuits.path.ConfigNodeFilter; +import com.subgraph.orchid.circuits.path.RouterFilter; +import com.subgraph.orchid.data.IPv4Address; + +public class ConfigNodeFilterTest { + + @Test + public void testIsAddressString() { + final List<String> validStrings = Arrays.asList( + "1.2.3.4/16", + "0.0.0.0/1", + "255.0.255.0/16"); + + final List<String> invalidStrings = Arrays.asList( + "1.2.3.256/16", + "1.2.3.4/61", + "1.2.3.4/0", + "1.2.3.4/22x", + "1.2.3.4/", + "1.2.3.4"); + + for(String s: validStrings) { + assertTrue(s, ConfigNodeFilter.isAddressString(s)); + } + for(String s: invalidStrings) { + assertFalse(s, ConfigNodeFilter.isAddressString(s)); + } + + } + + @Test + public void testIsCountryCode() { + final List<String> validStrings = Arrays.asList("{CC}", "{xx}"); + final List<String> invalidStrings = Arrays.asList("US", "{xxx}"); + for(String s: validStrings) { assertTrue(s, ConfigNodeFilter.isCountryCodeString(s)); } + for(String s: invalidStrings) { assertFalse(s, ConfigNodeFilter.isCountryCodeString(s)); } + } + + private Router createRouterMockWithAddress(String ip) { + final IPv4Address address = IPv4Address.createFromString(ip); + final Router router = createMock("mockRouter", Router.class); + expect(router.getAddress()).andReturn(address); + replay(router); + return router; + } + + @Test + public void testMaskFilter() { + final Router r1 = createRouterMockWithAddress("1.2.3.4"); + final Router r2 = createRouterMockWithAddress("1.7.3.4"); + final RouterFilter f = ConfigNodeFilter.createFilterFor("1.2.3.0/16"); + assertTrue(f.filter(r1)); + assertFalse(f.filter(r2)); + verify(r1, r2); + } + + +} diff --git a/orchid/test/com/subgraph/orchid/crypto/ASN1ParserTest.java 
b/orchid/test/com/subgraph/orchid/crypto/ASN1ParserTest.java new file mode 100644 index 00000000..38a5bae8 --- /dev/null +++ b/orchid/test/com/subgraph/orchid/crypto/ASN1ParserTest.java @@ -0,0 +1,65 @@ +package com.subgraph.orchid.crypto; + +import static org.junit.Assert.*; + +import java.math.BigInteger; +import java.nio.ByteBuffer; + +import org.junit.Before; +import org.junit.Test; + +import com.subgraph.orchid.crypto.ASN1Parser.ASN1BitString; +import com.subgraph.orchid.crypto.ASN1Parser.ASN1Integer; +import com.subgraph.orchid.encoders.Hex; + +public class ASN1ParserTest { + + private ASN1Parser parser; + + @Before + public void setup() { + parser = new ASN1Parser(); + } + + ByteBuffer createBuffer(String hexData) { + final byte[] bs = Hex.decode(hexData); + return ByteBuffer.wrap(bs); + } + + + + @Test + public void testParseASN1Length() { + assertEquals(20, parser.parseASN1Length(createBuffer("14000000"))); + assertEquals(23, parser.parseASN1Length(createBuffer("81170000"))); + assertEquals(256, parser.parseASN1Length(createBuffer("82010000"))); + assertEquals(65535, parser.parseASN1Length(createBuffer("82FFFF00"))); + } + + @Test(expected=IllegalArgumentException.class) + public void testParseASN1LengthException() { + parser.parseASN1Length(createBuffer("80ACDCACDC")); + } + + @Test(expected=IllegalArgumentException.class) + public void testParseASN1LengthException2() { + parser.parseASN1Length(createBuffer("88ABCDABCD")); + } + + @Test + public void testParseASN1Integer() { + ASN1Integer asn1Integer = parser.parseASN1Integer(createBuffer("01020304")); + assertEquals(new BigInteger("01020304", 16), asn1Integer.getValue()); + } + + @Test + public void testParseASN1BitString() { + ASN1BitString bitString = parser.parseASN1BitString(createBuffer("0001020304")); + assertArrayEquals(new byte[] {1, 2, 3, 4}, bitString.getBytes()); + } + + @Test(expected=IllegalArgumentException.class) + public void testParseASN1BitStringException() { + parser.parseASN1BitString(createBuffer("01020304")); + } +} diff --git a/orchid/test/com/subgraph/orchid/crypto/RSAKeyEncoderTest.java b/orchid/test/com/subgraph/orchid/crypto/RSAKeyEncoderTest.java new file mode 100644 index 00000000..ad09ba01 --- /dev/null +++ b/orchid/test/com/subgraph/orchid/crypto/RSAKeyEncoderTest.java @@ -0,0 +1,53 @@ +package com.subgraph.orchid.crypto; + +import static org.junit.Assert.assertEquals; + +import java.math.BigInteger; +import java.security.GeneralSecurityException; +import java.security.InvalidKeyException; +import java.security.interfaces.RSAPublicKey; + +import org.junit.Before; +import org.junit.Test; + +public class RSAKeyEncoderTest { + + private RSAKeyEncoder encoder; + + final static String PEM_ENCODED_PUBKEY = + + "-----BEGIN RSA PUBLIC KEY-----\n"+ + "MIGJAoGBAMuf0v+d3HUNk5jbYJuZA+q30NlqFStNBmB/BA4y6h9DTpJ2ULhdy6I8\n"+ + "5tLq76TSTbGl2wiWpDjW73OkAfpbUyb+2fIFz4Ildth18ZA4dqNvnYNCnckO1p+B\n"+ + "x6e+8YoafedZhXsv1Z9RMl6WK6WGXpmgCSTTlLnXlrsJLrG/mW9dAgMBAAE=\n"+ + "-----END RSA PUBLIC KEY-----\n"; + + final static String MODULUS_STRING = + + "142989855534119842624281223201112183062179043858844190077277374317180853428"+ + "067855510754484639210124041049484315690046733530717435491654607786952431473"+ + "291786675652833142146809594339105386135143284841697658385761023403765912288"+ + "684940376854709443039663769117423844056151668935507268155717373127166136614"+ + "724923229"; + + final static BigInteger MODULUS = new BigInteger(MODULUS_STRING); + final static BigInteger EXPONENT = BigInteger.valueOf(65537); + + 
@Before + public void setup() { + encoder = new RSAKeyEncoder(); + } + + @Test + public void testParsePEMPublicKey() throws GeneralSecurityException { + final RSAPublicKey publicKey = encoder.parsePEMPublicKey(PEM_ENCODED_PUBKEY); + assertEquals(MODULUS, publicKey.getModulus()); + assertEquals(EXPONENT, publicKey.getPublicExponent()); + } + + @Test(expected=InvalidKeyException.class) + public void testParsePEMPublicKeyException() throws GeneralSecurityException { + encoder.parsePEMPublicKey(PEM_ENCODED_PUBKEY.substring(1)); + } + +} diff --git a/orchid/test/com/subgraph/orchid/geoip/CountryCodeServiceTest.java b/orchid/test/com/subgraph/orchid/geoip/CountryCodeServiceTest.java new file mode 100644 index 00000000..b90005fd --- /dev/null +++ b/orchid/test/com/subgraph/orchid/geoip/CountryCodeServiceTest.java @@ -0,0 +1,37 @@ +package com.subgraph.orchid.geoip; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; + +import org.junit.Before; +import org.junit.Test; + +import com.subgraph.orchid.data.IPv4Address; +import com.subgraph.orchid.geoip.CountryCodeService; + +public class CountryCodeServiceTest { + + private CountryCodeService ccs; + + @Before + public void before() { + ccs = CountryCodeService.getInstance(); + } + + @Test + public void test() throws IOException { + testAddress("FR", "217.70.184.1"); // www.gandi.net + testAddress("DE", "213.165.65.50"); // www.gmx.de + testAddress("AR", "200.42.136.212"); // www.clarin.com + testAddress("GB", "77.91.248.30"); // www.guardian.co.uk + testAddress("CA", "132.216.177.160"); // www.mcgill.ca + testAddress("US", "38.229.72.14"); // www.torproject.net + } + + private void testAddress(String expectedCC, String address) { + IPv4Address a = IPv4Address.createFromString(address); + String cc = ccs.getCountryCodeForAddress(a); + assertEquals("Country Code lookup for "+ address, expectedCC, cc); + } +} diff --git a/pom.xml b/pom.xml index b6fa9ec8..cfbf765f 100644 --- a/pom.xml +++ b/pom.xml @@ -11,6 +11,7 @@ core examples tools + orchid diff --git a/tools/src/main/java/com/google/bitcoin/tools/WalletTool.java b/tools/src/main/java/com/google/bitcoin/tools/WalletTool.java index f9ce9062..ad7d7909 100644 --- a/tools/src/main/java/com/google/bitcoin/tools/WalletTool.java +++ b/tools/src/main/java/com/google/bitcoin/tools/WalletTool.java @@ -19,8 +19,10 @@ package com.google.bitcoin.tools; import com.google.bitcoin.core.*; import com.google.bitcoin.crypto.KeyCrypterException; +import com.google.bitcoin.net.BlockingClientManager; import com.google.bitcoin.net.discovery.DnsDiscovery; import com.google.bitcoin.net.discovery.PeerDiscovery; +import com.google.bitcoin.net.discovery.TorDiscovery; import com.google.bitcoin.params.MainNetParams; import com.google.bitcoin.params.RegTestParams; import com.google.bitcoin.params.TestNet3Params; @@ -34,6 +36,8 @@ import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.io.Resources; import com.google.common.util.concurrent.ListenableFuture; +import com.subgraph.orchid.TorClient; + import joptsimple.OptionParser; import joptsimple.OptionSet; import joptsimple.OptionSpec; @@ -80,6 +84,7 @@ public class WalletTool { private static ValidationMode mode; private static String password; private static org.bitcoin.protocols.payments.Protos.PaymentRequest paymentRequest; + private static TorClient torClient; public static class Condition { public enum Type { @@ -209,6 +214,7 @@ public class WalletTool { OptionSpec passwordFlag = 
parser.accepts("password").withRequiredArg(); OptionSpec<String> paymentRequestLocation = parser.accepts("payment-request").withRequiredArg(); parser.accepts("no-pki"); + parser.accepts("tor"); options = parser.parse(args); final String HELP_TEXT = Resources.toString(WalletTool.class.getResource("wallet-tool-help.txt"), Charsets.UTF_8); @@ -688,7 +694,13 @@ public class WalletTool { } // This will ensure the wallet is saved when it changes. wallet.autosaveToFile(walletFile, 200, TimeUnit.MILLISECONDS, null); - peers = new PeerGroup(params, chain); + if (options.has("tor")) { + torClient = new TorClient(); + torClient.start(); + peers = new PeerGroup(params, chain, new BlockingClientManager(torClient.getSocketFactory())); + } else { + peers = new PeerGroup(params, chain); + } peers.setUserAgent("WalletTool", "1.0"); peers.addWallet(wallet); if (options.has("peers")) { @@ -702,6 +714,8 @@ public class WalletTool { System.exit(1); } } + } else if (options.has("tor")) { + peers.addPeerDiscovery(new TorDiscovery(params, torClient)); } else { if (params == RegTestParams.get()) { log.info("Assuming regtest node on localhost");
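The WalletTool hunk above doubles as the recipe for any bitcoinj application that wants to run over Tor with this patch: start an Orchid TorClient, build the PeerGroup on a BlockingClientManager driven by the client's SocketFactory, and register TorDiscovery so peer lookup happens through Tor exit nodes rather than local DNS. The standalone sketch below mirrors that wiring. Only the PeerGroup/BlockingClientManager/TorDiscovery/TorClient.start()/getSocketFactory() calls are taken from the patch itself; the class name, the Wallet/BlockChain/MemoryBlockStore setup, startAndWait()/stopAndWait()/downloadBlockChain() and TorClient.stop() are assumptions based on the bitcoinj and Orchid APIs of this era, not part of the change.

// Illustrative sketch only; mirrors the --tor branch added to WalletTool above.
import com.google.bitcoin.core.BlockChain;
import com.google.bitcoin.core.NetworkParameters;
import com.google.bitcoin.core.PeerGroup;
import com.google.bitcoin.core.Wallet;
import com.google.bitcoin.net.BlockingClientManager;
import com.google.bitcoin.net.discovery.TorDiscovery;
import com.google.bitcoin.params.MainNetParams;
import com.google.bitcoin.store.BlockStoreException;
import com.google.bitcoin.store.MemoryBlockStore;
import com.subgraph.orchid.TorClient;

public class TorPeerGroupSketch {
    public static void main(String[] args) throws BlockStoreException {
        NetworkParameters params = MainNetParams.get();
        Wallet wallet = new Wallet(params);
        BlockChain chain = new BlockChain(params, wallet, new MemoryBlockStore(params));

        // Start the in-process Orchid client; its SocketFactory opens sockets that
        // are tunnelled through Tor circuits instead of connecting directly.
        TorClient torClient = new TorClient();
        torClient.start();

        // Route all P2P connections through Tor and discover peers via Tor exits.
        PeerGroup peers = new PeerGroup(params, chain,
                new BlockingClientManager(torClient.getSocketFactory()));
        peers.addWallet(wallet);
        peers.addPeerDiscovery(new TorDiscovery(params, torClient));

        peers.startAndWait();        // Guava Service API assumed for this bitcoinj release
        peers.downloadBlockChain();
        peers.stopAndWait();
        torClient.stop();            // assumed Orchid shutdown call, mirroring start()
    }
}

Note that this sketch, like the patched WalletTool, constructs the PeerGroup immediately after TorClient.start(); depending on how long Orchid takes to bootstrap its first circuits, an application may prefer to wait for the client to become ready before opening connections.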